672 files changed, 51440 insertions, 9734 deletions
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 0f6a3edcd44b..49267ea97568 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -127,14 +127,11 @@
!Finclude/net/cfg80211.h cfg80211_ibss_params
!Finclude/net/cfg80211.h cfg80211_connect_params
!Finclude/net/cfg80211.h cfg80211_pmksa
-!Finclude/net/cfg80211.h cfg80211_send_rx_auth
-!Finclude/net/cfg80211.h cfg80211_send_auth_timeout
-!Finclude/net/cfg80211.h cfg80211_send_rx_assoc
-!Finclude/net/cfg80211.h cfg80211_send_assoc_timeout
-!Finclude/net/cfg80211.h cfg80211_send_deauth
-!Finclude/net/cfg80211.h __cfg80211_send_deauth
-!Finclude/net/cfg80211.h cfg80211_send_disassoc
-!Finclude/net/cfg80211.h __cfg80211_send_disassoc
+!Finclude/net/cfg80211.h cfg80211_rx_mlme_mgmt
+!Finclude/net/cfg80211.h cfg80211_auth_timeout
+!Finclude/net/cfg80211.h cfg80211_rx_assoc_resp
+!Finclude/net/cfg80211.h cfg80211_assoc_timeout
+!Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt
!Finclude/net/cfg80211.h cfg80211_ibss_joined
!Finclude/net/cfg80211.h cfg80211_connect_result
!Finclude/net/cfg80211.h cfg80211_roamed
diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt
new file mode 100644
index 000000000000..b35a8d04f8b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/ralink.txt
@@ -0,0 +1,17 @@
+Ralink MIPS SoC device tree bindings
+
+1. SoCs
+
+Each device tree must specify a compatible value for the Ralink SoC
+it uses in the compatible property of the root node. The compatible
+value must be one of the following values:
+
+ ralink,rt2880-soc
+ ralink,rt3050-soc
+ ralink,rt3052-soc
+ ralink,rt3350-soc
+ ralink,rt3352-soc
+ ralink,rt3883-soc
+ ralink,rt5350-soc
+ ralink,mt7620a-soc
+ ralink,mt7620n-soc
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4d1919bf2332..6931c4348d24 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -42,6 +42,7 @@ onnn ON Semiconductor Corp.
picochip Picochip Ltd
powervr PowerVR (deprecated, use img)
qcom Qualcomm, Inc.
+ralink Mediatek/Ralink Technology Corp.
ramtron Ramtron International
realtek Realtek Semiconductor Corp.
renesas Renesas Electronics Corporation
diff --git a/MAINTAINERS b/MAINTAINERS
index 3d7782b9f90d..e5069c223391 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2299,6 +2299,11 @@ M: Jaya Kumar <jayakumar.alsa@gmail.com>
S: Maintained
F: sound/pci/cs5535audio/

+CW1200 WLAN driver
+M: Solomon Peachy <pizza@shaftnet.org>
+S: Maintained
+F: drivers/net/wireless/cw1200/
+
CX18 VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers)
@@ -1,7 +1,7 @@
VERSION = 3
-PATCHLEVEL = 9
+PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Unicycling Gorilla

# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 491ae7923b10..5917099470ea 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -182,6 +182,10 @@ config ARC_CACHE_PAGES
Note that Global I/D ENABLE + Per Page DISABLE works but corollary
Global DISABLE + Per Page ENABLE won't work

+config ARC_CACHE_VIPT_ALIASING
+ bool "Support VIPT Aliasing D$"
+ default n
+
endif #ARC_CACHE

config ARC_HAS_ICCM
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 48af742f8b5a..d8dd660898b9 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -32,7 +32,6 @@ generic-y += resource.h
generic-y += scatterlist.h
generic-y += sembuf.h
generic-y += shmbuf.h
-generic-y += shmparam.h
generic-y += siginfo.h
generic-y += socket.h
generic-y += sockios.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 6632273861fd..d5555fe4742a 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -55,9 +55,6 @@
: "r"(data), "r"(ptr)); \
})
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/*
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index ee1f6eae82d2..9f841af41092 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -19,6 +19,7 @@
#define _ASM_CACHEFLUSH_H
#include <linux/mm.h>
+#include <asm/shmparam.h>
/*
* Semantically we need this because icache doesn't snoop dcache/dma.
@@ -33,7 +34,9 @@ void flush_cache_all(void); void flush_icache_range(unsigned long start, unsigned long end); void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len); void __inv_icache_page(unsigned long paddr, unsigned long vaddr); -void __flush_dcache_page(unsigned long paddr); +void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr); +#define __flush_dcache_page(p, v) \ + ___flush_dcache_page((unsigned long)p, (unsigned long)v) #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 @@ -50,18 +53,55 @@ void dma_cache_wback(unsigned long start, unsigned long sz); #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() -/* - * VM callbacks when entire/range of user-space V-P mappings are - * torn-down/get-invalidated - * - * Currently we don't support D$ aliasing configs for our VIPT caches - * NOPS for VIPT Cache with non-aliasing D$ configurations only - */ -#define flush_cache_dup_mm(mm) /* called on fork */ +#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */ + +#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING + #define flush_cache_mm(mm) /* called on munmap/exit */ #define flush_cache_range(mm, u_vstart, u_vend) #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ +#else /* VIPT aliasing dcache */ + +/* To clear out stale userspace mappings */ +void flush_cache_mm(struct mm_struct *mm); +void flush_cache_range(struct vm_area_struct *vma, + unsigned long start,unsigned long end); +void flush_cache_page(struct vm_area_struct *vma, + unsigned long user_addr, unsigned long page); + +/* + * To make sure that userspace mapping is flushed to memory before + * get_user_pages() uses a kernel mapping to access the page + */ +#define ARCH_HAS_FLUSH_ANON_PAGE +void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long u_vaddr); + +#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ + +/* + * Simple wrapper over config option + * Bootup code ensures that hardware matches kernel configuration + */ +static inline int cache_is_vipt_aliasing(void) +{ +#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING + return 1; +#else + return 0; +#endif +} + +#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3) + +/* + * checks if two addresses (after page aligning) index into same cache set + */ +#define addr_not_cache_congruent(addr1, addr2) \ + cache_is_vipt_aliasing() ? 
\ + (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0 \ + #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index bdf546104551..374a35514116 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -16,13 +16,27 @@ #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) #define free_user_page(page, addr) free_page(addr) -/* TBD: for now don't worry about VIPT D$ aliasing */ #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) +#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING + #define clear_user_page(addr, vaddr, pg) clear_page(addr) #define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom) +#else /* VIPT aliasing dcache */ + +struct vm_area_struct; +struct page; + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long u_vaddr, struct vm_area_struct *vma); +void clear_user_page(void *to, unsigned long u_vaddr, struct page *page); + +#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ + #undef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index b7e36684c091..1cc4720faccb 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -395,6 +395,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, #include <asm-generic/pgtable.h> +/* to cope with aliasing VIPT cache */ +#define HAVE_ARCH_UNMAPPED_AREA + /* * No page table caches to initialise */ diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h new file mode 100644 index 000000000000..fffeecc04270 --- /dev/null +++ b/arch/arc/include/asm/shmparam.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ARC_ASM_SHMPARAM_H +#define __ARC_ASM_SHMPARAM_H + +/* Handle upto 2 cache bins */ +#define SHMLBA (2 * PAGE_SIZE) + +/* Enforce SHMLBA in shmat */ +#define __ARCH_FORCE_SHMLBA + +#endif diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h index fe91719866a5..85b6df839bd7 100644 --- a/arch/arc/include/asm/tlb.h +++ b/arch/arc/include/asm/tlb.h @@ -30,13 +30,20 @@ do { \ /* * This pair is called at time of munmap/exit to flush cache and TLB entries * for mappings being torn down. - * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now) - * as we don't support aliasing configs in our VIPT D$. 
+ * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$ * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range * * Note, read http://lkml.org/lkml/2004/1/15/6 */ +#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING #define tlb_start_vma(tlb, vma) +#else +#define tlb_start_vma(tlb, vma) \ +do { \ + if (!tlb->fullmm) \ + flush_cache_range(vma, vma->vm_start, vma->vm_end); \ +} while(0) +#endif #define tlb_end_vma(tlb, vma) \ do { \ diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile index 168dc146a8f6..ac95cc239c1e 100644 --- a/arch/arc/mm/Makefile +++ b/arch/arc/mm/Makefile @@ -7,4 +7,4 @@ # obj-y := extable.o ioremap.o dma.o fault.o init.o -obj-y += tlb.o tlbex.o cache_arc700.o +obj-y += tlb.o tlbex.o cache_arc700.o mmap.o diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index c854cf95f706..2f12bca8aef3 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -68,6 +68,7 @@ #include <linux/mmu_context.h> #include <linux/syscalls.h> #include <linux/uaccess.h> +#include <linux/pagemap.h> #include <asm/cacheflush.h> #include <asm/cachectl.h> #include <asm/setup.h> @@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void) struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; int way_pg_ratio = way_pg_ratio; + int dcache_does_alias; char str[256]; printk(arc_cache_mumbojumbo(0, str, sizeof(str))); @@ -184,9 +186,13 @@ chk_dc: panic("Cache H/W doesn't match kernel Config"); } + dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE; + /* check for D-Cache aliasing */ - if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE) - panic("D$ aliasing not handled right now\n"); + if (dcache_does_alias && !cache_is_vipt_aliasing()) + panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); + else if (!dcache_does_alias && cache_is_vipt_aliasing()) + panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); #endif /* Set the default Invalidate Mode to "simpy discard dirty lines" @@ -269,47 +275,57 @@ static inline void __dc_entire_op(const int cacheop) * Per Line Operation on D-Cache * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete * It's sole purpose is to help gcc generate ZOL + * (aliasing VIPT dcache flushing needs both vaddr and paddr) */ -static inline void __dc_line_loop(unsigned long start, unsigned long sz, - int aux_reg) +static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr, + unsigned long sz, const int aux_reg) { - int num_lines, slack; + int num_lines; /* Ensure we properly floor/ceil the non-line aligned/sized requests - * and have @start - aligned to cache line and integral @num_lines. + * and have @paddr - aligned to cache line and integral @num_lines. * This however can be avoided for page sized since: - * -@start will be cache-line aligned already (being page aligned) + * -@paddr will be cache-line aligned already (being page aligned) * -@sz will be integral multiple of line size (being page sized). 
*/ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { - slack = start & ~DCACHE_LINE_MASK; - sz += slack; - start -= slack; + sz += paddr & ~DCACHE_LINE_MASK; + paddr &= DCACHE_LINE_MASK; + vaddr &= DCACHE_LINE_MASK; } num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN); +#if (CONFIG_ARC_MMU_VER <= 2) + paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; +#endif + while (num_lines-- > 0) { #if (CONFIG_ARC_MMU_VER > 2) /* * Just as for I$, in MMU v3, D$ ops also require * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops - * But we pass phy addr for both. This works since Linux - * doesn't support aliasing configs for D$, yet. - * Thus paddr is enough to provide both tag and index. */ - write_aux_reg(ARC_REG_DC_PTAG, start); + write_aux_reg(ARC_REG_DC_PTAG, paddr); + + write_aux_reg(aux_reg, vaddr); + vaddr += ARC_DCACHE_LINE_LEN; +#else + /* paddr contains stuffed vaddrs bits */ + write_aux_reg(aux_reg, paddr); #endif - write_aux_reg(aux_reg, start); - start += ARC_DCACHE_LINE_LEN; + paddr += ARC_DCACHE_LINE_LEN; } } +/* For kernel mappings cache operation: index is same as paddr */ +#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) + /* * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback) */ -static inline void __dc_line_op(unsigned long start, unsigned long sz, - const int cacheop) +static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, + unsigned long sz, const int cacheop) { unsigned long flags, tmp = tmp; int aux; @@ -332,7 +348,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, else aux = ARC_REG_DC_FLDL; - __dc_line_loop(start, sz, aux); + __dc_line_loop(paddr, vaddr, sz, aux); if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */ wait_for_flush(); @@ -347,7 +363,8 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, #else #define __dc_entire_op(cacheop) -#define __dc_line_op(start, sz, cacheop) +#define __dc_line_op(paddr, vaddr, sz, cacheop) +#define __dc_line_op_k(paddr, sz, cacheop) #endif /* CONFIG_ARC_HAS_DCACHE */ @@ -399,49 +416,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, /*********************************************************** * Machine specific helper for per line I-Cache invalidate. */ -static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr, +static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, unsigned long sz) { unsigned long flags; - int num_lines, slack; - unsigned int addr; + int num_lines; /* * Ensure we properly floor/ceil the non-line aligned/sized requests: * However page sized flushes can be compile time optimised. - * -@phy_start will be cache-line aligned already (being page aligned) + * -@paddr will be cache-line aligned already (being page aligned) * -@sz will be integral multiple of line size (being page sized). 
*/ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { - slack = phy_start & ~ICACHE_LINE_MASK; - sz += slack; - phy_start -= slack; + sz += paddr & ~ICACHE_LINE_MASK; + paddr &= ICACHE_LINE_MASK; + vaddr &= ICACHE_LINE_MASK; } num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN); -#if (CONFIG_ARC_MMU_VER > 2) - vaddr &= ~ICACHE_LINE_MASK; - addr = phy_start; -#else +#if (CONFIG_ARC_MMU_VER <= 2) /* bits 17:13 of vaddr go as bits 4:0 of paddr */ - addr = phy_start | ((vaddr >> 13) & 0x1F); + paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; #endif local_irq_save(flags); while (num_lines-- > 0) { #if (CONFIG_ARC_MMU_VER > 2) /* tag comes from phy addr */ - write_aux_reg(ARC_REG_IC_PTAG, addr); + write_aux_reg(ARC_REG_IC_PTAG, paddr); /* index bits come from vaddr */ write_aux_reg(ARC_REG_IC_IVIL, vaddr); vaddr += ARC_ICACHE_LINE_LEN; #else /* paddr contains stuffed vaddrs bits */ - write_aux_reg(ARC_REG_IC_IVIL, addr); + write_aux_reg(ARC_REG_IC_IVIL, paddr); #endif - addr += ARC_ICACHE_LINE_LEN; + paddr += ARC_ICACHE_LINE_LEN; } local_irq_restore(flags); } @@ -457,29 +470,66 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr, * Exported APIs */ +/* + * Handle cache congruency of kernel and userspace mappings of page when kernel + * writes-to/reads-from + * + * The idea is to defer flushing of kernel mapping after a WRITE, possible if: + * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent + * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache) + * -In SMP, if hardware caches are coherent + * + * There's a corollary case, where kernel READs from a userspace mapped page. + * If the U-mapping is not congruent to to K-mapping, former needs flushing. + */ void flush_dcache_page(struct page *page) { - /* Make a note that dcache is not yet flushed for this page */ - set_bit(PG_arch_1, &page->flags); + struct address_space *mapping; + + if (!cache_is_vipt_aliasing()) { + set_bit(PG_arch_1, &page->flags); + return; + } + + /* don't handle anon pages here */ + mapping = page_mapping(page); + if (!mapping) + return; + + /* + * pagecache page, file not yet mapped to userspace + * Make a note that K-mapping is dirty + */ + if (!mapping_mapped(mapping)) { + set_bit(PG_arch_1, &page->flags); + } else if (page_mapped(page)) { + + /* kernel reading from page with U-mapping */ + void *paddr = page_address(page); + unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; + + if (addr_not_cache_congruent(paddr, vaddr)) + __flush_dcache_page(paddr, vaddr); + } } EXPORT_SYMBOL(flush_dcache_page); void dma_cache_wback_inv(unsigned long start, unsigned long sz) { - __dc_line_op(start, sz, OP_FLUSH_N_INV); + __dc_line_op_k(start, sz, OP_FLUSH_N_INV); } EXPORT_SYMBOL(dma_cache_wback_inv); void dma_cache_inv(unsigned long start, unsigned long sz) { - __dc_line_op(start, sz, OP_INV); + __dc_line_op_k(start, sz, OP_INV); } EXPORT_SYMBOL(dma_cache_inv); void dma_cache_wback(unsigned long start, unsigned long sz) { - __dc_line_op(start, sz, OP_FLUSH); + __dc_line_op_k(start, sz, OP_FLUSH); } EXPORT_SYMBOL(dma_cache_wback); @@ -560,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len) local_irq_save(flags); __ic_line_inv_vaddr(paddr, vaddr, len); - __dc_line_op(paddr, len, OP_FLUSH); + __dc_line_op(paddr, vaddr, len, OP_FLUSH); local_irq_restore(flags); } @@ -570,9 +620,13 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr) __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); } -void __flush_dcache_page(unsigned long paddr) 
+/* + * wrapper to clearout kernel or userspace mappings of a page + * For kernel mappings @vaddr == @paddr + */ +void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr) { - __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV); + __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); } void flush_icache_all(void) @@ -601,6 +655,87 @@ noinline void flush_cache_all(void) } +#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING + +void flush_cache_mm(struct mm_struct *mm) +{ + flush_cache_all(); +} + +void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, + unsigned long pfn) +{ + unsigned int paddr = pfn << PAGE_SHIFT; + + __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE); +} + +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + flush_cache_all(); +} + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long u_vaddr, struct vm_area_struct *vma) +{ + void *kfrom = page_address(from); + void *kto = page_address(to); + int clean_src_k_mappings = 0; + + /* + * If SRC page was already mapped in userspace AND it's U-mapping is + * not congruent with K-mapping, sync former to physical page so that + * K-mapping in memcpy below, sees the right data + * + * Note that while @u_vaddr refers to DST page's userspace vaddr, it is + * equally valid for SRC page as well + */ + if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { + __flush_dcache_page(kfrom, u_vaddr); + clean_src_k_mappings = 1; + } + + copy_page(kto, kfrom); + + /* + * Mark DST page K-mapping as dirty for a later finalization by + * update_mmu_cache(). Although the finalization could have been done + * here as well (given that both vaddr/paddr are available). + * But update_mmu_cache() already has code to do that for other + * non copied user pages (e.g. read faults which wire in pagecache page + * directly). + */ + set_bit(PG_arch_1, &to->flags); + + /* + * if SRC was already usermapped and non-congruent to kernel mapping + * sync the kernel mapping back to physical page + */ + if (clean_src_k_mappings) { + __flush_dcache_page(kfrom, kfrom); + } else { + set_bit(PG_arch_1, &from->flags); + } +} + +void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) +{ + clear_page(to); + set_bit(PG_arch_1, &page->flags); +} + +void flush_anon_page(struct vm_area_struct *vma, struct page *page, + unsigned long u_vaddr) +{ + /* TBD: do we really need to clear the kernel mapping */ + __flush_dcache_page(page_address(page), u_vaddr); + __flush_dcache_page(page_address(page), page_address(page)); + +} + +#endif + /********************************************************************** * Explicit Cache flush request from user space via syscall * Needed for JITs which generate code on the fly diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c new file mode 100644 index 000000000000..2e06d56e987b --- /dev/null +++ b/arch/arc/mm/mmap.c @@ -0,0 +1,78 @@ +/* + * ARC700 mmap + * + * (started from arm version - for VIPT alias handling) + * + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/sched.h> +#include <asm/cacheflush.h> + +#define COLOUR_ALIGN(addr, pgoff) \ + ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \ + (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))) + +/* + * Ensure that shared mappings are correctly aligned to + * avoid aliasing issues with VIPT caches. + * We need to ensure that + * a specific page of an object is always mapped at a multiple of + * SHMLBA bytes. + */ +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int do_align = 0; + int aliasing = cache_is_vipt_aliasing(); + struct vm_unmapped_area_info info; + + /* + * We only need to do colour alignment if D cache aliases. + */ + if (aliasing) + do_align = filp || (flags & MAP_SHARED); + + /* + * We enforce the MAP_FIXED case. + */ + if (flags & MAP_FIXED) { + if (aliasing && flags & MAP_SHARED && + (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) + return -EINVAL; + return addr; + } + + if (len > TASK_SIZE) + return -ENOMEM; + + if (addr) { + if (do_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + return vm_unmapped_area(&info); +} diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index 003d69ac6ffa..066145b5f348 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) /* * Called at the end of pagefault, for a userspace mapped page * -pre-install the corresponding TLB entry into MMU - * -Finalize the delayed D-cache flush (wback+inv kernel mapping) + * -Finalize the delayed D-cache flush of kernel mapping of page due to + * flush_dcache_page(), copy_user_page() + * + * Note that flush (when done) involves both WBACK - so physical page is + * in sync as well as INV - so any non-congruent aliases don't remain */ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, pte_t *ptep) { unsigned long vaddr = vaddr_unaligned & PAGE_MASK; + unsigned long paddr = pte_val(*ptep) & PAGE_MASK; create_tlb(vma, vaddr, ptep); - /* icache doesn't snoop dcache, thus needs to be made coherent here */ - if (vma->vm_flags & VM_EXEC) { + /* + * Exec page : Independent of aliasing/page-color considerations, + * since icache doesn't snoop dcache on ARC, any dirty + * K-mapping of a code page needs to be wback+inv so that + * icache fetch by userspace sees code correctly. + * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it + * so userspace sees the right data. 
+ * (Avoids the flush for Non-exec + congruent mapping case) + */ + if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) { struct page *page = pfn_to_page(pte_pfn(*ptep)); - /* if page was dcache dirty, flush now */ int dirty = test_and_clear_bit(PG_arch_1, &page->flags); if (dirty) { - unsigned long paddr = pte_val(*ptep) & PAGE_MASK; - __flush_dcache_page(paddr); - __inv_icache_page(paddr, vaddr); + /* wback + inv dcache lines */ + __flush_dcache_page(paddr, paddr); + + /* invalidate any existing icache lines */ + if (vma->vm_flags & VM_EXEC) + __inv_icache_page(paddr, vaddr); } } } diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig index 4e121272c4e5..1d3452100f1f 100644 --- a/arch/arc/plat-tb10x/Kconfig +++ b/arch/arc/plat-tb10x/Kconfig @@ -27,10 +27,3 @@ menuconfig ARC_PLAT_TB10X Abilis Systems. TB10x is based on the ARC700 CPU architecture. Say Y if you are building a kernel for one of the SOCs in this series (e.g. TB100 or TB101). If in doubt say N. - -if ARC_PLAT_TB10X - -config GENERIC_GPIO - def_bool y - -endif diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 12f22492df4c..58125bf008d3 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -389,7 +389,7 @@ __SYSCALL(364, sys_perf_event_open) __SYSCALL(365, compat_sys_recvmmsg) __SYSCALL(366, sys_accept4) __SYSCALL(367, sys_fanotify_init) -__SYSCALL(368, compat_sys_fanotify_mark_wrapper) +__SYSCALL(368, compat_sys_fanotify_mark) __SYSCALL(369, sys_prlimit64) __SYSCALL(370, sys_name_to_handle_at) __SYSCALL(371, compat_sys_open_by_handle_at) diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S index db01aa978c41..a1b19ed7467c 100644 --- a/arch/arm64/kernel/sys32.S +++ b/arch/arm64/kernel/sys32.S @@ -104,13 +104,6 @@ compat_sys_fallocate_wrapper: b sys_fallocate ENDPROC(compat_sys_fallocate_wrapper) -compat_sys_fanotify_mark_wrapper: - orr x2, x2, x3, lsl #32 - mov w3, w4 - mov w4, w5 - b sys_fanotify_mark -ENDPROC(compat_sys_fanotify_mark_wrapper) - #undef __SYSCALL #define __SYSCALL(x, y) .quad y // x diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile index 66cf00095b84..1fce08632ad7 100644 --- a/arch/blackfin/Makefile +++ b/arch/blackfin/Makefile @@ -141,11 +141,11 @@ archclean: INSTALL_PATH ?= /tftpboot boot := arch/$(ARCH)/boot -BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip +BOOT_TARGETS = uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip PHONY += $(BOOT_TARGETS) install -KBUILD_IMAGE := $(boot)/vmImage +KBUILD_IMAGE := $(boot)/uImage -all: vmImage +all: uImage $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile index f7d27d50d02c..3efaa094fb90 100644 --- a/arch/blackfin/boot/Makefile +++ b/arch/blackfin/boot/Makefile @@ -6,7 +6,7 @@ # for more details. 
# -targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip +targets := uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip ifeq ($(CONFIG_RAMKERNEL),y) @@ -39,22 +39,22 @@ quiet_cmd_mk_bin_xip = BIN $@ $(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE $(call if_changed,mk_bin_xip) -$(obj)/vmImage.bin: $(obj)/vmlinux.bin +$(obj)/uImage.bin: $(obj)/vmlinux.bin $(call if_changed,uimage,none) -$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2 +$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 $(call if_changed,uimage,bzip2) -$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz +$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz $(call if_changed,uimage,gzip) -$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma +$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma $(call if_changed,uimage,lzma) -$(obj)/vmImage.lzo: $(obj)/vmlinux.bin.lzo +$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo $(call if_changed,uimage,lzo) -$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip +$(obj)/uImage.xip: $(obj)/vmlinux.bin.xip $(call if_changed,uimage,none) suffix-y := bin @@ -64,7 +64,7 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_ROMKERNEL) := xip -$(obj)/vmImage: $(obj)/vmImage.$(suffix-y) +$(obj)/uImage: $(obj)/uImage.$(suffix-y) @ln -sf $(notdir $<) $@ install: diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index c8db653c72d2..a107a98e9978 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -11,7 +11,9 @@ #ifdef CONFIG_SMP +#include <asm/barrier.h> #include <linux/linkage.h> +#include <linux/types.h> asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value); diff --git a/arch/blackfin/include/asm/bfin_sdh.h b/arch/blackfin/include/asm/bfin_sdh.h index 6a4cfe2d3367..a99957ea9e9b 100644 --- a/arch/blackfin/include/asm/bfin_sdh.h +++ b/arch/blackfin/include/asm/bfin_sdh.h @@ -24,18 +24,27 @@ struct bfin_sd_host { #define CMD_INT_E (1 << 8) /* Command Interrupt */ #define CMD_PEND_E (1 << 9) /* Command Pending */ #define CMD_E (1 << 10) /* Command Enable */ +#ifdef RSI_BLKSZ +#define CMD_CRC_CHECK_D (1 << 11) /* CRC Check is disabled */ +#define CMD_DATA0_BUSY (1 << 12) /* Check for Busy State on the DATA0 pin */ +#endif /* SDH_PWR_CTL bitmasks */ +#ifndef RSI_BLKSZ #define PWR_ON 0x3 /* Power On */ #define SD_CMD_OD (1 << 6) /* Open Drain Output */ #define ROD_CTL (1 << 7) /* Rod Control */ +#endif /* SDH_CLK_CTL bitmasks */ #define CLKDIV 0xff /* MC_CLK Divisor */ #define CLK_E (1 << 8) /* MC_CLK Bus Clock Enable */ #define PWR_SV_E (1 << 9) /* Power Save Enable */ #define CLKDIV_BYPASS (1 << 10) /* Bypass Divisor */ -#define WIDE_BUS (1 << 11) /* Wide Bus Mode Enable */ +#define BUS_MODE_MASK 0x1800 /* Bus Mode Mask */ +#define STD_BUS_1 0x000 /* Standard Bus 1 bit mode */ +#define WIDE_BUS_4 0x800 /* Wide Bus 4 bit mode */ +#define BYTE_BUS_8 0x1000 /* Byte Bus 8 bit mode */ /* SDH_RESP_CMD bitmasks */ #define RESP_CMD 0x3f /* Response Command */ @@ -45,7 +54,13 @@ struct bfin_sd_host { #define DTX_DIR (1 << 1) /* Data Transfer Direction */ #define DTX_MODE (1 << 2) /* Data Transfer Mode */ #define DTX_DMA_E (1 << 3) /* Data Transfer DMA Enable */ +#ifndef RSI_BLKSZ #define DTX_BLK_LGTH (0xf << 4) /* Data Transfer Block Length */ +#else + +/* Bit masks for SDH_BLK_SIZE */ +#define DTX_BLK_LGTH 0x1fff /* Data Transfer Block 
Length */ +#endif /* SDH_STATUS bitmasks */ #define CMD_CRC_FAIL (1 << 0) /* CMD CRC Fail */ @@ -114,10 +129,14 @@ struct bfin_sd_host { /* SDH_E_STATUS bitmasks */ #define SDIO_INT_DET (1 << 1) /* SDIO Int Detected */ #define SD_CARD_DET (1 << 4) /* SD Card Detect */ +#define SD_CARD_BUSYMODE (1 << 31) /* Card is in Busy mode */ +#define SD_CARD_SLPMODE (1 << 30) /* Card in Sleep Mode */ +#define SD_CARD_READY (1 << 17) /* Card Ready */ /* SDH_E_MASK bitmasks */ #define SDIO_MSK (1 << 1) /* Mask SDIO Int Detected */ -#define SCD_MSK (1 << 6) /* Mask Card Detect */ +#define SCD_MSK (1 << 4) /* Mask Card Detect */ +#define CARD_READY_MSK (1 << 16) /* Mask Card Ready */ /* SDH_CFG bitmasks */ #define CLKS_EN (1 << 0) /* Clocks Enable */ @@ -126,7 +145,15 @@ struct bfin_sd_host { #define SD_RST (1 << 4) /* SDMMC Reset */ #define PUP_SDDAT (1 << 5) /* Pull-up SD_DAT */ #define PUP_SDDAT3 (1 << 6) /* Pull-up SD_DAT3 */ +#ifndef RSI_BLKSZ #define PD_SDDAT3 (1 << 7) /* Pull-down SD_DAT3 */ +#else +#define PWR_ON 0x600 /* Power On */ +#define SD_CMD_OD (1 << 11) /* Open Drain Output */ +#define BOOT_EN (1 << 12) /* Boot Enable */ +#define BOOT_MODE (1 << 13) /* Alternate Boot Mode */ +#define BOOT_ACK_EN (1 << 14) /* Boot ACK is expected */ +#endif /* SDH_RD_WAIT_EN bitmasks */ #define RWR (1 << 0) /* Read Wait Request */ diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h index 8a0fed16058f..0ca40dd44724 100644 --- a/arch/blackfin/include/asm/bitops.h +++ b/arch/blackfin/include/asm/bitops.h @@ -41,6 +41,7 @@ #include <asm-generic/bitops/non-atomic.h> #else +#include <asm/barrier.h> #include <asm/byteorder.h> /* swab32 */ #include <linux/linkage.h> diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h index fe0ca03a1cb2..ca67145c6a45 100644 --- a/arch/blackfin/include/asm/def_LPBlackfin.h +++ b/arch/blackfin/include/asm/def_LPBlackfin.h @@ -622,10 +622,12 @@ do { \ #define PAGE_SIZE_4KB 0x00010000 /* 4 KB page size */ #define PAGE_SIZE_1MB 0x00020000 /* 1 MB page size */ #define PAGE_SIZE_4MB 0x00030000 /* 4 MB page size */ +#ifdef CONFIG_BF60x #define PAGE_SIZE_16KB 0x00040000 /* 16 KB page size */ #define PAGE_SIZE_64KB 0x00050000 /* 64 KB page size */ #define PAGE_SIZE_16MB 0x00060000 /* 16 MB page size */ #define PAGE_SIZE_64MB 0x00070000 /* 64 MB page size */ +#endif #define CPLB_L1SRAM 0x00000020 /* 0=SRAM mapped in L1, 0=SRAM not * mapped to L1 */ diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h index 9b33e7247864..c865b33eeb68 100644 --- a/arch/blackfin/include/asm/mem_init.h +++ b/arch/blackfin/include/asm/mem_init.h @@ -335,6 +335,7 @@ struct ddr_config { u32 ddr_clk; u32 dmc_ddrctl; + u32 dmc_effctl; u32 dmc_ddrcfg; u32 dmc_ddrtr0; u32 dmc_ddrtr1; @@ -348,6 +349,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [0] = { .ddr_clk = 125, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20705212, .dmc_ddrtr1 = 0x201003CF, @@ -358,6 +360,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [1] = { .ddr_clk = 133, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20806313, .dmc_ddrtr1 = 0x2013040D, @@ -368,6 +371,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [2] = { .ddr_clk = 150, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 
0x20A07323, .dmc_ddrtr1 = 0x20160492, @@ -378,6 +382,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [3] = { .ddr_clk = 166, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20A07323, .dmc_ddrtr1 = 0x2016050E, @@ -388,6 +393,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [4] = { .ddr_clk = 200, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20a07323, .dmc_ddrtr1 = 0x2016050f, @@ -398,6 +404,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [5] = { .ddr_clk = 225, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20E0A424, .dmc_ddrtr1 = 0x302006DB, @@ -408,6 +415,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [6] = { .ddr_clk = 250, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20E0A424, .dmc_ddrtr1 = 0x3020079E, @@ -469,6 +477,7 @@ static inline void init_dmc(u32 dmc_clk) bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2); bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr); bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1); + bfin_write_DMC0_EFFCTL(ddr_config_table[i].dmc_effctl); bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl); break; } diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c index 34e96ce02aa9..b49a53b583d5 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c @@ -30,6 +30,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) { int i_d, i_i; unsigned long addr; + unsigned long cplb_pageflags, cplb_pagesize; struct cplb_entry *d_tbl = dcplb_tbl[cpu]; struct cplb_entry *i_tbl = icplb_tbl[cpu]; @@ -49,11 +50,20 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) /* Cover kernel memory with 4M pages. 
*/ addr = 0; - for (; addr < memory_start; addr += 4 * 1024 * 1024) { +#ifdef PAGE_SIZE_16MB + cplb_pageflags = PAGE_SIZE_16MB; + cplb_pagesize = SIZE_16M; +#else + cplb_pageflags = PAGE_SIZE_4MB; + cplb_pagesize = SIZE_4M; +#endif + + + for (; addr < memory_start; addr += cplb_pagesize) { d_tbl[i_d].addr = addr; - d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; + d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags; i_tbl[i_i].addr = addr; - i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; + i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags; } #ifdef CONFIG_ROMKERNEL diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c index e854f9066cbd..79cc0f6dcdd5 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c @@ -145,7 +145,7 @@ MGR_ATTR static int dcplb_miss(int cpu) unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); int status = bfin_read_DCPLB_STATUS(); int idx; - unsigned long d_data, base, addr1, eaddr; + unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags; nr_dcplb_miss[cpu]++; if (unlikely(status & FAULT_USERSUPV)) @@ -167,18 +167,37 @@ MGR_ATTR static int dcplb_miss(int cpu) if (unlikely(d_data == 0)) return CPLB_NO_ADDR_MATCH; - addr1 = addr & ~(SIZE_4M - 1); addr &= ~(SIZE_1M - 1); d_data |= PAGE_SIZE_1MB; - if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) { + + /* BF60x support large than 4M CPLB page size */ +#ifdef PAGE_SIZE_16MB + cplb_pageflags = PAGE_SIZE_16MB; + cplb_pagesize = SIZE_16M; +#else + cplb_pageflags = PAGE_SIZE_4MB; + cplb_pagesize = SIZE_4M; +#endif + +find_pagesize: + addr1 = addr & ~(cplb_pagesize - 1); + if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) { /* * This works because * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB. 
*/ - d_data |= PAGE_SIZE_4MB; + d_data |= cplb_pageflags; addr = addr1; + goto found_pagesize; + } else { + if (cplb_pagesize > SIZE_4M) { + cplb_pageflags = PAGE_SIZE_4MB; + cplb_pagesize = SIZE_4M; + goto find_pagesize; + } } +found_pagesize: #ifdef CONFIG_BF60x if ((addr >= ASYNC_BANK0_BASE) && (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)) diff --git a/arch/blackfin/kernel/cplbinfo.c b/arch/blackfin/kernel/cplbinfo.c index 404045dcc5e4..5b80d59e66e5 100644 --- a/arch/blackfin/kernel/cplbinfo.c +++ b/arch/blackfin/kernel/cplbinfo.c @@ -17,8 +17,13 @@ #include <asm/cplbinit.h> #include <asm/blackfin.h> -static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" }; -#define page(flags) (((flags) & 0x30000) >> 16) +static char const page_strtbl[][4] = { + "1K", "4K", "1M", "4M", +#ifdef CONFIG_BF60x + "16K", "64K", "16M", "64M", +#endif +}; +#define page(flags) (((flags) & 0x70000) >> 16) #define strpage(flags) page_strtbl[page(flags)] struct cplbinfo_data { diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index fb96e607adcf..107b306b06f1 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -1314,7 +1314,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid()); } - seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", + seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n", cclk/1000000, cclk%1000000, sclk/1000000, sclk%1000000); seq_printf(m, "bogomips\t: %lu.%02lu\n" diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 95114ed395ac..6a3a14bcd3a1 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -455,6 +455,7 @@ static struct platform_device bfin_async_nand_device = { static void bfin_plat_nand_init(void) { gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat"); + gpio_direction_input(BFIN_NAND_PLAT_READY); } #else static void bfin_plat_nand_init(void) {} diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index a4fce0370c1d..755f0dc12010 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -764,7 +764,6 @@ static struct platform_device i2c_bfin_twi1_device = { .num_resources = ARRAY_SIZE(bfin_twi1_resource), .resource = bfin_twi1_resource, }; -#endif /* CONFIG_BF542 */ #endif /* CONFIG_I2C_BLACKFIN_TWI */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) diff --git a/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h b/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h index 4954cf3f7e16..102ee4025ac9 100644 --- a/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h +++ b/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h @@ -312,6 +312,8 @@ #define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val) #define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL) #define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val) +#define bfin_read_DMC0_EFFCTL() bfin_read32(DMC0_EFFCTL) +#define bfin_write_DMC0_EFFCTL(val) bfin_write32(DMC0_EFFCTL, val) #define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT) #define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val) #define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL) diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index d266787725b4..33013dfcd3e1 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -223,13 +223,25 @@ config M5307 help Motorola ColdFire 5307 processor support. 
+config M53xx + bool + config M532x bool "MCF532x" depends on !MMU + select M53xx select HAVE_CACHE_CB help Freescale (Motorola) ColdFire 532x processor support. +config M537x + bool "MCF537x" + depends on !MMU + select M53xx + select HAVE_CACHE_CB + help + Freescale ColdFire 537x processor support. + config M5407 bool "MCF5407" depends on !MMU diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index 7240584d3439..b9ab0a69561c 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -358,6 +358,13 @@ config COBRA5329 help Support for the senTec COBRA5329 board. +config M5373EVB + bool "Freescale M5373EVB board support" + depends on M537x + select FREESCALE + help + Support for the Freescale M5373EVB board. + config M5407C3 bool "Motorola M5407C3 board support" depends on M5407 @@ -539,15 +546,6 @@ config ROMVEC 68000 type variants the vectors are at the base of the boot device on system startup. -config ROMVECSIZE - hex "Size of ROM vector region (in bytes)" - default "0x400" - depends on ROM - help - Define the size of the vector region in ROM. For most 68000 - variants this would be 0x400 bytes in size. Set to 0 if you do - not want a vector region at the start of the ROM. - config ROMSTART hex "Address of the base of system image in ROM" default "0x400" diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index 2f02acfb8edf..7f7830f2c5bc 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -45,6 +45,7 @@ cpuflags-$(CONFIG_M5441x) := $(call cc-option,-mcpu=54455,-mcfv4e) cpuflags-$(CONFIG_M54xx) := $(call cc-option,-mcpu=5475,-m5200) cpuflags-$(CONFIG_M5407) := $(call cc-option,-mcpu=5407,-m5200) cpuflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307) +cpuflags-$(CONFIG_M537x) := $(call cc-option,-mcpu=537x,-m5307) cpuflags-$(CONFIG_M5307) := $(call cc-option,-mcpu=5307,-m5200) cpuflags-$(CONFIG_M528x) := $(call cc-option,-mcpu=528x,-m5307) cpuflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307) diff --git a/arch/m68k/include/asm/commproc.h b/arch/m68k/include/asm/commproc.h index a73998528d26..66a36bd51aa1 100644 --- a/arch/m68k/include/asm/commproc.h +++ b/arch/m68k/include/asm/commproc.h @@ -480,23 +480,6 @@ typedef struct scc_enet { #define SICR_ENET_CLKRT ((uint)0x0000003d) #endif -#ifdef CONFIG_RPXLITE -/* This ENET stuff is for the MPC850 with ethernet on SCC2. Some of - * this may be unique to the RPX-Lite configuration. - * Note TENA is on Port B. - */ -#define PA_ENET_RXD ((ushort)0x0004) -#define PA_ENET_TXD ((ushort)0x0008) -#define PA_ENET_TCLK ((ushort)0x0200) -#define PA_ENET_RCLK ((ushort)0x0800) -#define PB_ENET_TENA ((uint)0x00002000) -#define PC_ENET_CLSN ((ushort)0x0040) -#define PC_ENET_RENA ((ushort)0x0080) - -#define SICR_ENET_MASK ((uint)0x0000ff00) -#define SICR_ENET_CLKRT ((uint)0x00003d00) -#endif - #ifdef CONFIG_BSEIP /* This ENET stuff is for the MPC823 with ethernet on SCC2. * This is unique to the BSE ip-Engine board. 
diff --git a/arch/m68k/include/asm/dbg.h b/arch/m68k/include/asm/dbg.h deleted file mode 100644 index 27af3270f671..000000000000 --- a/arch/m68k/include/asm/dbg.h +++ /dev/null @@ -1,6 +0,0 @@ -#define DEBUG 1 -#ifdef CONFIG_COLDFIRE -#define BREAK asm volatile ("halt") -#else -#define BREAK *(volatile unsigned char *)0xdeadbee0 = 0 -#endif diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h index 0ff3fc6a6d9a..429fe26e320c 100644 --- a/arch/m68k/include/asm/dma.h +++ b/arch/m68k/include/asm/dma.h @@ -39,7 +39,7 @@ #define MAX_M68K_DMA_CHANNELS 4 #elif defined(CONFIG_M5272) #define MAX_M68K_DMA_CHANNELS 1 -#elif defined(CONFIG_M532x) +#elif defined(CONFIG_M53xx) #define MAX_M68K_DMA_CHANNELS 0 #else #define MAX_M68K_DMA_CHANNELS 2 diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h index cd952b0a8bd3..3177ce8331d6 100644 --- a/arch/m68k/include/asm/m53xxacr.h +++ b/arch/m68k/include/asm/m53xxacr.h @@ -55,8 +55,8 @@ #define CACHE_SIZE 0x2000 /* 8k of unified cache */ #define ICACHE_SIZE CACHE_SIZE #define DCACHE_SIZE CACHE_SIZE -#elif defined(CONFIG_M532x) -#define CACHE_SIZE 0x4000 /* 32k of unified cache */ +#elif defined(CONFIG_M53xx) +#define CACHE_SIZE 0x4000 /* 16k of unified cache */ #define ICACHE_SIZE CACHE_SIZE #define DCACHE_SIZE CACHE_SIZE #endif diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m53xxsim.h index 8668e47ced0e..faa1a2133bfd 100644 --- a/arch/m68k/include/asm/m532xsim.h +++ b/arch/m68k/include/asm/m53xxsim.h @@ -1,15 +1,15 @@ /****************************************************************************/ /* - * m532xsim.h -- ColdFire 5329 registers + * m53xxsim.h -- ColdFire 5329 registers */ /****************************************************************************/ -#ifndef m532xsim_h -#define m532xsim_h +#ifndef m53xxsim_h +#define m53xxsim_h /****************************************************************************/ -#define CPU_NAME "COLDFIRE(m532x)" +#define CPU_NAME "COLDFIRE(m53xx)" #define CPU_INSTR_PER_JIFFY 3 #define MCF_BUSCLK (MCF_CLK / 3) @@ -107,7 +107,7 @@ /* * QSPI module. 
*/ -#define MCFQSPI_BASE 0xFC058000 /* Base address of QSPI */ +#define MCFQSPI_BASE 0xFC05C000 /* Base address of QSPI */ #define MCFQSPI_SIZE 0x40 /* Size of QSPI region */ #define MCFQSPI_CS0 84 @@ -1238,4 +1238,4 @@ #define MCFEPORT_EPFR (0xFC094006) /********************************************************************/ -#endif /* m532xsim_h */ +#endif /* m53xxsim_h */ diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h index 192bbfeabf70..6d13cae44af5 100644 --- a/arch/m68k/include/asm/m54xxacr.h +++ b/arch/m68k/include/asm/m54xxacr.h @@ -96,8 +96,13 @@ */ #define ACR0_MODE (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \ ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP) +#if defined(CONFIG_CACHE_COPYBACK) #define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ - ACR_ENABLE+ACR_SUPER+ACR_SP) + ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_CP) +#else +#define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ + ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_WT) +#endif #define ACR2_MODE 0 #define ACR3_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ ACR_ENABLE+ACR_SUPER+ACR_SP) diff --git a/arch/m68k/include/asm/mcfgpio.h b/arch/m68k/include/asm/mcfgpio.h index fa1059f50dfc..c41ebf45f1d0 100644 --- a/arch/m68k/include/asm/mcfgpio.h +++ b/arch/m68k/include/asm/mcfgpio.h @@ -104,7 +104,7 @@ static inline void gpio_free(unsigned gpio) #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \ defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M54xx) || \ + defined(CONFIG_M53xx) || defined(CONFIG_M54xx) || \ defined(CONFIG_M5441x) /* These parts have GPIO organized by 8 bit ports */ @@ -139,7 +139,7 @@ static inline void gpio_free(unsigned gpio) #if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M5441x) + defined(CONFIG_M53xx) || defined(CONFIG_M5441x) /* * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses * read-modify-write to change an output and a GPIO module which has separate @@ -195,7 +195,7 @@ static inline u32 __mcfgpio_ppdr(unsigned gpio) return MCFSIM2_GPIO1READ; #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M5441x) + defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #if !defined(CONFIG_M5441x) if (gpio < 8) return MCFEPORT_EPPDR; @@ -237,7 +237,7 @@ static inline u32 __mcfgpio_podr(unsigned gpio) return MCFSIM2_GPIO1WRITE; #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M5441x) + defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #if !defined(CONFIG_M5441x) if (gpio < 8) return MCFEPORT_EPDR; @@ -279,7 +279,7 @@ static inline u32 __mcfgpio_pddr(unsigned gpio) return MCFSIM2_GPIO1ENABLE; #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M5441x) + defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #if !defined(CONFIG_M5441x) if (gpio < 8) return MCFEPORT_EPDDR; diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h index a04fd9b2714c..bc867de8a1e9 100644 --- a/arch/m68k/include/asm/mcfsim.h +++ b/arch/m68k/include/asm/mcfsim.h @@ -36,8 +36,8 @@ #elif defined(CONFIG_M5307) #include <asm/m5307sim.h> #include 
<asm/mcfintc.h> -#elif defined(CONFIG_M532x) -#include <asm/m532xsim.h> +#elif defined(CONFIG_M53xx) +#include <asm/m53xxsim.h> #elif defined(CONFIG_M5407) #include <asm/m5407sim.h> #include <asm/mcfintc.h> diff --git a/arch/m68k/include/asm/mcftimer.h b/arch/m68k/include/asm/mcftimer.h index da2fa43c2e45..089f0f150bbf 100644 --- a/arch/m68k/include/asm/mcftimer.h +++ b/arch/m68k/include/asm/mcftimer.h @@ -19,7 +19,7 @@ #define MCFTIMER_TRR 0x04 /* Timer Reference (r/w) */ #define MCFTIMER_TCR 0x08 /* Timer Capture reg (r/w) */ #define MCFTIMER_TCN 0x0C /* Timer Counter reg (r/w) */ -#if defined(CONFIG_M532x) || defined(CONFIG_M5441x) +#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #define MCFTIMER_TER 0x03 /* Timer Event reg (r/w) */ #else #define MCFTIMER_TER 0x11 /* Timer Event reg (r/w) */ diff --git a/arch/m68k/platform/coldfire/Makefile b/arch/m68k/platform/coldfire/Makefile index 02591a109f8c..68f0fac60099 100644 --- a/arch/m68k/platform/coldfire/Makefile +++ b/arch/m68k/platform/coldfire/Makefile @@ -25,7 +25,7 @@ obj-$(CONFIG_M527x) += m527x.o pit.o intc-2.o reset.o obj-$(CONFIG_M5272) += m5272.o intc-5272.o timers.o obj-$(CONFIG_M528x) += m528x.o pit.o intc-2.o reset.o obj-$(CONFIG_M5307) += m5307.o timers.o intc.o reset.o -obj-$(CONFIG_M532x) += m532x.o timers.o intc-simr.o reset.o +obj-$(CONFIG_M53xx) += m53xx.o timers.o intc-simr.o reset.o obj-$(CONFIG_M5407) += m5407.o timers.o intc.o reset.o obj-$(CONFIG_M54xx) += m54xx.o sltimers.o intc-2.o obj-$(CONFIG_M5441x) += m5441x.o pit.o intc-simr.o reset.o diff --git a/arch/m68k/platform/coldfire/m532x.c b/arch/m68k/platform/coldfire/m53xx.c index 7951d1d43357..5286f98fbed0 100644 --- a/arch/m68k/platform/coldfire/m532x.c +++ b/arch/m68k/platform/coldfire/m53xx.c @@ -1,7 +1,7 @@ /***************************************************************************/ /* - * linux/arch/m68knommu/platform/532x/config.c + * m53xx.c -- platform support for ColdFire 53xx based boards * * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) * Copyright (C) 2000, Lineo (www.lineo.com) @@ -118,7 +118,8 @@ static struct clk * const enable_clks[] __initconst = { &__clk_0_24, /* mcfuart.0 */ &__clk_0_25, /* mcfuart.1 */ &__clk_0_26, /* mcfuart.2 */ - + &__clk_0_28, /* mcftmr.0 */ + &__clk_0_29, /* mcftmr.1 */ &__clk_0_32, /* mcfpit.0 */ &__clk_0_33, /* mcfpit.1 */ &__clk_0_37, /* mcfeport.0 */ @@ -134,8 +135,6 @@ static struct clk * const disable_clks[] __initconst = { &__clk_0_17, /* edma */ &__clk_0_22, /* mcfi2c.0 */ &__clk_0_23, /* mcfqspi.0 */ - &__clk_0_28, /* mcftmr.0 */ - &__clk_0_29, /* mcftmr.1 */ &__clk_0_30, /* mcftmr.2 */ &__clk_0_31, /* mcftmr.3 */ &__clk_0_34, /* mcfpit.2 */ @@ -153,7 +152,7 @@ static struct clk * const disable_clks[] __initconst = { }; -static void __init m532x_clk_init(void) +static void __init m53xx_clk_init(void) { unsigned i; @@ -169,7 +168,7 @@ static void __init m532x_clk_init(void) #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) -static void __init m532x_qspi_init(void) +static void __init m53xx_qspi_init(void) { /* setup QSPS pins for QSPI with gpio CS control */ writew(0x01f0, MCFGPIO_PAR_QSPI); @@ -179,7 +178,7 @@ static void __init m532x_qspi_init(void) /***************************************************************************/ -static void __init m532x_uarts_init(void) +static void __init m53xx_uarts_init(void) { /* UART GPIO initialization */ writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART); @@ -187,7 +186,7 @@ static void __init m532x_uarts_init(void) 
/***************************************************************************/ -static void __init m532x_fec_init(void) +static void __init m53xx_fec_init(void) { u8 v; @@ -217,11 +216,11 @@ void __init config_BSP(char *commandp, int size) } #endif mach_sched_init = hw_timer_init; - m532x_clk_init(); - m532x_uarts_init(); - m532x_fec_init(); + m53xx_clk_init(); + m53xx_uarts_init(); + m53xx_fec_init(); #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) - m532x_qspi_init(); + m53xx_qspi_init(); #endif #ifdef CONFIG_BDM_DISABLE diff --git a/arch/m68k/platform/coldfire/timers.c b/arch/m68k/platform/coldfire/timers.c index 51f6d2af807f..d06068e45764 100644 --- a/arch/m68k/platform/coldfire/timers.c +++ b/arch/m68k/platform/coldfire/timers.c @@ -36,7 +36,7 @@ */ void coldfire_profile_init(void); -#if defined(CONFIG_M532x) || defined(CONFIG_M5441x) +#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #define __raw_readtrr __raw_readl #define __raw_writetrr __raw_writel #else diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig index d2b097a652d9..3649a8b150c0 100644 --- a/arch/microblaze/configs/mmu_defconfig +++ b/arch/microblaze/configs/mmu_defconfig @@ -17,7 +17,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y # CONFIG_EFI_PARTITION is not set -CONFIG_OPT_LIB_ASM=y CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 41cc841091b0..d52abb6812fa 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -153,7 +153,5 @@ extern void __init xilinx_pci_init(void); static inline void __init xilinx_pci_init(void) { return; } #endif -#include <asm-generic/pci-dma-compat.h> - #endif /* __KERNEL__ */ #endif /* __ASM_MICROBLAZE_PCI_H */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index a1ab5f0009ef..efe59d881789 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -90,17 +90,25 @@ static inline int ___range_ok(unsigned long addr, unsigned long size) #else -/* - * Address is valid if: - * - "addr", "addr + size" and "size" are all below the limit - */ -#define access_ok(type, addr, size) \ - (get_fs().seg >= (((unsigned long)(addr)) | \ - (size) | ((unsigned long)(addr) + (size)))) - -/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", - type?"WRITE":"READ",addr,size,get_fs().seg)) */ - +static inline int access_ok(int type, const void __user *addr, + unsigned long size) +{ + if (!size) + goto ok; + + if ((get_fs().seg < ((unsigned long)addr)) || + (get_fs().seg < ((unsigned long)addr + size - 1))) { + pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", + type ? "WRITE" : "READ ", (u32)addr, (u32)size, + (u32)get_fs().seg); + return 0; + } +ok: + pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", + type ? 
"WRITE" : "READ ", (u32)addr, (u32)size, + (u32)get_fs().seg); + return 1; +} #endif #ifdef CONFIG_MMU diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c index 0b2299bcb948..410398f6db55 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo.c +++ b/arch/microblaze/kernel/cpu/cpuinfo.c @@ -37,6 +37,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = { {"8.20.a", 0x15}, {"8.20.b", 0x16}, {"8.30.a", 0x17}, + {"8.40.a", 0x18}, + {"8.40.b", 0x19}, {NULL, 0}, }; @@ -57,6 +59,9 @@ const struct family_string_key family_string_lookup[] = { {"virtex6", 0xe}, /* FIXME There is no key code defined for spartan2 */ {"spartan2", 0xf0}, + {"kintex7", 0x10}, + {"artix7", 0x11}, + {"zynq7000", 0x12}, {NULL, 0}, }; diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index eef84de5e8c8..fcc797feb9db 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -112,16 +112,16 @@ no_fdt_arg: * copy command line directly to cmd_line placed in data section. */ beqid r5, skip /* Skip if NULL pointer */ - or r6, r0, r0 /* incremment */ + or r11, r0, r0 /* incremment */ ori r4, r0, cmd_line /* load address of command line */ tophys(r4,r4) /* convert to phys address */ ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ _copy_command_line: /* r2=r5+r6 - r5 contain pointer to command line */ - lbu r2, r5, r6 + lbu r2, r5, r11 beqid r2, skip /* Skip if no data */ - sb r2, r4, r6 /* addr[r4+r6]= r2*/ - addik r6, r6, 1 /* increment counting */ + sb r2, r4, r11 /* addr[r4+r6]= r2 */ + addik r11, r11, 1 /* increment counting */ bgtid r3, _copy_command_line /* loop for all entries */ addik r3, r3, -1 /* decrement loop */ addik r5, r4, 0 /* add new space for command line */ @@ -131,13 +131,13 @@ skip: #ifdef NOT_COMPILE /* save bram context */ - or r6, r0, r0 /* incremment */ + or r11, r0, r0 /* incremment */ ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */ ori r3, r0, (LMB_SIZE - 4) _copy_bram: - lw r7, r0, r6 /* r7 = r0 + r6 */ - sw r7, r4, r6 /* addr[r4 + r6] = r7*/ - addik r6, r6, 4 /* increment counting */ + lw r7, r0, r11 /* r7 = r0 + r6 */ + sw r7, r4, r11 /* addr[r4 + r6] = r7 */ + addik r11, r11, 4 /* increment counting */ bgtid r3, _copy_bram /* loop for all entries */ addik r3, r3, -4 /* descrement loop */ #endif @@ -303,8 +303,8 @@ jump_over2: * the exception vectors, using a 4k real==virtual mapping. */ /* Use temporary TLB_ID for LMB - clear this temporary mapping later */ - ori r6, r0, MICROBLAZE_LMB_TLB_ID - mts rtlbx,r6 + ori r11, r0, MICROBLAZE_LMB_TLB_ID + mts rtlbx,r11 ori r4,r0,(TLB_WR | TLB_EX) ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index 8778adf72bd3..d85fa3a2b0f8 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c @@ -172,4 +172,6 @@ void __init init_IRQ(void) * and commits this patch. 
~~gcl */ root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops, (void *)intr_mask); + + irq_set_default_host(root_domain); } diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index a55893807274..7d1a9c8b1f3d 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -160,3 +160,8 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs) return 0; /* MicroBlaze has no separate FPU registers */ } #endif /* CONFIG_MMU */ + +void arch_cpu_idle(void) +{ + local_irq_enable(); +} diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 4ec137d13ad7..b38ae3acfeb4 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -404,10 +404,11 @@ asmlinkage void __init mmu_init(void) #if defined(CONFIG_BLK_DEV_INITRD) /* Remove the init RAM disk from the available memory. */ -/* if (initrd_start) { - mem_pieces_remove(&phys_avail, __pa(initrd_start), - initrd_end - initrd_start, 1); - }*/ + if (initrd_start) { + unsigned long size; + size = initrd_end - initrd_start; + memblock_reserve(virt_to_phys(initrd_start), size); + } #endif /* CONFIG_BLK_DEV_INITRD */ /* Initialize the MMU hardware */ diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 9ea521e4959e..bdb8ea100e73 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -30,7 +30,6 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_pci.h> -#include <linux/pci.h> #include <linux/export.h> #include <asm/processor.h> diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild index 7dd65cfae837..d2cfe45f332b 100644 --- a/arch/mips/Kbuild +++ b/arch/mips/Kbuild @@ -17,3 +17,7 @@ obj- := $(platform-) obj-y += kernel/ obj-y += mm/ obj-y += math-emu/ + +ifdef CONFIG_KVM +obj-y += kvm/ +endif diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a90cfc702bb1..7a58ab933b20 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -304,7 +304,6 @@ config MIPS_MALTA select HW_HAS_PCI select I8253 select I8259 - select MIPS_BOARDS_GEN select MIPS_BONITO64 select MIPS_CPU_SCACHE select PCI_GT64XXX_PCI0 @@ -335,12 +334,12 @@ config MIPS_SEAD3 select BOOT_RAW select CEVT_R4K select CSRC_R4K + select CSRC_GIC select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select DMA_NONCOHERENT select IRQ_CPU select IRQ_GIC - select MIPS_BOARDS_GEN select MIPS_CPU_SCACHE select MIPS_MSC select SYS_HAS_CPU_MIPS32_R1 @@ -352,6 +351,7 @@ config MIPS_SEAD3 select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_SMARTMIPS + select SYS_SUPPORTS_MICROMIPS select USB_ARCH_HAS_EHCI select USB_EHCI_BIG_ENDIAN_DESC select USB_EHCI_BIG_ENDIAN_MMIO @@ -910,6 +910,9 @@ config CEVT_GT641XX config CEVT_R4K bool +config CEVT_GIC + bool + config CEVT_SB1250 bool @@ -982,9 +985,6 @@ config MIPS_MSC config MIPS_NILE4 bool -config MIPS_DISABLE_OBSOLETE_IDE - bool - config SYNC_R4K bool @@ -1075,9 +1075,6 @@ config IRQ_GT641XX config IRQ_GIC bool -config MIPS_BOARDS_GEN - bool - config PCI_GT64XXX_PCI0 bool @@ -1147,7 +1144,7 @@ config BOOT_ELF32 config MIPS_L1_CACHE_SHIFT int - default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL + default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X default "6" if MIPS_CPU_SCACHE default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON default "5" @@ -1236,6 +1233,7 @@ config CPU_MIPS32_R2 select CPU_HAS_PREFETCH select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + 
select HAVE_KVM help Choose this option to build a kernel for release 2 or later of the MIPS32 architecture. Most modern embedded systems with a 32-bit @@ -1736,6 +1734,20 @@ config 64BIT endchoice +config KVM_GUEST + bool "KVM Guest Kernel" + help + Select this option if building a guest kernel for KVM (Trap & Emulate) mode + +config KVM_HOST_FREQ + int "KVM Host Processor Frequency (MHz)" + depends on KVM_GUEST + default 500 + help + Select this option if building a guest kernel for KVM to skip + RTC emulation when determining guest CPU Frequency. Instead, the guest + processor frequency is automatically derived from the host frequency. + choice prompt "Kernel page size" default PAGE_SIZE_4KB @@ -1811,6 +1823,15 @@ config FORCE_MAX_ZONEORDER The page size is not necessarily 4KB. Keep this in mind when choosing a value for this option. +config CEVT_GIC + bool "Use GIC global counter for clock events" + depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC) + help + Use the GIC global counter for the clock events. The R4K clock + event driver is always present, so if the platform ends up not + detecting a GIC, it will fall back to the R4K timer for the + generation of clock events. + config BOARD_SCACHE bool @@ -2016,6 +2037,7 @@ config SB1_PASS_2_1_WORKAROUNDS depends on CPU_SB1 && CPU_SB1_PASS_2 default y + config 64BIT_PHYS_ADDR bool @@ -2034,6 +2056,13 @@ config CPU_HAS_SMARTMIPS you don't know you probably don't have SmartMIPS and should say N here. +config CPU_MICROMIPS + depends on SYS_SUPPORTS_MICROMIPS + bool "Build kernel using microMIPS ISA" + help + When this option is enabled the kernel will be built using the + microMIPS ISA + config CPU_HAS_WB bool @@ -2096,6 +2125,9 @@ config SYS_SUPPORTS_HIGHMEM config SYS_SUPPORTS_SMARTMIPS bool +config SYS_SUPPORTS_MICROMIPS + bool + config ARCH_FLATMEM_ENABLE def_bool y depends on !NUMA && !CPU_LOONGSON2 @@ -2556,3 +2588,5 @@ source "security/Kconfig" source "crypto/Kconfig" source "lib/Kconfig" + +source "arch/mips/kvm/Kconfig" diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 6f7978f95090..dd58a04ef4bc 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -114,6 +114,7 @@ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*e cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) +cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals) cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ -fno-omit-frame-pointer diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig index c8862bdc2ff2..7032ac7ecd1b 100644 --- a/arch/mips/alchemy/Kconfig +++ b/arch/mips/alchemy/Kconfig @@ -31,7 +31,6 @@ config MIPS_DB1000 select ALCHEMY_GPIOINT_AU1000 select DMA_NONCOHERENT select HW_HAS_PCI - select MIPS_DISABLE_OBSOLETE_IDE select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK @@ -41,7 +40,6 @@ config MIPS_DB1235 select ARCH_REQUIRE_GPIOLIB select HW_HAS_PCI select DMA_COHERENT - select MIPS_DISABLE_OBSOLETE_IDE select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK @@ -57,7 +55,6 @@ config MIPS_GPR select ALCHEMY_GPIOINT_AU1000 select HW_HAS_PCI select DMA_NONCOHERENT - select MIPS_DISABLE_OBSOLETE_IDE select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform index fa1bdd1aea15..b3afcdd8d77a 100644 --- 
a/arch/mips/alchemy/Platform +++ b/arch/mips/alchemy/Platform @@ -5,32 +5,14 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/ # -# AMD Alchemy Pb1100 eval board -# -platform-$(CONFIG_MIPS_PB1100) += alchemy/devboards/ -load-$(CONFIG_MIPS_PB1100) += 0xffffffff80100000 - -# -# AMD Alchemy Pb1500 eval board -# -platform-$(CONFIG_MIPS_PB1500) += alchemy/devboards/ -load-$(CONFIG_MIPS_PB1500) += 0xffffffff80100000 - -# -# AMD Alchemy Pb1550 eval board -# -platform-$(CONFIG_MIPS_PB1550) += alchemy/devboards/ -load-$(CONFIG_MIPS_PB1550) += 0xffffffff80100000 - -# -# AMD Alchemy Db1000/Db1500/Db1100 eval boards +# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards # platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/ cflags-$(CONFIG_MIPS_DB1000) += -I$(srctree)/arch/mips/include/asm/mach-db1x00 load-$(CONFIG_MIPS_DB1000) += 0xffffffff80100000 # -# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards +# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards # platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/ cflags-$(CONFIG_MIPS_DB1235) += -I$(srctree)/arch/mips/include/asm/mach-db1x00 diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c index 28abfeef09d6..92dfa481205b 100644 --- a/arch/mips/ar7/memory.c +++ b/arch/mips/ar7/memory.c @@ -30,7 +30,6 @@ #include <asm/sections.h> #include <asm/mach-ar7/ar7.h> -#include <asm/mips-boards/prom.h> static int __init memsize(void) { diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index d5b3c9057018..a0233a2c1988 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -51,20 +51,6 @@ static void ath79_halt(void) cpu_wait(); } -static void __init ath79_detect_mem_size(void) -{ - unsigned long size; - - for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX; - size <<= 1) { - if (!memcmp(ath79_detect_mem_size, - ath79_detect_mem_size + size, 1024)) - break; - } - - add_memory_region(0, size, BOOT_MEM_RAM); -} - static void __init ath79_detect_sys_type(void) { char *chip = "????"; @@ -212,7 +198,7 @@ void __init plat_mem_setup(void) AR71XX_DDR_CTRL_SIZE); ath79_detect_sys_type(); - ath79_detect_mem_size(); + detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); ath79_clocks_init(); _machine_restart = ath79_restart; diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig index d03e8799d1cf..5639662fd503 100644 --- a/arch/mips/bcm63xx/Kconfig +++ b/arch/mips/bcm63xx/Kconfig @@ -25,6 +25,10 @@ config BCM63XX_CPU_6358 bool "support 6358 CPU" select HW_HAS_PCI +config BCM63XX_CPU_6362 + bool "support 6362 CPU" + select HW_HAS_PCI + config BCM63XX_CPU_6368 bool "support 6368 CPU" select HW_HAS_PCI diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index 9aa7d44898ed..a9505c4867e8 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -726,11 +726,11 @@ void __init board_prom_init(void) u32 val; /* read base address of boot chip select (0) - * 6328 does not have MPI but boots from a fixed address + * 6328/6362 do not have MPI but boot from a fixed address */ - if (BCMCPU_IS_6328()) + if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) { val = 0x18000000; - else { + } else { val = bcm_mpi_readl(MPI_CSBASE_REG(0)); val &= MPI_CSBASE_BASE_MASK; } diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index b9e948d59430..c726a97fc798 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -15,7 +15,13 @@ #include <bcm63xx_io.h> #include <bcm63xx_regs.h> #include <bcm63xx_reset.h> 
-#include <bcm63xx_clk.h> + +struct clk { + void (*set)(struct clk *, int); + unsigned int rate; + unsigned int usage; + int id; +}; static DEFINE_MUTEX(clocks_mutex); @@ -119,11 +125,18 @@ static struct clk clk_ephy = { */ static void enetsw_set(struct clk *clk, int enable) { - if (!BCMCPU_IS_6368()) + if (BCMCPU_IS_6328()) + bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable); + else if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable); + else if (BCMCPU_IS_6368()) + bcm_hwclock_set(CKCTL_6368_ROBOSW_EN | + CKCTL_6368_SWPKT_USB_EN | + CKCTL_6368_SWPKT_SAR_EN, + enable); + else return; - bcm_hwclock_set(CKCTL_6368_ROBOSW_EN | - CKCTL_6368_SWPKT_USB_EN | - CKCTL_6368_SWPKT_SAR_EN, enable); + if (enable) { /* reset switch core afer clock change */ bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1); @@ -160,6 +173,8 @@ static void usbh_set(struct clk *clk, int enable) bcm_hwclock_set(CKCTL_6328_USBH_EN, enable); else if (BCMCPU_IS_6348()) bcm_hwclock_set(CKCTL_6348_USBH_EN, enable); + else if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_USBH_EN, enable); else if (BCMCPU_IS_6368()) bcm_hwclock_set(CKCTL_6368_USBH_EN, enable); } @@ -175,6 +190,8 @@ static void usbd_set(struct clk *clk, int enable) { if (BCMCPU_IS_6328()) bcm_hwclock_set(CKCTL_6328_USBD_EN, enable); + else if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_USBD_EN, enable); else if (BCMCPU_IS_6368()) bcm_hwclock_set(CKCTL_6368_USBD_EN, enable); } @@ -196,6 +213,8 @@ static void spi_set(struct clk *clk, int enable) mask = CKCTL_6348_SPI_EN; else if (BCMCPU_IS_6358()) mask = CKCTL_6358_SPI_EN; + else if (BCMCPU_IS_6362()) + mask = CKCTL_6362_SPI_EN; else /* BCMCPU_IS_6368 */ mask = CKCTL_6368_SPI_EN; @@ -236,7 +255,10 @@ static struct clk clk_xtm = { */ static void ipsec_set(struct clk *clk, int enable) { - bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable); + if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable); + else if (BCMCPU_IS_6368()) + bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable); } static struct clk clk_ipsec = { @@ -249,7 +271,10 @@ static struct clk clk_ipsec = { static void pcie_set(struct clk *clk, int enable) { - bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable); + if (BCMCPU_IS_6328()) + bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable); + else if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable); } static struct clk clk_pcie = { @@ -315,9 +340,9 @@ struct clk *clk_get(struct device *dev, const char *id) return &clk_periph; if (BCMCPU_IS_6358() && !strcmp(id, "pcm")) return &clk_pcm; - if (BCMCPU_IS_6368() && !strcmp(id, "ipsec")) + if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec")) return &clk_ipsec; - if (BCMCPU_IS_6328() && !strcmp(id, "pcie")) + if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie")) return &clk_pcie; return ERR_PTR(-ENOENT); } diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c index a7afb289b15a..79fe32df5e96 100644 --- a/arch/mips/bcm63xx/cpu.c +++ b/arch/mips/bcm63xx/cpu.c @@ -25,7 +25,7 @@ const int *bcm63xx_irqs; EXPORT_SYMBOL(bcm63xx_irqs); static u16 bcm63xx_cpu_id; -static u16 bcm63xx_cpu_rev; +static u8 bcm63xx_cpu_rev; static unsigned int bcm63xx_cpu_freq; static unsigned int bcm63xx_memory_size; @@ -71,6 +71,15 @@ static const int bcm6358_irqs[] = { }; +static const unsigned long bcm6362_regs_base[] = { + __GEN_CPU_REGS_TABLE(6362) +}; + +static const int bcm6362_irqs[] = { + __GEN_CPU_IRQ_TABLE(6362) + +}; + static const unsigned long bcm6368_regs_base[] = { __GEN_CPU_REGS_TABLE(6368) }; @@ -87,7 +96,7 @@ u16 
__bcm63xx_get_cpu_id(void) EXPORT_SYMBOL(__bcm63xx_get_cpu_id); -u16 bcm63xx_get_cpu_rev(void) +u8 bcm63xx_get_cpu_rev(void) { return bcm63xx_cpu_rev; } @@ -169,6 +178,42 @@ static unsigned int detect_cpu_clock(void) return (16 * 1000000 * n1 * n2) / m1; } + case BCM6362_CPU_ID: + { + unsigned int tmp, mips_pll_fcvo; + + tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG); + mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK) + >> STRAPBUS_6362_FCVO_SHIFT; + switch (mips_pll_fcvo) { + case 0x03: + case 0x0b: + case 0x13: + case 0x1b: + return 240000000; + case 0x04: + case 0x0c: + case 0x14: + case 0x1c: + return 160000000; + case 0x05: + case 0x0e: + case 0x16: + case 0x1e: + case 0x1f: + return 400000000; + case 0x06: + return 440000000; + case 0x07: + case 0x17: + return 384000000; + case 0x15: + case 0x1d: + return 200000000; + default: + return 320000000; + } + } case BCM6368_CPU_ID: { unsigned int tmp, p1, p2, ndiv, m1; @@ -205,7 +250,7 @@ static unsigned int detect_memory_size(void) unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0; u32 val; - if (BCMCPU_IS_6328()) + if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) return bcm_ddr_readl(DDR_CSEND_REG) << 24; if (BCMCPU_IS_6345()) { @@ -240,53 +285,27 @@ static unsigned int detect_memory_size(void) void __init bcm63xx_cpu_init(void) { - unsigned int tmp, expected_cpu_id; + unsigned int tmp; struct cpuinfo_mips *c = ¤t_cpu_data; unsigned int cpu = smp_processor_id(); + u32 chipid_reg; /* soc registers location depends on cpu type */ - expected_cpu_id = 0; + chipid_reg = 0; switch (c->cputype) { case CPU_BMIPS3300: - if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) { - expected_cpu_id = BCM6348_CPU_ID; - bcm63xx_regs_base = bcm6348_regs_base; - bcm63xx_irqs = bcm6348_irqs; - } else { + if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT) __cpu_name[cpu] = "Broadcom BCM6338"; - expected_cpu_id = BCM6338_CPU_ID; - bcm63xx_regs_base = bcm6338_regs_base; - bcm63xx_irqs = bcm6338_irqs; - } - break; + /* fall-through */ case CPU_BMIPS32: - expected_cpu_id = BCM6345_CPU_ID; - bcm63xx_regs_base = bcm6345_regs_base; - bcm63xx_irqs = bcm6345_irqs; + chipid_reg = BCM_6345_PERF_BASE; break; case CPU_BMIPS4350: - if ((read_c0_prid() & 0xf0) == 0x10) { - expected_cpu_id = BCM6358_CPU_ID; - bcm63xx_regs_base = bcm6358_regs_base; - bcm63xx_irqs = bcm6358_irqs; - } else { - /* all newer chips have the same chip id location */ - u16 chip_id = bcm_readw(BCM_6368_PERF_BASE); - - switch (chip_id) { - case BCM6328_CPU_ID: - expected_cpu_id = BCM6328_CPU_ID; - bcm63xx_regs_base = bcm6328_regs_base; - bcm63xx_irqs = bcm6328_irqs; - break; - case BCM6368_CPU_ID: - expected_cpu_id = BCM6368_CPU_ID; - bcm63xx_regs_base = bcm6368_regs_base; - bcm63xx_irqs = bcm6368_irqs; - break; - } - } + if ((read_c0_prid() & 0xf0) == 0x10) + chipid_reg = BCM_6345_PERF_BASE; + else + chipid_reg = BCM_6368_PERF_BASE; break; } @@ -294,20 +313,47 @@ void __init bcm63xx_cpu_init(void) * really early to panic, but delaying panic would not help since we * will never get any working console */ - if (!expected_cpu_id) + if (!chipid_reg) panic("unsupported Broadcom CPU"); - /* - * bcm63xx_regs_base is set, we can access soc registers - */ - - /* double check CPU type */ - tmp = bcm_perf_readl(PERF_REV_REG); + /* read out CPU type */ + tmp = bcm_readl(chipid_reg); bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT; bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT; - if (bcm63xx_cpu_id != expected_cpu_id) - panic("bcm63xx CPU id mismatch"); + switch (bcm63xx_cpu_id) 
{ + case BCM6328_CPU_ID: + bcm63xx_regs_base = bcm6328_regs_base; + bcm63xx_irqs = bcm6328_irqs; + break; + case BCM6338_CPU_ID: + bcm63xx_regs_base = bcm6338_regs_base; + bcm63xx_irqs = bcm6338_irqs; + break; + case BCM6345_CPU_ID: + bcm63xx_regs_base = bcm6345_regs_base; + bcm63xx_irqs = bcm6345_irqs; + break; + case BCM6348_CPU_ID: + bcm63xx_regs_base = bcm6348_regs_base; + bcm63xx_irqs = bcm6348_irqs; + break; + case BCM6358_CPU_ID: + bcm63xx_regs_base = bcm6358_regs_base; + bcm63xx_irqs = bcm6358_irqs; + break; + case BCM6362_CPU_ID: + bcm63xx_regs_base = bcm6362_regs_base; + bcm63xx_irqs = bcm6362_irqs; + break; + case BCM6368_CPU_ID: + bcm63xx_regs_base = bcm6368_regs_base; + bcm63xx_irqs = bcm6368_irqs; + break; + default: + panic("unsupported broadcom CPU %x", bcm63xx_cpu_id); + break; + } bcm63xx_cpu_freq = detect_cpu_clock(); bcm63xx_memory_size = detect_memory_size(); diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c index 58371c7deac2..588d1ec622e4 100644 --- a/arch/mips/bcm63xx/dev-flash.c +++ b/arch/mips/bcm63xx/dev-flash.c @@ -77,6 +77,12 @@ static int __init bcm63xx_detect_flash_type(void) return BCM63XX_FLASH_TYPE_PARALLEL; else return BCM63XX_FLASH_TYPE_SERIAL; + case BCM6362_CPU_ID: + val = bcm_misc_readl(MISC_STRAPBUS_6362_REG); + if (val & STRAPBUS_6362_BOOT_SEL_SERIAL) + return BCM63XX_FLASH_TYPE_SERIAL; + else + return BCM63XX_FLASH_TYPE_NAND; case BCM6368_CPU_ID: val = bcm_gpio_readl(GPIO_STRAPBUS_REG); switch (val & STRAPBUS_6368_BOOT_SEL_MASK) { diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c index e97fd60e92ef..3065bb61820d 100644 --- a/arch/mips/bcm63xx/dev-spi.c +++ b/arch/mips/bcm63xx/dev-spi.c @@ -22,10 +22,6 @@ /* * register offsets */ -static const unsigned long bcm6338_regs_spi[] = { - __GEN_SPI_REGS_TABLE(6338) -}; - static const unsigned long bcm6348_regs_spi[] = { __GEN_SPI_REGS_TABLE(6348) }; @@ -34,23 +30,15 @@ static const unsigned long bcm6358_regs_spi[] = { __GEN_SPI_REGS_TABLE(6358) }; -static const unsigned long bcm6368_regs_spi[] = { - __GEN_SPI_REGS_TABLE(6368) -}; - const unsigned long *bcm63xx_regs_spi; EXPORT_SYMBOL(bcm63xx_regs_spi); static __init void bcm63xx_spi_regs_init(void) { - if (BCMCPU_IS_6338()) - bcm63xx_regs_spi = bcm6338_regs_spi; - if (BCMCPU_IS_6348()) + if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) bcm63xx_regs_spi = bcm6348_regs_spi; - if (BCMCPU_IS_6358()) + if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) bcm63xx_regs_spi = bcm6358_regs_spi; - if (BCMCPU_IS_6368()) - bcm63xx_regs_spi = bcm6368_regs_spi; } #else static __init void bcm63xx_spi_regs_init(void) { } @@ -93,13 +81,13 @@ int __init bcm63xx_spi_register(void) spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI); if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { - spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1; - spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE; - spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT; - spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH; + spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1; + spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE; + spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT; + spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH; } - if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { + if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) { spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT; diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index 
da24c2bd9b7c..c0ab3887f42e 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c @@ -82,6 +82,17 @@ static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused; #define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6358 #define ext_irq_cfg_reg2 0 #endif +#ifdef CONFIG_BCM63XX_CPU_6362 +#define irq_stat_reg PERF_IRQSTAT_6362_REG +#define irq_mask_reg PERF_IRQMASK_6362_REG +#define irq_bits 64 +#define is_ext_irq_cascaded 1 +#define ext_irq_start (BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE) +#define ext_irq_end (BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE) +#define ext_irq_count 4 +#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6362 +#define ext_irq_cfg_reg2 0 +#endif #ifdef CONFIG_BCM63XX_CPU_6368 #define irq_stat_reg PERF_IRQSTAT_6368_REG #define irq_mask_reg PERF_IRQMASK_6368_REG @@ -170,6 +181,16 @@ static void bcm63xx_init_irq(void) ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE; ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358; break; + case BCM6362_CPU_ID: + irq_stat_addr += PERF_IRQSTAT_6362_REG; + irq_mask_addr += PERF_IRQMASK_6362_REG; + irq_bits = 64; + ext_irq_count = 4; + is_ext_irq_cascaded = 1; + ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE; + ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362; + break; case BCM6368_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6368_REG; irq_mask_addr += PERF_IRQMASK_6368_REG; @@ -458,6 +479,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d, case BCM6338_CPU_ID: case BCM6345_CPU_ID: case BCM6358_CPU_ID: + case BCM6362_CPU_ID: case BCM6368_CPU_ID: if (levelsense) reg |= EXTIRQ_CFG_LEVELSENSE(irq); diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c index 10eaff458071..fd698087fbfd 100644 --- a/arch/mips/bcm63xx/prom.c +++ b/arch/mips/bcm63xx/prom.c @@ -36,6 +36,8 @@ void __init prom_init(void) mask = CKCTL_6348_ALL_SAFE_EN; else if (BCMCPU_IS_6358()) mask = CKCTL_6358_ALL_SAFE_EN; + else if (BCMCPU_IS_6362()) + mask = CKCTL_6362_ALL_SAFE_EN; else if (BCMCPU_IS_6368()) mask = CKCTL_6368_ALL_SAFE_EN; else diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c index 68a31bb90cbf..317931c6cf58 100644 --- a/arch/mips/bcm63xx/reset.c +++ b/arch/mips/bcm63xx/reset.c @@ -85,6 +85,20 @@ #define BCM6358_RESET_PCIE 0 #define BCM6358_RESET_PCIE_EXT 0 +#define BCM6362_RESET_SPI SOFTRESET_6362_SPI_MASK +#define BCM6362_RESET_ENET 0 +#define BCM6362_RESET_USBH SOFTRESET_6362_USBH_MASK +#define BCM6362_RESET_USBD SOFTRESET_6362_USBS_MASK +#define BCM6362_RESET_DSL 0 +#define BCM6362_RESET_SAR SOFTRESET_6362_SAR_MASK +#define BCM6362_RESET_EPHY SOFTRESET_6362_EPHY_MASK +#define BCM6362_RESET_ENETSW SOFTRESET_6362_ENETSW_MASK +#define BCM6362_RESET_PCM SOFTRESET_6362_PCM_MASK +#define BCM6362_RESET_MPI 0 +#define BCM6362_RESET_PCIE (SOFTRESET_6362_PCIE_MASK | \ + SOFTRESET_6362_PCIE_CORE_MASK) +#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK + #define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK #define BCM6368_RESET_ENET 0 #define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK @@ -119,6 +133,10 @@ static const u32 bcm6358_reset_bits[] = { __GEN_RESET_BITS_TABLE(6358) }; +static const u32 bcm6362_reset_bits[] = { + __GEN_RESET_BITS_TABLE(6362) +}; + static const u32 bcm6368_reset_bits[] = { __GEN_RESET_BITS_TABLE(6368) }; @@ -140,6 +158,9 @@ static int __init bcm63xx_reset_bits_init(void) } else if (BCMCPU_IS_6358()) { reset_reg = PERF_SOFTRESET_6358_REG; bcm63xx_reset_bits = bcm6358_reset_bits; + } else if (BCMCPU_IS_6362()) { + reset_reg = PERF_SOFTRESET_6362_REG; + 
bcm63xx_reset_bits = bcm6362_reset_bits; } else if (BCMCPU_IS_6368()) { reset_reg = PERF_SOFTRESET_6368_REG; bcm63xx_reset_bits = bcm6368_reset_bits; @@ -182,6 +203,13 @@ static const u32 bcm63xx_reset_bits[] = { #define reset_reg PERF_SOFTRESET_6358_REG #endif +#ifdef CONFIG_BCM63XX_CPU_6362 +static const u32 bcm63xx_reset_bits[] = { + __GEN_RESET_BITS_TABLE(6362) +}; +#define reset_reg PERF_SOFTRESET_6362_REG +#endif + #ifdef CONFIG_BCM63XX_CPU_6368 static const u32 bcm63xx_reset_bits[] = { __GEN_RESET_BITS_TABLE(6368) diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 35e18e98beb9..24a24445db64 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c @@ -83,6 +83,9 @@ void bcm63xx_machine_reboot(void) case BCM6358_CPU_ID: perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358; break; + case BCM6362_CPU_ID: + perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362; + break; } for (i = 0; i < 2; i++) { @@ -126,7 +129,7 @@ static void __bcm63xx_machine_reboot(char *p) const char *get_system_type(void) { static char buf[128]; - snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)", + snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)", board_get_name(), bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev()); return buf; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 156aa6143e11..a22f06a6f7ca 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -1032,9 +1032,8 @@ static int octeon_irq_gpio_map_common(struct irq_domain *d, if (!octeon_irq_virq_in_range(virq)) return -EINVAL; - hw += gpiod->base_hwirq; - line = hw >> 6; - bit = hw & 63; + line = (hw + gpiod->base_hwirq) >> 6; + bit = (hw + gpiod->base_hwirq) & 63; if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index cd732e5b4fd5..ce1d3eeeb737 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -2,30 +2,21 @@ CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_MIPS_MT_SMP=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_100=y -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=15 -CONFIG_SYSFS_DEPRECATED_V2=y -CONFIG_RELAY=y CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_PID_NS=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_RELAY=y CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_PCI=y -CONFIG_PM=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=m @@ -41,8 +32,6 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y @@ -65,7 +54,6 @@ CONFIG_IPV6_MROUTE=y CONFIG_IPV6_PIMSM_V2=y CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y @@ -136,23 +124,15 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_FTP=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m 
-CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -162,8 +142,6 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_IP6_NF_QUEUE=m -CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -173,7 +151,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m @@ -247,12 +224,10 @@ CONFIG_MAC80211=m CONFIG_MAC80211_RC_PID=y CONFIG_MAC80211_RC_DEFAULT_PID=y CONFIG_MAC80211_MESH=y -CONFIG_MAC80211_LEDS=y CONFIG_RFKILL=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_CONNECTOR=m CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_OOPS=m @@ -271,7 +246,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m -# CONFIG_MISC_DEVICES is not set CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_IDE_GENERIC=y @@ -317,13 +291,19 @@ CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_NETDEVICES=y -CONFIG_IFB=m -CONFIG_DUMMY=m CONFIG_BONDING=m -CONFIG_MACVLAN=m +CONFIG_DUMMY=m CONFIG_EQUALIZER=m +CONFIG_IFB=m +CONFIG_MACVLAN=m CONFIG_TUN=m CONFIG_VETH=m +# CONFIG_NET_VENDOR_3COM is not set +CONFIG_PCNET32=y +CONFIG_CHELSIO_T3=m +CONFIG_AX88796=m +CONFIG_NETXEN_NIC=m +CONFIG_TC35815=m CONFIG_MARVELL_PHY=m CONFIG_DAVICOM_PHY=m CONFIG_QSEMI_PHY=m @@ -334,14 +314,6 @@ CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m CONFIG_ICPLUS_PHY=m CONFIG_REALTEK_PHY=m -CONFIG_MDIO_BITBANG=m -CONFIG_NET_ETHERNET=y -CONFIG_AX88796=m -CONFIG_NET_PCI=y -CONFIG_PCNET32=y -CONFIG_TC35815=m -CONFIG_CHELSIO_T3=m -CONFIG_NETXEN_NIC=m CONFIG_ATMEL=m CONFIG_PCI_ATMEL=m CONFIG_PRISM54=m @@ -352,15 +324,7 @@ CONFIG_HOSTAP_PLX=m CONFIG_HOSTAP_PCI=m CONFIG_IPW2100=m CONFIG_IPW2100_MONITOR=y -CONFIG_IPW2200=m -CONFIG_IPW2200_MONITOR=y -CONFIG_IPW2200_PROMISCUOUS=y -CONFIG_IPW2200_QOS=y CONFIG_LIBERTAS=m -CONFIG_HERMES=m -CONFIG_PLX_HERMES=m -CONFIG_TMD_HERMES=m -CONFIG_NORTEL_HERMES=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO_I8042 is not set @@ -373,12 +337,6 @@ CONFIG_FB_CIRRUS=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_HID=m -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_IDE_DISK=y -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_UIO=m @@ -398,7 +356,6 @@ CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_QUOTA=y CONFIG_QFMT_V2=y -CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_ISO9660_FS=m CONFIG_JOLIET=y @@ -425,7 +382,6 @@ CONFIG_ROMFS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V3=y @@ -466,7 +422,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_LRW=m diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig new file mode 100644 index 000000000000..341bb47204d6 --- /dev/null +++ b/arch/mips/configs/malta_kvm_defconfig @@ -0,0 +1,456 @@ 
+CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_MIPS_MT_SMP=y +CONFIG_HZ_100=y +CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_NAMESPACES=y +CONFIG_RELAY=y +CONFIG_EXPERT=y +CONFIG_PERF_EVENTS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PCI=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m 
+CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_ULOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_SCTP=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_PHONET=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_CLS_IND=y +CONFIG_CFG80211=m +CONFIG_MAC80211=m +CONFIG_MAC80211_RC_PID=y +CONFIG_MAC80211_RC_DEFAULT_PID=y +CONFIG_MAC80211_MESH=y +CONFIG_RFKILL=m +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_CONNECTOR=m +CONFIG_MTD=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_OOPS=m +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_BLK_DEV_FD=m +CONFIG_BLK_DEV_UMEM=m +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_ATA_OVER_ETH=m +CONFIG_IDE=y +CONFIG_BLK_DEV_IDECD=y +CONFIG_IDE_GENERIC=y +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_IT8213=m +CONFIG_BLK_DEV_TC86C001=m +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=m +CONFIG_SCSI_TGT=m +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 +# 
CONFIG_AIC7XXX_DEBUG_ENABLE is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_MIRROR=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_EQUALIZER=m +CONFIG_IFB=m +CONFIG_MACVLAN=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_PCNET32=y +CONFIG_CHELSIO_T3=m +CONFIG_AX88796=m +CONFIG_NETXEN_NIC=m +CONFIG_TC35815=m +CONFIG_MARVELL_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_LXT_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PRISM54=m +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +CONFIG_LIBERTAS=m +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_I8042 is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FB_CIRRUS=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_HID=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_REISERFS_FS=m +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_AFFS_FS=m +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_RUBIN=y +CONFIG_CRAMFS=m +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m 
+CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRC16=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_MIPS_DYN_TRANS=y +CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y +CONFIG_VHOST_NET=m diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig new file mode 100644 index 000000000000..2b8558b71080 --- /dev/null +++ b/arch/mips/configs/malta_kvm_guest_defconfig @@ -0,0 +1,453 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_KVM_GUEST=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_HZ_100=y +CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_NAMESPACES=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PCI=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m 
+CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_ULOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_SCTP=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_PHONET=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_CLS_IND=y +CONFIG_CFG80211=m +CONFIG_MAC80211=m +CONFIG_MAC80211_RC_PID=y +CONFIG_MAC80211_RC_DEFAULT_PID=y +CONFIG_MAC80211_MESH=y +CONFIG_RFKILL=m +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_CONNECTOR=m +CONFIG_MTD=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_OOPS=m +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_BLK_DEV_FD=m +CONFIG_BLK_DEV_UMEM=m +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_CRYPTOLOOP=m 
+CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_ATA_OVER_ETH=m +CONFIG_VIRTIO_BLK=y +CONFIG_IDE=y +CONFIG_BLK_DEV_IDECD=y +CONFIG_IDE_GENERIC=y +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_IT8213=m +CONFIG_BLK_DEV_TC86C001=m +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=m +CONFIG_SCSI_TGT=m +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_MIRROR=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_EQUALIZER=m +CONFIG_IFB=m +CONFIG_MACVLAN=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=y +CONFIG_PCNET32=y +CONFIG_CHELSIO_T3=m +CONFIG_AX88796=m +CONFIG_NETXEN_NIC=m +CONFIG_TC35815=m +CONFIG_MARVELL_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_LXT_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PRISM54=m +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +CONFIG_LIBERTAS=m +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_I8042 is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FB_CIRRUS=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_HID=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MMIO=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_REISERFS_FS=m +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_AFFS_FS=m +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_RUBIN=y +CONFIG_CRAMFS=m +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m 
+CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRC16=m diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig new file mode 100644 index 000000000000..93057a760dfa --- /dev/null +++ b/arch/mips/configs/maltaaprp_defconfig @@ -0,0 +1,195 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_MIPS_VPE_LOADER=y +CONFIG_MIPS_VPE_APSP_API=y +CONFIG_HZ_100=y +CONFIG_LOCALVERSION="aprp" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC 
is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig new file mode 100644 index 000000000000..4e54b75d89be --- /dev/null +++ b/arch/mips/configs/maltasmtc_defconfig @@ -0,0 +1,196 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_MIPS_MT_SMTC=y +# CONFIG_MIPS_MT_FPAFF is not set +CONFIG_NR_CPUS=9 +CONFIG_HZ_48=y +CONFIG_LOCALVERSION="smtc" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# 
CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y 
+CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig new file mode 100644 index 000000000000..8a666021b870 --- /dev/null +++ b/arch/mips/configs/maltasmvp_defconfig @@ -0,0 +1,199 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_MIPS_MT_SMP=y +CONFIG_SCHED_SMT=y +CONFIG_MIPS_CMP=y +CONFIG_NR_CPUS=8 +CONFIG_HZ_100=y +CONFIG_LOCALVERSION="cmp" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# 
CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=4 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig new file mode 100644 index 000000000000..9868fc9c1133 --- /dev/null +++ b/arch/mips/configs/maltaup_defconfig @@ -0,0 +1,194 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_HZ_100=y +CONFIG_LOCALVERSION="up" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# 
CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y 
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/sead3_defconfig b/arch/mips/configs/sead3_defconfig index e3eec68d9132..0abe681c11a0 100644 --- a/arch/mips/configs/sead3_defconfig +++ b/arch/mips/configs/sead3_defconfig @@ -2,7 +2,6 @@ CONFIG_MIPS_SEAD3=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_HZ_100=y -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y @@ -115,10 +114,8 @@ CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y # CONFIG_FTRACE is not set -CONFIG_CRYPTO=y CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_ARC4=y # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig new file mode 100644 index 000000000000..2a0da5bf4b64 --- /dev/null +++ b/arch/mips/configs/sead3micro_defconfig @@ -0,0 +1,122 @@ +CONFIG_MIPS_SEAD3=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MICROMIPS=y +CONFIG_HZ_100=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_MODULES=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_MTD=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_GLUEBI=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_SCSI=y +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +CONFIG_SMSC911X=y +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MARVELL_PHY=y +CONFIG_DAVICOM_PHY=y +CONFIG_QSEMI_PHY=y +CONFIG_LXT_PHY=y +CONFIG_CICADA_PHY=y +CONFIG_VITESSE_PHY=y +CONFIG_SMSC_PHY=y +CONFIG_BROADCOM_PHY=y +CONFIG_ICPLUS_PHY=y +# CONFIG_WLAN is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +# 
CONFIG_CONSOLE_TRANSLATIONS is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_LEGACY_PTY_COUNT=32 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=2 +CONFIG_SERIAL_8250_RUNTIME_UARTS=2 +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_SPI=y +CONFIG_SENSORS_ADT7475=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_STORAGE=y +CONFIG_MMC=y +CONFIG_MMC_DEBUG=y +CONFIG_MMC_SPI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_M41T80=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_TMPFS=y +CONFIG_JFFS2_FS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_UTF8=y +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/arch/mips/fw/lib/Makefile b/arch/mips/fw/lib/Makefile index 84befc968fc4..529150516777 100644 --- a/arch/mips/fw/lib/Makefile +++ b/arch/mips/fw/lib/Makefile @@ -2,4 +2,6 @@ # Makefile for generic prom monitor library routines under Linux. # +lib-y += cmdline.o + lib-$(CONFIG_64BIT) += call_o32.o diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c new file mode 100644 index 000000000000..ffd0345780ae --- /dev/null +++ b/arch/mips/fw/lib/cmdline.c @@ -0,0 +1,101 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/string.h> + +#include <asm/addrspace.h> +#include <asm/fw/fw.h> + +int fw_argc; +int *_fw_argv; +int *_fw_envp; + +void __init fw_init_cmdline(void) +{ + int i; + + /* Validate command line parameters. */ + if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) { + fw_argc = 0; + _fw_argv = NULL; + } else { + fw_argc = (fw_arg0 & 0x0000ffff); + _fw_argv = (int *)fw_arg1; + } + + /* Validate environment pointer. */ + if (fw_arg2 < CKSEG0) + _fw_envp = NULL; + else + _fw_envp = (int *)fw_arg2; + + for (i = 1; i < fw_argc; i++) { + strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE); + if (i < (fw_argc - 1)) + strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); + } +} + +char * __init fw_getcmdline(void) +{ + return &(arcs_cmdline[0]); +} + +char *fw_getenv(char *envname) +{ + char *result = NULL; + + if (_fw_envp != NULL) { + /* + * Return a pointer to the given environment variable. + * YAMON uses "name", "value" pairs, while U-Boot uses + * "name=value". + */ + int i, yamon, index = 0; + + yamon = (strchr(fw_envp(index), '=') == NULL); + i = strlen(envname); + + while (fw_envp(index)) { + if (strncmp(envname, fw_envp(index), i) == 0) { + if (yamon) { + result = fw_envp(index + 1); + break; + } else if (fw_envp(index)[i] == '=') { + result = (fw_envp(index + 1) + i); + break; + } + } + + /* Increment array index. 
*/ + if (yamon) + index += 2; + else + index += 1; + } + } + + return result; +} + +unsigned long fw_getenvl(char *envname) +{ + unsigned long envl = 0UL; + char *str; + long val; + int tmp; + + str = fw_getenv(envname); + if (str) { + tmp = kstrtol(str, 0, &val); + envl = (unsigned long)val; + } + + return envl; +} diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 164a21e65b42..879691d194af 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -296,6 +296,7 @@ symbol = value #define LONG_SUBU subu #define LONG_L lw #define LONG_S sw +#define LONG_SP swp #define LONG_SLL sll #define LONG_SLLV sllv #define LONG_SRL srl @@ -318,6 +319,7 @@ symbol = value #define LONG_SUBU dsubu #define LONG_L ld #define LONG_S sd +#define LONG_SP sdp #define LONG_SLL dsll #define LONG_SLLV dsllv #define LONG_SRL dsrl diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h index b71dd5b16085..4d2cdea5aa37 100644 --- a/arch/mips/include/asm/bootinfo.h +++ b/arch/mips/include/asm/bootinfo.h @@ -104,6 +104,7 @@ struct boot_mem_map { extern struct boot_mem_map boot_mem_map; extern void add_memory_region(phys_t start, phys_t size, long type); +extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max); extern void prom_init(void); extern void prom_free_prom_memory(void); diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h index 888766ae1f85..e28a3e0eb3cb 100644 --- a/arch/mips/include/asm/branch.h +++ b/arch/mips/include/asm/branch.h @@ -11,6 +11,14 @@ #include <asm/ptrace.h> #include <asm/inst.h> +extern int __isa_exception_epc(struct pt_regs *regs); +extern int __compute_return_epc(struct pt_regs *regs); +extern int __compute_return_epc_for_insn(struct pt_regs *regs, + union mips_instruction insn); +extern int __microMIPS_compute_return_epc(struct pt_regs *regs); +extern int __MIPS16e_compute_return_epc(struct pt_regs *regs); + + static inline int delay_slot(struct pt_regs *regs) { return regs->cp0_cause & CAUSEF_BD; @@ -18,20 +26,27 @@ static inline int delay_slot(struct pt_regs *regs) static inline unsigned long exception_epc(struct pt_regs *regs) { - if (!delay_slot(regs)) + if (likely(!delay_slot(regs))) return regs->cp0_epc; + if (get_isa16_mode(regs->cp0_epc)) + return __isa_exception_epc(regs); + return regs->cp0_epc + 4; } #define BRANCH_LIKELY_TAKEN 0x0001 -extern int __compute_return_epc(struct pt_regs *regs); -extern int __compute_return_epc_for_insn(struct pt_regs *regs, - union mips_instruction insn); - static inline int compute_return_epc(struct pt_regs *regs) { + if (get_isa16_mode(regs->cp0_epc)) { + if (cpu_has_mmips) + return __microMIPS_compute_return_epc(regs); + if (cpu_has_mips16) + return __MIPS16e_compute_return_epc(regs); + return regs->cp0_epc; + } + if (!delay_slot(regs)) { regs->cp0_epc += 4; return 0; @@ -40,4 +55,19 @@ static inline int compute_return_epc(struct pt_regs *regs) return __compute_return_epc(regs); } +static inline int MIPS16e_compute_return_epc(struct pt_regs *regs, + union mips16e_instruction *inst) +{ + if (likely(!delay_slot(regs))) { + if (inst->ri.opcode == MIPS16e_extend_op) { + regs->cp0_epc += 4; + return 0; + } + regs->cp0_epc += 2; + return 0; + } + + return __MIPS16e_compute_return_epc(regs); +} + #endif /* _ASM_BRANCH_H */ diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 1a57e8b4d092..e5ec8fcd8afa 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ 
-113,6 +113,9 @@ #ifndef cpu_has_pindexed_dcache #define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX) #endif +#ifndef cpu_has_local_ebase +#define cpu_has_local_ebase 1 +#endif /* * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h new file mode 100644 index 000000000000..242cbb3ca582 --- /dev/null +++ b/arch/mips/include/asm/dma-coherence.h @@ -0,0 +1,15 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org> + * + */ +#ifndef __ASM_DMA_COHERENCE_H +#define __ASM_DMA_COHERENCE_H + +extern int coherentio; +extern int hw_coherentio; + +#endif diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index f8fc74b6cb47..84238c574d5e 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -2,6 +2,7 @@ #define _ASM_DMA_MAPPING_H #include <asm/scatterlist.h> +#include <asm/dma-coherence.h> #include <asm/cache.h> #include <asm-generic/dma-coherent.h> diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index 3b4092705567..2abb587d5ab4 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -54,6 +54,12 @@ do { \ extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc); extern int do_dsemulret(struct pt_regs *xcp); +extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, + struct mips_fpu_struct *ctx, int has_fpu, + void *__user *fault_addr); +int process_fpemu_return(int sig, void __user *fault_addr); +int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, + unsigned long *contpc); /* * Instruction inserted following the badinst to further tag the sequence diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h new file mode 100644 index 000000000000..d6c50a7e9ede --- /dev/null +++ b/arch/mips/include/asm/fw/fw.h @@ -0,0 +1,47 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. + */ +#ifndef __ASM_FW_H_ +#define __ASM_FW_H_ + +#include <asm/bootinfo.h> /* For cleaner code... */ + +enum fw_memtypes { + fw_dontuse, + fw_code, + fw_free, +}; + +typedef struct { + unsigned long base; /* Within KSEG0 */ + unsigned int size; /* bytes */ + enum fw_memtypes type; /* fw_memtypes */ +} fw_memblock_t; + +/* Maximum number of memory block descriptors. */ +#define FW_MAX_MEMBLOCKS 32 + +extern int fw_argc; +extern int *_fw_argv; +extern int *_fw_envp; + +/* + * Most firmware like YAMON, PMON, etc. pass arguments and environment + * variables as 32-bit pointers. These take care of sign extension. 
+ */ +#define fw_argv(index) ((char *)(long)_fw_argv[(index)]) +#define fw_envp(index) ((char *)(long)_fw_envp[(index)]) + +extern void fw_init_cmdline(void); +extern char *fw_getcmdline(void); +extern fw_memblock_t *fw_getmdesc(void); +extern void fw_meminit(void); +extern char *fw_getenv(char *name); +extern unsigned long fw_getenvl(char *name); +extern void fw_init_early_console(char port); + +#endif /* __ASM_FW_H_ */ diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index bdc9786ab5a7..7153b32de18e 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h @@ -202,7 +202,7 @@ #define GIC_VPE_WD_COUNT0_OFS 0x0094 #define GIC_VPE_WD_INITIAL0_OFS 0x0098 #define GIC_VPE_COMPARE_LO_OFS 0x00a0 -#define GIC_VPE_COMPARE_HI 0x00a4 +#define GIC_VPE_COMPARE_HI_OFS 0x00a4 #define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100 #define GIC_VPE_EIC_SS(intr) \ @@ -359,7 +359,11 @@ struct gic_shared_intr_map { /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ #define GIC_PIN_TO_VEC_OFFSET (1) -extern int gic_present; +#include <linux/clocksource.h> +#include <linux/irq.h> + +extern unsigned int gic_present; +extern unsigned int gic_frequency; extern unsigned long _gic_base; extern unsigned int gic_irq_base; extern unsigned int gic_irq_flags[]; @@ -368,18 +372,20 @@ extern struct gic_shared_intr_map gic_shared_intr_map[]; extern void gic_init(unsigned long gic_base_addr, unsigned long gic_addrspace_size, struct gic_intr_map *intrmap, unsigned int intrmap_size, unsigned int irqbase); - extern void gic_clocksource_init(unsigned int); -extern unsigned int gic_get_int(void); +extern unsigned int gic_compare_int (void); +extern cycle_t gic_read_count(void); +extern cycle_t gic_read_compare(void); +extern void gic_write_compare(cycle_t cnt); extern void gic_send_ipi(unsigned int intr); extern unsigned int plat_ipi_call_int_xlate(unsigned int); extern unsigned int plat_ipi_resched_int_xlate(unsigned int); extern void gic_bind_eic_interrupt(int irq, int set); extern unsigned int gic_get_timer_pending(void); +extern unsigned int gic_get_int(void); extern void gic_enable_interrupt(int irq_vec); extern void gic_disable_interrupt(int irq_vec); extern void gic_irq_ack(struct irq_data *d); extern void gic_finish_irq(struct irq_data *d); extern void gic_platform_init(int irqs, struct irq_chip *irq_controller); - #endif /* _ASM_GICREGS_H */ diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h index 44d6a5bde4a1..e3ee92d4dbe7 100644 --- a/arch/mips/include/asm/hazards.h +++ b/arch/mips/include/asm/hazards.h @@ -10,34 +10,13 @@ #ifndef _ASM_HAZARDS_H #define _ASM_HAZARDS_H -#ifdef __ASSEMBLY__ -#define ASMMACRO(name, code...) .macro name; code; .endm -#else - -#include <asm/cpu-features.h> - -#define ASMMACRO(name, code...) \ -__asm__(".macro " #name "; " #code "; .endm"); \ - \ -static inline void name(void) \ -{ \ - __asm__ __volatile__ (#name); \ -} - -/* - * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine. 
- */ -extern void mips_ihb(void); - -#endif +#include <linux/stringify.h> -ASMMACRO(_ssnop, - sll $0, $0, 1 - ) +#define ___ssnop \ + sll $0, $0, 1 -ASMMACRO(_ehb, - sll $0, $0, 3 - ) +#define ___ehb \ + sll $0, $0, 3 /* * TLB hazards @@ -48,24 +27,24 @@ ASMMACRO(_ehb, * MIPSR2 defines ehb for hazard avoidance */ -ASMMACRO(mtc0_tlbw_hazard, - _ehb - ) -ASMMACRO(tlbw_use_hazard, - _ehb - ) -ASMMACRO(tlb_probe_hazard, - _ehb - ) -ASMMACRO(irq_enable_hazard, - _ehb - ) -ASMMACRO(irq_disable_hazard, - _ehb - ) -ASMMACRO(back_to_back_c0_hazard, - _ehb - ) +#define __mtc0_tlbw_hazard \ + ___ehb + +#define __tlbw_use_hazard \ + ___ehb + +#define __tlb_probe_hazard \ + ___ehb + +#define __irq_enable_hazard \ + ___ehb + +#define __irq_disable_hazard \ + ___ehb + +#define __back_to_back_c0_hazard \ + ___ehb + /* * gcc has a tradition of misscompiling the previous construct using the * address of a label as argument to inline assembler. Gas otoh has the @@ -94,24 +73,42 @@ do { \ * These are slightly complicated by the fact that we guarantee R1 kernels to * run fine on R2 processors. */ -ASMMACRO(mtc0_tlbw_hazard, - _ssnop; _ssnop; _ehb - ) -ASMMACRO(tlbw_use_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) -ASMMACRO(tlb_probe_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) -ASMMACRO(irq_enable_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) -ASMMACRO(irq_disable_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) -ASMMACRO(back_to_back_c0_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) + +#define __mtc0_tlbw_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __tlbw_use_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __tlb_probe_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __irq_enable_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __irq_disable_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __back_to_back_c0_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + /* * gcc has a tradition of misscompiling the previous construct using the * address of a label as argument to inline assembler. Gas otoh has the @@ -147,18 +144,18 @@ do { \ * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. */ -ASMMACRO(mtc0_tlbw_hazard, - ) -ASMMACRO(tlbw_use_hazard, - ) -ASMMACRO(tlb_probe_hazard, - ) -ASMMACRO(irq_enable_hazard, - ) -ASMMACRO(irq_disable_hazard, - ) -ASMMACRO(back_to_back_c0_hazard, - ) +#define __mtc0_tlbw_hazard + +#define __tlbw_use_hazard + +#define __tlb_probe_hazard + +#define __irq_enable_hazard + +#define __irq_disable_hazard + +#define __back_to_back_c0_hazard + #define instruction_hazard() do { } while (0) #elif defined(CONFIG_CPU_SB1) @@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard, /* * Mostly like R4000 for historic reasons */ -ASMMACRO(mtc0_tlbw_hazard, - ) -ASMMACRO(tlbw_use_hazard, - ) -ASMMACRO(tlb_probe_hazard, - ) -ASMMACRO(irq_enable_hazard, - ) -ASMMACRO(irq_disable_hazard, - _ssnop; _ssnop; _ssnop - ) -ASMMACRO(back_to_back_c0_hazard, - ) +#define __mtc0_tlbw_hazard + +#define __tlbw_use_hazard + +#define __tlb_probe_hazard + +#define __irq_enable_hazard + +#define __irq_disable_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop + +#define __back_to_back_c0_hazard + #define instruction_hazard() do { } while (0) #else @@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard, * hazard so this is nice trick to have an optimal code for a range of * processors. 
*/ -ASMMACRO(mtc0_tlbw_hazard, - nop; nop - ) -ASMMACRO(tlbw_use_hazard, - nop; nop; nop - ) -ASMMACRO(tlb_probe_hazard, - nop; nop; nop - ) -ASMMACRO(irq_enable_hazard, - _ssnop; _ssnop; _ssnop; - ) -ASMMACRO(irq_disable_hazard, - nop; nop; nop - ) -ASMMACRO(back_to_back_c0_hazard, - _ssnop; _ssnop; _ssnop; - ) +#define __mtc0_tlbw_hazard \ + nop; \ + nop + +#define __tlbw_use_hazard \ + nop; \ + nop; \ + nop + +#define __tlb_probe_hazard \ + nop; \ + nop; \ + nop + +#define __irq_enable_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop + +#define __irq_disable_hazard \ + nop; \ + nop; \ + nop + +#define __back_to_back_c0_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop + #define instruction_hazard() do { } while (0) #endif @@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard, /* FPU hazards */ #if defined(CONFIG_CPU_SB1) -ASMMACRO(enable_fpu_hazard, - .set push; - .set mips64; - .set noreorder; - _ssnop; - bnezl $0, .+4; - _ssnop; - .set pop -) -ASMMACRO(disable_fpu_hazard, -) + +#define __enable_fpu_hazard \ + .set push; \ + .set mips64; \ + .set noreorder; \ + ___ssnop; \ + bnezl $0, .+4; \ + ___ssnop; \ + .set pop + +#define __disable_fpu_hazard #elif defined(CONFIG_CPU_MIPSR2) -ASMMACRO(enable_fpu_hazard, - _ehb -) -ASMMACRO(disable_fpu_hazard, - _ehb -) + +#define __enable_fpu_hazard \ + ___ehb + +#define __disable_fpu_hazard \ + ___ehb + #else -ASMMACRO(enable_fpu_hazard, - nop; nop; nop; nop -) -ASMMACRO(disable_fpu_hazard, - _ehb -) + +#define __enable_fpu_hazard \ + nop; \ + nop; \ + nop; \ + nop + +#define __disable_fpu_hazard \ + ___ehb + #endif +#ifdef __ASSEMBLY__ + +#define _ssnop ___ssnop +#define _ehb ___ehb +#define mtc0_tlbw_hazard __mtc0_tlbw_hazard +#define tlbw_use_hazard __tlbw_use_hazard +#define tlb_probe_hazard __tlb_probe_hazard +#define irq_enable_hazard __irq_enable_hazard +#define irq_disable_hazard __irq_disable_hazard +#define back_to_back_c0_hazard __back_to_back_c0_hazard +#define enable_fpu_hazard __enable_fpu_hazard +#define disable_fpu_hazard __disable_fpu_hazard + +#else + +#define _ssnop() \ +do { \ + __asm__ __volatile__( \ + __stringify(___ssnop) \ + ); \ +} while (0) + +#define _ehb() \ +do { \ + __asm__ __volatile__( \ + __stringify(___ehb) \ + ); \ +} while (0) + + +#define mtc0_tlbw_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__mtc0_tlbw_hazard) \ + ); \ +} while (0) + + +#define tlbw_use_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__tlbw_use_hazard) \ + ); \ +} while (0) + + +#define tlb_probe_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__tlb_probe_hazard) \ + ); \ +} while (0) + + +#define irq_enable_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__irq_enable_hazard) \ + ); \ +} while (0) + + +#define irq_disable_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__irq_disable_hazard) \ + ); \ +} while (0) + + +#define back_to_back_c0_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__back_to_back_c0_hazard) \ + ); \ +} while (0) + + +#define enable_fpu_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__enable_fpu_hazard) \ + ); \ +} while (0) + + +#define disable_fpu_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__disable_fpu_hazard) \ + ); \ +} while (0) + +/* + * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine. 
+ */ +extern void mips_ihb(void); + +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_HAZARDS_H */ diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h index f1eadf764071..22912f78401c 100644 --- a/arch/mips/include/asm/inst.h +++ b/arch/mips/include/asm/inst.h @@ -73,4 +73,16 @@ typedef unsigned int mips_instruction; +/* microMIPS instruction decode structure. Do NOT export!!! */ +struct mm_decoded_insn { + mips_instruction insn; + mips_instruction next_insn; + int pc_inc; + int next_pc_inc; + int micro_mips_mode; +}; + +/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */ +extern const int reg16to32[]; + #endif /* _ASM_INST_H */ diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 9f3384c789d7..45c00951888b 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -14,53 +14,48 @@ #ifndef __ASSEMBLY__ #include <linux/compiler.h> +#include <linux/stringify.h> #include <asm/hazards.h> #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) -__asm__( - " .macro arch_local_irq_disable\n" +static inline void arch_local_irq_disable(void) +{ + __asm__ __volatile__( " .set push \n" " .set noat \n" " di \n" - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); - -static inline void arch_local_irq_disable(void) -{ - __asm__ __volatile__( - "arch_local_irq_disable" - : /* no outputs */ - : /* no inputs */ - : "memory"); + : /* no outputs */ + : /* no inputs */ + : "memory"); } +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; -__asm__( - " .macro arch_local_irq_save result \n" + asm __volatile__( " .set push \n" " .set reorder \n" " .set noat \n" - " di \\result \n" - " andi \\result, 1 \n" - " irq_disable_hazard \n" + " di %[flags] \n" + " andi %[flags], 1 \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); + : [flags] "=r" (flags) + : /* no inputs */ + : "memory"); -static inline unsigned long arch_local_irq_save(void) -{ - unsigned long flags; - asm volatile("arch_local_irq_save\t%0" - : "=r" (flags) - : /* no inputs */ - : "memory"); return flags; } +static inline void arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; -__asm__( - " .macro arch_local_irq_restore flags \n" + __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set noat \n" @@ -69,7 +64,7 @@ __asm__( * Slow, but doesn't suffer from a relatively unlikely race * condition we're having since days 1. */ - " beqz \\flags, 1f \n" + " beqz %[flags], 1f \n" " di \n" " ei \n" "1: \n" @@ -78,33 +73,44 @@ __asm__( * Fast, dangerous. Life is fun, life is good. 
*/ " mfc0 $1, $12 \n" - " ins $1, \\flags, 0, 1 \n" + " ins $1, %[flags], 0, 1 \n" " mtc0 $1, $12 \n" #endif - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); - -static inline void arch_local_irq_restore(unsigned long flags) -{ - unsigned long __tmp1; - - __asm__ __volatile__( - "arch_local_irq_restore\t%0" - : "=r" (__tmp1) - : "0" (flags) - : "memory"); + : [flags] "=r" (__tmp1) + : "0" (flags) + : "memory"); } static inline void __arch_local_irq_restore(unsigned long flags) { - unsigned long __tmp1; - __asm__ __volatile__( - "arch_local_irq_restore\t%0" - : "=r" (__tmp1) - : "0" (flags) - : "memory"); + " .set push \n" + " .set noreorder \n" + " .set noat \n" +#if defined(CONFIG_IRQ_CPU) + /* + * Slow, but doesn't suffer from a relatively unlikely race + * condition we're having since days 1. + */ + " beqz %[flags], 1f \n" + " di \n" + " ei \n" + "1: \n" +#else + /* + * Fast, dangerous. Life is fun, life is good. + */ + " mfc0 $1, $12 \n" + " ins $1, %[flags], 0, 1 \n" + " mtc0 $1, $12 \n" +#endif + " " __stringify(__irq_disable_hazard) " \n" + " .set pop \n" + : [flags] "=r" (flags) + : "0" (flags) + : "memory"); } #else /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ @@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags); #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ -__asm__( - " .macro arch_local_irq_enable \n" +extern void smtc_ipi_replay(void); + +static inline void arch_local_irq_enable(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of call overhead on each local_irq_enable() + */ + smtc_ipi_replay(); +#endif + __asm__ __volatile__( " .set push \n" " .set reorder \n" " .set noat \n" @@ -133,45 +149,28 @@ __asm__( " xori $1,0x1e \n" " mtc0 $1,$12 \n" #endif - " irq_enable_hazard \n" + " " __stringify(__irq_enable_hazard) " \n" " .set pop \n" - " .endm"); - -extern void smtc_ipi_replay(void); - -static inline void arch_local_irq_enable(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of call overhead on each local_irq_enable() - */ - smtc_ipi_replay(); -#endif - __asm__ __volatile__( - "arch_local_irq_enable" - : /* no outputs */ - : /* no inputs */ - : "memory"); + : /* no outputs */ + : /* no inputs */ + : "memory"); } +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; -__asm__( - " .macro arch_local_save_flags flags \n" + asm __volatile__( " .set push \n" " .set reorder \n" #ifdef CONFIG_MIPS_MT_SMTC - " mfc0 \\flags, $2, 1 \n" + " mfc0 %[flags], $2, 1 \n" #else - " mfc0 \\flags, $12 \n" + " mfc0 %[flags], $12 \n" #endif " .set pop \n" - " .endm \n"); + : [flags] "=r" (flags)); -static inline unsigned long arch_local_save_flags(void) -{ - unsigned long flags; - asm volatile("arch_local_save_flags %0" : "=r" (flags)); return flags; } diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h new file mode 100644 index 000000000000..85789eacbf18 --- /dev/null +++ b/arch/mips/include/asm/kvm.h @@ -0,0 +1,55 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#ifndef __LINUX_KVM_MIPS_H +#define __LINUX_KVM_MIPS_H + +#include <linux/types.h> + +#define __KVM_MIPS + +#define N_MIPS_COPROC_REGS 32 +#define N_MIPS_COPROC_SEL 8 + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { + __u32 gprs[32]; + __u32 hi; + __u32 lo; + __u32 pc; + + __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { +}; + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +struct kvm_mips_interrupt { + /* in */ + __u32 cpu; + __u32 irq; +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +#endif /* __LINUX_KVM_MIPS_H */ diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h new file mode 100644 index 000000000000..e68781e18387 --- /dev/null +++ b/arch/mips/include/asm/kvm_host.h @@ -0,0 +1,667 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#ifndef __MIPS_KVM_HOST_H__ +#define __MIPS_KVM_HOST_H__ + +#include <linux/mutex.h> +#include <linux/hrtimer.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/kvm.h> +#include <linux/kvm_types.h> +#include <linux/threads.h> +#include <linux/spinlock.h> + + +#define KVM_MAX_VCPUS 1 +#define KVM_USER_MEM_SLOTS 8 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 0 + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +/* Don't support huge pages */ +#define KVM_HPAGE_GFN_SHIFT(x) 0 + +/* We don't currently support large pages. 
*/ +#define KVM_NR_PAGE_SIZES 1 +#define KVM_PAGES_PER_HPAGE(x) 1 + + + +/* Special address that contains the comm page, used for reducing # of traps */ +#define KVM_GUEST_COMMPAGE_ADDR 0x0 + +#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \ + ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0)) + +#define KVM_GUEST_KUSEG 0x00000000UL +#define KVM_GUEST_KSEG0 0x40000000UL +#define KVM_GUEST_KSEG23 0x60000000UL +#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000) +#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) + +#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) +#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) +#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) + +/* + * Map an address to a certain kernel segment + */ +#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) +#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) +#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) + +#define KVM_INVALID_PAGE 0xdeadbeef +#define KVM_INVALID_INST 0xdeadbeef +#define KVM_INVALID_ADDR 0xdeadbeef + +#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL + +#define GUEST_TICKS_PER_JIFFY (40000000/HZ) +#define MS_TO_NS(x) (x * 1E6L) + +#define CAUSEB_DC 27 +#define CAUSEF_DC (_ULCAST_(1) << 27) + +struct kvm; +struct kvm_run; +struct kvm_vcpu; +struct kvm_interrupt; + +extern atomic_t kvm_mips_instance; +extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); +extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn); +extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn); + +struct kvm_vm_stat { + u32 remote_tlb_flush; +}; + +struct kvm_vcpu_stat { + u32 wait_exits; + u32 cache_exits; + u32 signal_exits; + u32 int_exits; + u32 cop_unusable_exits; + u32 tlbmod_exits; + u32 tlbmiss_ld_exits; + u32 tlbmiss_st_exits; + u32 addrerr_st_exits; + u32 addrerr_ld_exits; + u32 syscall_exits; + u32 resvd_inst_exits; + u32 break_inst_exits; + u32 flush_dcache_exits; + u32 halt_wakeup; +}; + +enum kvm_mips_exit_types { + WAIT_EXITS, + CACHE_EXITS, + SIGNAL_EXITS, + INT_EXITS, + COP_UNUSABLE_EXITS, + TLBMOD_EXITS, + TLBMISS_LD_EXITS, + TLBMISS_ST_EXITS, + ADDRERR_ST_EXITS, + ADDRERR_LD_EXITS, + SYSCALL_EXITS, + RESVD_INST_EXITS, + BREAK_INST_EXITS, + FLUSH_DCACHE_EXITS, + MAX_KVM_MIPS_EXIT_TYPES +}; + +struct kvm_arch_memory_slot { +}; + +struct kvm_arch { + /* Guest GVA->HPA page table */ + unsigned long *guest_pmap; + unsigned long guest_pmap_npages; + + /* Wired host TLB used for the commpage */ + int commpage_tlb; +}; + +#define N_MIPS_COPROC_REGS 32 +#define N_MIPS_COPROC_SEL 8 + +struct mips_coproc { + unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS + unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; +#endif +}; + +/* + * Coprocessor 0 register names + */ +#define MIPS_CP0_TLB_INDEX 0 +#define MIPS_CP0_TLB_RANDOM 1 +#define MIPS_CP0_TLB_LOW 2 +#define MIPS_CP0_TLB_LO0 2 +#define MIPS_CP0_TLB_LO1 3 +#define MIPS_CP0_TLB_CONTEXT 4 +#define MIPS_CP0_TLB_PG_MASK 5 +#define MIPS_CP0_TLB_WIRED 6 +#define MIPS_CP0_HWRENA 7 +#define MIPS_CP0_BAD_VADDR 8 +#define MIPS_CP0_COUNT 9 +#define MIPS_CP0_TLB_HI 10 +#define MIPS_CP0_COMPARE 11 +#define MIPS_CP0_STATUS 12 +#define MIPS_CP0_CAUSE 13 +#define MIPS_CP0_EXC_PC 14 +#define MIPS_CP0_PRID 15 +#define MIPS_CP0_CONFIG 16 +#define MIPS_CP0_LLADDR 17 +#define MIPS_CP0_WATCH_LO 18 +#define 
MIPS_CP0_WATCH_HI 19 +#define MIPS_CP0_TLB_XCONTEXT 20 +#define MIPS_CP0_ECC 26 +#define MIPS_CP0_CACHE_ERR 27 +#define MIPS_CP0_TAG_LO 28 +#define MIPS_CP0_TAG_HI 29 +#define MIPS_CP0_ERROR_PC 30 +#define MIPS_CP0_DEBUG 23 +#define MIPS_CP0_DEPC 24 +#define MIPS_CP0_PERFCNT 25 +#define MIPS_CP0_ERRCTL 26 +#define MIPS_CP0_DATA_LO 28 +#define MIPS_CP0_DATA_HI 29 +#define MIPS_CP0_DESAVE 31 + +#define MIPS_CP0_CONFIG_SEL 0 +#define MIPS_CP0_CONFIG1_SEL 1 +#define MIPS_CP0_CONFIG2_SEL 2 +#define MIPS_CP0_CONFIG3_SEL 3 + +/* Config0 register bits */ +#define CP0C0_M 31 +#define CP0C0_K23 28 +#define CP0C0_KU 25 +#define CP0C0_MDU 20 +#define CP0C0_MM 17 +#define CP0C0_BM 16 +#define CP0C0_BE 15 +#define CP0C0_AT 13 +#define CP0C0_AR 10 +#define CP0C0_MT 7 +#define CP0C0_VI 3 +#define CP0C0_K0 0 + +/* Config1 register bits */ +#define CP0C1_M 31 +#define CP0C1_MMU 25 +#define CP0C1_IS 22 +#define CP0C1_IL 19 +#define CP0C1_IA 16 +#define CP0C1_DS 13 +#define CP0C1_DL 10 +#define CP0C1_DA 7 +#define CP0C1_C2 6 +#define CP0C1_MD 5 +#define CP0C1_PC 4 +#define CP0C1_WR 3 +#define CP0C1_CA 2 +#define CP0C1_EP 1 +#define CP0C1_FP 0 + +/* Config2 Register bits */ +#define CP0C2_M 31 +#define CP0C2_TU 28 +#define CP0C2_TS 24 +#define CP0C2_TL 20 +#define CP0C2_TA 16 +#define CP0C2_SU 12 +#define CP0C2_SS 8 +#define CP0C2_SL 4 +#define CP0C2_SA 0 + +/* Config3 Register bits */ +#define CP0C3_M 31 +#define CP0C3_ISA_ON_EXC 16 +#define CP0C3_ULRI 13 +#define CP0C3_DSPP 10 +#define CP0C3_LPA 7 +#define CP0C3_VEIC 6 +#define CP0C3_VInt 5 +#define CP0C3_SP 4 +#define CP0C3_MT 2 +#define CP0C3_SM 1 +#define CP0C3_TL 0 + +/* Have config1, Cacheable, noncoherent, write-back, write allocate*/ +#define MIPS_CONFIG0 \ + ((1 << CP0C0_M) | (0x3 << CP0C0_K0)) + +/* Have config2, no coprocessor2 attached, no MDMX support attached, + no performance counters, watch registers present, + no code compression, EJTAG present, no FPU, no watch registers */ +#define MIPS_CONFIG1 \ +((1 << CP0C1_M) | \ + (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ + (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ + (0 << CP0C1_FP)) + +/* Have config3, no tertiary/secondary caches implemented */ +#define MIPS_CONFIG2 \ +((1 << CP0C2_M)) + +/* No config4, no DSP ASE, no large physaddr (PABITS), + no external interrupt controller, no vectored interrupts, + no 1kb pages, no SmartMIPS ASE, no trace logic */ +#define MIPS_CONFIG3 \ +((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ + (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ + (0 << CP0C3_SM) | (0 << CP0C3_TL)) + +/* MMU types, the first four entries have the same layout as the + CP0C0_MT field. 
*/ +enum mips_mmu_types { + MMU_TYPE_NONE, + MMU_TYPE_R4000, + MMU_TYPE_RESERVED, + MMU_TYPE_FMT, + MMU_TYPE_R3000, + MMU_TYPE_R6000, + MMU_TYPE_R8000 +}; + +/* + * Trap codes + */ +#define T_INT 0 /* Interrupt pending */ +#define T_TLB_MOD 1 /* TLB modified fault */ +#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */ +#define T_TLB_ST_MISS 3 /* TLB miss on a store */ +#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */ +#define T_ADDR_ERR_ST 5 /* Address error on a store */ +#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */ +#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */ +#define T_SYSCALL 8 /* System call */ +#define T_BREAK 9 /* Breakpoint */ +#define T_RES_INST 10 /* Reserved instruction exception */ +#define T_COP_UNUSABLE 11 /* Coprocessor unusable */ +#define T_OVFLOW 12 /* Arithmetic overflow */ + +/* + * Trap definitions added for r4000 port. + */ +#define T_TRAP 13 /* Trap instruction */ +#define T_VCEI 14 /* Virtual coherency exception */ +#define T_FPE 15 /* Floating point exception */ +#define T_WATCH 23 /* Watch address reference */ +#define T_VCED 31 /* Virtual coherency data */ + +/* Resume Flags */ +#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */ +#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ + +#define RESUME_GUEST 0 +#define RESUME_GUEST_DR RESUME_FLAG_DR +#define RESUME_HOST RESUME_FLAG_HOST + +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ + EMULATE_FAIL, /* can't emulate this instruction */ + EMULATE_WAIT, /* WAIT instruction */ + EMULATE_PRIV_FAIL, +}; + +#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */ +#define MIPS3_PG_V 0x00000002 /* Valid */ +#define MIPS3_PG_NV 0x00000000 +#define MIPS3_PG_D 0x00000004 /* Dirty */ + +#define mips3_paddr_to_tlbpfn(x) \ + (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME) +#define mips3_tlbpfn_to_paddr(x) \ + ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) + +#define MIPS3_PG_SHIFT 6 +#define MIPS3_PG_FRAME 0x3fffffc0 + +#define VPN2_MASK 0xffffe000 +#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) +#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) +#define TLB_ASID(x) (ASID_MASK((x).tlb_hi)) +#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? 
((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) + +struct kvm_mips_tlb { + long tlb_mask; + long tlb_hi; + long tlb_lo0; + long tlb_lo1; +}; + +#define KVM_MIPS_GUEST_TLB_SIZE 64 +struct kvm_vcpu_arch { + void *host_ebase, *guest_ebase; + unsigned long host_stack; + unsigned long host_gp; + + /* Host CP0 registers used when handling exits from guest */ + unsigned long host_cp0_badvaddr; + unsigned long host_cp0_cause; + unsigned long host_cp0_epc; + unsigned long host_cp0_entryhi; + uint32_t guest_inst; + + /* GPRS */ + unsigned long gprs[32]; + unsigned long hi; + unsigned long lo; + unsigned long pc; + + /* FPU State */ + struct mips_fpu_struct fpu; + + /* COP0 State */ + struct mips_coproc *cop0; + + /* Host KSEG0 address of the EI/DI offset */ + void *kseg0_commpage; + + u32 io_gpr; /* GPR used as IO source/target */ + + /* Used to calibrate the virtual count register for the guest */ + int32_t host_cp0_count; + + /* Bitmask of exceptions that are pending */ + unsigned long pending_exceptions; + + /* Bitmask of pending exceptions to be cleared */ + unsigned long pending_exceptions_clr; + + unsigned long pending_load_cause; + + /* Save/Restore the entryhi register when we are preempted/scheduled back in */ + unsigned long preempt_entryhi; + + /* S/W Based TLB for guest */ + struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE]; + + /* Cached guest kernel/user ASIDs */ + uint32_t guest_user_asid[NR_CPUS]; + uint32_t guest_kernel_asid[NR_CPUS]; + struct mm_struct guest_kernel_mm, guest_user_mm; + + struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE]; + + + struct hrtimer comparecount_timer; + + int last_sched_cpu; + + /* WAIT executed */ + int wait; +}; + + +#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0]) +#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val) +#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0]) +#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0]) +#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0]) +#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val)) +#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2]) +#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0]) +#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val)) +#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0]) +#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val)) +#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0]) +#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val)) +#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0]) +#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val)) +#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0]) +#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val)) +#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0]) +#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val)) +#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0]) +#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val)) +#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1]) +#define kvm_write_c0_guest_intctl(cop0, val)
(cop0->reg[MIPS_CP0_STATUS][1] = (val)) +#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0]) +#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val)) +#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0]) +#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val)) +#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0]) +#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val)) +#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1]) +#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val)) +#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0]) +#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1]) +#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2]) +#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3]) +#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7]) +#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) +#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) +#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) +#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) +#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) +#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) +#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) + +#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val)) +#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val)) +#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val)) +#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val)) +#define kvm_change_c0_guest_cause(cop0, change, val) \ +{ \ + kvm_clear_c0_guest_cause(cop0, change); \ + kvm_set_c0_guest_cause(cop0, ((val) & (change))); \ +} +#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val)) +#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val)) +#define kvm_change_c0_guest_ebase(cop0, change, val) \ +{ \ + kvm_clear_c0_guest_ebase(cop0, change); \ + kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ +} + + +struct kvm_mips_callbacks { + int (*handle_cop_unusable) (struct kvm_vcpu *vcpu); + int (*handle_tlb_mod) (struct kvm_vcpu *vcpu); + int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu); + int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu); + int (*handle_addr_err_st) (struct kvm_vcpu *vcpu); + int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu); + int (*handle_syscall) (struct kvm_vcpu *vcpu); + int (*handle_res_inst) (struct kvm_vcpu *vcpu); + int (*handle_break) (struct kvm_vcpu *vcpu); + int (*vm_init) (struct kvm *kvm); + int (*vcpu_init) (struct kvm_vcpu *vcpu); + int (*vcpu_setup) (struct kvm_vcpu *vcpu); + gpa_t(*gva_to_gpa) (gva_t gva); + void (*queue_timer_int) (struct kvm_vcpu *vcpu); + void (*dequeue_timer_int) (struct kvm_vcpu *vcpu); + void (*queue_io_int) (struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); + void (*dequeue_io_int) (struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); + int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause); + int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause); + int 
(*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu, + struct kvm_regs *regs); + int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu, + struct kvm_regs *regs); +}; +extern struct kvm_mips_callbacks *kvm_mips_callbacks; +int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); + +/* Debug: dump vcpu state */ +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); + +/* Trampoline ASM routine to start running in "Guest" context */ +extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); + +/* TLB handling */ +uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu); + +uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu); + +uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu); + +extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr, + struct kvm_vcpu *vcpu); + +extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, + struct kvm_vcpu *vcpu); + +extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, + struct kvm_mips_tlb *tlb, + unsigned long *hpa0, + unsigned long *hpa1); + +extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern void kvm_mips_dump_host_tlbs(void); +extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); +extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu); +extern void kvm_mips_flush_host_tlb(int skip_kseg0); +extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi); +extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index); + +extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, + unsigned long entryhi); +extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr); +extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, + unsigned long gva); +extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, + struct kvm_vcpu *vcpu); +extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu); +extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu); +extern void kvm_local_flush_tlb_all(void); +extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu); +extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu); +extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu); + +/* Emulation */ +uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu); +enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause); + +extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum 
emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_handle_ri(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, + struct kvm_run *run); + +enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu); + +enum emulation_result kvm_mips_check_privilege(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +enum emulation_result kvm_mips_emulate_cache(uint32_t inst, + uint32_t *opc, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu); +enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, + uint32_t *opc, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu); +enum emulation_result kvm_mips_emulate_store(uint32_t inst, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu); +enum emulation_result kvm_mips_emulate_load(uint32_t inst, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +/* Dynamic binary translation */ +extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu); +extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu); +extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu); +extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu); + +/* Misc */ +extern void mips32_SyncICache(unsigned long addr, unsigned long size); +extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu); +extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); + + +#endif /* __MIPS_KVM_HOST_H__ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h deleted file mode 100644 index 8fcf8df4418a..000000000000 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef BCM63XX_CLK_H_ -#define BCM63XX_CLK_H_ - -struct clk { - void (*set)(struct clk *, int); - unsigned int rate; - unsigned int usage; - int id; -}; - -#endif /* ! 
BCM63XX_CLK_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index cb922b9cb0e9..336228990808 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -14,11 +14,12 @@ #define BCM6345_CPU_ID 0x6345 #define BCM6348_CPU_ID 0x6348 #define BCM6358_CPU_ID 0x6358 +#define BCM6362_CPU_ID 0x6362 #define BCM6368_CPU_ID 0x6368 void __init bcm63xx_cpu_init(void); u16 __bcm63xx_get_cpu_id(void); -u16 bcm63xx_get_cpu_rev(void); +u8 bcm63xx_get_cpu_rev(void); unsigned int bcm63xx_get_cpu_freq(void); #ifdef CONFIG_BCM63XX_CPU_6328 @@ -86,6 +87,20 @@ unsigned int bcm63xx_get_cpu_freq(void); # define BCMCPU_IS_6358() (0) #endif +#ifdef CONFIG_BCM63XX_CPU_6362 +# ifdef bcm63xx_get_cpu_id +# undef bcm63xx_get_cpu_id +# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() +# define BCMCPU_RUNTIME_DETECT +# else +# define bcm63xx_get_cpu_id() BCM6362_CPU_ID +# endif +# define BCMCPU_IS_6362() (bcm63xx_get_cpu_id() == BCM6362_CPU_ID) +#else +# define BCMCPU_IS_6362() (0) +#endif + + #ifdef CONFIG_BCM63XX_CPU_6368 # ifdef bcm63xx_get_cpu_id # undef bcm63xx_get_cpu_id @@ -406,6 +421,62 @@ enum bcm63xx_regs_set { /* + * 6362 register sets base address + */ +#define BCM_6362_DSL_LMEM_BASE (0xdeadbeef) +#define BCM_6362_PERF_BASE (0xb0000000) +#define BCM_6362_TIMER_BASE (0xb0000040) +#define BCM_6362_WDT_BASE (0xb000005c) +#define BCM_6362_UART0_BASE (0xb0000100) +#define BCM_6362_UART1_BASE (0xb0000120) +#define BCM_6362_GPIO_BASE (0xb0000080) +#define BCM_6362_SPI_BASE (0xb0000800) +#define BCM_6362_HSSPI_BASE (0xb0001000) +#define BCM_6362_UDC0_BASE (0xdeadbeef) +#define BCM_6362_USBDMA_BASE (0xb000c000) +#define BCM_6362_OHCI0_BASE (0xb0002600) +#define BCM_6362_OHCI_PRIV_BASE (0xdeadbeef) +#define BCM_6362_USBH_PRIV_BASE (0xb0002700) +#define BCM_6362_USBD_BASE (0xb0002400) +#define BCM_6362_MPI_BASE (0xdeadbeef) +#define BCM_6362_PCMCIA_BASE (0xdeadbeef) +#define BCM_6362_PCIE_BASE (0xb0e40000) +#define BCM_6362_SDRAM_REGS_BASE (0xdeadbeef) +#define BCM_6362_DSL_BASE (0xdeadbeef) +#define BCM_6362_UBUS_BASE (0xdeadbeef) +#define BCM_6362_ENET0_BASE (0xdeadbeef) +#define BCM_6362_ENET1_BASE (0xdeadbeef) +#define BCM_6362_ENETDMA_BASE (0xb000d800) +#define BCM_6362_ENETDMAC_BASE (0xb000da00) +#define BCM_6362_ENETDMAS_BASE (0xb000dc00) +#define BCM_6362_ENETSW_BASE (0xb0e00000) +#define BCM_6362_EHCI0_BASE (0xb0002500) +#define BCM_6362_SDRAM_BASE (0xdeadbeef) +#define BCM_6362_MEMC_BASE (0xdeadbeef) +#define BCM_6362_DDR_BASE (0xb0003000) +#define BCM_6362_M2M_BASE (0xdeadbeef) +#define BCM_6362_ATM_BASE (0xdeadbeef) +#define BCM_6362_XTM_BASE (0xb0007800) +#define BCM_6362_XTMDMA_BASE (0xb000b800) +#define BCM_6362_XTMDMAC_BASE (0xdeadbeef) +#define BCM_6362_XTMDMAS_BASE (0xdeadbeef) +#define BCM_6362_PCM_BASE (0xb000a800) +#define BCM_6362_PCMDMA_BASE (0xdeadbeef) +#define BCM_6362_PCMDMAC_BASE (0xdeadbeef) +#define BCM_6362_PCMDMAS_BASE (0xdeadbeef) +#define BCM_6362_RNG_BASE (0xdeadbeef) +#define BCM_6362_MISC_BASE (0xb0001800) + +#define BCM_6362_NAND_REG_BASE (0xb0000200) +#define BCM_6362_NAND_CACHE_BASE (0xb0000600) +#define BCM_6362_LED_BASE (0xb0001900) +#define BCM_6362_IPSEC_BASE (0xb0002800) +#define BCM_6362_IPSEC_DMA_BASE (0xb000d000) +#define BCM_6362_WLAN_CHIPCOMMON_BASE (0xb0004000) +#define BCM_6362_WLAN_D11_BASE (0xb0005000) +#define BCM_6362_WLAN_SHIM_BASE (0xb0007000) + +/* * 6368 register sets base address */ #define BCM_6368_DSL_LMEM_BASE (0xdeadbeef) @@ 
-564,6 +635,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) #ifdef CONFIG_BCM63XX_CPU_6358 __GEN_RSET(6358) #endif +#ifdef CONFIG_BCM63XX_CPU_6362 + __GEN_RSET(6362) +#endif #ifdef CONFIG_BCM63XX_CPU_6368 __GEN_RSET(6368) #endif @@ -820,6 +894,71 @@ enum bcm63xx_irq { #define BCM_6358_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28) /* + * 6362 irqs + */ +#define BCM_6362_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32) + +#define BCM_6362_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) +#define BCM_6362_SPI_IRQ (IRQ_INTERNAL_BASE + 2) +#define BCM_6362_UART0_IRQ (IRQ_INTERNAL_BASE + 3) +#define BCM_6362_UART1_IRQ (IRQ_INTERNAL_BASE + 4) +#define BCM_6362_DSL_IRQ (IRQ_INTERNAL_BASE + 28) +#define BCM_6362_UDC0_IRQ 0 +#define BCM_6362_ENET0_IRQ 0 +#define BCM_6362_ENET1_IRQ 0 +#define BCM_6362_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 14) +#define BCM_6362_HSSPI_IRQ (IRQ_INTERNAL_BASE + 5) +#define BCM_6362_OHCI0_IRQ (IRQ_INTERNAL_BASE + 9) +#define BCM_6362_EHCI0_IRQ (IRQ_INTERNAL_BASE + 10) +#define BCM_6362_USBD_IRQ (IRQ_INTERNAL_BASE + 11) +#define BCM_6362_USBD_RXDMA0_IRQ (IRQ_INTERNAL_BASE + 20) +#define BCM_6362_USBD_TXDMA0_IRQ (IRQ_INTERNAL_BASE + 21) +#define BCM_6362_USBD_RXDMA1_IRQ (IRQ_INTERNAL_BASE + 22) +#define BCM_6362_USBD_TXDMA1_IRQ (IRQ_INTERNAL_BASE + 23) +#define BCM_6362_USBD_RXDMA2_IRQ (IRQ_INTERNAL_BASE + 24) +#define BCM_6362_USBD_TXDMA2_IRQ (IRQ_INTERNAL_BASE + 25) +#define BCM_6362_PCMCIA_IRQ 0 +#define BCM_6362_ENET0_RXDMA_IRQ 0 +#define BCM_6362_ENET0_TXDMA_IRQ 0 +#define BCM_6362_ENET1_RXDMA_IRQ 0 +#define BCM_6362_ENET1_TXDMA_IRQ 0 +#define BCM_6362_PCI_IRQ (IRQ_INTERNAL_BASE + 30) +#define BCM_6362_ATM_IRQ 0 +#define BCM_6362_ENETSW_RXDMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 0) +#define BCM_6362_ENETSW_RXDMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 1) +#define BCM_6362_ENETSW_RXDMA2_IRQ (BCM_6362_HIGH_IRQ_BASE + 2) +#define BCM_6362_ENETSW_RXDMA3_IRQ (BCM_6362_HIGH_IRQ_BASE + 3) +#define BCM_6362_ENETSW_TXDMA0_IRQ 0 +#define BCM_6362_ENETSW_TXDMA1_IRQ 0 +#define BCM_6362_ENETSW_TXDMA2_IRQ 0 +#define BCM_6362_ENETSW_TXDMA3_IRQ 0 +#define BCM_6362_XTM_IRQ 0 +#define BCM_6362_XTM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 12) + +#define BCM_6362_RING_OSC_IRQ (IRQ_INTERNAL_BASE + 1) +#define BCM_6362_WLAN_GPIO_IRQ (IRQ_INTERNAL_BASE + 6) +#define BCM_6362_WLAN_IRQ (IRQ_INTERNAL_BASE + 7) +#define BCM_6362_IPSEC_IRQ (IRQ_INTERNAL_BASE + 8) +#define BCM_6362_NAND_IRQ (IRQ_INTERNAL_BASE + 12) +#define BCM_6362_PCM_IRQ (IRQ_INTERNAL_BASE + 13) +#define BCM_6362_DG_IRQ (IRQ_INTERNAL_BASE + 15) +#define BCM_6362_EPHY_ENERGY0_IRQ (IRQ_INTERNAL_BASE + 16) +#define BCM_6362_EPHY_ENERGY1_IRQ (IRQ_INTERNAL_BASE + 17) +#define BCM_6362_EPHY_ENERGY2_IRQ (IRQ_INTERNAL_BASE + 18) +#define BCM_6362_EPHY_ENERGY3_IRQ (IRQ_INTERNAL_BASE + 19) +#define BCM_6362_IPSEC_DMA0_IRQ (IRQ_INTERNAL_BASE + 26) +#define BCM_6362_IPSEC_DMA1_IRQ (IRQ_INTERNAL_BASE + 27) +#define BCM_6362_FAP0_IRQ (IRQ_INTERNAL_BASE + 29) +#define BCM_6362_PCM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 4) +#define BCM_6362_PCM_DMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 5) +#define BCM_6362_DECT0_IRQ (BCM_6362_HIGH_IRQ_BASE + 6) +#define BCM_6362_DECT1_IRQ (BCM_6362_HIGH_IRQ_BASE + 7) +#define BCM_6362_EXT_IRQ0 (BCM_6362_HIGH_IRQ_BASE + 8) +#define BCM_6362_EXT_IRQ1 (BCM_6362_HIGH_IRQ_BASE + 9) +#define BCM_6362_EXT_IRQ2 (BCM_6362_HIGH_IRQ_BASE + 10) +#define BCM_6362_EXT_IRQ3 (BCM_6362_HIGH_IRQ_BASE + 11) + +/* * 6368 irqs */ #define BCM_6368_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h 
b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h index b0184cf02575..c426cabc620a 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h @@ -71,18 +71,13 @@ static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg) return bcm63xx_regs_spi[reg]; #else -#ifdef CONFIG_BCM63XX_CPU_6338 - __GEN_SPI_RSET(6338) -#endif -#ifdef CONFIG_BCM63XX_CPU_6348 +#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348) __GEN_SPI_RSET(6348) #endif -#ifdef CONFIG_BCM63XX_CPU_6358 +#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \ + defined(CONFIG_BCM63XX_CPU_6368) __GEN_SPI_RSET(6358) #endif -#ifdef CONFIG_BCM63XX_CPU_6368 - __GEN_SPI_RSET(6368) -#endif #endif return 0; } diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h index 0a9891f7580d..35baa1a60a64 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h @@ -17,6 +17,8 @@ static inline unsigned long bcm63xx_gpio_count(void) return 8; case BCM6345_CPU_ID: return 16; + case BCM6362_CPU_ID: + return 48; case BCM6368_CPU_ID: return 38; case BCM6348_CPU_ID: diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 81b4702f792a..3203fe49b34d 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -10,7 +10,7 @@ #define REV_CHIPID_SHIFT 16 #define REV_CHIPID_MASK (0xffff << REV_CHIPID_SHIFT) #define REV_REVID_SHIFT 0 -#define REV_REVID_MASK (0xffff << REV_REVID_SHIFT) +#define REV_REVID_MASK (0xff << REV_REVID_SHIFT) /* Clock Control register */ #define PERF_CKCTL_REG 0x4 @@ -112,6 +112,39 @@ CKCTL_6358_USBSU_EN | \ CKCTL_6358_EPHY_EN) +#define CKCTL_6362_ADSL_QPROC_EN (1 << 1) +#define CKCTL_6362_ADSL_AFE_EN (1 << 2) +#define CKCTL_6362_ADSL_EN (1 << 3) +#define CKCTL_6362_MIPS_EN (1 << 4) +#define CKCTL_6362_WLAN_OCP_EN (1 << 5) +#define CKCTL_6362_SWPKT_USB_EN (1 << 7) +#define CKCTL_6362_SWPKT_SAR_EN (1 << 8) +#define CKCTL_6362_SAR_EN (1 << 9) +#define CKCTL_6362_ROBOSW_EN (1 << 10) +#define CKCTL_6362_PCM_EN (1 << 11) +#define CKCTL_6362_USBD_EN (1 << 12) +#define CKCTL_6362_USBH_EN (1 << 13) +#define CKCTL_6362_IPSEC_EN (1 << 14) +#define CKCTL_6362_SPI_EN (1 << 15) +#define CKCTL_6362_HSSPI_EN (1 << 16) +#define CKCTL_6362_PCIE_EN (1 << 17) +#define CKCTL_6362_FAP_EN (1 << 18) +#define CKCTL_6362_PHYMIPS_EN (1 << 19) +#define CKCTL_6362_NAND_EN (1 << 20) + +#define CKCTL_6362_ALL_SAFE_EN (CKCTL_6362_PHYMIPS_EN | \ + CKCTL_6362_ADSL_QPROC_EN | \ + CKCTL_6362_ADSL_AFE_EN | \ + CKCTL_6362_ADSL_EN | \ + CKCTL_6362_SAR_EN | \ + CKCTL_6362_PCM_EN | \ + CKCTL_6362_IPSEC_EN | \ + CKCTL_6362_USBD_EN | \ + CKCTL_6362_USBH_EN | \ + CKCTL_6362_ROBOSW_EN | \ + CKCTL_6362_PCIE_EN) + + #define CKCTL_6368_VDSL_QPROC_EN (1 << 2) #define CKCTL_6368_VDSL_AFE_EN (1 << 3) #define CKCTL_6368_VDSL_BONDING_EN (1 << 4) @@ -153,6 +186,7 @@ #define PERF_IRQMASK_6345_REG 0xc #define PERF_IRQMASK_6348_REG 0xc #define PERF_IRQMASK_6358_REG 0xc +#define PERF_IRQMASK_6362_REG 0x20 #define PERF_IRQMASK_6368_REG 0x20 /* Interrupt Status register */ @@ -161,6 +195,7 @@ #define PERF_IRQSTAT_6345_REG 0x10 #define PERF_IRQSTAT_6348_REG 0x10 #define PERF_IRQSTAT_6358_REG 0x10 +#define PERF_IRQSTAT_6362_REG 0x28 #define PERF_IRQSTAT_6368_REG 0x28 /* External Interrupt Configuration register */ @@ -169,6 +204,7 @@ 
#define PERF_EXTIRQ_CFG_REG_6345 0x14 #define PERF_EXTIRQ_CFG_REG_6348 0x14 #define PERF_EXTIRQ_CFG_REG_6358 0x14 +#define PERF_EXTIRQ_CFG_REG_6362 0x18 #define PERF_EXTIRQ_CFG_REG_6368 0x18 #define PERF_EXTIRQ_CFG_REG2_6368 0x1c @@ -197,6 +233,7 @@ #define PERF_SOFTRESET_REG 0x28 #define PERF_SOFTRESET_6328_REG 0x10 #define PERF_SOFTRESET_6358_REG 0x34 +#define PERF_SOFTRESET_6362_REG 0x10 #define PERF_SOFTRESET_6368_REG 0x10 #define SOFTRESET_6328_SPI_MASK (1 << 0) @@ -259,6 +296,22 @@ #define SOFTRESET_6358_PCM_MASK (1 << 13) #define SOFTRESET_6358_ADSL_MASK (1 << 14) +#define SOFTRESET_6362_SPI_MASK (1 << 0) +#define SOFTRESET_6362_IPSEC_MASK (1 << 1) +#define SOFTRESET_6362_EPHY_MASK (1 << 2) +#define SOFTRESET_6362_SAR_MASK (1 << 3) +#define SOFTRESET_6362_ENETSW_MASK (1 << 4) +#define SOFTRESET_6362_USBS_MASK (1 << 5) +#define SOFTRESET_6362_USBH_MASK (1 << 6) +#define SOFTRESET_6362_PCM_MASK (1 << 7) +#define SOFTRESET_6362_PCIE_CORE_MASK (1 << 8) +#define SOFTRESET_6362_PCIE_MASK (1 << 9) +#define SOFTRESET_6362_PCIE_EXT_MASK (1 << 10) +#define SOFTRESET_6362_WLAN_SHIM_MASK (1 << 11) +#define SOFTRESET_6362_DDR_PHY_MASK (1 << 12) +#define SOFTRESET_6362_FAP_MASK (1 << 13) +#define SOFTRESET_6362_WLAN_UBUS_MASK (1 << 14) + #define SOFTRESET_6368_SPI_MASK (1 << 0) #define SOFTRESET_6368_MPI_MASK (1 << 3) #define SOFTRESET_6368_EPHY_MASK (1 << 6) @@ -1223,24 +1276,7 @@ * _REG relative to RSET_SPI *************************************************************************/ -/* BCM 6338 SPI core */ -#define SPI_6338_CMD 0x00 /* 16-bits register */ -#define SPI_6338_INT_STATUS 0x02 -#define SPI_6338_INT_MASK_ST 0x03 -#define SPI_6338_INT_MASK 0x04 -#define SPI_6338_ST 0x05 -#define SPI_6338_CLK_CFG 0x06 -#define SPI_6338_FILL_BYTE 0x07 -#define SPI_6338_MSG_TAIL 0x09 -#define SPI_6338_RX_TAIL 0x0b -#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */ -#define SPI_6338_MSG_CTL_WIDTH 8 -#define SPI_6338_MSG_DATA 0x41 -#define SPI_6338_MSG_DATA_SIZE 0x3f -#define SPI_6338_RX_DATA 0x80 -#define SPI_6338_RX_DATA_SIZE 0x3f - -/* BCM 6348 SPI core */ +/* BCM 6338/6348 SPI core */ #define SPI_6348_CMD 0x00 /* 16-bits register */ #define SPI_6348_INT_STATUS 0x02 #define SPI_6348_INT_MASK_ST 0x03 @@ -1257,7 +1293,7 @@ #define SPI_6348_RX_DATA 0x80 #define SPI_6348_RX_DATA_SIZE 0x3f -/* BCM 6358 SPI core */ +/* BCM 6358/6262/6368 SPI core */ #define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ #define SPI_6358_MSG_CTL_WIDTH 16 #define SPI_6358_MSG_DATA 0x02 @@ -1274,23 +1310,6 @@ #define SPI_6358_MSG_TAIL 0x709 #define SPI_6358_RX_TAIL 0x70B -/* BCM 6358 SPI core */ -#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */ -#define SPI_6368_MSG_CTL_WIDTH 16 -#define SPI_6368_MSG_DATA 0x02 -#define SPI_6368_MSG_DATA_SIZE 0x21e -#define SPI_6368_RX_DATA 0x400 -#define SPI_6368_RX_DATA_SIZE 0x220 -#define SPI_6368_CMD 0x700 /* 16-bits register */ -#define SPI_6368_INT_STATUS 0x702 -#define SPI_6368_INT_MASK_ST 0x703 -#define SPI_6368_INT_MASK 0x704 -#define SPI_6368_ST 0x705 -#define SPI_6368_CLK_CFG 0x706 -#define SPI_6368_FILL_BYTE 0x707 -#define SPI_6368_MSG_TAIL 0x709 -#define SPI_6368_RX_TAIL 0x70B - /* Shared SPI definitions */ /* Message configuration */ @@ -1298,10 +1317,8 @@ #define SPI_HD_W 0x01 #define SPI_HD_R 0x02 #define SPI_BYTE_CNT_SHIFT 0 -#define SPI_6338_MSG_TYPE_SHIFT 6 #define SPI_6348_MSG_TYPE_SHIFT 6 #define SPI_6358_MSG_TYPE_SHIFT 14 -#define SPI_6368_MSG_TYPE_SHIFT 14 /* Command */ #define SPI_CMD_NOOP 0x00 @@ -1348,10 +1365,18 @@ 
/************************************************************************* * _REG relative to RSET_MISC *************************************************************************/ -#define MISC_SERDES_CTRL_REG 0x0 +#define MISC_SERDES_CTRL_6328_REG 0x0 +#define MISC_SERDES_CTRL_6362_REG 0x4 #define SERDES_PCIE_EN (1 << 0) #define SERDES_PCIE_EXD_EN (1 << 15) +#define MISC_STRAPBUS_6362_REG 0x14 +#define STRAPBUS_6362_FCVO_SHIFT 1 +#define STRAPBUS_6362_HSSPI_CLK_FAST (1 << 13) +#define STRAPBUS_6362_FCVO_MASK (0x1f << STRAPBUS_6362_FCVO_SHIFT) +#define STRAPBUS_6362_BOOT_SEL_SERIAL (1 << 15) +#define STRAPBUS_6362_BOOT_SEL_NAND (0 << 15) + #define MISC_STRAPBUS_6328_REG 0x240 #define STRAPBUS_6328_FCVO_SHIFT 7 #define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT) diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h index 30931c42379d..94e3011ba7df 100644 --- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h +++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h @@ -19,6 +19,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset) return 1; break; case BCM6328_CPU_ID: + case BCM6362_CPU_ID: case BCM6368_CPU_ID: if (offset >= 0xb0000000 && offset < 0xb1000000) return 1; diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h index 9c95177f7a7e..fe23034aaf72 100644 --- a/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -61,9 +61,8 @@ static inline int plat_device_is_coherent(struct device *dev) { #ifdef CONFIG_DMA_COHERENT return 1; -#endif -#ifdef CONFIG_DMA_NONCOHERENT - return 0; +#else + return coherentio; #endif } diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h index 73d717a75cb0..5b2f2e68e57f 100644 --- a/arch/mips/include/asm/mach-generic/spaces.h +++ b/arch/mips/include/asm/mach-generic/spaces.h @@ -20,14 +20,21 @@ #endif #ifdef CONFIG_32BIT - +#ifdef CONFIG_KVM_GUEST +#define CAC_BASE _AC(0x40000000, UL) +#else #define CAC_BASE _AC(0x80000000, UL) +#endif #define IO_BASE _AC(0xa0000000, UL) #define UNCAC_BASE _AC(0xa0000000, UL) #ifndef MAP_BASE +#ifdef CONFIG_KVM_GUEST +#define MAP_BASE _AC(0x60000000, UL) +#else #define MAP_BASE _AC(0xc0000000, UL) #endif +#endif /* * Memory above this physical address will be considered highmem. diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h index 75fd8c0f986e..c0f3ef45c2c1 100644 --- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h @@ -57,5 +57,6 @@ #define cpu_has_vint 0 #define cpu_has_vtag_icache 0 #define cpu_has_watch 1 +#define cpu_has_local_ebase 0 #endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h new file mode 100644 index 000000000000..9809972ea882 --- /dev/null +++ b/arch/mips/include/asm/mach-ralink/mt7620.h @@ -0,0 +1,84 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + */ + +#ifndef _MT7620_REGS_H_ +#define _MT7620_REGS_H_ + +#define MT7620_SYSC_BASE 0x10000000 + +#define SYSC_REG_CHIP_NAME0 0x00 +#define SYSC_REG_CHIP_NAME1 0x04 +#define SYSC_REG_CHIP_REV 0x0c +#define SYSC_REG_SYSTEM_CONFIG0 0x10 +#define SYSC_REG_SYSTEM_CONFIG1 0x14 +#define SYSC_REG_CPLL_CONFIG0 0x54 +#define SYSC_REG_CPLL_CONFIG1 0x58 + +#define MT7620N_CHIP_NAME0 0x33365452 +#define MT7620N_CHIP_NAME1 0x20203235 + +#define MT7620A_CHIP_NAME0 0x3637544d +#define MT7620A_CHIP_NAME1 0x20203032 + +#define CHIP_REV_PKG_MASK 0x1 +#define CHIP_REV_PKG_SHIFT 16 +#define CHIP_REV_VER_MASK 0xf +#define CHIP_REV_VER_SHIFT 8 +#define CHIP_REV_ECO_MASK 0xf + +#define CPLL_SW_CONFIG_SHIFT 31 +#define CPLL_SW_CONFIG_MASK 0x1 +#define CPLL_CPU_CLK_SHIFT 24 +#define CPLL_CPU_CLK_MASK 0x1 +#define CPLL_MULT_RATIO_SHIFT 16 +#define CPLL_MULT_RATIO 0x7 +#define CPLL_DIV_RATIO_SHIFT 10 +#define CPLL_DIV_RATIO 0x3 + +#define SYSCFG0_DRAM_TYPE_MASK 0x3 +#define SYSCFG0_DRAM_TYPE_SHIFT 4 +#define SYSCFG0_DRAM_TYPE_SDRAM 0 +#define SYSCFG0_DRAM_TYPE_DDR1 1 +#define SYSCFG0_DRAM_TYPE_DDR2 2 + +#define MT7620_DRAM_BASE 0x0 +#define MT7620_SDRAM_SIZE_MIN 2 +#define MT7620_SDRAM_SIZE_MAX 64 +#define MT7620_DDR1_SIZE_MIN 32 +#define MT7620_DDR1_SIZE_MAX 128 +#define MT7620_DDR2_SIZE_MIN 32 +#define MT7620_DDR2_SIZE_MAX 256 + +#define MT7620_GPIO_MODE_I2C BIT(0) +#define MT7620_GPIO_MODE_UART0_SHIFT 2 +#define MT7620_GPIO_MODE_UART0_MASK 0x7 +#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT) +#define MT7620_GPIO_MODE_UARTF 0x0 +#define MT7620_GPIO_MODE_PCM_UARTF 0x1 +#define MT7620_GPIO_MODE_PCM_I2S 0x2 +#define MT7620_GPIO_MODE_I2S_UARTF 0x3 +#define MT7620_GPIO_MODE_PCM_GPIO 0x4 +#define MT7620_GPIO_MODE_GPIO_UARTF 0x5 +#define MT7620_GPIO_MODE_GPIO_I2S 0x6 +#define MT7620_GPIO_MODE_GPIO 0x7 +#define MT7620_GPIO_MODE_UART1 BIT(5) +#define MT7620_GPIO_MODE_MDIO BIT(8) +#define MT7620_GPIO_MODE_RGMII1 BIT(9) +#define MT7620_GPIO_MODE_RGMII2 BIT(10) +#define MT7620_GPIO_MODE_SPI BIT(11) +#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12) +#define MT7620_GPIO_MODE_WLED BIT(13) +#define MT7620_GPIO_MODE_JTAG BIT(15) +#define MT7620_GPIO_MODE_EPHY BIT(15) +#define MT7620_GPIO_MODE_WDT BIT(22) + +#endif diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h new file mode 100644 index 000000000000..03ad716acb42 --- /dev/null +++ b/arch/mips/include/asm/mach-ralink/rt288x.h @@ -0,0 +1,53 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + */ + +#ifndef _RT288X_REGS_H_ +#define _RT288X_REGS_H_ + +#define RT2880_SYSC_BASE 0x00300000 + +#define SYSC_REG_CHIP_NAME0 0x00 +#define SYSC_REG_CHIP_NAME1 0x04 +#define SYSC_REG_CHIP_ID 0x0c +#define SYSC_REG_SYSTEM_CONFIG 0x10 +#define SYSC_REG_CLKCFG 0x30 + +#define RT2880_CHIP_NAME0 0x38325452 +#define RT2880_CHIP_NAME1 0x20203038 + +#define CHIP_ID_ID_MASK 0xff +#define CHIP_ID_ID_SHIFT 8 +#define CHIP_ID_REV_MASK 0xff + +#define SYSTEM_CONFIG_CPUCLK_SHIFT 20 +#define SYSTEM_CONFIG_CPUCLK_MASK 0x3 +#define SYSTEM_CONFIG_CPUCLK_250 0x0 +#define SYSTEM_CONFIG_CPUCLK_266 0x1 +#define SYSTEM_CONFIG_CPUCLK_280 0x2 +#define SYSTEM_CONFIG_CPUCLK_300 0x3 + +#define RT2880_GPIO_MODE_I2C BIT(0) +#define RT2880_GPIO_MODE_UART0 BIT(1) +#define RT2880_GPIO_MODE_SPI BIT(2) +#define RT2880_GPIO_MODE_UART1 BIT(3) +#define RT2880_GPIO_MODE_JTAG BIT(4) +#define RT2880_GPIO_MODE_MDIO BIT(5) +#define RT2880_GPIO_MODE_SDRAM BIT(6) +#define RT2880_GPIO_MODE_PCI BIT(7) + +#define CLKCFG_SRAM_CS_N_WDT BIT(9) + +#define RT2880_SDRAM_BASE 0x08000000 +#define RT2880_MEM_SIZE_MIN 2 +#define RT2880_MEM_SIZE_MAX 128 + +#endif diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h new file mode 100644 index 000000000000..72fc10669199 --- /dev/null +++ b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h @@ -0,0 +1,56 @@ +/* + * Ralink RT288x specific CPU feature overrides + * + * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * + * This file was derived from: include/asm-mips/cpu-features.h + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ +#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H +#define _RT288X_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_sb1_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_32fpr 0 +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 + +#define cpu_has_prefetch 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 + +#define cpu_has_mips16 1 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 + +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +#define cpu_has_dsp 0 +#define cpu_has_mipsmt 0 + +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 + +#define cpu_dcache_line_size() 16 +#define cpu_icache_line_size() 16 + +#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h index 7d344f2d7d0a..069bf37a6010 100644 --- a/arch/mips/include/asm/mach-ralink/rt305x.h +++ b/arch/mips/include/asm/mach-ralink/rt305x.h @@ -97,6 +97,14 @@ static inline int soc_is_rt5350(void) #define RT5350_SYSCFG0_CPUCLK_320 0x2 #define RT5350_SYSCFG0_CPUCLK_300 0x3 +#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT 12 +#define RT5350_SYSCFG0_DRAM_SIZE_MASK 7 +#define RT5350_SYSCFG0_DRAM_SIZE_2M 0 +#define RT5350_SYSCFG0_DRAM_SIZE_8M 1 +#define RT5350_SYSCFG0_DRAM_SIZE_16M 2 +#define RT5350_SYSCFG0_DRAM_SIZE_32M 3 +#define RT5350_SYSCFG0_DRAM_SIZE_64M 4 + /* multi function gpio pins */ #define RT305X_GPIO_I2C_SD 1 #define RT305X_GPIO_I2C_SCLK 2 @@ -136,4 +144,23 @@ static inline int soc_is_rt5350(void) #define RT305X_GPIO_MODE_SDRAM BIT(8) #define RT305X_GPIO_MODE_RGMII BIT(9) +#define RT3352_SYSC_REG_SYSCFG0 0x010 +#define RT3352_SYSC_REG_SYSCFG1 0x014 +#define RT3352_SYSC_REG_CLKCFG1 0x030 +#define RT3352_SYSC_REG_RSTCTRL 0x034 +#define RT3352_SYSC_REG_USB_PS 0x05c + +#define RT3352_CLKCFG0_XTAL_SEL BIT(20) +#define RT3352_CLKCFG1_UPHY0_CLK_EN BIT(18) +#define RT3352_CLKCFG1_UPHY1_CLK_EN BIT(20) +#define RT3352_RSTCTRL_UHST BIT(22) +#define RT3352_RSTCTRL_UDEV BIT(25) +#define RT3352_SYSCFG1_USB0_HOST_MODE BIT(10) + +#define RT305X_SDRAM_BASE 0x00000000 +#define RT305X_MEM_SIZE_MIN 2 +#define RT305X_MEM_SIZE_MAX 64 +#define RT3352_MEM_SIZE_MIN 2 +#define RT3352_MEM_SIZE_MAX 256 + #endif diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h new file mode 100644 index 000000000000..917c28654552 --- /dev/null +++ b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h @@ -0,0 +1,56 @@ +/* + * Ralink RT305x specific CPU feature overrides + * + * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * + * This file was derived from: include/asm-mips/cpu-features.h + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ +#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H +#define _RT305X_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_sb1_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_32fpr 0 +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 + +#define cpu_has_prefetch 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 + +#define cpu_has_mips16 1 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 + +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +#define cpu_has_dsp 1 +#define cpu_has_mipsmt 0 + +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 + +#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h new file mode 100644 index 000000000000..058382f37f92 --- /dev/null +++ b/arch/mips/include/asm/mach-ralink/rt3883.h @@ -0,0 +1,252 @@ +/* + * Ralink RT3662/RT3883 SoC register definitions + * + * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef _RT3883_REGS_H_ +#define _RT3883_REGS_H_ + +#include <linux/bitops.h> + +#define RT3883_SDRAM_BASE 0x00000000 +#define RT3883_SYSC_BASE 0x10000000 +#define RT3883_TIMER_BASE 0x10000100 +#define RT3883_INTC_BASE 0x10000200 +#define RT3883_MEMC_BASE 0x10000300 +#define RT3883_UART0_BASE 0x10000500 +#define RT3883_PIO_BASE 0x10000600 +#define RT3883_FSCC_BASE 0x10000700 +#define RT3883_NANDC_BASE 0x10000810 +#define RT3883_I2C_BASE 0x10000900 +#define RT3883_I2S_BASE 0x10000a00 +#define RT3883_SPI_BASE 0x10000b00 +#define RT3883_UART1_BASE 0x10000c00 +#define RT3883_PCM_BASE 0x10002000 +#define RT3883_GDMA_BASE 0x10002800 +#define RT3883_CODEC1_BASE 0x10003000 +#define RT3883_CODEC2_BASE 0x10003800 +#define RT3883_FE_BASE 0x10100000 +#define RT3883_ROM_BASE 0x10118000 +#define RT3883_USBDEV_BASE 0x10112000 +#define RT3883_PCI_BASE 0x10140000 +#define RT3883_WLAN_BASE 0x10180000 +#define RT3883_USBHOST_BASE 0x101c0000 +#define RT3883_BOOT_BASE 0x1c000000 +#define RT3883_SRAM_BASE 0x1e000000 +#define RT3883_PCIMEM_BASE 0x20000000 + +#define RT3883_EHCI_BASE (RT3883_USBHOST_BASE) +#define RT3883_OHCI_BASE (RT3883_USBHOST_BASE + 0x1000) + +#define RT3883_SYSC_SIZE 0x100 +#define RT3883_TIMER_SIZE 0x100 +#define RT3883_INTC_SIZE 0x100 +#define RT3883_MEMC_SIZE 0x100 +#define RT3883_UART0_SIZE 0x100 +#define RT3883_UART1_SIZE 0x100 +#define RT3883_PIO_SIZE 0x100 +#define RT3883_FSCC_SIZE 0x100 +#define RT3883_NANDC_SIZE 0x0f0 +#define RT3883_I2C_SIZE 0x100 +#define RT3883_I2S_SIZE 0x100 +#define RT3883_SPI_SIZE 0x100 +#define RT3883_PCM_SIZE 0x800 +#define RT3883_GDMA_SIZE 0x800 +#define RT3883_CODEC1_SIZE 0x800 +#define RT3883_CODEC2_SIZE 0x800 +#define RT3883_FE_SIZE 0x10000 +#define RT3883_ROM_SIZE 0x4000 +#define RT3883_USBDEV_SIZE 0x4000 +#define RT3883_PCI_SIZE 0x40000 +#define RT3883_WLAN_SIZE 0x40000 +#define RT3883_USBHOST_SIZE 0x40000 +#define RT3883_BOOT_SIZE (32 * 1024 * 1024) +#define RT3883_SRAM_SIZE (32 * 1024 * 1024) + +/* SYSC registers */ +#define 
RT3883_SYSC_REG_CHIPID0_3 0x00 /* Chip ID 0 */ +#define RT3883_SYSC_REG_CHIPID4_7 0x04 /* Chip ID 1 */ +#define RT3883_SYSC_REG_REVID 0x0c /* Chip Revision Identification */ +#define RT3883_SYSC_REG_SYSCFG0 0x10 /* System Configuration 0 */ +#define RT3883_SYSC_REG_SYSCFG1 0x14 /* System Configuration 1 */ +#define RT3883_SYSC_REG_CLKCFG0 0x2c /* Clock Configuration 0 */ +#define RT3883_SYSC_REG_CLKCFG1 0x30 /* Clock Configuration 1 */ +#define RT3883_SYSC_REG_RSTCTRL 0x34 /* Reset Control*/ +#define RT3883_SYSC_REG_RSTSTAT 0x38 /* Reset Status*/ +#define RT3883_SYSC_REG_USB_PS 0x5c /* USB Power saving control */ +#define RT3883_SYSC_REG_GPIO_MODE 0x60 /* GPIO Purpose Select */ +#define RT3883_SYSC_REG_PCIE_CLK_GEN0 0x7c +#define RT3883_SYSC_REG_PCIE_CLK_GEN1 0x80 +#define RT3883_SYSC_REG_PCIE_CLK_GEN2 0x84 +#define RT3883_SYSC_REG_PMU 0x88 +#define RT3883_SYSC_REG_PMU1 0x8c + +#define RT3883_CHIP_NAME0 0x38335452 +#define RT3883_CHIP_NAME1 0x20203338 + +#define RT3883_REVID_VER_ID_MASK 0x0f +#define RT3883_REVID_VER_ID_SHIFT 8 +#define RT3883_REVID_ECO_ID_MASK 0x0f + +#define RT3883_SYSCFG0_DRAM_TYPE_DDR2 BIT(17) +#define RT3883_SYSCFG0_CPUCLK_SHIFT 8 +#define RT3883_SYSCFG0_CPUCLK_MASK 0x3 +#define RT3883_SYSCFG0_CPUCLK_250 0x0 +#define RT3883_SYSCFG0_CPUCLK_384 0x1 +#define RT3883_SYSCFG0_CPUCLK_480 0x2 +#define RT3883_SYSCFG0_CPUCLK_500 0x3 + +#define RT3883_SYSCFG1_USB0_HOST_MODE BIT(10) +#define RT3883_SYSCFG1_PCIE_RC_MODE BIT(8) +#define RT3883_SYSCFG1_PCI_HOST_MODE BIT(7) +#define RT3883_SYSCFG1_PCI_66M_MODE BIT(6) +#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT BIT(2) + +#define RT3883_CLKCFG1_PCIE_CLK_EN BIT(21) +#define RT3883_CLKCFG1_UPHY1_CLK_EN BIT(20) +#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19) +#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18) + +#define RT3883_GPIO_MODE_I2C BIT(0) +#define RT3883_GPIO_MODE_SPI BIT(1) +#define RT3883_GPIO_MODE_UART0_SHIFT 2 +#define RT3883_GPIO_MODE_UART0_MASK 0x7 +#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT) +#define RT3883_GPIO_MODE_UARTF 0x0 +#define RT3883_GPIO_MODE_PCM_UARTF 0x1 +#define RT3883_GPIO_MODE_PCM_I2S 0x2 +#define RT3883_GPIO_MODE_I2S_UARTF 0x3 +#define RT3883_GPIO_MODE_PCM_GPIO 0x4 +#define RT3883_GPIO_MODE_GPIO_UARTF 0x5 +#define RT3883_GPIO_MODE_GPIO_I2S 0x6 +#define RT3883_GPIO_MODE_GPIO 0x7 +#define RT3883_GPIO_MODE_UART1 BIT(5) +#define RT3883_GPIO_MODE_JTAG BIT(6) +#define RT3883_GPIO_MODE_MDIO BIT(7) +#define RT3883_GPIO_MODE_GE1 BIT(9) +#define RT3883_GPIO_MODE_GE2 BIT(10) +#define RT3883_GPIO_MODE_PCI_SHIFT 11 +#define RT3883_GPIO_MODE_PCI_MASK 0x7 +#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT) +#define RT3883_GPIO_MODE_LNA_A_SHIFT 16 +#define RT3883_GPIO_MODE_LNA_A_MASK 0x3 +#define _RT3883_GPIO_MODE_LNA_A(_x) ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT) +#define RT3883_GPIO_MODE_LNA_A_GPIO 0x3 +#define RT3883_GPIO_MODE_LNA_A _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK) +#define RT3883_GPIO_MODE_LNA_G_SHIFT 18 +#define RT3883_GPIO_MODE_LNA_G_MASK 0x3 +#define _RT3883_GPIO_MODE_LNA_G(_x) ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT) +#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3 +#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK) + +#define RT3883_GPIO_I2C_SD 1 +#define RT3883_GPIO_I2C_SCLK 2 +#define RT3883_GPIO_SPI_CS0 3 +#define RT3883_GPIO_SPI_CLK 4 +#define RT3883_GPIO_SPI_MOSI 5 +#define RT3883_GPIO_SPI_MISO 6 +#define RT3883_GPIO_7 7 +#define RT3883_GPIO_10 10 +#define RT3883_GPIO_11 11 +#define RT3883_GPIO_14 14 +#define 
RT3883_GPIO_UART1_TXD 15 +#define RT3883_GPIO_UART1_RXD 16 +#define RT3883_GPIO_JTAG_TDO 17 +#define RT3883_GPIO_JTAG_TDI 18 +#define RT3883_GPIO_JTAG_TMS 19 +#define RT3883_GPIO_JTAG_TCLK 20 +#define RT3883_GPIO_JTAG_TRST_N 21 +#define RT3883_GPIO_MDIO_MDC 22 +#define RT3883_GPIO_MDIO_MDIO 23 +#define RT3883_GPIO_LNA_PE_A0 32 +#define RT3883_GPIO_LNA_PE_A1 33 +#define RT3883_GPIO_LNA_PE_A2 34 +#define RT3883_GPIO_LNA_PE_G0 35 +#define RT3883_GPIO_LNA_PE_G1 36 +#define RT3883_GPIO_LNA_PE_G2 37 +#define RT3883_GPIO_PCI_AD0 40 +#define RT3883_GPIO_PCI_AD31 71 +#define RT3883_GPIO_GE2_TXD0 72 +#define RT3883_GPIO_GE2_TXD1 73 +#define RT3883_GPIO_GE2_TXD2 74 +#define RT3883_GPIO_GE2_TXD3 75 +#define RT3883_GPIO_GE2_TXEN 76 +#define RT3883_GPIO_GE2_TXCLK 77 +#define RT3883_GPIO_GE2_RXD0 78 +#define RT3883_GPIO_GE2_RXD1 79 +#define RT3883_GPIO_GE2_RXD2 80 +#define RT3883_GPIO_GE2_RXD3 81 +#define RT3883_GPIO_GE2_RXDV 82 +#define RT3883_GPIO_GE2_RXCLK 83 +#define RT3883_GPIO_GE1_TXD0 84 +#define RT3883_GPIO_GE1_TXD1 85 +#define RT3883_GPIO_GE1_TXD2 86 +#define RT3883_GPIO_GE1_TXD3 87 +#define RT3883_GPIO_GE1_TXEN 88 +#define RT3883_GPIO_GE1_TXCLK 89 +#define RT3883_GPIO_GE1_RXD0 90 +#define RT3883_GPIO_GE1_RXD1 91 +#define RT3883_GPIO_GE1_RXD2 92 +#define RT3883_GPIO_GE1_RXD3 93 +#define RT3883_GPIO_GE1_RXDV 94 +#define RT3883_GPIO_GE1_RXCLK 95 + +#define RT3883_RSTCTRL_PCIE_PCI_PDM BIT(27) +#define RT3883_RSTCTRL_FLASH BIT(26) +#define RT3883_RSTCTRL_UDEV BIT(25) +#define RT3883_RSTCTRL_PCI BIT(24) +#define RT3883_RSTCTRL_PCIE BIT(23) +#define RT3883_RSTCTRL_UHST BIT(22) +#define RT3883_RSTCTRL_FE BIT(21) +#define RT3883_RSTCTRL_WLAN BIT(20) +#define RT3883_RSTCTRL_UART1 BIT(29) +#define RT3883_RSTCTRL_SPI BIT(18) +#define RT3883_RSTCTRL_I2S BIT(17) +#define RT3883_RSTCTRL_I2C BIT(16) +#define RT3883_RSTCTRL_NAND BIT(15) +#define RT3883_RSTCTRL_DMA BIT(14) +#define RT3883_RSTCTRL_PIO BIT(13) +#define RT3883_RSTCTRL_UART BIT(12) +#define RT3883_RSTCTRL_PCM BIT(11) +#define RT3883_RSTCTRL_MC BIT(10) +#define RT3883_RSTCTRL_INTC BIT(9) +#define RT3883_RSTCTRL_TIMER BIT(8) +#define RT3883_RSTCTRL_SYS BIT(0) + +#define RT3883_INTC_INT_SYSCTL BIT(0) +#define RT3883_INTC_INT_TIMER0 BIT(1) +#define RT3883_INTC_INT_TIMER1 BIT(2) +#define RT3883_INTC_INT_IA BIT(3) +#define RT3883_INTC_INT_PCM BIT(4) +#define RT3883_INTC_INT_UART0 BIT(5) +#define RT3883_INTC_INT_PIO BIT(6) +#define RT3883_INTC_INT_DMA BIT(7) +#define RT3883_INTC_INT_NAND BIT(8) +#define RT3883_INTC_INT_PERFC BIT(9) +#define RT3883_INTC_INT_I2S BIT(10) +#define RT3883_INTC_INT_UART1 BIT(12) +#define RT3883_INTC_INT_UHST BIT(18) +#define RT3883_INTC_INT_UDEV BIT(19) + +/* FLASH/SRAM/Codec Controller registers */ +#define RT3883_FSCC_REG_FLASH_CFG0 0x00 +#define RT3883_FSCC_REG_FLASH_CFG1 0x04 +#define RT3883_FSCC_REG_CODEC_CFG0 0x40 +#define RT3883_FSCC_REG_CODEC_CFG1 0x44 + +#define RT3883_FLASH_CFG_WIDTH_SHIFT 26 +#define RT3883_FLASH_CFG_WIDTH_MASK 0x3 +#define RT3883_FLASH_CFG_WIDTH_8BIT 0x0 +#define RT3883_FLASH_CFG_WIDTH_16BIT 0x1 +#define RT3883_FLASH_CFG_WIDTH_32BIT 0x2 + +#define RT3883_SDRAM_BASE 0x00000000 +#define RT3883_MEM_SIZE_MIN 2 +#define RT3883_MEM_SIZE_MAX 256 + +#endif /* _RT3883_REGS_H_ */ diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h new file mode 100644 index 000000000000..181fbf4c976f --- /dev/null +++ b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h @@ -0,0 +1,55 @@ +/* + * Ralink RT3662/RT3883 
specific CPU feature overrides + * + * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org> + * + * This file was derived from: include/asm-mips/cpu-features.h + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + */ +#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H +#define _RT3883_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_sb1_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_32fpr 0 +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 + +#define cpu_has_prefetch 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 + +#define cpu_has_mips16 1 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 + +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +#define cpu_has_dsp 1 +#define cpu_has_mipsmt 0 + +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 + +#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index 193c0912d38e..bfbd7035d4c5 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,7 +28,11 @@ /* #define cpu_has_prefetch ? */ #define cpu_has_mcheck 1 /* #define cpu_has_ejtag ? */ +#ifdef CONFIG_CPU_MICROMIPS +#define cpu_has_llsc 0 +#else #define cpu_has_llsc 1 +#endif /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h index 44a09a64160a..bd9746fbe4af 100644 --- a/arch/mips/include/asm/mips-boards/generic.h +++ b/arch/mips/include/asm/mips-boards/generic.h @@ -83,4 +83,7 @@ extern void mips_pcibios_init(void); #define mips_pcibios_init() do { } while (0) #endif +extern void mips_scroll_message(void); +extern void mips_display_message(const char *str); + #endif /* __ASM_MIPS_BOARDS_GENERIC_H */ diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h deleted file mode 100644 index e7aed3e4ff58..000000000000 --- a/arch/mips/include/asm/mips-boards/prom.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. - * - * ######################################################################## - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * ######################################################################## - * - * MIPS boards bootprom interface for the Linux kernel. - * - */ - -#ifndef _MIPS_PROM_H -#define _MIPS_PROM_H - -extern char *prom_getcmdline(void); -extern char *prom_getenv(char *name); -extern void prom_init_cmdline(void); -extern void prom_meminit(void); -extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem); -extern void mips_display_message(const char *str); -extern void mips_display_word(unsigned int num); -extern void mips_scroll_message(void); -extern int get_ethernet_addr(char *ethernet_addr); - -/* Memory descriptor management. */ -#define PROM_MAX_PMEMBLOCKS 32 -struct prom_pmemblock { - unsigned long base; /* Within KSEG0. */ - unsigned int size; /* In bytes. */ - unsigned int type; /* free or prom memory */ -}; - -#endif /* !(_MIPS_PROM_H) */ diff --git a/arch/mips/include/asm/mips_machine.h b/arch/mips/include/asm/mips_machine.h index 363bb352c7f7..9d00aebe9842 100644 --- a/arch/mips/include/asm/mips_machine.h +++ b/arch/mips/include/asm/mips_machine.h @@ -42,13 +42,9 @@ extern long __mips_machines_end; #ifdef CONFIG_MIPS_MACHINE int mips_machtype_setup(char *id) __init; void mips_machine_setup(void) __init; -void mips_set_machine_name(const char *name) __init; -char *mips_get_machine_name(void); #else static inline int mips_machtype_setup(char *id) { return 1; } static inline void mips_machine_setup(void) { } -static inline void mips_set_machine_name(const char *name) { } -static inline char *mips_get_machine_name(void) { return NULL; } #endif /* CONFIG_MIPS_MACHINE */ #endif /* __ASM_MIPS_MACHINE_H */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 0da44d422f5b..87e6207b05e4 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -596,6 +596,7 @@ #define MIPS_CONF3_RXI (_ULCAST_(1) << 12) #define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) #define MIPS_CONF3_ISA (_ULCAST_(3) << 14) +#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16) #define MIPS_CONF3_VZ (_ULCAST_(1) << 23) #define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) @@ -623,6 +624,24 @@ #ifndef __ASSEMBLY__ /* + * Macros for handling the ISA mode bit for microMIPS. + */ +#define get_isa16_mode(x) ((x) & 0x1) +#define msk_isa16_mode(x) ((x) & ~0x1) +#define set_isa16_mode(x) do { (x) |= 0x1; } while(0) + +/* + * microMIPS instructions can be 16-bit or 32-bit in length. This + * returns a 1 if the instruction is 16-bit and a 0 if 32-bit. + */ +static inline int mm_insn_16bit(u16 insn) +{ + u16 opcode = (insn >> 10) & 0x7; + + return (opcode >= 1 && opcode <= 3) ? 1 : 0; +} + +/* * Functions to access the R10000 performance counters. These are basically * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit * performance counter number encoded into bits 1 ... 5 of the instruction. 
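The mipsregs.h hunk above introduces helpers for the microMIPS ISA mode bit (the least-significant bit of an instruction address) and a 16-bit/32-bit instruction length test. As a rough illustration of how those helpers fit together, here is a minimal user-space sketch: the macro and function bodies mirror the ones added in the hunk, while the EPC value, the fetched halfword and the main() harness are made-up demo inputs, not anything taken from the kernel.

/*
 * Illustrative sketch only -- not part of the patch.  The three macros
 * and mm_insn_16bit() mirror the helpers added to mipsregs.h above;
 * the EPC and halfword values below are arbitrary demo inputs.
 */
#include <stdio.h>
#include <stdint.h>

#define get_isa16_mode(x)	((x) & 0x1)
#define msk_isa16_mode(x)	((x) & ~0x1)
#define set_isa16_mode(x)	do { (x) |= 0x1; } while (0)

/* Mirrors mm_insn_16bit() from the hunk above. */
static inline int mm_insn_16bit(uint16_t insn)
{
	uint16_t opcode = (insn >> 10) & 0x7;

	return (opcode >= 1 && opcode <= 3) ? 1 : 0;
}

int main(void)
{
	unsigned long epc = 0x80001235UL;	/* hypothetical EPC, ISA bit set */
	uint16_t halfword = 0x0c00;		/* hypothetical fetched halfword */

	if (get_isa16_mode(epc)) {
		/* Strip the ISA bit to recover the real fetch address. */
		unsigned long addr = msk_isa16_mode(epc);

		printf("microMIPS fetch at %#lx: %s instruction\n", addr,
		       mm_insn_16bit(halfword) ? "16-bit" : "32-bit");
	} else {
		printf("classic MIPS fetch at %#lx\n", epc);
	}

	return 0;
}

Compiled with any C compiler (e.g. gcc -o isa_demo isa_demo.c), the sketch prints the stripped fetch address and the decoded length for the demo halfword.
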
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index e81d719efcd1..1554721e4808 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -26,10 +26,15 @@ #ifdef CONFIG_MIPS_PGD_C0_CONTEXT -#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ - tlbmiss_handler_setup_pgd((unsigned long)(pgd)) - -extern void tlbmiss_handler_setup_pgd(unsigned long pgd); +#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ +do { \ + void (*tlbmiss_handler_setup_pgd)(unsigned long); \ + extern u32 tlbmiss_handler_setup_pgd_array[16]; \ + \ + tlbmiss_handler_setup_pgd = \ + (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \ + tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \ +} while (0) #define TLBMISS_HANDLER_SETUP() \ do { \ @@ -62,59 +67,88 @@ extern unsigned long pgd_current[]; TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) #endif #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ -#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) - -#define ASID_INC 0x40 -#define ASID_MASK 0xfc0 - -#elif defined(CONFIG_CPU_R8000) - -#define ASID_INC 0x10 -#define ASID_MASK 0xff0 -#elif defined(CONFIG_MIPS_MT_SMTC) - -#define ASID_INC 0x1 -extern unsigned long smtc_asid_mask; -#define ASID_MASK (smtc_asid_mask) -#define HW_ASID_MASK 0xff -/* End SMTC/34K debug hack */ -#else /* FIXME: not correct for R6000 */ - -#define ASID_INC 0x1 -#define ASID_MASK 0xff +#define ASID_INC(asid) \ +({ \ + unsigned long __asid = asid; \ + __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \ + ".section\t__asid_inc,\"a\"\n\t" \ + ".word\t1b\n\t" \ + ".previous" \ + :"=r" (__asid) \ + :"0" (__asid)); \ + __asid; \ +}) +#define ASID_MASK(asid) \ +({ \ + unsigned long __asid = asid; \ + __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \ + ".section\t__asid_mask,\"a\"\n\t" \ + ".word\t1b\n\t" \ + ".previous" \ + :"=r" (__asid) \ + :"r" (__asid)); \ + __asid; \ +}) +#define ASID_VERSION_MASK \ +({ \ + unsigned long __asid; \ + __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \ + ".section\t__asid_version_mask,\"a\"\n\t" \ + ".word\t1b\n\t" \ + ".previous" \ + :"=r" (__asid)); \ + __asid; \ +}) +#define ASID_FIRST_VERSION \ +({ \ + unsigned long __asid = asid; \ + __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \ + ".section\t__asid_first_version,\"a\"\n\t" \ + ".word\t1b\n\t" \ + ".previous" \ + :"=r" (__asid)); \ + __asid; \ +}) + +#define ASID_FIRST_VERSION_R3000 0x1000 +#define ASID_FIRST_VERSION_R4000 0x100 +#define ASID_FIRST_VERSION_R8000 0x1000 +#define ASID_FIRST_VERSION_RM9000 0x1000 +#ifdef CONFIG_MIPS_MT_SMTC +#define SMTC_HW_ASID_MASK 0xff +extern unsigned int smtc_asid_mask; #endif #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) -#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) +#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm))) #define asid_cache(cpu) (cpu_data[cpu].asid_cache) static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } -/* - * All unused by hardware upper bits will be considered - * as a software asid extension. - */ -#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) -#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) - #ifndef CONFIG_MIPS_MT_SMTC /* Normal, classic MIPS get_new_mmu_context */ static inline void get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) { + extern void kvm_local_flush_tlb_all(void); unsigned long asid = asid_cache(cpu); - if (! 
((asid += ASID_INC) & ASID_MASK) ) { + if (!ASID_MASK((asid = ASID_INC(asid)))) { if (cpu_has_vtag_icache) flush_icache_all(); +#ifdef CONFIG_VIRTUALIZATION + kvm_local_flush_tlb_all(); /* start new asid cycle */ +#else local_flush_tlb_all(); /* start new asid cycle */ +#endif if (!asid) /* fix version if needed */ asid = ASID_FIRST_VERSION; } + cpu_context(cpu, mm) = asid_cache(cpu) = asid; } @@ -133,7 +167,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) { int i; - for_each_online_cpu(i) + for_each_possible_cpu(i) cpu_context(i, mm) = 0; return 0; @@ -166,7 +200,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * free up the ASID value for use and flush any old * instances of it from the TLB. */ - oldasid = (read_c0_entryhi() & ASID_MASK); + oldasid = ASID_MASK(read_c0_entryhi()); if(smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) @@ -177,7 +211,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * having ASID_MASK smaller than the hardware maximum, * make sure no "soft" bits become "hard"... */ - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | + write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | cpu_asid(cpu, next)); ehb(); /* Make sure it propagates to TCStatus */ evpe(mtflags); @@ -230,15 +264,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) #ifdef CONFIG_MIPS_MT_SMTC /* See comments for similar code above */ mtflags = dvpe(); - oldasid = read_c0_entryhi() & ASID_MASK; + oldasid = ASID_MASK(read_c0_entryhi()); if(smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) smtc_flush_tlb_asid(oldasid); } /* See comments for similar code above */ - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | - cpu_asid(cpu, next)); + write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | + cpu_asid(cpu, next)); ehb(); /* Make sure it propagates to TCStatus */ evpe(mtflags); #else @@ -275,14 +309,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) #ifdef CONFIG_MIPS_MT_SMTC /* See comments for similar code above */ prevvpe = dvpe(); - oldasid = (read_c0_entryhi() & ASID_MASK); + oldasid = ASID_MASK(read_c0_entryhi()); if (smtc_live_asid[mytlb][oldasid]) { smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); if(smtc_live_asid[mytlb][oldasid] == 0) smtc_flush_tlb_asid(oldasid); } /* See comments for similar code above */ - write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) + write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | cpu_asid(cpu, mm)); ehb(); /* Make sure it propagates to TCStatus */ evpe(prevvpe); diff --git a/arch/mips/include/asm/netlogic/haldefs.h b/arch/mips/include/asm/netlogic/haldefs.h index 419d8aef8569..79c7cccdc22c 100644 --- a/arch/mips/include/asm/netlogic/haldefs.h +++ b/arch/mips/include/asm/netlogic/haldefs.h @@ -35,42 +35,13 @@ #ifndef __NLM_HAL_HALDEFS_H__ #define __NLM_HAL_HALDEFS_H__ +#include <linux/irqflags.h> /* for local_irq_disable */ + /* * This file contains platform specific memory mapped IO implementation * and will provide a way to read 32/64 bit memory mapped registers in * all ABIs */ -#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP) -#error "o32 compile not supported on XLP yet" -#endif -/* - * For o32 compilation, we have to disable interrupts and enable KX bit to - * access 64 bit addresses or data. 
- * - * We need to disable interrupts because we save just the lower 32 bits of - * registers in interrupt handling. So if we get hit by an interrupt while - * using the upper 32 bits of a register, we lose. - */ -static inline uint32_t nlm_save_flags_kx(void) -{ - return change_c0_status(ST0_KX | ST0_IE, ST0_KX); -} - -static inline uint32_t nlm_save_flags_cop2(void) -{ - return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2); -} - -static inline void nlm_restore_flags(uint32_t sr) -{ - write_c0_status(sr); -} - -/* - * The n64 implementations are simple, the o32 implementations when they - * are added, will have to disable interrupts and enable KX before doing - * 64 bit ops. - */ static inline uint32_t nlm_read_reg(uint64_t base, uint32_t reg) { @@ -87,13 +58,40 @@ nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val) *addr = val; } +/* + * For o32 compilation, we have to disable interrupts to access 64 bit + * registers + * + * We need to disable interrupts because we save just the lower 32 bits of + * registers in interrupt handling. So if we get hit by an interrupt while + * using the upper 32 bits of a register, we lose. + */ + static inline uint64_t nlm_read_reg64(uint64_t base, uint32_t reg) { uint64_t addr = base + (reg >> 1) * sizeof(uint64_t); volatile uint64_t *ptr = (volatile uint64_t *)(long)addr; - - return *ptr; + uint64_t val; + + if (sizeof(unsigned long) == 4) { + unsigned long flags; + + local_irq_save(flags); + __asm__ __volatile__( + ".set push" "\n\t" + ".set mips64" "\n\t" + "ld %L0, %1" "\n\t" + "dsra32 %M0, %L0, 0" "\n\t" + "sll %L0, %L0, 0" "\n\t" + ".set pop" "\n" + : "=r" (val) + : "m" (*ptr)); + local_irq_restore(flags); + } else + val = *ptr; + + return val; } static inline void @@ -102,7 +100,25 @@ nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val) uint64_t addr = base + (reg >> 1) * sizeof(uint64_t); volatile uint64_t *ptr = (volatile uint64_t *)(long)addr; - *ptr = val; + if (sizeof(unsigned long) == 4) { + unsigned long flags; + uint64_t tmp; + + local_irq_save(flags); + __asm__ __volatile__( + ".set push" "\n\t" + ".set mips64" "\n\t" + "dsll32 %L0, %L0, 0" "\n\t" + "dsrl32 %L0, %L0, 0" "\n\t" + "dsll32 %M0, %M0, 0" "\n\t" + "or %L0, %L0, %M0" "\n\t" + "sd %L0, %2" "\n\t" + ".set pop" "\n" + : "=r" (tmp) + : "0" (val), "m" (*ptr)); + local_irq_restore(flags); + } else + *ptr = val; } /* @@ -143,14 +159,6 @@ nlm_pcicfg_base(uint32_t devoffset) return nlm_io_base + devoffset; } -static inline uint64_t -nlm_xkphys_map_pcibar0(uint64_t pcibase) -{ - uint64_t paddr; - - paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu; - return (uint64_t)0x9000000000000000 | paddr; -} #elif defined(CONFIG_CPU_XLR) static inline uint64_t diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h index 8ad2e0f81719..f299d31d7c1a 100644 --- a/arch/mips/include/asm/netlogic/mips-extns.h +++ b/arch/mips/include/asm/netlogic/mips-extns.h @@ -38,21 +38,16 @@ /* * XLR and XLP interrupt request and interrupt mask registers */ -#define read_c0_eirr() __read_64bit_c0_register($9, 6) -#define read_c0_eimr() __read_64bit_c0_register($9, 7) -#define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val) - /* - * Writing EIMR in 32 bit is a special case, the lower 8 bit of the - * EIMR is shadowed in the status register, so we cannot save and - * restore status register for split read. + * NOTE: Do not save/restore flags around write_c0_eimr(). + * On non-R2 platforms the flags has part of EIMR that is shadowed in STATUS + * register. 
Restoring flags will overwrite the lower 8 bits of EIMR. + * + * Call with interrupts disabled. */ #define write_c0_eimr(val) \ do { \ if (sizeof(unsigned long) == 4) { \ - unsigned long __flags; \ - \ - local_irq_save(__flags); \ __asm__ __volatile__( \ ".set\tmips64\n\t" \ "dsll\t%L0, %L0, 32\n\t" \ @@ -62,8 +57,6 @@ do { \ "dmtc0\t%L0, $9, 7\n\t" \ ".set\tmips0" \ : : "r" (val)); \ - __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\ - local_irq_restore(__flags); \ } else \ __write_64bit_c0_register($9, 7, (val)); \ } while (0) @@ -128,7 +121,7 @@ static inline uint64_t read_c0_eirr_and_eimr(void) uint64_t val; #ifdef CONFIG_64BIT - val = read_c0_eimr() & read_c0_eirr(); + val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7); #else __asm__ __volatile__( ".set push\n\t" @@ -143,7 +136,6 @@ static inline uint64_t read_c0_eirr_and_eimr(void) ".set pop" : "=r" (val)); #endif - return val; } diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h index 3df53017fe51..a981f4681a15 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h @@ -191,59 +191,6 @@ #define PIC_IRT_PCIE_LINK_2_INDEX 80 #define PIC_IRT_PCIE_LINK_3_INDEX 81 #define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX) -/* 78 to 81 */ -#define PIC_NUM_NA_IRTS 32 -/* 82 to 113 */ -#define PIC_IRT_NA_0_INDEX 82 -#define PIC_IRT_NA_INDEX(num) ((num) + PIC_IRT_NA_0_INDEX) -#define PIC_IRT_POE_INDEX 114 - -#define PIC_NUM_USB_IRTS 6 -#define PIC_IRT_USB_0_INDEX 115 -#define PIC_IRT_EHCI_0_INDEX 115 -#define PIC_IRT_OHCI_0_INDEX 116 -#define PIC_IRT_OHCI_1_INDEX 117 -#define PIC_IRT_EHCI_1_INDEX 118 -#define PIC_IRT_OHCI_2_INDEX 119 -#define PIC_IRT_OHCI_3_INDEX 120 -#define PIC_IRT_USB_INDEX(num) ((num) + PIC_IRT_USB_0_INDEX) -/* 115 to 120 */ -#define PIC_IRT_GDX_INDEX 121 -#define PIC_IRT_SEC_INDEX 122 -#define PIC_IRT_RSA_INDEX 123 - -#define PIC_NUM_COMP_IRTS 4 -#define PIC_IRT_COMP_0_INDEX 124 -#define PIC_IRT_COMP_INDEX(num) ((num) + PIC_IRT_COMP_0_INDEX) -/* 124 to 127 */ -#define PIC_IRT_GBU_INDEX 128 -#define PIC_IRT_ICC_0_INDEX 129 /* ICC - Inter Chip Coherency */ -#define PIC_IRT_ICC_1_INDEX 130 -#define PIC_IRT_ICC_2_INDEX 131 -#define PIC_IRT_CAM_INDEX 132 -#define PIC_IRT_UART_0_INDEX 133 -#define PIC_IRT_UART_1_INDEX 134 -#define PIC_IRT_I2C_0_INDEX 135 -#define PIC_IRT_I2C_1_INDEX 136 -#define PIC_IRT_SYS_0_INDEX 137 -#define PIC_IRT_SYS_1_INDEX 138 -#define PIC_IRT_JTAG_INDEX 139 -#define PIC_IRT_PIC_INDEX 140 -#define PIC_IRT_NBU_INDEX 141 -#define PIC_IRT_TCU_INDEX 142 -#define PIC_IRT_GCU_INDEX 143 /* GBC - Global Coherency */ -#define PIC_IRT_DMC_0_INDEX 144 -#define PIC_IRT_DMC_1_INDEX 145 - -#define PIC_NUM_GPIO_IRTS 4 -#define PIC_IRT_GPIO_0_INDEX 146 -#define PIC_IRT_GPIO_INDEX(num) ((num) + PIC_IRT_GPIO_0_INDEX) - -/* 146 to 149 */ -#define PIC_IRT_NOR_INDEX 150 -#define PIC_IRT_NAND_INDEX 151 -#define PIC_IRT_SPI_INDEX 152 -#define PIC_IRT_MMC_INDEX 153 #define PIC_CLOCK_TIMER 7 #define PIC_IRQ_BASE 8 diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h deleted file mode 100644 index a9cd350dfb6c..000000000000 --- a/arch/mips/include/asm/netlogic/xlp-hal/usb.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the Broadcom - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __NLM_HAL_USB_H__ -#define __NLM_HAL_USB_H__ - -#define USB_CTL_0 0x01 -#define USB_PHY_0 0x0A -#define USB_PHY_RESET 0x01 -#define USB_PHY_PORT_RESET_0 0x10 -#define USB_PHY_PORT_RESET_1 0x20 -#define USB_CONTROLLER_RESET 0x01 -#define USB_INT_STATUS 0x0E -#define USB_INT_EN 0x0F -#define USB_PHY_INTERRUPT_EN 0x01 -#define USB_OHCI_INTERRUPT_EN 0x02 -#define USB_OHCI_INTERRUPT1_EN 0x04 -#define USB_OHCI_INTERRUPT2_EN 0x08 -#define USB_CTRL_INTERRUPT_EN 0x10 - -#ifndef __ASSEMBLY__ - -#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r) -#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v) -#define nlm_get_usb_pcibase(node, inst) \ - nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst)) -#define nlm_get_usb_hcd_base(node, inst) \ - nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst)) -#define nlm_get_usb_regbase(node, inst) \ - (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ) - -#endif -#endif /* __NLM_HAL_USB_H__ */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index fdc62fb5630d..8b8f6b393363 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -8,6 +8,7 @@ #ifndef _ASM_PGTABLE_H #define _ASM_PGTABLE_H +#include <linux/mm_types.h> #include <linux/mmzone.h> #ifdef CONFIG_32BIT #include <asm/pgtable-32.h> diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 2a5fa7abb346..71686c897dea 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -44,11 +44,16 @@ extern unsigned int vced_count, vcei_count; #define SPECIAL_PAGES_SIZE PAGE_SIZE #ifdef CONFIG_32BIT +#ifdef CONFIG_KVM_GUEST +/* User space process size is limited to 1GB in KVM Guest Mode */ +#define TASK_SIZE 0x3fff8000UL +#else /* * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. 
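For reference, the KVM-guest constants introduced in this series (TASK_SIZE 0x3fff8000 and __UA_LIMIT 0x40000000 on 32-bit) imply the address-range behaviour sketched below; the helper is only an illustration of the range check under those assumed limits, not the kernel's access_ok():

#include <stdio.h>

#define UA_LIMIT_KVM_GUEST  0x40000000UL    /* __UA_LIMIT when CONFIG_KVM_GUEST=y */
#define UA_LIMIT_NATIVE     0x80000000UL    /* classic 32-bit 2GB user split */

/* Illustrative only: a user range is acceptable when it lies entirely
 * below the configured limit. */
static int range_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        return addr < limit && size <= limit - addr;
}

int main(void)
{
        printf("%d\n", range_ok(0x30000000UL, 4, UA_LIMIT_KVM_GUEST)); /* 1: fits      */
        printf("%d\n", range_ok(0x7ff00000UL, 4, UA_LIMIT_KVM_GUEST)); /* 0: too high  */
        printf("%d\n", range_ok(0x7ff00000UL, 4, UA_LIMIT_NATIVE));    /* 1: native ok */
        return 0;
}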
*/ #define TASK_SIZE 0x7fff8000UL +#endif #ifdef __KERNEL__ #define STACK_TOP_MAX TASK_SIZE diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h index 8808bf548b99..1e7e0961064b 100644 --- a/arch/mips/include/asm/prom.h +++ b/arch/mips/include/asm/prom.h @@ -48,4 +48,7 @@ extern void __dt_setup_arch(struct boot_param_header *bph); static inline void device_tree_init(void) { } #endif /* CONFIG_OF */ +extern char *mips_get_machine_name(void); +extern void mips_set_machine_name(const char *name); + #endif /* __ASM_PROM_H */ diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h index 1a2c3025bf28..fdfae43d8b99 100644 --- a/arch/mips/include/asm/sn/sn_private.h +++ b/arch/mips/include/asm/sn/sn_private.h @@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice); extern void install_ipi(void); extern void setup_replication_mask(void); extern void replicate_kernel_text(void); -extern pfn_t node_getfirstfree(cnodeid_t); +extern unsigned long node_getfirstfree(cnodeid_t); #endif /* __ASM_SN_SN_PRIVATE_H */ diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h index c4813d67aec3..6d24d4e8b9ed 100644 --- a/arch/mips/include/asm/sn/types.h +++ b/arch/mips/include/asm/sn/types.h @@ -19,7 +19,6 @@ typedef signed char partid_t; /* partition ID type */ typedef signed short moduleid_t; /* user-visible module number type */ typedef signed short cmoduleid_t; /* kernel compact module id type */ typedef unsigned char clusterid_t; /* Clusterid of the cell */ -typedef unsigned long pfn_t; typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */ diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 5130c88d6420..78d201fb6c87 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -71,7 +71,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " nop \n" " srl %[my_ticket], %[ticket], 16 \n" " andi %[ticket], %[ticket], 0xffff \n" - " andi %[my_ticket], %[my_ticket], 0xffff \n" " bne %[ticket], %[my_ticket], 4f \n" " subu %[ticket], %[my_ticket], %[ticket] \n" "2: \n" @@ -105,7 +104,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " beqz %[my_ticket], 1b \n" " srl %[my_ticket], %[ticket], 16 \n" " andi %[ticket], %[ticket], 0xffff \n" - " andi %[my_ticket], %[my_ticket], 0xffff \n" " bne %[ticket], %[my_ticket], 4f \n" " subu %[ticket], %[my_ticket], %[ticket] \n" "2: \n" @@ -153,7 +151,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " \n" "1: ll %[ticket], %[ticket_ptr] \n" " srl %[my_ticket], %[ticket], 16 \n" - " andi %[my_ticket], %[my_ticket], 0xffff \n" " andi %[now_serving], %[ticket], 0xffff \n" " bne %[my_ticket], %[now_serving], 3f \n" " addu %[ticket], %[ticket], %[inc] \n" @@ -178,7 +175,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " \n" "1: ll %[ticket], %[ticket_ptr] \n" " srl %[my_ticket], %[ticket], 16 \n" - " andi %[my_ticket], %[my_ticket], 0xffff \n" " andi %[now_serving], %[ticket], 0xffff \n" " bne %[my_ticket], %[now_serving], 3f \n" " addu %[ticket], %[ticket], %[inc] \n" @@ -242,25 +238,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw) : "m" (rw->lock) : "memory"); } else { - __asm__ __volatile__( - " .set noreorder # arch_read_lock \n" - "1: ll %1, %2 \n" - " bltz %1, 3f \n" - " addu %1, 1 \n" - "2: sc %1, %0 \n" - " beqz %1, 1b \n" - " nop \n" - " .subsection 2 \n" - "3: ll %1, %2 \n" - " bltz %1, 3b \n" - " addu %1, 1 \n" - " b 2b \n" - " nop \n" - 
" .previous \n" - " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) - : "memory"); + do { + __asm__ __volatile__( + "1: ll %1, %2 # arch_read_lock \n" + " bltz %1, 1b \n" + " addu %1, 1 \n" + "2: sc %1, %0 \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); + } while (unlikely(!tmp)); } smp_llsc_mb(); @@ -285,21 +272,15 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) : "m" (rw->lock) : "memory"); } else { - __asm__ __volatile__( - " .set noreorder # arch_read_unlock \n" - "1: ll %1, %2 \n" - " sub %1, 1 \n" - " sc %1, %0 \n" - " beqz %1, 2f \n" - " nop \n" - " .subsection 2 \n" - "2: b 1b \n" - " nop \n" - " .previous \n" - " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) - : "memory"); + do { + __asm__ __volatile__( + "1: ll %1, %2 # arch_read_unlock \n" + " sub %1, 1 \n" + " sc %1, %0 \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); + } while (unlikely(!tmp)); } } @@ -321,25 +302,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw) : "m" (rw->lock) : "memory"); } else { - __asm__ __volatile__( - " .set noreorder # arch_write_lock \n" - "1: ll %1, %2 \n" - " bnez %1, 3f \n" - " lui %1, 0x8000 \n" - "2: sc %1, %0 \n" - " beqz %1, 3f \n" - " nop \n" - " .subsection 2 \n" - "3: ll %1, %2 \n" - " bnez %1, 3b \n" - " lui %1, 0x8000 \n" - " b 2b \n" - " nop \n" - " .previous \n" - " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) - : "memory"); + do { + __asm__ __volatile__( + "1: ll %1, %2 # arch_write_lock \n" + " bnez %1, 1b \n" + " lui %1, 0x8000 \n" + "2: sc %1, %0 \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); + } while (unlikely(!tmp)); } smp_llsc_mb(); @@ -424,25 +396,21 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) : "m" (rw->lock) : "memory"); } else { - __asm__ __volatile__( - " .set noreorder # arch_write_trylock \n" - " li %2, 0 \n" - "1: ll %1, %3 \n" - " bnez %1, 2f \n" - " lui %1, 0x8000 \n" - " sc %1, %0 \n" - " beqz %1, 3f \n" - " li %2, 1 \n" - "2: \n" - __WEAK_LLSC_MB - " .subsection 2 \n" - "3: b 1b \n" - " li %2, 0 \n" - " .previous \n" - " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) - : "m" (rw->lock) - : "memory"); + do { + __asm__ __volatile__( + " ll %1, %3 # arch_write_trylock \n" + " li %2, 0 \n" + " bnez %1, 2f \n" + " lui %1, 0x8000 \n" + " sc %1, %0 \n" + " li %2, 1 \n" + "2: \n" + : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) + : "m" (rw->lock) + : "memory"); + } while (unlikely(!tmp)); + + smp_llsc_mb(); } return ret; diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index c99384018161..a89d1b10d027 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -139,7 +139,7 @@ 1: move ra, k0 li k0, 3 mtc0 k0, $22 -#endif /* CONFIG_CPU_LOONGSON2F */ +#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */ #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(kernelsp) #else @@ -189,6 +189,7 @@ LONG_S $0, PT_R0(sp) mfc0 v1, CP0_STATUS LONG_S $2, PT_R2(sp) + LONG_S v1, PT_STATUS(sp) #ifdef CONFIG_MIPS_MT_SMTC /* * Ideally, these instructions would be shuffled in @@ -200,21 +201,20 @@ LONG_S k0, PT_TCSTATUS(sp) #endif /* CONFIG_MIPS_MT_SMTC */ LONG_S $4, PT_R4(sp) - LONG_S $5, PT_R5(sp) - LONG_S v1, PT_STATUS(sp) mfc0 v1, CP0_CAUSE - LONG_S $6, PT_R6(sp) - LONG_S $7, PT_R7(sp) + LONG_S $5, PT_R5(sp) LONG_S v1, PT_CAUSE(sp) + LONG_S $6, PT_R6(sp) MFC0 v1, CP0_EPC + LONG_S $7, PT_R7(sp) #ifdef CONFIG_64BIT LONG_S $8, PT_R8(sp) 
LONG_S $9, PT_R9(sp) #endif + LONG_S v1, PT_EPC(sp) LONG_S $25, PT_R25(sp) LONG_S $28, PT_R28(sp) LONG_S $31, PT_R31(sp) - LONG_S v1, PT_EPC(sp) ori $28, sp, _THREAD_MASK xori $28, _THREAD_MASK #ifdef CONFIG_CPU_CAVIUM_OCTEON diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 178f7924149a..895320e25662 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -58,8 +58,12 @@ struct thread_info { #define init_stack (init_thread_union.stack) /* How to get the thread information struct from C. */ -register struct thread_info *__current_thread_info __asm__("$28"); -#define current_thread_info() __current_thread_info +static inline struct thread_info *current_thread_info(void) +{ + register struct thread_info *__current_thread_info __asm__("$28"); + + return __current_thread_info; +} #endif /* !__ASSEMBLY__ */ diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h index debc8009bd58..2d7b9df4542d 100644 --- a/arch/mips/include/asm/time.h +++ b/arch/mips/include/asm/time.h @@ -52,13 +52,15 @@ extern int (*perf_irq)(void); */ extern unsigned int __weak get_c0_compare_int(void); extern int r4k_clockevent_init(void); +extern int smtc_clockevent_init(void); +extern int gic_clockevent_init(void); static inline int mips_clockevent_init(void) { #ifdef CONFIG_MIPS_MT_SMTC - extern int smtc_clockevent_init(void); - return smtc_clockevent_init(); +#elif defined(CONFIG_CEVT_GIC) + return (gic_clockevent_init() | r4k_clockevent_init()); #elif defined(CONFIG_CEVT_R4K) return r4k_clockevent_init(); #else @@ -69,9 +71,7 @@ static inline int mips_clockevent_init(void) /* * Initialize the count register as a clocksource */ -#ifdef CONFIG_CSRC_R4K extern int init_r4k_clocksource(void); -#endif static inline int init_mips_clocksource(void) { diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index bd87e36bf26a..f3fa3750f577 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -23,7 +23,11 @@ */ #ifdef CONFIG_32BIT -#define __UA_LIMIT 0x80000000UL +#ifdef CONFIG_KVM_GUEST +#define __UA_LIMIT 0x40000000UL +#else +#define __UA_LIMIT 0x80000000UL +#endif #define __UA_ADDR ".word" #define __UA_LA "la" @@ -55,8 +59,13 @@ extern u64 __ua_limit; * address in this range it's the process's problem, not ours :-) */ +#ifdef CONFIG_KVM_GUEST +#define KERNEL_DS ((mm_segment_t) { 0x80000000UL }) +#define USER_DS ((mm_segment_t) { 0xC0000000UL }) +#else #define KERNEL_DS ((mm_segment_t) { 0UL }) #define USER_DS ((mm_segment_t) { __UA_LIMIT }) +#endif #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -261,6 +270,7 @@ do { \ __asm__ __volatile__( \ "1: " insn " %1, %3 \n" \ "2: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ " j 2b \n" \ @@ -287,7 +297,9 @@ do { \ __asm__ __volatile__( \ "1: lw %1, (%3) \n" \ "2: lw %D1, 4(%3) \n" \ - "3: .section .fixup,\"ax\" \n" \ + "3: \n" \ + " .insn \n" \ + " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ " move %1, $0 \n" \ " move %D1, $0 \n" \ @@ -355,6 +367,7 @@ do { \ __asm__ __volatile__( \ "1: " insn " %z2, %3 # __put_user_asm\n" \ "2: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ " j 2b \n" \ @@ -373,6 +386,7 @@ do { \ "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ "2: sw %D2, 4(%3) \n" \ "3: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ " j 3b \n" \ @@ -524,6 +538,7 @@ do { \ __asm__ __volatile__( \ "1: " insn " %1, %3 \n" \ "2: \n" \ + " .insn \n" \ " 
.section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ " j 2b \n" \ @@ -549,7 +564,9 @@ do { \ "1: ulw %1, (%3) \n" \ "2: ulw %D1, 4(%3) \n" \ " move %0, $0 \n" \ - "3: .section .fixup,\"ax\" \n" \ + "3: \n" \ + " .insn \n" \ + " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ " move %1, $0 \n" \ " move %D1, $0 \n" \ @@ -616,6 +633,7 @@ do { \ __asm__ __volatile__( \ "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ "2: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ " j 2b \n" \ @@ -634,6 +652,7 @@ do { \ "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ "2: sw %D2, 4(%3) \n" \ "3: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ " j 3b \n" \ diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h index 058e941626a6..370d967725c2 100644 --- a/arch/mips/include/asm/uasm.h +++ b/arch/mips/include/asm/uasm.h @@ -6,7 +6,7 @@ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2012 MIPS Technologies, Inc. + * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. */ #include <linux/types.h> @@ -22,44 +22,75 @@ #define UASM_EXPORT_SYMBOL(sym) #endif +#define _UASM_ISA_CLASSIC 0 +#define _UASM_ISA_MICROMIPS 1 + +#ifndef UASM_ISA +#ifdef CONFIG_CPU_MICROMIPS +#define UASM_ISA _UASM_ISA_MICROMIPS +#else +#define UASM_ISA _UASM_ISA_CLASSIC +#endif +#endif + +#if (UASM_ISA == _UASM_ISA_CLASSIC) +#ifdef CONFIG_CPU_MICROMIPS +#define ISAOPC(op) CL_uasm_i##op +#define ISAFUNC(x) CL_##x +#else +#define ISAOPC(op) uasm_i##op +#define ISAFUNC(x) x +#endif +#elif (UASM_ISA == _UASM_ISA_MICROMIPS) +#ifdef CONFIG_CPU_MICROMIPS +#define ISAOPC(op) uasm_i##op +#define ISAFUNC(x) x +#else +#define ISAOPC(op) MM_uasm_i##op +#define ISAFUNC(x) MM_##x +#endif +#else +#error Unsupported micro-assembler ISA!!! 
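The ISAOPC()/ISAFUNC() indirection in the uasm.h hunk is plain token pasting: callers always write ISAOPC(_insn), and the configuration decides whether that resolves to uasm_i_insn, CL_uasm_i_insn or MM_uasm_i_insn. A minimal standalone demonstration, assuming the classic non-microMIPS case (the emitted text and register numbers are made up):

#include <stdio.h>

/* Classic ISA, CONFIG_CPU_MICROMIPS not set: keep the plain names. */
#define ISAOPC(op)  uasm_i##op
#define ISAFUNC(x)  x

/* ISAOPC(_addiu) expands to uasm_i_addiu here. */
static void ISAOPC(_addiu)(unsigned int rt, unsigned int rs, int imm)
{
        printf("addiu $%u, $%u, %d\n", rt, rs, imm);
}

int main(void)
{
        ISAOPC(_addiu)(4, 29, 16);      /* calls uasm_i_addiu() */
        return 0;
}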
+#endif + #define Ip_u1u2u3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u2u1u3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u3u1u2(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u1u2s3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) #define Ip_u2s3u1(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c) +ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c) #define Ip_u2u1s3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) #define Ip_u2u1msbu3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ unsigned int d) #define Ip_u1u2(op) \ -void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b) +void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) #define Ip_u1s2(op) \ -void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b) +void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b) -#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a) +#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a) -#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf) +#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf) Ip_u2u1s3(_addiu); Ip_u3u1u2(_addu); @@ -132,19 +163,20 @@ struct uasm_label { int lab; }; -void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid); +void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, + int lid); #ifdef CONFIG_64BIT -int uasm_in_compat_space_p(long addr); +int ISAFUNC(uasm_in_compat_space_p)(long addr); #endif -int uasm_rel_hi(long val); -int uasm_rel_lo(long val); -void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr); -void UASM_i_LA(u32 **buf, unsigned int rs, long addr); +int ISAFUNC(uasm_rel_hi)(long val); +int ISAFUNC(uasm_rel_lo)(long val); +void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr); +void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr); #define UASM_L_LA(lb) \ -static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \ +static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \ { \ - uasm_build_label(lab, addr, label##lb); \ + ISAFUNC(uasm_build_label)(lab, addr, label##lb); \ } /* convenience macros for instructions */ @@ -196,27 +228,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) - uasm_i_drotr(p, a1, a2, a3); + ISAOPC(_drotr)(p, a1, a2, a3); else - uasm_i_drotr32(p, a1, a2, a3 - 32); + ISAOPC(_drotr32)(p, a1, a2, a3 - 32); } static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) - uasm_i_dsll(p, a1, a2, a3); + ISAOPC(_dsll)(p, a1, a2, a3); else - uasm_i_dsll32(p, a1, a2, a3 - 32); + ISAOPC(_dsll32)(p, a1, a2, a3 - 32); } static inline void 
uasm_i_dsrl_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) - uasm_i_dsrl(p, a1, a2, a3); + ISAOPC(_dsrl)(p, a1, a2, a3); else - uasm_i_dsrl32(p, a1, a2, a3 - 32); + ISAOPC(_dsrl32)(p, a1, a2, a3 - 32); } /* Handle relocations. */ diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 4d078815eaa5..0f4aec2ad1e6 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -7,6 +7,7 @@ * * Copyright (C) 1996, 2000 by Ralf Baechle * Copyright (C) 2006 by Thiemo Seufer + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ #ifndef _UAPI_ASM_INST_H #define _UAPI_ASM_INST_H @@ -193,6 +194,282 @@ enum lx_func { }; /* + * (microMIPS) Major opcodes. + */ +enum mm_major_op { + mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op, + mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op, + mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op, + mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op, + mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op, + mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op, + mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op, + mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op, + mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op, + mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op, + mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op, + mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op, + mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op, + mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op, + mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op, + mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op, +}; + +/* + * (microMIPS) POOL32I minor opcodes. + */ +enum mm_32i_minor_op { + mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op, + mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op, + mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op, + mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op, + mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op, + mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op, + mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op, + mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op, + mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op, +}; + +/* + * (microMIPS) POOL32A minor opcodes. + */ +enum mm_32a_minor_op { + mm_sll32_op = 0x000, + mm_ins_op = 0x00c, + mm_ext_op = 0x02c, + mm_pool32axf_op = 0x03c, + mm_srl32_op = 0x040, + mm_sra_op = 0x080, + mm_rotr_op = 0x0c0, + mm_lwxs_op = 0x118, + mm_addu32_op = 0x150, + mm_subu32_op = 0x1d0, + mm_and_op = 0x250, + mm_or32_op = 0x290, + mm_xor32_op = 0x310, +}; + +/* + * (microMIPS) POOL32B functions. + */ +enum mm_32b_func { + mm_lwc2_func = 0x0, + mm_lwp_func = 0x1, + mm_ldc2_func = 0x2, + mm_ldp_func = 0x4, + mm_lwm32_func = 0x5, + mm_cache_func = 0x6, + mm_ldm_func = 0x7, + mm_swc2_func = 0x8, + mm_swp_func = 0x9, + mm_sdc2_func = 0xa, + mm_sdp_func = 0xc, + mm_swm32_func = 0xd, + mm_sdm_func = 0xf, +}; + +/* + * (microMIPS) POOL32C functions. + */ +enum mm_32c_func { + mm_pref_func = 0x2, + mm_ll_func = 0x3, + mm_swr_func = 0x9, + mm_sc_func = 0xb, + mm_lwu_func = 0xe, +}; + +/* + * (microMIPS) POOL32AXF minor opcodes. 
+ */ +enum mm_32axf_minor_op { + mm_mfc0_op = 0x003, + mm_mtc0_op = 0x00b, + mm_tlbp_op = 0x00d, + mm_jalr_op = 0x03c, + mm_tlbr_op = 0x04d, + mm_jalrhb_op = 0x07c, + mm_tlbwi_op = 0x08d, + mm_tlbwr_op = 0x0cd, + mm_jalrs_op = 0x13c, + mm_jalrshb_op = 0x17c, + mm_syscall_op = 0x22d, + mm_eret_op = 0x3cd, +}; + +/* + * (microMIPS) POOL32F minor opcodes. + */ +enum mm_32f_minor_op { + mm_32f_00_op = 0x00, + mm_32f_01_op = 0x01, + mm_32f_02_op = 0x02, + mm_32f_10_op = 0x08, + mm_32f_11_op = 0x09, + mm_32f_12_op = 0x0a, + mm_32f_20_op = 0x10, + mm_32f_30_op = 0x18, + mm_32f_40_op = 0x20, + mm_32f_41_op = 0x21, + mm_32f_42_op = 0x22, + mm_32f_50_op = 0x28, + mm_32f_51_op = 0x29, + mm_32f_52_op = 0x2a, + mm_32f_60_op = 0x30, + mm_32f_70_op = 0x38, + mm_32f_73_op = 0x3b, + mm_32f_74_op = 0x3c, +}; + +/* + * (microMIPS) POOL32F secondary minor opcodes. + */ +enum mm_32f_10_minor_op { + mm_lwxc1_op = 0x1, + mm_swxc1_op, + mm_ldxc1_op, + mm_sdxc1_op, + mm_luxc1_op, + mm_suxc1_op, +}; + +enum mm_32f_func { + mm_lwxc1_func = 0x048, + mm_swxc1_func = 0x088, + mm_ldxc1_func = 0x0c8, + mm_sdxc1_func = 0x108, +}; + +/* + * (microMIPS) POOL32F secondary minor opcodes. + */ +enum mm_32f_40_minor_op { + mm_fmovf_op, + mm_fmovt_op, +}; + +/* + * (microMIPS) POOL32F secondary minor opcodes. + */ +enum mm_32f_60_minor_op { + mm_fadd_op, + mm_fsub_op, + mm_fmul_op, + mm_fdiv_op, +}; + +/* + * (microMIPS) POOL32F secondary minor opcodes. + */ +enum mm_32f_70_minor_op { + mm_fmovn_op, + mm_fmovz_op, +}; + +/* + * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F. + */ +enum mm_32f_73_minor_op { + mm_fmov0_op = 0x01, + mm_fcvtl_op = 0x04, + mm_movf0_op = 0x05, + mm_frsqrt_op = 0x08, + mm_ffloorl_op = 0x0c, + mm_fabs0_op = 0x0d, + mm_fcvtw_op = 0x24, + mm_movt0_op = 0x25, + mm_fsqrt_op = 0x28, + mm_ffloorw_op = 0x2c, + mm_fneg0_op = 0x2d, + mm_cfc1_op = 0x40, + mm_frecip_op = 0x48, + mm_fceill_op = 0x4c, + mm_fcvtd0_op = 0x4d, + mm_ctc1_op = 0x60, + mm_fceilw_op = 0x6c, + mm_fcvts0_op = 0x6d, + mm_mfc1_op = 0x80, + mm_fmov1_op = 0x81, + mm_movf1_op = 0x85, + mm_ftruncl_op = 0x8c, + mm_fabs1_op = 0x8d, + mm_mtc1_op = 0xa0, + mm_movt1_op = 0xa5, + mm_ftruncw_op = 0xac, + mm_fneg1_op = 0xad, + mm_froundl_op = 0xcc, + mm_fcvtd1_op = 0xcd, + mm_froundw_op = 0xec, + mm_fcvts1_op = 0xed, +}; + +/* + * (microMIPS) POOL16C minor opcodes. + */ +enum mm_16c_minor_op { + mm_lwm16_op = 0x04, + mm_swm16_op = 0x05, + mm_jr16_op = 0x18, + mm_jrc_op = 0x1a, + mm_jalr16_op = 0x1c, + mm_jalrs16_op = 0x1e, +}; + +/* + * (microMIPS) POOL16D minor opcodes. + */ +enum mm_16d_minor_op { + mm_addius5_func, + mm_addiusp_func, +}; + +/* + * (MIPS16e) opcodes. + */ +enum MIPS16e_ops { + MIPS16e_jal_op = 003, + MIPS16e_ld_op = 007, + MIPS16e_i8_op = 014, + MIPS16e_sd_op = 017, + MIPS16e_lb_op = 020, + MIPS16e_lh_op = 021, + MIPS16e_lwsp_op = 022, + MIPS16e_lw_op = 023, + MIPS16e_lbu_op = 024, + MIPS16e_lhu_op = 025, + MIPS16e_lwpc_op = 026, + MIPS16e_lwu_op = 027, + MIPS16e_sb_op = 030, + MIPS16e_sh_op = 031, + MIPS16e_swsp_op = 032, + MIPS16e_sw_op = 033, + MIPS16e_rr_op = 035, + MIPS16e_extend_op = 036, + MIPS16e_i64_op = 037, +}; + +enum MIPS16e_i64_func { + MIPS16e_ldsp_func, + MIPS16e_sdsp_func, + MIPS16e_sdrasp_func, + MIPS16e_dadjsp_func, + MIPS16e_ldpc_func, +}; + +enum MIPS16e_rr_func { + MIPS16e_jr_func, +}; + +enum MIPS6e_i8_func { + MIPS16e_swrasp_func = 02, +}; + +/* + * (microMIPS & MIPS16e) NOP instruction. + */ +#define MM_NOP16 0x0c00 + +/* * Damn ... 
bitfields depend from byteorder :-( */ #ifdef __MIPSEB__ @@ -311,6 +588,262 @@ struct v_format { /* MDMX vector format */ ;))))))) }; +/* + * microMIPS instruction formats (32-bit length) + * + * NOTE: + * Parenthesis denote whether the format is a microMIPS instruction or + * if it is MIPS32 instruction re-encoded for use in the microMIPS ASE. + */ +struct fb_format { /* FPU branch format (MIPS32) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int bc : 5, + BITFIELD_FIELD(unsigned int cc : 3, + BITFIELD_FIELD(unsigned int flag : 2, + BITFIELD_FIELD(signed int simmediate : 16, + ;))))) +}; + +struct fp0_format { /* FPU multiply and add format (MIPS32) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int fmt : 5, + BITFIELD_FIELD(unsigned int ft : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp0_format { /* FPU multipy and add format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int ft : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int fmt : 3, + BITFIELD_FIELD(unsigned int op : 2, + BITFIELD_FIELD(unsigned int func : 6, + ;))))))) +}; + +struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int op : 5, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fmt : 2, + BITFIELD_FIELD(unsigned int op : 8, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int cc : 3, + BITFIELD_FIELD(unsigned int zero : 2, + BITFIELD_FIELD(unsigned int fmt : 2, + BITFIELD_FIELD(unsigned int op : 3, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))))) +}; + +struct mm_fp3_format { /* FPU abs and neg format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fmt : 3, + BITFIELD_FIELD(unsigned int op : 7, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp4_format { /* FPU c.cond format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int cc : 3, + BITFIELD_FIELD(unsigned int fmt : 3, + BITFIELD_FIELD(unsigned int cond : 4, + BITFIELD_FIELD(unsigned int func : 6, + ;))))))) +}; + +struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int index : 5, + BITFIELD_FIELD(unsigned int base : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int op : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct fp6_format { /* FPU madd and msub format (MIPS IV) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int fr : 5, + BITFIELD_FIELD(unsigned int ft : 5, + BITFIELD_FIELD(unsigned int fs : 5, + 
BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int ft : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int fr : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_i_format { /* Immediate format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int rs : 5, + BITFIELD_FIELD(signed int simmediate : 16, + ;)))) +}; + +struct mm_m_format { /* Multi-word load/store format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rd : 5, + BITFIELD_FIELD(unsigned int base : 5, + BITFIELD_FIELD(unsigned int func : 4, + BITFIELD_FIELD(signed int simmediate : 12, + ;))))) +}; + +struct mm_x_format { /* Scaled indexed load format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int index : 5, + BITFIELD_FIELD(unsigned int base : 5, + BITFIELD_FIELD(unsigned int rd : 5, + BITFIELD_FIELD(unsigned int func : 11, + ;))))) +}; + +/* + * microMIPS instruction formats (16-bit length) + */ +struct mm_b0_format { /* Unconditional branch format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(signed int simmediate : 10, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;))) +}; + +struct mm_b1_format { /* Conditional branch format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rs : 3, + BITFIELD_FIELD(signed int simmediate : 7, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;)))) +}; + +struct mm16_m_format { /* Multi-word load/store format */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int func : 4, + BITFIELD_FIELD(unsigned int rlist : 2, + BITFIELD_FIELD(unsigned int imm : 4, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;))))) +}; + +struct mm16_rb_format { /* Signed immediate format */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 3, + BITFIELD_FIELD(unsigned int base : 3, + BITFIELD_FIELD(signed int simmediate : 4, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;))))) +}; + +struct mm16_r3_format { /* Load from global pointer format */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 3, + BITFIELD_FIELD(signed int simmediate : 7, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;)))) +}; + +struct mm16_r5_format { /* Load/store from stack pointer format */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(signed int simmediate : 5, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;)))) +}; + +/* + * MIPS16e instruction formats (16-bit length) + */ +struct m16e_rr { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int rx : 3, + BITFIELD_FIELD(unsigned int nd : 1, + BITFIELD_FIELD(unsigned int l : 1, + BITFIELD_FIELD(unsigned int ra : 1, + BITFIELD_FIELD(unsigned int func : 5, + ;)))))) +}; + +struct m16e_jal { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int x : 1, + BITFIELD_FIELD(unsigned int imm20_16 : 5, + BITFIELD_FIELD(signed int imm25_21 : 5, + ;)))) +}; + +struct m16e_i64 { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int func : 3, + BITFIELD_FIELD(unsigned int imm : 8, + ;))) +}; + +struct m16e_ri64 { + BITFIELD_FIELD(unsigned int 
opcode : 5, + BITFIELD_FIELD(unsigned int func : 3, + BITFIELD_FIELD(unsigned int ry : 3, + BITFIELD_FIELD(unsigned int imm : 5, + ;)))) +}; + +struct m16e_ri { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int rx : 3, + BITFIELD_FIELD(unsigned int imm : 8, + ;))) +}; + +struct m16e_rri { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int rx : 3, + BITFIELD_FIELD(unsigned int ry : 3, + BITFIELD_FIELD(unsigned int imm : 5, + ;)))) +}; + +struct m16e_i8 { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int func : 3, + BITFIELD_FIELD(unsigned int imm : 8, + ;))) +}; + union mips_instruction { unsigned int word; unsigned short halfword[2]; @@ -326,6 +859,37 @@ union mips_instruction { struct b_format b_format; struct ps_format ps_format; struct v_format v_format; + struct fb_format fb_format; + struct fp0_format fp0_format; + struct mm_fp0_format mm_fp0_format; + struct fp1_format fp1_format; + struct mm_fp1_format mm_fp1_format; + struct mm_fp2_format mm_fp2_format; + struct mm_fp3_format mm_fp3_format; + struct mm_fp4_format mm_fp4_format; + struct mm_fp5_format mm_fp5_format; + struct fp6_format fp6_format; + struct mm_fp6_format mm_fp6_format; + struct mm_i_format mm_i_format; + struct mm_m_format mm_m_format; + struct mm_x_format mm_x_format; + struct mm_b0_format mm_b0_format; + struct mm_b1_format mm_b1_format; + struct mm16_m_format mm16_m_format ; + struct mm16_rb_format mm16_rb_format; + struct mm16_r3_format mm16_r3_format; + struct mm16_r5_format mm16_r5_format; +}; + +union mips16e_instruction { + unsigned int full : 16; + struct m16e_rr rr; + struct m16e_jal jal; + struct m16e_i64 i64; + struct m16e_ri64 ri64; + struct m16e_ri ri; + struct m16e_rri rri; + struct m16e_i8 i8; }; #endif /* _UAPI_ASM_INST_H */ diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 520a908d45d6..6ad9e04bdf62 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -5,7 +5,7 @@ extra-y := head.o vmlinux.lds obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ - ptrace.o reset.o setup.o signal.o syscall.o \ + prom.o ptrace.o reset.o setup.o signal.o syscall.o \ time.o topology.o traps.o unaligned.o watch.o vdso.o ifdef CONFIG_FUNCTION_TRACER @@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o +obj-$(CONFIG_CEVT_GIC) += cevt-gic.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o +obj-$(CONFIG_CSRC_GIC) += csrc-gic.o obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o -obj-$(CONFIG_CSRC_GIC) += csrc-gic.o obj-$(CONFIG_SYNC_R4K) += sync-r4k.o obj-$(CONFIG_STACKTRACE) += stacktrace.o @@ -86,8 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o -obj-$(CONFIG_OF) += prom.o - CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 50285b2c7ffe..0845091ba480 100644 --- a/arch/mips/kernel/asm-offsets.c +++ 
b/arch/mips/kernel/asm-offsets.c @@ -17,6 +17,8 @@ #include <asm/ptrace.h> #include <asm/processor.h> +#include <linux/kvm_host.h> + void output_ptreg_defines(void) { COMMENT("MIPS pt_regs offsets."); @@ -328,3 +330,67 @@ void output_pbe_defines(void) BLANK(); } #endif + +void output_kvm_defines(void) +{ + COMMENT(" KVM/MIPS Specfic offsets. "); + DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch)); + OFFSET(VCPU_RUN, kvm_vcpu, run); + OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch); + + OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase); + OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase); + + OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack); + OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp); + + OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr); + OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause); + OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc); + OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi); + + OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst); + + OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]); + OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]); + OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]); + OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]); + OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]); + OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]); + OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]); + OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]); + OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]); + OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]); + OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]); + OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]); + OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]); + OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]); + OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]); + OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]); + OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]); + OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]); + OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]); + OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]); + OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]); + OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]); + OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]); + OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]); + OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]); + OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]); + OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]); + OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]); + OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]); + OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]); + OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]); + OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]); + OFFSET(VCPU_LO, kvm_vcpu_arch, lo); + OFFSET(VCPU_HI, kvm_vcpu_arch, hi); + OFFSET(VCPU_PC, kvm_vcpu_arch, pc); + OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0); + OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid); + OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid); + + OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]); + OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]); + BLANK(); +} diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 556a4357d7fc..97c5a1668e53 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; __res; \ }) +#ifdef CONFIG_KVM_GUEST +#define TASK32_SIZE 0x3fff8000UL +#else #define TASK32_SIZE 0x7fff8000UL +#endif #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 83ffe950f710..46c2ad0703a0 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -14,10 +14,186 @@ #include <asm/cpu.h> #include <asm/cpu-features.h> #include 
<asm/fpu.h> +#include <asm/fpu_emulator.h> #include <asm/inst.h> #include <asm/ptrace.h> #include <asm/uaccess.h> +/* + * Calculate and return exception PC in case of branch delay slot + * for microMIPS and MIPS16e. It does not clear the ISA mode bit. + */ +int __isa_exception_epc(struct pt_regs *regs) +{ + unsigned short inst; + long epc = regs->cp0_epc; + + /* Calculate exception PC in branch delay slot. */ + if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) { + /* This should never happen because delay slot was checked. */ + force_sig(SIGSEGV, current); + return epc; + } + if (cpu_has_mips16) { + if (((union mips16e_instruction)inst).ri.opcode + == MIPS16e_jal_op) + epc += 4; + else + epc += 2; + } else if (mm_insn_16bit(inst)) + epc += 2; + else + epc += 4; + + return epc; +} + +/* + * Compute return address and emulate branch in microMIPS mode after an + * exception only. It does not handle compact branches/jumps and cannot + * be used in interrupt context. (Compact branches/jumps do not cause + * exceptions.) + */ +int __microMIPS_compute_return_epc(struct pt_regs *regs) +{ + u16 __user *pc16; + u16 halfword; + unsigned int word; + unsigned long contpc; + struct mm_decoded_insn mminsn = { 0 }; + + mminsn.micro_mips_mode = 1; + + /* This load never faults. */ + pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); + __get_user(halfword, pc16); + pc16++; + contpc = regs->cp0_epc + 2; + word = ((unsigned int)halfword << 16); + mminsn.pc_inc = 2; + + if (!mm_insn_16bit(halfword)) { + __get_user(halfword, pc16); + pc16++; + contpc = regs->cp0_epc + 4; + mminsn.pc_inc = 4; + word |= halfword; + } + mminsn.insn = word; + + if (get_user(halfword, pc16)) + goto sigsegv; + mminsn.next_pc_inc = 2; + word = ((unsigned int)halfword << 16); + + if (!mm_insn_16bit(halfword)) { + pc16++; + if (get_user(halfword, pc16)) + goto sigsegv; + mminsn.next_pc_inc = 4; + word |= halfword; + } + mminsn.next_insn = word; + + mm_isBranchInstr(regs, mminsn, &contpc); + + regs->cp0_epc = contpc; + + return 0; + +sigsegv: + force_sig(SIGSEGV, current); + return -EFAULT; +} + +/* + * Compute return address and emulate branch in MIPS16e mode after an + * exception only. It does not handle compact branches/jumps and cannot + * be used in interrupt context. (Compact branches/jumps do not cause + * exceptions.) + */ +int __MIPS16e_compute_return_epc(struct pt_regs *regs) +{ + u16 __user *addr; + union mips16e_instruction inst; + u16 inst2; + u32 fullinst; + long epc; + + epc = regs->cp0_epc; + + /* Read the instruction. */ + addr = (u16 __user *)msk_isa16_mode(epc); + if (__get_user(inst.full, addr)) { + force_sig(SIGSEGV, current); + return -EFAULT; + } + + switch (inst.ri.opcode) { + case MIPS16e_extend_op: + regs->cp0_epc += 4; + return 0; + + /* + * JAL and JALX in MIPS16e mode + */ + case MIPS16e_jal_op: + addr += 1; + if (__get_user(inst2, addr)) { + force_sig(SIGSEGV, current); + return -EFAULT; + } + fullinst = ((unsigned)inst.full << 16) | inst2; + regs->regs[31] = epc + 6; + epc += 4; + epc >>= 28; + epc <<= 28; + /* + * JAL:5 X:1 TARGET[20-16]:5 TARGET[25:21]:5 TARGET[15:0]:16 + * + * ......TARGET[15:0].................TARGET[20:16]........... + * ......TARGET[25:21] + */ + epc |= + ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) | + ((fullinst & 0x1f0000) << 7); + if (!inst.jal.x) + set_isa16_mode(epc); /* Set ISA mode bit. 
*/ + regs->cp0_epc = epc; + return 0; + + /* + * J(AL)R(C) + */ + case MIPS16e_rr_op: + if (inst.rr.func == MIPS16e_jr_func) { + + if (inst.rr.ra) + regs->cp0_epc = regs->regs[31]; + else + regs->cp0_epc = + regs->regs[reg16to32[inst.rr.rx]]; + + if (inst.rr.l) { + if (inst.rr.nd) + regs->regs[31] = epc + 2; + else + regs->regs[31] = epc + 4; + } + return 0; + } + break; + } + + /* + * All other cases have no branch delay slot and are 16-bits. + * Branches do not cause an exception. + */ + regs->cp0_epc += 2; + + return 0; +} + /** * __compute_return_epc_for_insn - Computes the return address and do emulate * branch simulation, if required. @@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, epc <<= 28; epc |= (insn.j_format.target << 2); regs->cp0_epc = epc; + if (insn.i_format.opcode == jalx_op) + set_isa16_mode(regs->cp0_epc); break; /* diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c new file mode 100644 index 000000000000..730eaf92c018 --- /dev/null +++ b/arch/mips/kernel/cevt-gic.c @@ -0,0 +1,104 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Imagination Technologies Ltd. + */ +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/percpu.h> +#include <linux/smp.h> +#include <linux/irq.h> + +#include <asm/time.h> +#include <asm/gic.h> +#include <asm/mips-boards/maltaint.h> + +DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); +int gic_timer_irq_installed; + + +static int gic_next_event(unsigned long delta, struct clock_event_device *evt) +{ + u64 cnt; + int res; + + cnt = gic_read_count(); + cnt += (u64)delta; + gic_write_compare(cnt); + res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; + return res; +} + +void gic_set_clock_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + /* Nothing to do ... 
*/ +} + +irqreturn_t gic_compare_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *cd; + int cpu = smp_processor_id(); + + gic_write_compare(gic_read_compare()); + cd = &per_cpu(gic_clockevent_device, cpu); + cd->event_handler(cd); + return IRQ_HANDLED; +} + +struct irqaction gic_compare_irqaction = { + .handler = gic_compare_interrupt, + .flags = IRQF_PERCPU | IRQF_TIMER, + .name = "timer", +}; + + +void gic_event_handler(struct clock_event_device *dev) +{ +} + +int __cpuinit gic_clockevent_init(void) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + unsigned int irq; + + if (!cpu_has_counter || !gic_frequency) + return -ENXIO; + + irq = MIPS_GIC_IRQ_BASE; + + cd = &per_cpu(gic_clockevent_device, cpu); + + cd->name = "MIPS GIC"; + cd->features = CLOCK_EVT_FEAT_ONESHOT; + + clockevent_set_clock(cd, gic_frequency); + + /* Calculate the min / max delta */ + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + + cd->rating = 300; + cd->irq = irq; + cd->cpumask = cpumask_of(cpu); + cd->set_next_event = gic_next_event; + cd->set_mode = gic_set_clock_mode; + cd->event_handler = gic_event_handler; + + clockevents_register_device(cd); + + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002); + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK); + + if (gic_timer_irq_installed) + return 0; + + gic_timer_irq_installed = 1; + + setup_irq(irq, &gic_compare_irqaction); + irq_set_handler(irq, handle_percpu_irq); + return 0; +} diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 07b847d77f5d..02033eaf8825 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -23,7 +23,6 @@ */ #ifndef CONFIG_MIPS_MT_SMTC - static int mips_next_event(unsigned long delta, struct clock_event_device *evt) { @@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); int cp0_timer_irq_installed; #ifndef CONFIG_MIPS_MT_SMTC - irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { const int r2 = cpu_has_mips_r2; @@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) /* Clear Count/Compare Interrupt */ write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); +#ifdef CONFIG_CEVT_GIC + if (!gic_present) +#endif cd->event_handler(cd); } @@ -118,6 +119,10 @@ int c0_compare_int_usable(void) unsigned int delta; unsigned int cnt; +#ifdef CONFIG_KVM_GUEST + return 1; +#endif + /* * IP7 already pending? Try to clear it by acking the timer. 
*/ @@ -166,7 +171,6 @@ int c0_compare_int_usable(void) } #ifndef CONFIG_MIPS_MT_SMTC - int __cpuinit r4k_clockevent_init(void) { unsigned int cpu = smp_processor_id(); @@ -206,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void) cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; +#ifdef CONFIG_CEVT_GIC + if (!gic_present) +#endif clockevents_register_device(cd); if (cp0_timer_irq_installed) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 5fe66a0c3224..4bbffdb9024f 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) c->options |= MIPS_CPU_ULRI; if (config3 & MIPS_CONF3_ISA) c->options |= MIPS_CPU_MICROMIPS; +#ifdef CONFIG_CPU_MICROMIPS + write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE); +#endif if (config3 & MIPS_CONF3_VZ) c->ases |= MIPS_ASE_VZ; diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c index 5dca24bce51b..e02620901117 100644 --- a/arch/mips/kernel/csrc-gic.c +++ b/arch/mips/kernel/csrc-gic.c @@ -5,23 +5,14 @@ * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ -#include <linux/clocksource.h> #include <linux/init.h> +#include <linux/time.h> -#include <asm/time.h> #include <asm/gic.h> static cycle_t gic_hpt_read(struct clocksource *cs) { - unsigned int hi, hi2, lo; - - do { - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); - } while (hi2 != hi); - - return (((cycle_t) hi) << 32) + lo; + return gic_read_count(); } static struct clocksource gic_clocksource = { diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index ecb347ce1b3d..5c2ba9f08a80 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -5,8 +5,8 @@ * * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 2001 MIPS Technologies, Inc. * Copyright (C) 2002, 2007 Maciej W. Rozycki + * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved. */ #include <linux/init.h> @@ -21,8 +21,10 @@ #include <asm/war.h> #include <asm/thread_info.h> +#ifdef CONFIG_MIPS_MT_SMTC #define PANIC_PIC(msg) \ - .set push; \ + .set push; \ + .set nomicromips; \ .set reorder; \ PTR_LA a0,8f; \ .set noat; \ @@ -31,17 +33,10 @@ 9: b 9b; \ .set pop; \ TEXT(msg) +#endif __INIT -NESTED(except_vec0_generic, 0, sp) - PANIC_PIC("Exception vector 0 called") - END(except_vec0_generic) - -NESTED(except_vec1_generic, 0, sp) - PANIC_PIC("Exception vector 1 called") - END(except_vec1_generic) - /* * General exception vector for all other CPUs. * @@ -138,12 +133,19 @@ LEAF(r4k_wait) nop nop nop +#ifdef CONFIG_CPU_MICROMIPS + nop + nop + nop + nop +#endif .set mips3 wait /* end of rollback region (the region size must be power of two) */ - .set pop 1: jr ra + nop + .set pop END(r4k_wait) .macro BUILD_ROLLBACK_PROLOGUE handler @@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp) LONG_L s0, TI_REGS($28) LONG_S sp, TI_REGS($28) PTR_LA ra, ret_from_irq - j plat_irq_dispatch + PTR_LA v0, plat_irq_dispatch + jr v0 +#ifdef CONFIG_CPU_MICROMIPS + nop +#endif END(handle_int) __INIT @@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp) /* * EJTAG debug exception handler. 
* The EJTAG debug exception entry point is 0xbfc00480, which - * normally is in the boot PROM, so the boot PROM must do a + * normally is in the boot PROM, so the boot PROM must do an * unconditional jump to this vector. */ NESTED(except_vec_ejtag_debug, 0, sp) j ejtag_debug_handler +#ifdef CONFIG_CPU_MICROMIPS + nop +#endif END(except_vec_ejtag_debug) __FINIT @@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp) FEXPORT(except_vec_vi_mori) ori a0, $0, 0 #endif /* CONFIG_MIPS_MT_SMTC */ + PTR_LA v1, except_vec_vi_handler FEXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ - j except_vec_vi_handler + jr v1 FEXPORT(except_vec_vi_ori) ori v0, 0 /* Patched */ .set pop @@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer) */ NESTED(except_vec_nmi, 0, sp) j nmi_handler +#ifdef CONFIG_CPU_MICROMIPS + nop +#endif END(except_vec_nmi) __FINIT @@ -480,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set noreorder /* check if TLB contains a entry for EPC */ MFC0 k1, CP0_ENTRYHI - andi k1, 0xff /* ASID_MASK */ + andi k1, 0xff /* ASID_MASK patched at run-time!! */ MFC0 k0, CP0_EPC PTR_SRL k0, _PAGE_SHIFT + 1 PTR_SLL k0, _PAGE_SHIFT + 1 @@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp) .set push .set noat .set noreorder - /* 0x7c03e83b: rdhwr v1,$29 */ + /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ + /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ MFC0 k1, CP0_EPC - lui k0, 0x7c03 - lw k1, (k1) - ori k0, 0xe83b - .set reorder +#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) + and k0, k1, 1 + beqz k0, 1f + xor k1, k0 + lhu k0, (k1) + lhu k1, 2(k1) + ins k1, k0, 16, 16 + lui k0, 0x007d + b docheck + ori k0, 0x6b3c +1: + lui k0, 0x7c03 + lw k1, (k1) + ori k0, 0xe83b +#else + andi k0, k1, 1 + bnez k0, handle_ri + lui k0, 0x7c03 + lw k1, (k1) + ori k0, 0xe83b +#endif + .set reorder +docheck: bne k0, k1, handle_ri /* if not ours */ + +isrdhwr: /* The insn is rdhwr. No need to check CAUSE.BD here. 
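
The RDHWR fast path in the genex.S hunk above now recognises the TLS-pointer read in either ISA mode by testing the ISA-mode bit (bit 0 of EPC) and comparing the fetched word against the fixed encodings quoted in the comments: 0x7c03e83b for the MIPS32 rdhwr v1,$29 and 0x007d6b3c for the microMIPS form, the latter reassembled from two halfwords. A hedged C rendering of that check, with the fetches reduced to plain loads:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define RDHWR_V1_ULR_MIPS32    0x7c03e83bu	/* rdhwr v1,$29, classic encoding   */
#define RDHWR_V1_ULR_MICROMIPS 0x007d6b3cu	/* rdhwr v1,$29, microMIPS encoding */

/* Bit 0 of EPC is the ISA-mode bit: 1 = microMIPS, 0 = classic MIPS32/64. */
static bool faulting_insn_is_rdhwr(uintptr_t epc)
{
	uint32_t word;

	if (epc & 1) {
		/* microMIPS: clear the mode bit and reassemble the 32-bit
		 * encoding from two consecutive halfwords. */
		const uint16_t *h = (const uint16_t *)(epc & ~(uintptr_t)1);

		word = ((uint32_t)h[0] << 16) | h[1];
		return word == RDHWR_V1_ULR_MICROMIPS;
	}
	word = *(const uint32_t *)epc;
	return word == RDHWR_V1_ULR_MIPS32;
}

int main(void)
{
	uint16_t mm[2]   = { 0x007d, 0x6b3c };
	uint32_t classic = RDHWR_V1_ULR_MIPS32;

	printf("%d %d\n", faulting_insn_is_rdhwr((uintptr_t)mm | 1),
			  faulting_insn_is_rdhwr((uintptr_t)&classic));
	return 0;
}
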
*/ get_saved_sp /* k1 := current_thread_info */ .set noreorder diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 485e6a961b31..c01b307317a9 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <linux/smp.h> #include <linux/irq.h> +#include <linux/clocksource.h> #include <asm/io.h> #include <asm/gic.h> @@ -19,6 +20,8 @@ #include <linux/hardirq.h> #include <asm-generic/bitops/find.h> +unsigned int gic_frequency; +unsigned int gic_present; unsigned long _gic_base; unsigned int gic_irq_base; unsigned int gic_irq_flags[GIC_NUM_INTRS]; @@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; static struct gic_pending_regs pending_regs[NR_CPUS]; static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; +#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC) +cycle_t gic_read_count(void) +{ + unsigned int hi, hi2, lo; + + do { + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); + } while (hi2 != hi); + + return (((cycle_t) hi) << 32) + lo; +} + +void gic_write_compare(cycle_t cnt) +{ + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), + (int)(cnt >> 32)); + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), + (int)(cnt & 0xffffffff)); +} + +cycle_t gic_read_compare(void) +{ + unsigned int hi, lo; + + GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi); + GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo); + + return (((cycle_t) hi) << 32) + lo; +} +#endif + unsigned int gic_get_timer_pending(void) { unsigned int vpe_pending; @@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes) } } +unsigned int gic_compare_int(void) +{ + unsigned int pending; + + GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending); + if (pending & GIC_VPE_PEND_CMP_MSK) + return 1; + else + return 0; +} + unsigned int gic_get_int(void) { unsigned int i; diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index d1d576b765f5..0b29646bcee7 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -165,10 +165,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2, return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3), merge_64(len_a4, len_a5)); } - -SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, - u64, a3, u64, a4, int, dfd, const char __user *, pathname) -{ - return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), - dfd, pathname); -} diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c index 411a058d2c53..876097529697 100644 --- a/arch/mips/kernel/mips_machine.c +++ b/arch/mips/kernel/mips_machine.c @@ -11,9 +11,9 @@ #include <linux/slab.h> #include <asm/mips_machine.h> +#include <asm/prom.h> static struct mips_machine *mips_machine __initdata; -static char *mips_machine_name = "Unknown"; #define for_each_machine(mach) \ for ((mach) = (struct mips_machine *)&__mips_machines_start; \ @@ -21,25 +21,6 @@ static char *mips_machine_name = "Unknown"; (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \ (mach)++) -__init void mips_set_machine_name(const char *name) -{ - char *p; - - if (name == NULL) - return; - - p = kstrdup(name, GFP_KERNEL); - if (!p) - pr_err("MIPS: no memory for machine_name\n"); - - mips_machine_name = p; -} - -char *mips_get_machine_name(void) -{ - return mips_machine_name; -} - __init int mips_machtype_setup(char *id) { struct mips_machine *mach; @@ 
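
gic_read_count() in the irq-gic.c hunk above assembles a 64-bit timestamp from two 32-bit halves and retries whenever the high word changes between reads, so a low-word rollover can never yield a torn value. The same idiom in standalone form; plain variables stand in for the GIC_SH_COUNTER registers.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the GIC_SH_COUNTER_63_32 / GIC_SH_COUNTER_31_00 registers. */
static volatile uint32_t counter_hi, counter_lo;

static uint64_t read_counter64(void)
{
	uint32_t hi, hi2, lo;

	do {
		hi  = counter_hi;
		lo  = counter_lo;
		hi2 = counter_hi;	/* re-read: did the low word wrap meanwhile? */
	} while (hi2 != hi);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	counter_hi = 0x12345678;
	counter_lo = 0xffffff00;
	printf("count = %#llx\n", (unsigned long long)read_counter64());
	return 0;
}
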
-79,7 +60,6 @@ __init void mips_machine_setup(void) return; mips_set_machine_name(mips_machine->mach_name); - pr_info("MIPS: machine is %s\n", mips_machine_name); if (mips_machine->mach_setup) mips_machine->mach_setup(); diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 7a54f74b7818..a3e461408b7e 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -12,7 +12,7 @@ #include <asm/cpu-features.h> #include <asm/mipsregs.h> #include <asm/processor.h> -#include <asm/mips_machine.h> +#include <asm/prom.h> unsigned int vced_count, vcei_count; @@ -99,6 +99,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_vz) seq_printf(m, "%s", " vz"); seq_printf(m, "\n"); + if (cpu_has_mmips) { + seq_printf(m, "micromips kernel\t: %s\n", + (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no"); + } seq_printf(m, "shadow register sets\t: %d\n", cpu_data[n].srsets); seq_printf(m, "kscratch registers\t: %d\n", diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index cfc742d75b7f..eb902c1f0cad 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -7,6 +7,7 @@ * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2004 Thiemo Seufer + * Copyright (C) 2013 Imagination Technologies Ltd. */ #include <linux/errno.h> #include <linux/sched.h> @@ -225,34 +226,115 @@ struct mips_frame_info { static inline int is_ra_save_ins(union mips_instruction *ip) { +#ifdef CONFIG_CPU_MICROMIPS + union mips_instruction mmi; + + /* + * swsp ra,offset + * swm16 reglist,offset(sp) + * swm32 reglist,offset(sp) + * sw32 ra,offset(sp) + * jradiussp - NOT SUPPORTED + * + * microMIPS is way more fun... + */ + if (mm_insn_16bit(ip->halfword[0])) { + mmi.word = (ip->halfword[0] << 16); + return ((mmi.mm16_r5_format.opcode == mm_swsp16_op && + mmi.mm16_r5_format.rt == 31) || + (mmi.mm16_m_format.opcode == mm_pool16c_op && + mmi.mm16_m_format.func == mm_swm16_op)); + } + else { + mmi.halfword[0] = ip->halfword[1]; + mmi.halfword[1] = ip->halfword[0]; + return ((mmi.mm_m_format.opcode == mm_pool32b_op && + mmi.mm_m_format.rd > 9 && + mmi.mm_m_format.base == 29 && + mmi.mm_m_format.func == mm_swm32_func) || + (mmi.i_format.opcode == mm_sw32_op && + mmi.i_format.rs == 29 && + mmi.i_format.rt == 31)); + } +#else /* sw / sd $ra, offset($sp) */ return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && ip->i_format.rs == 29 && ip->i_format.rt == 31; +#endif } static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) { +#ifdef CONFIG_CPU_MICROMIPS + /* + * jr16,jrc,jalr16,jalr16 + * jal + * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb + * jraddiusp - NOT SUPPORTED + * + * microMIPS is kind of more fun... 
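
The non-microMIPS branch of is_ra_save_ins() above classifies an instruction purely by bit fields: major opcode sw/sd, base register $29 (sp), target register $31 (ra). A field-extraction sketch of the same test on a raw I-type word; the 0x2b/0x3f opcode values are the standard MIPS32 encodings, stated here as an assumption rather than taken from the kernel headers.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Standard MIPS I-type field positions; 0x2b = sw, 0x3f = sd. */
#define OPCODE(x)  (((x) >> 26) & 0x3f)
#define RS(x)      (((x) >> 21) & 0x1f)
#define RT(x)      (((x) >> 16) & 0x1f)
#define SIMM(x)    ((int16_t)((x) & 0xffff))

static bool is_ra_save(uint32_t insn)
{
	return (OPCODE(insn) == 0x2b || OPCODE(insn) == 0x3f) &&
	       RS(insn) == 29 && RT(insn) == 31;
}

int main(void)
{
	uint32_t sw_ra_24_sp = 0xafbf0018;	/* sw ra,24(sp) */

	printf("ra-save: %d, offset %d\n",
	       is_ra_save(sw_ra_24_sp), SIMM(sw_ra_24_sp));
	return 0;
}
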
+ */ + union mips_instruction mmi; + + mmi.word = (ip->halfword[0] << 16); + + if ((mmi.mm16_r5_format.opcode == mm_pool16c_op && + (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) || + ip->j_format.opcode == mm_jal32_op) + return 1; + if (ip->r_format.opcode != mm_pool32a_op || + ip->r_format.func != mm_pool32axf_op) + return 0; + return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); +#else if (ip->j_format.opcode == jal_op) return 1; if (ip->r_format.opcode != spec_op) return 0; return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; +#endif } static inline int is_sp_move_ins(union mips_instruction *ip) { +#ifdef CONFIG_CPU_MICROMIPS + /* + * addiusp -imm + * addius5 sp,-imm + * addiu32 sp,sp,-imm + * jradiussp - NOT SUPPORTED + * + * microMIPS is not more fun... + */ + if (mm_insn_16bit(ip->halfword[0])) { + union mips_instruction mmi; + + mmi.word = (ip->halfword[0] << 16); + return ((mmi.mm16_r3_format.opcode == mm_pool16d_op && + mmi.mm16_r3_format.simmediate && mm_addiusp_func) || + (mmi.mm16_r5_format.opcode == mm_pool16d_op && + mmi.mm16_r5_format.rt == 29)); + } + return (ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29); +#else /* addiu/daddiu sp,sp,-imm */ if (ip->i_format.rs != 29 || ip->i_format.rt != 29) return 0; if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) return 1; +#endif return 0; } static int get_frame_info(struct mips_frame_info *info) { +#ifdef CONFIG_CPU_MICROMIPS + union mips_instruction *ip = (void *) (((char *) info->func) - 1); +#else union mips_instruction *ip = info->func; +#endif unsigned max_insns = info->func_size / sizeof(union mips_instruction); unsigned i; @@ -272,7 +354,26 @@ static int get_frame_info(struct mips_frame_info *info) break; if (!info->frame_size) { if (is_sp_move_ins(ip)) + { +#ifdef CONFIG_CPU_MICROMIPS + if (mm_insn_16bit(ip->halfword[0])) + { + unsigned short tmp; + + if (ip->halfword[0] & mm_addiusp_func) + { + tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2); + info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 
0xfe00 : 0)); + } else { + tmp = (ip->halfword[0] >> 1); + info->frame_size = -(signed short)(tmp & 0xf); + } + ip = (void *) &ip->halfword[1]; + ip--; + } else +#endif info->frame_size = - ip->i_format.simmediate; + } continue; } if (info->pc_offset == -1 && is_ra_save_ins(ip)) { diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 028f6f837ef9..5712bb532245 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -23,6 +23,23 @@ #include <asm/page.h> #include <asm/prom.h> +static char mips_machine_name[64] = "Unknown"; + +__init void mips_set_machine_name(const char *name) +{ + if (name == NULL) + return; + + strncpy(mips_machine_name, name, sizeof(mips_machine_name)); + pr_info("MIPS: machine is %s\n", mips_get_machine_name()); +} + +char *mips_get_machine_name(void) +{ + return mips_machine_name; +} + +#ifdef CONFIG_OF int __init early_init_dt_scan_memory_arch(unsigned long node, const char *uname, int depth, void *data) @@ -50,6 +67,18 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start, } #endif +int __init early_init_dt_scan_model(unsigned long node, const char *uname, + int depth, void *data) +{ + if (!depth) { + char *model = of_get_flat_dt_prop(node, "model", NULL); + + if (model) + mips_set_machine_name(model); + } + return 0; +} + void __init early_init_devtree(void *params) { /* Setup flat device-tree pointer */ @@ -65,6 +94,9 @@ void __init early_init_devtree(void *params) /* Scan memory nodes */ of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL); + + /* try to load the mips machine name */ + of_scan_flat_dt(early_init_dt_scan_model, NULL); } void __init __dt_setup_arch(struct boot_param_header *bph) @@ -79,3 +111,4 @@ void __init __dt_setup_arch(struct boot_param_header *bph) early_init_devtree(initial_boot_params); } +#endif diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 9ea29649fc28..9b36424b03c5 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -138,9 +138,18 @@ stackargs: 5: jr t1 sw t5, 16(sp) # argument #5 to ksp +#ifdef CONFIG_CPU_MICROMIPS sw t8, 28(sp) # argument #8 to ksp + nop sw t7, 24(sp) # argument #7 to ksp + nop sw t6, 20(sp) # argument #6 to ksp + nop +#else + sw t8, 28(sp) # argument #8 to ksp + sw t7, 24(sp) # argument #7 to ksp + sw t6, 20(sp) # argument #6 to ksp +#endif 6: j stack_done # go back nop .set pop diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 103bfe570fe8..74f485d3c0ef 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -529,7 +529,7 @@ sys_call_table: PTR sys_accept4 PTR compat_sys_recvmmsg /* 4335 */ PTR sys_fanotify_init - PTR sys_32_fanotify_mark + PTR compat_sys_fanotify_mark PTR sys_prlimit64 PTR sys_name_to_handle_at PTR compat_sys_open_by_handle_at /* 4340 */ diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 4c774d5d5087..c7f90519e58c 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -23,6 +23,7 @@ #include <linux/pfn.h> #include <linux/debugfs.h> #include <linux/kexec.h> +#include <linux/sizes.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> @@ -77,6 +78,8 @@ EXPORT_SYMBOL(mips_io_port_base); static struct resource code_resource = { .name = "Kernel code", }; static struct resource data_resource = { .name = "Kernel data", }; +static void *detect_magic __initdata = detect_memory_region; + void __init add_memory_region(phys_t start, phys_t size, long type) { 
int x = boot_mem_map.nr_map; @@ -122,6 +125,25 @@ void __init add_memory_region(phys_t start, phys_t size, long type) boot_mem_map.nr_map++; } +void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max) +{ + void *dm = &detect_magic; + phys_t size; + + for (size = sz_min; size < sz_max; size <<= 1) { + if (!memcmp(dm, dm + size, sizeof(detect_magic))) + break; + } + + pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n", + ((unsigned long long) size) / SZ_1M, + (unsigned long long) start, + ((unsigned long long) sz_min) / SZ_1M, + ((unsigned long long) sz_max) / SZ_1M); + + add_memory_region(start, size, BOOT_MEM_RAM); +} + static void __init print_memory_map(void) { int i; diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index b5e88fd83277..fd3ef2c2afbc 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -35,6 +35,7 @@ #include <asm/war.h> #include <asm/vdso.h> #include <asm/dsp.h> +#include <asm/inst.h> #include "signal-common.h" @@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset = sigmask_to_save(); int ret; struct mips_abi *abi = current->thread.abi; +#ifdef CONFIG_CPU_MICROMIPS + void *vdso; + unsigned int tmp = (unsigned int)current->mm->context.vdso; + + set_isa16_mode(tmp); + vdso = (void *)tmp; +#else void *vdso = current->mm->context.vdso; +#endif if (regs->regs[0]) { switch(regs->regs[2]) { diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index bfede063d96a..3e5164c11cac 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -34,6 +34,7 @@ #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> +#include <asm/gic.h> static void __init smvp_copy_vpe_config(void) { @@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) static void __cpuinit vsmp_init_secondary(void) { #ifdef CONFIG_IRQ_GIC - extern int gic_present; - /* This is Malta specific: IPI,performance and timer interrupts */ if (gic_present) change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index aee04af213c5..c17619fe18e3 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -83,6 +83,7 @@ static inline void set_cpu_sibling_map(int cpu) } struct plat_smp_ops *mp_ops; +EXPORT_SYMBOL(mp_ops); __cpuinit void register_smp_ops(struct plat_smp_ops *ops) { diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S index 76016ac0a9c8..2866863a39df 100644 --- a/arch/mips/kernel/smtc-asm.S +++ b/arch/mips/kernel/smtc-asm.S @@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? 
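
detect_memory_region() above sizes RAM by exploiting aliasing: it keeps a magic value near the base of memory, compares the base against base+size for doubling candidate sizes, and stops at the first size where the window wraps back onto the marker. A user-space analogue of that probe loop, with a deliberately mirrored fake "bus"; all sizes below are made up for the demonstration.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Pretend the bus mirrors a small RAM across a larger window, the way an
 * undersized DRAM aliases in a SoC's memory map. */
#define REAL_RAM   64u
#define MAX_PROBE  512u
#define MIN_PROBE  16u

static uint8_t ram[REAL_RAM];

/* Aliased read: address bits above the real size are ignored by the "bus". */
static void bus_read(uint32_t addr, void *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		((uint8_t *)buf)[i] = ram[(addr + i) % REAL_RAM];
}

int main(void)
{
	const uint32_t magic = 0xdeadbeef;
	uint32_t size, probe;

	memcpy(ram, &magic, sizeof(magic));	/* plant the marker at offset 0 */

	for (size = MIN_PROBE; size < MAX_PROBE; size <<= 1) {
		bus_read(size, &probe, sizeof(probe));
		if (probe == magic)		/* window wrapped onto the marker */
			break;
	}
	printf("detected %u bytes of RAM\n", size);
	return 0;
}
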
.text .align 5 FEXPORT(__smtc_ipi_vector) +#ifdef CONFIG_CPU_MICROMIPS + nop +#endif .set noat /* Disable thread scheduling to make Status update atomic */ DMT 27 # dmt k1 diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 7186222dc5bb..31d22f3121c9 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -111,7 +111,7 @@ static int vpe0limit; static int ipibuffers; static int nostlb; static int asidmask; -unsigned long smtc_asid_mask = 0xff; +unsigned int smtc_asid_mask = 0xff; static int __init vpe0tcs(char *str) { @@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) asid = asid_cache(cpu); do { - if (!((asid += ASID_INC) & ASID_MASK) ) { + if (!ASID_MASK(ASID_INC(asid))) { if (cpu_has_vtag_icache) flush_icache_all(); /* Traverse all online CPUs (hack requires contiguous range) */ @@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) mips_ihb(); } tcstat = read_tc_c0_tcstatus(); - smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); + smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i); if (!prevhalt) write_tc_c0_tchalt(0); } @@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) asid = ASID_FIRST_VERSION; local_flush_tlb_all(); /* start new asid cycle */ } - } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); + } while (smtc_live_asid[tlb][ASID_MASK(asid)]); /* * SMTC shares the TLB within VPEs and possibly across all VPEs. @@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid) tlb_read(); ehb(); ehi = read_c0_entryhi(); - if ((ehi & ASID_MASK) == asid) { + if (ASID_MASK(ehi) == asid) { /* * Invalidate only entries with specified ASID, * makiing sure all entries differ. diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 25225515451f..77cff1f6d050 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -8,8 +8,8 @@ * Copyright (C) 1998 Ulf Carlsson * Copyright (C) 1999 Silicon Graphics, Inc. * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com - * Copyright (C) 2000, 01 MIPS Technologies, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki + * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. 
*/ #include <linux/bug.h> #include <linux/compiler.h> @@ -60,9 +60,9 @@ extern void check_wait(void); extern asmlinkage void r4k_wait(void); extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); -extern asmlinkage void handle_tlbm(void); -extern asmlinkage void handle_tlbl(void); -extern asmlinkage void handle_tlbs(void); +extern u32 handle_tlbl[]; +extern u32 handle_tlbs[]; +extern u32 handle_tlbm[]; extern asmlinkage void handle_adel(void); extern asmlinkage void handle_ades(void); extern asmlinkage void handle_ibe(void); @@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void); extern asmlinkage void handle_mcheck(void); extern asmlinkage void handle_reserved(void); -extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, - struct mips_fpu_struct *ctx, int has_fpu, - void *__user *fault_addr); - void (*board_be_init)(void); int (*board_be_handler)(struct pt_regs *regs, int is_fixup); void (*board_nmi_handler_setup)(void); @@ -482,6 +478,12 @@ asmlinkage void do_be(struct pt_regs *regs) #define SYNC 0x0000000f #define RDHWR 0x0000003b +/* microMIPS definitions */ +#define MM_POOL32A_FUNC 0xfc00ffff +#define MM_RDHWR 0x00006b3c +#define MM_RS 0x001f0000 +#define MM_RT 0x03e00000 + /* * The ll_bit is cleared by r*_switch.S */ @@ -596,42 +598,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) * Simulate trapping 'rdhwr' instructions to provide user accessible * registers not implemented in hardware. */ -static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) +static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt) { struct thread_info *ti = task_thread_info(current); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, + 1, regs, 0); + switch (rd) { + case 0: /* CPU number */ + regs->regs[rt] = smp_processor_id(); + return 0; + case 1: /* SYNCI length */ + regs->regs[rt] = min(current_cpu_data.dcache.linesz, + current_cpu_data.icache.linesz); + return 0; + case 2: /* Read count register */ + regs->regs[rt] = read_c0_count(); + return 0; + case 3: /* Count register resolution */ + switch (current_cpu_data.cputype) { + case CPU_20KC: + case CPU_25KF: + regs->regs[rt] = 1; + break; + default: + regs->regs[rt] = 2; + } + return 0; + case 29: + regs->regs[rt] = ti->tp_value; + return 0; + default: + return -1; + } +} + +static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode) +{ if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { int rd = (opcode & RD) >> 11; int rt = (opcode & RT) >> 16; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, regs, 0); - switch (rd) { - case 0: /* CPU number */ - regs->regs[rt] = smp_processor_id(); - return 0; - case 1: /* SYNCI length */ - regs->regs[rt] = min(current_cpu_data.dcache.linesz, - current_cpu_data.icache.linesz); - return 0; - case 2: /* Read count register */ - regs->regs[rt] = read_c0_count(); - return 0; - case 3: /* Count register resolution */ - switch (current_cpu_data.cputype) { - case CPU_20KC: - case CPU_25KF: - regs->regs[rt] = 1; - break; - default: - regs->regs[rt] = 2; - } - return 0; - case 29: - regs->regs[rt] = ti->tp_value; - return 0; - default: - return -1; - } + + simulate_rdhwr(regs, rd, rt); + return 0; + } + + /* Not ours. */ + return -1; +} + +static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode) +{ + if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) { + int rd = (opcode & MM_RS) >> 16; + int rt = (opcode & MM_RT) >> 21; + simulate_rdhwr(regs, rd, rt); + return 0; } /* Not ours. 
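
The simulate_rdhwr() rework above separates decoding from emulation: the classic and microMIPS front ends only extract the rd (hardware register) and rt (destination) fields, then a shared switch supplies the CPU number, SYNCI step, count register or thread pointer. A compact sketch of the classic-mode decode and dispatch; the returned values are placeholders where the kernel would read real CPU state.

#include <stdint.h>
#include <stdio.h>

/* Classic 32-bit decode as in traps.c: SPEC3 major opcode, RDHWR function
 * code, and the rt/rd fields of the R-type word. */
#define OPCODE_MASK 0xfc000000u
#define SPEC3       0x7c000000u
#define FUNC_MASK   0x0000003fu
#define RDHWR_FUNC  0x0000003bu
#define RT(x)       (((x) >> 16) & 0x1f)
#define RD(x)       (((x) >> 11) & 0x1f)

/* Emulated GPR file and a fake thread pointer (illustration only). */
static uint64_t gpr[32];
static uint64_t thread_pointer = 0x7fff0000;

static int simulate_rdhwr(int rd, int rt)
{
	switch (rd) {
	case 0:  gpr[rt] = 0;		  return 0;	/* CPU number              */
	case 1:  gpr[rt] = 32;		  return 0;	/* SYNCI step (cache line) */
	case 2:  gpr[rt] = 123456;	  return 0;	/* count register          */
	case 29: gpr[rt] = thread_pointer; return 0;	/* user-local / TLS        */
	default: return -1;				/* not emulated            */
	}
}

int main(void)
{
	uint32_t insn = 0x7c03e83b;	/* rdhwr v1,$29 -- the TLS read */

	if ((insn & OPCODE_MASK) == SPEC3 && (insn & FUNC_MASK) == RDHWR_FUNC)
		simulate_rdhwr(RD(insn), RT(insn));
	printf("v1 = %#llx\n", (unsigned long long)gpr[3]);
	return 0;
}
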
*/ @@ -662,7 +684,7 @@ asmlinkage void do_ov(struct pt_regs *regs) force_sig_info(SIGFPE, &info, current); } -static int process_fpemu_return(int sig, void __user *fault_addr) +int process_fpemu_return(int sig, void __user *fault_addr) { if (sig == SIGSEGV || sig == SIGBUS) { struct siginfo si = {0}; @@ -813,9 +835,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, asmlinkage void do_bp(struct pt_regs *regs) { unsigned int opcode, bcode; - - if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) - goto out_sigsegv; + unsigned long epc; + u16 instr[2]; + + if (get_isa16_mode(regs->cp0_epc)) { + /* Calculate EPC. */ + epc = exception_epc(regs); + if (cpu_has_mmips) { + if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || + (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) + goto out_sigsegv; + opcode = (instr[0] << 16) | instr[1]; + } else { + /* MIPS16e mode */ + if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) + goto out_sigsegv; + bcode = (instr[0] >> 6) & 0x3f; + do_trap_or_bp(regs, bcode, "Break"); + return; + } + } else { + if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; + } /* * There is the ancient bug in the MIPS assemblers that the break @@ -856,13 +898,22 @@ out_sigsegv: asmlinkage void do_tr(struct pt_regs *regs) { unsigned int opcode, tcode = 0; + u16 instr[2]; + unsigned long epc = exception_epc(regs); - if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) - goto out_sigsegv; + if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || + (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) + goto out_sigsegv; + opcode = (instr[0] << 16) | instr[1]; /* Immediate versions don't provide a code. */ - if (!(opcode & OPCODE)) - tcode = ((opcode >> 6) & ((1 << 10) - 1)); + if (!(opcode & OPCODE)) { + if (get_isa16_mode(regs->cp0_epc)) + /* microMIPS */ + tcode = (opcode >> 12) & 0x1f; + else + tcode = ((opcode >> 6) & ((1 << 10) - 1)); + } do_trap_or_bp(regs, tcode, "Trap"); return; @@ -875,6 +926,7 @@ asmlinkage void do_ri(struct pt_regs *regs) { unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); unsigned long old_epc = regs->cp0_epc; + unsigned long old31 = regs->regs[31]; unsigned int opcode = 0; int status = -1; @@ -887,23 +939,37 @@ asmlinkage void do_ri(struct pt_regs *regs) if (unlikely(compute_return_epc(regs) < 0)) return; - if (unlikely(get_user(opcode, epc) < 0)) - status = SIGSEGV; + if (get_isa16_mode(regs->cp0_epc)) { + unsigned short mmop[2] = { 0 }; - if (!cpu_has_llsc && status < 0) - status = simulate_llsc(regs, opcode); + if (unlikely(get_user(mmop[0], epc) < 0)) + status = SIGSEGV; + if (unlikely(get_user(mmop[1], epc) < 0)) + status = SIGSEGV; + opcode = (mmop[0] << 16) | mmop[1]; - if (status < 0) - status = simulate_rdhwr(regs, opcode); + if (status < 0) + status = simulate_rdhwr_mm(regs, opcode); + } else { + if (unlikely(get_user(opcode, epc) < 0)) + status = SIGSEGV; - if (status < 0) - status = simulate_sync(regs, opcode); + if (!cpu_has_llsc && status < 0) + status = simulate_llsc(regs, opcode); + + if (status < 0) + status = simulate_rdhwr_normal(regs, opcode); + + if (status < 0) + status = simulate_sync(regs, opcode); + } if (status < 0) status = SIGILL; if (unlikely(status > 0)) { regs->cp0_epc = old_epc; /* Undo skip-over. 
*/ + regs->regs[31] = old31; force_sig(status, current); } } @@ -973,7 +1039,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, asmlinkage void do_cpu(struct pt_regs *regs) { unsigned int __user *epc; - unsigned long old_epc; + unsigned long old_epc, old31; unsigned int opcode; unsigned int cpid; int status; @@ -987,26 +1053,41 @@ asmlinkage void do_cpu(struct pt_regs *regs) case 0: epc = (unsigned int __user *)exception_epc(regs); old_epc = regs->cp0_epc; + old31 = regs->regs[31]; opcode = 0; status = -1; if (unlikely(compute_return_epc(regs) < 0)) return; - if (unlikely(get_user(opcode, epc) < 0)) - status = SIGSEGV; + if (get_isa16_mode(regs->cp0_epc)) { + unsigned short mmop[2] = { 0 }; - if (!cpu_has_llsc && status < 0) - status = simulate_llsc(regs, opcode); + if (unlikely(get_user(mmop[0], epc) < 0)) + status = SIGSEGV; + if (unlikely(get_user(mmop[1], epc) < 0)) + status = SIGSEGV; + opcode = (mmop[0] << 16) | mmop[1]; - if (status < 0) - status = simulate_rdhwr(regs, opcode); + if (status < 0) + status = simulate_rdhwr_mm(regs, opcode); + } else { + if (unlikely(get_user(opcode, epc) < 0)) + status = SIGSEGV; + + if (!cpu_has_llsc && status < 0) + status = simulate_llsc(regs, opcode); + + if (status < 0) + status = simulate_rdhwr_normal(regs, opcode); + } if (status < 0) status = SIGILL; if (unlikely(status > 0)) { regs->cp0_epc = old_epc; /* Undo skip-over. */ + regs->regs[31] = old31; force_sig(status, current); } @@ -1320,7 +1401,7 @@ asmlinkage void cache_parity_error(void) void ejtag_exception_handler(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); - unsigned long depc, old_epc; + unsigned long depc, old_epc, old_ra; unsigned int debug; printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); @@ -1335,10 +1416,12 @@ void ejtag_exception_handler(struct pt_regs *regs) * calculation. */ old_epc = regs->cp0_epc; + old_ra = regs->regs[31]; regs->cp0_epc = depc; - __compute_return_epc(regs); + compute_return_epc(regs); depc = regs->cp0_epc; regs->cp0_epc = old_epc; + regs->regs[31] = old_ra; } else depc += 4; write_c0_depc(depc); @@ -1377,11 +1460,27 @@ unsigned long vi_handlers[64]; void __init *set_except_vector(int n, void *addr) { unsigned long handler = (unsigned long) addr; - unsigned long old_handler = exception_handlers[n]; + unsigned long old_handler; + +#ifdef CONFIG_CPU_MICROMIPS + /* + * Only the TLB handlers are cache aligned with an even + * address. All other handlers are on an odd address and + * require no modification. Otherwise, MIPS32 mode will + * be entered when handling any TLB exceptions. That + * would be bad...since we must stay in microMIPS mode. 
+ */ + if (!(handler & 0x1)) + handler |= 1; +#endif + old_handler = xchg(&exception_handlers[n], handler); - exception_handlers[n] = handler; if (n == 0 && cpu_has_divec) { +#ifdef CONFIG_CPU_MICROMIPS + unsigned long jump_mask = ~((1 << 27) - 1); +#else unsigned long jump_mask = ~((1 << 28) - 1); +#endif u32 *buf = (u32 *)(ebase + 0x200); unsigned int k0 = 26; if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { @@ -1397,7 +1496,7 @@ void __init *set_except_vector(int n, void *addr) return (void *)old_handler; } -static asmlinkage void do_default_vi(void) +static void do_default_vi(void) { show_regs(get_irq_regs()); panic("Caught unexpected vectored interrupt."); @@ -1408,17 +1507,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) unsigned long handler; unsigned long old_handler = vi_handlers[n]; int srssets = current_cpu_data.srsets; - u32 *w; + u16 *h; unsigned char *b; BUG_ON(!cpu_has_veic && !cpu_has_vint); + BUG_ON((n < 0) && (n > 9)); if (addr == NULL) { handler = (unsigned long) do_default_vi; srs = 0; } else handler = (unsigned long) addr; - vi_handlers[n] = (unsigned long) addr; + vi_handlers[n] = handler; b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); @@ -1437,9 +1537,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) if (srs == 0) { /* * If no shadow set is selected then use the default handler - * that does normal register saving and a standard interrupt exit + * that does normal register saving and standard interrupt exit */ - extern char except_vec_vi, except_vec_vi_lui; extern char except_vec_vi_ori, except_vec_vi_end; extern char rollback_except_vec_vi; @@ -1452,11 +1551,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) * Status.IM bit to be masked before going there. */ extern char except_vec_vi_mori; +#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) + const int mori_offset = &except_vec_vi_mori - vec_start + 2; +#else const int mori_offset = &except_vec_vi_mori - vec_start; +#endif #endif /* CONFIG_MIPS_MT_SMTC */ - const int handler_len = &except_vec_vi_end - vec_start; +#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) + const int lui_offset = &except_vec_vi_lui - vec_start + 2; + const int ori_offset = &except_vec_vi_ori - vec_start + 2; +#else const int lui_offset = &except_vec_vi_lui - vec_start; const int ori_offset = &except_vec_vi_ori - vec_start; +#endif + const int handler_len = &except_vec_vi_end - vec_start; if (handler_len > VECTORSPACING) { /* @@ -1466,30 +1574,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) panic("VECTORSPACING too small"); } - memcpy(b, vec_start, handler_len); + set_handler(((unsigned long)b - ebase), vec_start, +#ifdef CONFIG_CPU_MICROMIPS + (handler_len - 1)); +#else + handler_len); +#endif #ifdef CONFIG_MIPS_MT_SMTC BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. 
*/ - w = (u32 *)(b + mori_offset); - *w = (*w & 0xffff0000) | (0x100 << n); + h = (u16 *)(b + mori_offset); + *h = (0x100 << n); #endif /* CONFIG_MIPS_MT_SMTC */ - w = (u32 *)(b + lui_offset); - *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); - w = (u32 *)(b + ori_offset); - *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); + h = (u16 *)(b + lui_offset); + *h = (handler >> 16) & 0xffff; + h = (u16 *)(b + ori_offset); + *h = (handler & 0xffff); local_flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); } else { /* - * In other cases jump directly to the interrupt handler - * - * It is the handlers responsibility to save registers if required - * (eg hi/lo) and return from the exception using "eret" + * In other cases jump directly to the interrupt handler. It + * is the handler's responsibility to save registers if required + * (eg hi/lo) and return from the exception using "eret". */ - w = (u32 *)b; - *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ - *w = 0; + u32 insn; + + h = (u16 *)b; + /* j handler */ +#ifdef CONFIG_CPU_MICROMIPS + insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); +#else + insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); +#endif + h[0] = (insn >> 16) & 0xffff; + h[1] = insn & 0xffff; + h[2] = 0; + h[3] = 0; local_flush_icache_range((unsigned long)b, (unsigned long)(b+8)); } @@ -1534,6 +1656,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) unsigned int cpu = smp_processor_id(); unsigned int status_set = ST0_CU0; unsigned int hwrena = cpu_hwrena_impl_bits; + unsigned long asid = 0; #ifdef CONFIG_MIPS_MT_SMTC int secondaryTC = 0; int bootTC = (cpu == 0); @@ -1617,8 +1740,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) } #endif /* CONFIG_MIPS_MT_SMTC */ - if (!cpu_data[cpu].asid_cache) - cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; + asid = ASID_FIRST_VERSION; + cpu_data[cpu].asid_cache = asid; + TLBMISS_HANDLER_SETUP(); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; @@ -1648,7 +1772,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) /* Install CPU exception handler */ void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) { +#ifdef CONFIG_CPU_MICROMIPS + memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); +#else memcpy((void *)(ebase + offset), addr, size); +#endif local_flush_icache_range(ebase + offset, ebase + offset + size); } @@ -1682,8 +1810,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt); void __init trap_init(void) { - extern char except_vec3_generic, except_vec3_r4000; + extern char except_vec3_generic; extern char except_vec4; + extern char except_vec3_r4000; unsigned long i; int rollback; @@ -1700,7 +1829,12 @@ void __init trap_init(void) ebase = (unsigned long) __alloc_bootmem(size, 1 << fls(size), 0); } else { - ebase = CKSEG0; +#ifdef CONFIG_KVM_GUEST +#define KVM_GUEST_KSEG0 0x40000000 + ebase = KVM_GUEST_KSEG0; +#else + ebase = CKSEG0; +#endif if (cpu_has_mips_r2) ebase += (read_c0_ebase() & 0x3ffff000); } @@ -1816,11 +1950,11 @@ void __init trap_init(void) if (cpu_has_vce) /* Special exception: R4[04]00 uses also the divec space. 
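
For the no-shadow-set case, set_vi_srs_handler() above writes a direct jump into the vector and stores it as two halfwords: 0x08000000 | ((handler & 0x0fffffff) >> 2) classically, or 0xd4000000 | ((handler & 0x07ffffff) >> 1) for microMIPS. A small sketch of building and splitting that encoding; the handler address below is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Build a 'j target' encoding as done for the interrupt vectors: the classic
 * J takes a 26-bit word index, the microMIPS J32 a 26-bit halfword index. */
static uint32_t encode_j(uint32_t handler, int micromips)
{
	if (micromips)
		return 0xd4000000u | ((handler & 0x07ffffffu) >> 1);
	return 0x08000000u | ((handler & 0x0fffffffu) >> 2);
}

int main(void)
{
	uint32_t handler = 0x80104a60;		/* arbitrary KSEG0-style address */
	uint32_t insn = encode_j(handler, 0);
	uint16_t patch[2] = {			/* written to the vector as halfwords */
		(uint16_t)(insn >> 16),
		(uint16_t)(insn & 0xffff),
	};

	printf("j encoding %#x -> halfwords %#x %#x\n", insn, patch[0], patch[1]);
	return 0;
}
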
*/ - memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); + set_handler(0x180, &except_vec3_r4000, 0x100); else if (cpu_has_4kex) - memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); + set_handler(0x180, &except_vec3_generic, 0x80); else - memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); + set_handler(0x080, &except_vec3_generic, 0x80); local_flush_icache_range(ebase, ebase + 0x400); flush_tlb_handlers(); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 6087a54c86a0..203d8857070d 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -83,8 +83,12 @@ #include <asm/branch.h> #include <asm/byteorder.h> #include <asm/cop2.h> +#include <asm/fpu.h> +#include <asm/fpu_emulator.h> #include <asm/inst.h> #include <asm/uaccess.h> +#include <asm/fpu.h> +#include <asm/fpu_emulator.h> #define STR(x) __STR(x) #define __STR(x) #x @@ -102,12 +106,332 @@ static u32 unaligned_action; #endif extern void show_registers(struct pt_regs *regs); +#ifdef __BIG_ENDIAN +#define LoadHW(addr, value, res) \ + __asm__ __volatile__ (".set\tnoat\n" \ + "1:\tlb\t%0, 0(%2)\n" \ + "2:\tlbu\t$1, 1(%2)\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + "3:\t.set\tat\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tlwl\t%0, (%2)\n" \ + "2:\tlwr\t%0, 3(%2)\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadHWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tnoat\n" \ + "1:\tlbu\t%0, 0(%2)\n" \ + "2:\tlbu\t$1, 1(%2)\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".set\tat\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tlwl\t%0, (%2)\n" \ + "2:\tlwr\t%0, 3(%2)\n\t" \ + "dsll\t%0, %0, 32\n\t" \ + "dsrl\t%0, %0, 32\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + "\t.section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tldl\t%0, (%2)\n" \ + "2:\tldr\t%0, 7(%2)\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + "\t.section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define StoreHW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tnoat\n" \ + "1:\tsb\t%1, 1(%2)\n\t" \ + "srl\t$1, %1, 0x8\n" \ + 
"2:\tsb\t$1, 0(%2)\n\t" \ + ".set\tat\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); + +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tswl\t%1,(%2)\n" \ + "2:\tswr\t%1, 3(%2)\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tsdl\t%1,(%2)\n" \ + "2:\tsdr\t%1, 7(%2)\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); +#endif + +#ifdef __LITTLE_ENDIAN +#define LoadHW(addr, value, res) \ + __asm__ __volatile__ (".set\tnoat\n" \ + "1:\tlb\t%0, 1(%2)\n" \ + "2:\tlbu\t$1, 0(%2)\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + "3:\t.set\tat\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tlwl\t%0, 3(%2)\n" \ + "2:\tlwr\t%0, (%2)\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadHWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tnoat\n" \ + "1:\tlbu\t%0, 1(%2)\n" \ + "2:\tlbu\t$1, 0(%2)\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".set\tat\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tlwl\t%0, 3(%2)\n" \ + "2:\tlwr\t%0, (%2)\n\t" \ + "dsll\t%0, %0, 32\n\t" \ + "dsrl\t%0, %0, 32\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + "\t.section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tldl\t%0, 7(%2)\n" \ + "2:\tldr\t%0, (%2)\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + "\t.section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + 
".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define StoreHW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tnoat\n" \ + "1:\tsb\t%1, 0(%2)\n\t" \ + "srl\t$1,%1, 0x8\n" \ + "2:\tsb\t$1, 1(%2)\n\t" \ + ".set\tat\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); + +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tswl\t%1, 3(%2)\n" \ + "2:\tswr\t%1, (%2)\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tsdl\t%1, 7(%2)\n" \ + "2:\tsdr\t%1, (%2)\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); +#endif + static void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int __user *pc) { union mips_instruction insn; unsigned long value; unsigned int res; + unsigned long origpc; + unsigned long orig31; + void __user *fault_addr = NULL; + + origpc = (unsigned long)pc; + orig31 = regs->regs[31]; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); @@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs, __get_user(insn.word, pc); switch (insn.i_format.opcode) { - /* - * These are instructions that a compiler doesn't generate. We - * can assume therefore that the code is MIPS-aware and - * really buggy. Emulating these instructions would break the - * semantics anyway. - */ + /* + * These are instructions that a compiler doesn't generate. We + * can assume therefore that the code is MIPS-aware and + * really buggy. Emulating these instructions would break the + * semantics anyway. + */ case ll_op: case lld_op: case sc_op: case scd_op: - /* - * For these instructions the only way to create an address - * error is an attempted access to kernel/supervisor address - * space. - */ + /* + * For these instructions the only way to create an address + * error is an attempted access to kernel/supervisor address + * space. + */ case ldl_op: case ldr_op: case lwl_op: @@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, case sb_op: goto sigbus; - /* - * The remaining opcodes are the ones that are really of interest. - */ + /* + * The remaining opcodes are the ones that are really of + * interest. 
+ */ case lh_op: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; - __asm__ __volatile__ (".set\tnoat\n" -#ifdef __BIG_ENDIAN - "1:\tlb\t%0, 0(%2)\n" - "2:\tlbu\t$1, 1(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tlb\t%0, 1(%2)\n" - "2:\tlbu\t$1, 0(%2)\n\t" -#endif - "sll\t%0, 0x8\n\t" - "or\t%0, $1\n\t" - "li\t%1, 0\n" - "3:\t.set\tat\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadHW(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tlwl\t%0, (%2)\n" - "2:\tlwr\t%0, 3(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tlwl\t%0, 3(%2)\n" - "2:\tlwr\t%0, (%2)\n\t" -#endif - "li\t%1, 0\n" - "3:\t.section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadW(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; - __asm__ __volatile__ ( - ".set\tnoat\n" -#ifdef __BIG_ENDIAN - "1:\tlbu\t%0, 0(%2)\n" - "2:\tlbu\t$1, 1(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tlbu\t%0, 1(%2)\n" - "2:\tlbu\t$1, 0(%2)\n\t" -#endif - "sll\t%0, 0x8\n\t" - "or\t%0, $1\n\t" - "li\t%1, 0\n" - "3:\t.set\tat\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadHWU(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tlwl\t%0, (%2)\n" - "2:\tlwr\t%0, 3(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tlwl\t%0, 3(%2)\n" - "2:\tlwr\t%0, (%2)\n\t" -#endif - "dsll\t%0, %0, 32\n\t" - "dsrl\t%0, %0, 32\n\t" - "li\t%1, 0\n" - "3:\t.section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadWU(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 8)) goto sigbus; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tldl\t%0, (%2)\n" - "2:\tldr\t%0, 7(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tldl\t%0, 7(%2)\n" - "2:\tldr\t%0, (%2)\n\t" -#endif - "li\t%1, 0\n" - "3:\t.section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadDW(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, addr, 
2)) goto sigbus; + compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - ".set\tnoat\n" - "1:\tsb\t%1, 1(%2)\n\t" - "srl\t$1, %1, 0x8\n" - "2:\tsb\t$1, 0(%2)\n\t" - ".set\tat\n\t" -#endif -#ifdef __LITTLE_ENDIAN - ".set\tnoat\n" - "1:\tsb\t%1, 0(%2)\n\t" - "srl\t$1,%1, 0x8\n" - "2:\tsb\t$1, 1(%2)\n\t" - ".set\tat\n\t" -#endif - "li\t%0, 0\n" - "3:\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%0, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=r" (res) - : "r" (value), "r" (addr), "i" (-EFAULT)); + StoreHW(addr, value, res); if (res) goto fault; - compute_return_epc(regs); break; case sw_op: if (!access_ok(VERIFY_WRITE, addr, 4)) goto sigbus; + compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tswl\t%1,(%2)\n" - "2:\tswr\t%1, 3(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tswl\t%1, 3(%2)\n" - "2:\tswr\t%1, (%2)\n\t" -#endif - "li\t%0, 0\n" - "3:\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%0, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=r" (res) - : "r" (value), "r" (addr), "i" (-EFAULT)); + StoreW(addr, value, res); if (res) goto fault; - compute_return_epc(regs); break; case sd_op: @@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, addr, 8)) goto sigbus; + compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tsdl\t%1,(%2)\n" - "2:\tsdr\t%1, 7(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tsdl\t%1, 7(%2)\n" - "2:\tsdr\t%1, (%2)\n\t" -#endif - "li\t%0, 0\n" - "3:\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%0, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=r" (res) - : "r" (value), "r" (addr), "i" (-EFAULT)); + StoreDW(addr, value, res); if (res) goto fault; - compute_return_epc(regs); break; #endif /* CONFIG_64BIT */ @@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs, case ldc1_op: case swc1_op: case sdc1_op: - /* - * I herewith declare: this does not happen. So send SIGBUS. - */ - goto sigbus; + die_if_kernel("Unaligned FP access in kernel code", regs); + BUG_ON(!used_math()); + BUG_ON(!is_fpu_owner()); + + lose_fpu(1); /* Save FPU state for the emulator. */ + res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, + &fault_addr); + own_fpu(1); /* Restore FPU state. */ + + /* Signal if something went wrong. */ + process_fpemu_return(res, fault_addr); + + if (res == 0) + break; + return; /* * COP2 is available to implementor for application specific use. @@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs, return; fault: + /* roll back jump/branch */ + regs->cp0_epc = origpc; + regs->regs[31] = orig31; /* Did we have an exception handler installed? */ if (fixup_exception(regs)) return; @@ -504,10 +673,881 @@ sigbus: return; sigill: - die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs); + die_if_kernel + ("Unhandled kernel unaligned access or invalid instruction", regs); force_sig(SIGILL, current); } +/* Recode table from 16-bit register notation to 32-bit GPR. 
*/ +const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; + +/* Recode table from 16-bit STORE register notation to 32-bit GPR. */ +const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; + +void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr) +{ + unsigned long value; + unsigned int res; + int i; + unsigned int reg = 0, rvar; + unsigned long orig31; + u16 __user *pc16; + u16 halfword; + unsigned int word; + unsigned long origpc, contpc; + union mips_instruction insn; + struct mm_decoded_insn mminsn; + void __user *fault_addr = NULL; + + origpc = regs->cp0_epc; + orig31 = regs->regs[31]; + + mminsn.micro_mips_mode = 1; + + /* + * This load never faults. + */ + pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); + __get_user(halfword, pc16); + pc16++; + contpc = regs->cp0_epc + 2; + word = ((unsigned int)halfword << 16); + mminsn.pc_inc = 2; + + if (!mm_insn_16bit(halfword)) { + __get_user(halfword, pc16); + pc16++; + contpc = regs->cp0_epc + 4; + mminsn.pc_inc = 4; + word |= halfword; + } + mminsn.insn = word; + + if (get_user(halfword, pc16)) + goto fault; + mminsn.next_pc_inc = 2; + word = ((unsigned int)halfword << 16); + + if (!mm_insn_16bit(halfword)) { + pc16++; + if (get_user(halfword, pc16)) + goto fault; + mminsn.next_pc_inc = 4; + word |= halfword; + } + mminsn.next_insn = word; + + insn = (union mips_instruction)(mminsn.insn); + if (mm_isBranchInstr(regs, mminsn, &contpc)) + insn = (union mips_instruction)(mminsn.next_insn); + + /* Parse instruction to find what to do */ + + switch (insn.mm_i_format.opcode) { + + case mm_pool32a_op: + switch (insn.mm_x_format.func) { + case mm_lwxs_op: + reg = insn.mm_x_format.rd; + goto loadW; + } + + goto sigbus; + + case mm_pool32b_op: + switch (insn.mm_m_format.func) { + case mm_lwp_func: + reg = insn.mm_m_format.rd; + if (reg == 31) + goto sigbus; + + if (!access_ok(VERIFY_READ, addr, 8)) + goto sigbus; + + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + addr += 4; + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[reg + 1] = value; + goto success; + + case mm_swp_func: + reg = insn.mm_m_format.rd; + if (reg == 31) + goto sigbus; + + if (!access_ok(VERIFY_WRITE, addr, 8)) + goto sigbus; + + value = regs->regs[reg]; + StoreW(addr, value, res); + if (res) + goto fault; + addr += 4; + value = regs->regs[reg + 1]; + StoreW(addr, value, res); + if (res) + goto fault; + goto success; + + case mm_ldp_func: +#ifdef CONFIG_64BIT + reg = insn.mm_m_format.rd; + if (reg == 31) + goto sigbus; + + if (!access_ok(VERIFY_READ, addr, 16)) + goto sigbus; + + LoadDW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + addr += 8; + LoadDW(addr, value, res); + if (res) + goto fault; + regs->regs[reg + 1] = value; + goto success; +#endif /* CONFIG_64BIT */ + + goto sigill; + + case mm_sdp_func: +#ifdef CONFIG_64BIT + reg = insn.mm_m_format.rd; + if (reg == 31) + goto sigbus; + + if (!access_ok(VERIFY_WRITE, addr, 16)) + goto sigbus; + + value = regs->regs[reg]; + StoreDW(addr, value, res); + if (res) + goto fault; + addr += 8; + value = regs->regs[reg + 1]; + StoreDW(addr, value, res); + if (res) + goto fault; + goto success; +#endif /* CONFIG_64BIT */ + + goto sigill; + + case mm_lwm32_func: + reg = insn.mm_m_format.rd; + rvar = reg & 0xf; + if ((rvar > 9) || !reg) + goto sigill; + if (reg & 0x10) { + if (!access_ok + (VERIFY_READ, addr, 4 * (rvar + 1))) + goto sigbus; + } else { + if (!access_ok(VERIFY_READ, addr, 4 * rvar)) + goto sigbus; + } + if (rvar == 9) + 
rvar = 8; + for (i = 16; rvar; rvar--, i++) { + LoadW(addr, value, res); + if (res) + goto fault; + addr += 4; + regs->regs[i] = value; + } + if ((reg & 0xf) == 9) { + LoadW(addr, value, res); + if (res) + goto fault; + addr += 4; + regs->regs[30] = value; + } + if (reg & 0x10) { + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[31] = value; + } + goto success; + + case mm_swm32_func: + reg = insn.mm_m_format.rd; + rvar = reg & 0xf; + if ((rvar > 9) || !reg) + goto sigill; + if (reg & 0x10) { + if (!access_ok + (VERIFY_WRITE, addr, 4 * (rvar + 1))) + goto sigbus; + } else { + if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) + goto sigbus; + } + if (rvar == 9) + rvar = 8; + for (i = 16; rvar; rvar--, i++) { + value = regs->regs[i]; + StoreW(addr, value, res); + if (res) + goto fault; + addr += 4; + } + if ((reg & 0xf) == 9) { + value = regs->regs[30]; + StoreW(addr, value, res); + if (res) + goto fault; + addr += 4; + } + if (reg & 0x10) { + value = regs->regs[31]; + StoreW(addr, value, res); + if (res) + goto fault; + } + goto success; + + case mm_ldm_func: +#ifdef CONFIG_64BIT + reg = insn.mm_m_format.rd; + rvar = reg & 0xf; + if ((rvar > 9) || !reg) + goto sigill; + if (reg & 0x10) { + if (!access_ok + (VERIFY_READ, addr, 8 * (rvar + 1))) + goto sigbus; + } else { + if (!access_ok(VERIFY_READ, addr, 8 * rvar)) + goto sigbus; + } + if (rvar == 9) + rvar = 8; + + for (i = 16; rvar; rvar--, i++) { + LoadDW(addr, value, res); + if (res) + goto fault; + addr += 4; + regs->regs[i] = value; + } + if ((reg & 0xf) == 9) { + LoadDW(addr, value, res); + if (res) + goto fault; + addr += 8; + regs->regs[30] = value; + } + if (reg & 0x10) { + LoadDW(addr, value, res); + if (res) + goto fault; + regs->regs[31] = value; + } + goto success; +#endif /* CONFIG_64BIT */ + + goto sigill; + + case mm_sdm_func: +#ifdef CONFIG_64BIT + reg = insn.mm_m_format.rd; + rvar = reg & 0xf; + if ((rvar > 9) || !reg) + goto sigill; + if (reg & 0x10) { + if (!access_ok + (VERIFY_WRITE, addr, 8 * (rvar + 1))) + goto sigbus; + } else { + if (!access_ok(VERIFY_WRITE, addr, 8 * rvar)) + goto sigbus; + } + if (rvar == 9) + rvar = 8; + + for (i = 16; rvar; rvar--, i++) { + value = regs->regs[i]; + StoreDW(addr, value, res); + if (res) + goto fault; + addr += 8; + } + if ((reg & 0xf) == 9) { + value = regs->regs[30]; + StoreDW(addr, value, res); + if (res) + goto fault; + addr += 8; + } + if (reg & 0x10) { + value = regs->regs[31]; + StoreDW(addr, value, res); + if (res) + goto fault; + } + goto success; +#endif /* CONFIG_64BIT */ + + goto sigill; + + /* LWC2, SWC2, LDC2, SDC2 are not serviced */ + } + + goto sigbus; + + case mm_pool32c_op: + switch (insn.mm_m_format.func) { + case mm_lwu_func: + reg = insn.mm_m_format.rd; + goto loadWU; + } + + /* LL,SC,LLD,SCD are not serviced */ + goto sigbus; + + case mm_pool32f_op: + switch (insn.mm_x_format.func) { + case mm_lwxc1_func: + case mm_swxc1_func: + case mm_ldxc1_func: + case mm_sdxc1_func: + goto fpu_emul; + } + + goto sigbus; + + case mm_ldc132_op: + case mm_sdc132_op: + case mm_lwc132_op: + case mm_swc132_op: +fpu_emul: + /* roll back jump/branch */ + regs->cp0_epc = origpc; + regs->regs[31] = orig31; + + die_if_kernel("Unaligned FP access in kernel code", regs); + BUG_ON(!used_math()); + BUG_ON(!is_fpu_owner()); + + lose_fpu(1); /* save the FPU state for the emulator */ + res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, + &fault_addr); + own_fpu(1); /* restore FPU state */ + + /* If something went wrong, signal */ + process_fpemu_return(res, fault_addr); + 
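+		/*
+		 * Note that cp0_epc and $31 were rolled back to their
+		 * pre-branch values at the fpu_emul label above, so any
+		 * signal raised by process_fpemu_return() points at the
+		 * original instruction; on success, the common "success"
+		 * label below writes the precomputed contpc into cp0_epc.
+		 */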
+ if (res == 0) + goto success; + return; + + case mm_lh32_op: + reg = insn.mm_i_format.rt; + goto loadHW; + + case mm_lhu32_op: + reg = insn.mm_i_format.rt; + goto loadHWU; + + case mm_lw32_op: + reg = insn.mm_i_format.rt; + goto loadW; + + case mm_sh32_op: + reg = insn.mm_i_format.rt; + goto storeHW; + + case mm_sw32_op: + reg = insn.mm_i_format.rt; + goto storeW; + + case mm_ld32_op: + reg = insn.mm_i_format.rt; + goto loadDW; + + case mm_sd32_op: + reg = insn.mm_i_format.rt; + goto storeDW; + + case mm_pool16c_op: + switch (insn.mm16_m_format.func) { + case mm_lwm16_op: + reg = insn.mm16_m_format.rlist; + rvar = reg + 1; + if (!access_ok(VERIFY_READ, addr, 4 * rvar)) + goto sigbus; + + for (i = 16; rvar; rvar--, i++) { + LoadW(addr, value, res); + if (res) + goto fault; + addr += 4; + regs->regs[i] = value; + } + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[31] = value; + + goto success; + + case mm_swm16_op: + reg = insn.mm16_m_format.rlist; + rvar = reg + 1; + if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) + goto sigbus; + + for (i = 16; rvar; rvar--, i++) { + value = regs->regs[i]; + StoreW(addr, value, res); + if (res) + goto fault; + addr += 4; + } + value = regs->regs[31]; + StoreW(addr, value, res); + if (res) + goto fault; + + goto success; + + } + + goto sigbus; + + case mm_lhu16_op: + reg = reg16to32[insn.mm16_rb_format.rt]; + goto loadHWU; + + case mm_lw16_op: + reg = reg16to32[insn.mm16_rb_format.rt]; + goto loadW; + + case mm_sh16_op: + reg = reg16to32st[insn.mm16_rb_format.rt]; + goto storeHW; + + case mm_sw16_op: + reg = reg16to32st[insn.mm16_rb_format.rt]; + goto storeW; + + case mm_lwsp16_op: + reg = insn.mm16_r5_format.rt; + goto loadW; + + case mm_swsp16_op: + reg = insn.mm16_r5_format.rt; + goto storeW; + + case mm_lwgp16_op: + reg = reg16to32[insn.mm16_r3_format.rt]; + goto loadW; + + default: + goto sigill; + } + +loadHW: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + LoadHW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; + +loadHWU: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + LoadHWU(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; + +loadW: + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; + +loadWU: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + LoadWU(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + +loadDW: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. 
+ */ + if (!access_ok(VERIFY_READ, addr, 8)) + goto sigbus; + + LoadDW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + +storeHW: + if (!access_ok(VERIFY_WRITE, addr, 2)) + goto sigbus; + + value = regs->regs[reg]; + StoreHW(addr, value, res); + if (res) + goto fault; + goto success; + +storeW: + if (!access_ok(VERIFY_WRITE, addr, 4)) + goto sigbus; + + value = regs->regs[reg]; + StoreW(addr, value, res); + if (res) + goto fault; + goto success; + +storeDW: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_WRITE, addr, 8)) + goto sigbus; + + value = regs->regs[reg]; + StoreDW(addr, value, res); + if (res) + goto fault; + goto success; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + +success: + regs->cp0_epc = contpc; /* advance or branch */ + +#ifdef CONFIG_DEBUG_FS + unaligned_instructions++; +#endif + return; + +fault: + /* roll back jump/branch */ + regs->cp0_epc = origpc; + regs->regs[31] = orig31; + /* Did we have an exception handler installed? */ + if (fixup_exception(regs)) + return; + + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGSEGV, current); + + return; + +sigbus: + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGBUS, current); + + return; + +sigill: + die_if_kernel + ("Unhandled kernel unaligned access or invalid instruction", regs); + force_sig(SIGILL, current); +} + +static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) +{ + unsigned long value; + unsigned int res; + int reg; + unsigned long orig31; + u16 __user *pc16; + unsigned long origpc; + union mips16e_instruction mips16inst, oldinst; + + origpc = regs->cp0_epc; + orig31 = regs->regs[31]; + pc16 = (unsigned short __user *)msk_isa16_mode(origpc); + /* + * This load never faults. 
+ */ + __get_user(mips16inst.full, pc16); + oldinst = mips16inst; + + /* skip EXTEND instruction */ + if (mips16inst.ri.opcode == MIPS16e_extend_op) { + pc16++; + __get_user(mips16inst.full, pc16); + } else if (delay_slot(regs)) { + /* skip jump instructions */ + /* JAL/JALX are 32 bits but have OPCODE in first short int */ + if (mips16inst.ri.opcode == MIPS16e_jal_op) + pc16++; + pc16++; + if (get_user(mips16inst.full, pc16)) + goto sigbus; + } + + switch (mips16inst.ri.opcode) { + case MIPS16e_i64_op: /* I64 or RI64 instruction */ + switch (mips16inst.i64.func) { /* I64/RI64 func field check */ + case MIPS16e_ldpc_func: + case MIPS16e_ldsp_func: + reg = reg16to32[mips16inst.ri64.ry]; + goto loadDW; + + case MIPS16e_sdsp_func: + reg = reg16to32[mips16inst.ri64.ry]; + goto writeDW; + + case MIPS16e_sdrasp_func: + reg = 29; /* GPRSP */ + goto writeDW; + } + + goto sigbus; + + case MIPS16e_swsp_op: + case MIPS16e_lwpc_op: + case MIPS16e_lwsp_op: + reg = reg16to32[mips16inst.ri.rx]; + break; + + case MIPS16e_i8_op: + if (mips16inst.i8.func != MIPS16e_swrasp_func) + goto sigbus; + reg = 29; /* GPRSP */ + break; + + default: + reg = reg16to32[mips16inst.rri.ry]; + break; + } + + switch (mips16inst.ri.opcode) { + + case MIPS16e_lb_op: + case MIPS16e_lbu_op: + case MIPS16e_sb_op: + goto sigbus; + + case MIPS16e_lh_op: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + LoadHW(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; + + case MIPS16e_lhu_op: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + LoadHWU(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; + + case MIPS16e_lw_op: + case MIPS16e_lwpc_op: + case MIPS16e_lwsp_op: + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + LoadW(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; + + case MIPS16e_lwu_op: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + LoadWU(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + case MIPS16e_ld_op: +loadDW: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. 
+ */ + if (!access_ok(VERIFY_READ, addr, 8)) + goto sigbus; + + LoadDW(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + case MIPS16e_sh_op: + if (!access_ok(VERIFY_WRITE, addr, 2)) + goto sigbus; + + MIPS16e_compute_return_epc(regs, &oldinst); + value = regs->regs[reg]; + StoreHW(addr, value, res); + if (res) + goto fault; + break; + + case MIPS16e_sw_op: + case MIPS16e_swsp_op: + case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ + if (!access_ok(VERIFY_WRITE, addr, 4)) + goto sigbus; + + MIPS16e_compute_return_epc(regs, &oldinst); + value = regs->regs[reg]; + StoreW(addr, value, res); + if (res) + goto fault; + break; + + case MIPS16e_sd_op: +writeDW: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_WRITE, addr, 8)) + goto sigbus; + + MIPS16e_compute_return_epc(regs, &oldinst); + value = regs->regs[reg]; + StoreDW(addr, value, res); + if (res) + goto fault; + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + default: + /* + * Pheeee... We encountered an yet unknown instruction or + * cache coherence problem. Die sucker, die ... + */ + goto sigill; + } + +#ifdef CONFIG_DEBUG_FS + unaligned_instructions++; +#endif + + return; + +fault: + /* roll back jump/branch */ + regs->cp0_epc = origpc; + regs->regs[31] = orig31; + /* Did we have an exception handler installed? */ + if (fixup_exception(regs)) + return; + + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGSEGV, current); + + return; + +sigbus: + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGBUS, current); + + return; + +sigill: + die_if_kernel + ("Unhandled kernel unaligned access or invalid instruction", regs); + force_sig(SIGILL, current); +} asmlinkage void do_ade(struct pt_regs *regs) { unsigned int __user *pc; @@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs) 1, regs, regs->cp0_badvaddr); /* * Did we catch a fault trying to load an instruction? - * Or are we running in MIPS16 mode? */ - if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) + if (regs->cp0_badvaddr == regs->cp0_epc) goto sigbus; - pc = (unsigned int __user *) exception_epc(regs); if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) goto sigbus; if (unaligned_action == UNALIGNED_ACTION_SIGNAL) goto sigbus; - else if (unaligned_action == UNALIGNED_ACTION_SHOW) - show_registers(regs); /* * Do branch emulation only if we didn't forward the exception. * This is all so but ugly ... */ + + /* + * Are we running in microMIPS mode? + */ + if (get_isa16_mode(regs->cp0_epc)) { + /* + * Did we catch a fault trying to load an instruction in + * 16-bit mode? 
+ */ + if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) + goto sigbus; + if (unaligned_action == UNALIGNED_ACTION_SHOW) + show_registers(regs); + + if (cpu_has_mmips) { + seg = get_fs(); + if (!user_mode(regs)) + set_fs(KERNEL_DS); + emulate_load_store_microMIPS(regs, + (void __user *)regs->cp0_badvaddr); + set_fs(seg); + + return; + } + + if (cpu_has_mips16) { + seg = get_fs(); + if (!user_mode(regs)) + set_fs(KERNEL_DS); + emulate_load_store_MIPS16e(regs, + (void __user *)regs->cp0_badvaddr); + set_fs(seg); + + return; + } + + goto sigbus; + } + + if (unaligned_action == UNALIGNED_ACTION_SHOW) + show_registers(regs); + pc = (unsigned int __user *)exception_epc(regs); + seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt new file mode 100644 index 000000000000..51617e481aa3 --- /dev/null +++ b/arch/mips/kvm/00README.txt @@ -0,0 +1,31 @@ +KVM/MIPS Trap & Emulate Release Notes +===================================== + +(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms: + Malta Board with FPGA based 34K + Sigma Designs TangoX board with a 24K based 8654 SoC. + Malta Board with 74K @ 1GHz + +(2) Both Guest kernel and Guest Userspace execute in UM. + Guest User address space: 0x00000000 -> 0x40000000 + Guest Kernel Unmapped: 0x40000000 -> 0x60000000 + Guest Kernel Mapped: 0x60000000 -> 0x80000000 + + Guest Usermode virtual memory is limited to 1GB. + +(2) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K. + Note that due to cache aliasing issues, 4K page sizes are NOT supported. + +(3) No HugeTLB Support + Both the host kernel and Guest kernel should have the page size set to 16K. + This will be implemented in a future release. + +(4) KVM/MIPS does not have support for SMP Guests + Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers: + LL/TLBP/SC. Since the TLBP instruction causes a trap the reservation gets cleared + when we ERET back to the guest. This causes the guest to hang in an infinite loop. + This will be fixed in a future release. + +(5) Use Host FPU + Currently KVM/MIPS emulates a 24K CPU without a FPU. + This will be fixed in a future release diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig new file mode 100644 index 000000000000..2c15590e55f7 --- /dev/null +++ b/arch/mips/kvm/Kconfig @@ -0,0 +1,49 @@ +# +# KVM configuration +# +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + depends on HAVE_KVM + ---help--- + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + depends on HAVE_KVM + select PREEMPT_NOTIFIERS + select ANON_INODES + select KVM_MMIO + ---help--- + Support for hosting Guest kernels. + Currently supported on MIPS32 processors. + +config KVM_MIPS_DYN_TRANS + bool "KVM/MIPS: Dynamic binary translation to reduce traps" + depends on KVM + ---help--- + When running in Trap & Emulate mode patch privileged + instructions to reduce the number of traps. + + If unsure, say Y. 
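The help text above only hints at what the translation actually does. As a rough illustration (not part of this patch), the sketch below shows the core of the rewrite using the LW_TEMPLATE opcode and the commpage layout introduced in kvm_mips_dyntrans.c and kvm_mips_comm.h further down in this diff: a trapping "mfc0 rt, rd, sel" is replaced in place by a load of the shadow COP0 register kept in the commpage. The standalone helper name, the missing icache flush, and the omitted ERRCTL special case are simplifications for illustration only.

#include <stdint.h>
#include <stddef.h>

/* "lw rt, offset($zero)" opcode template, as in kvm_mips_dyntrans.c */
#define LW_TEMPLATE 0x8c000000

/*
 * Build the replacement for a trapping "mfc0 rt, rd, sel": a load from
 * the shadow COP0 state that lives in the guest-visible commpage
 * (struct kvm_mips_commpage), reached through the wired TLB entry set
 * up in kvm_mips_init_tlbs().  Mirrors kvm_mips_trans_mfc0() minus the
 * icache maintenance and the ERRCTL special case.
 */
uint32_t mfc0_to_commpage_load(uint32_t mfc0_inst, size_t commpage_offset)
{
	uint32_t rt = (mfc0_inst >> 16) & 0x1f;	/* destination GPR of the mfc0 */

	return LW_TEMPLATE | (rt << 16) | (commpage_offset & 0xffff);
}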
+ +config KVM_MIPS_DEBUG_COP0_COUNTERS + bool "Maintain counters for COP0 accesses" + depends on KVM + ---help--- + Maintain statistics for Guest COP0 accesses. + A histogram of COP0 accesses is printed when the VM is + shutdown. + + If unsure, say N. + +source drivers/vhost/Kconfig + +endif # VIRTUALIZATION diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile new file mode 100644 index 000000000000..78d87bbc99db --- /dev/null +++ b/arch/mips/kvm/Makefile @@ -0,0 +1,13 @@ +# Makefile for KVM support for MIPS +# + +common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) + +EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm + +kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \ + kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \ + kvm_mips_dyntrans.o kvm_trap_emul.o + +obj-$(CONFIG_KVM) += kvm.o +obj-y += kvm_cb.o kvm_tlb.o diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c new file mode 100644 index 000000000000..313c2e37b978 --- /dev/null +++ b/arch/mips/kvm/kvm_cb.c @@ -0,0 +1,14 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Yann Le Du <ledu@kymasys.com> + */ + +#include <linux/export.h> +#include <linux/kvm_host.h> + +struct kvm_mips_callbacks *kvm_mips_callbacks; +EXPORT_SYMBOL(kvm_mips_callbacks); diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S new file mode 100644 index 000000000000..dca2aa665993 --- /dev/null +++ b/arch/mips/kvm/kvm_locore.S @@ -0,0 +1,650 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Main entry point for the guest, exception handling. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/regdef.h> +#include <asm/mipsregs.h> +#include <asm/stackframe.h> +#include <asm/asm-offsets.h> + + +#define _C_LABEL(x) x +#define MIPSX(name) mips32_ ## name +#define CALLFRAME_SIZ 32 + +/* + * VECTOR + * exception vector entrypoint + */ +#define VECTOR(x, regmask) \ + .ent _C_LABEL(x),0; \ + EXPORT(x); + +#define VECTOR_END(x) \ + EXPORT(x); + +/* Overload, Danger Will Robinson!! */ +#define PT_HOST_ASID PT_BVADDR +#define PT_HOST_USERLOCAL PT_EPC + +#define CP0_DDATA_LO $28,3 +#define CP0_EBASE $15,1 + +#define CP0_INTCTL $12,1 +#define CP0_SRSCTL $12,2 +#define CP0_SRSMAP $12,3 +#define CP0_HWRENA $7,0 + +/* Resume Flags */ +#define RESUME_FLAG_HOST (1<<1) /* Resume host? 
*/ + +#define RESUME_GUEST 0 +#define RESUME_HOST RESUME_FLAG_HOST + +/* + * __kvm_mips_vcpu_run: entry point to the guest + * a0: run + * a1: vcpu + */ + +FEXPORT(__kvm_mips_vcpu_run) + .set push + .set noreorder + .set noat + + /* k0/k1 not being used in host kernel context */ + addiu k1,sp, -PT_SIZE + LONG_S $0, PT_R0(k1) + LONG_S $1, PT_R1(k1) + LONG_S $2, PT_R2(k1) + LONG_S $3, PT_R3(k1) + + LONG_S $4, PT_R4(k1) + LONG_S $5, PT_R5(k1) + LONG_S $6, PT_R6(k1) + LONG_S $7, PT_R7(k1) + + LONG_S $8, PT_R8(k1) + LONG_S $9, PT_R9(k1) + LONG_S $10, PT_R10(k1) + LONG_S $11, PT_R11(k1) + LONG_S $12, PT_R12(k1) + LONG_S $13, PT_R13(k1) + LONG_S $14, PT_R14(k1) + LONG_S $15, PT_R15(k1) + LONG_S $16, PT_R16(k1) + LONG_S $17, PT_R17(k1) + + LONG_S $18, PT_R18(k1) + LONG_S $19, PT_R19(k1) + LONG_S $20, PT_R20(k1) + LONG_S $21, PT_R21(k1) + LONG_S $22, PT_R22(k1) + LONG_S $23, PT_R23(k1) + LONG_S $24, PT_R24(k1) + LONG_S $25, PT_R25(k1) + + /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */ + + LONG_S $28, PT_R28(k1) + LONG_S $29, PT_R29(k1) + LONG_S $30, PT_R30(k1) + LONG_S $31, PT_R31(k1) + + /* Save hi/lo */ + mflo v0 + LONG_S v0, PT_LO(k1) + mfhi v1 + LONG_S v1, PT_HI(k1) + + /* Save host status */ + mfc0 v0, CP0_STATUS + LONG_S v0, PT_STATUS(k1) + + /* Save host ASID, shove it into the BVADDR location */ + mfc0 v1,CP0_ENTRYHI + andi v1, 0xff + LONG_S v1, PT_HOST_ASID(k1) + + /* Save DDATA_LO, will be used to store pointer to vcpu */ + mfc0 v1, CP0_DDATA_LO + LONG_S v1, PT_HOST_USERLOCAL(k1) + + /* DDATA_LO has pointer to vcpu */ + mtc0 a1,CP0_DDATA_LO + + /* Offset into vcpu->arch */ + addiu k1, a1, VCPU_HOST_ARCH + + /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */ + LONG_S sp, VCPU_HOST_STACK(k1) + + /* Save the kernel gp as well */ + LONG_S gp, VCPU_HOST_GP(k1) + + /* Setup status register for running the guest in UM, interrupts are disabled */ + li k0,(ST0_EXL | KSU_USER| ST0_BEV) + mtc0 k0,CP0_STATUS + ehb + + /* load up the new EBASE */ + LONG_L k0, VCPU_GUEST_EBASE(k1) + mtc0 k0,CP0_EBASE + + /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was + * but make sure that timer interrupts are enabled + */ + li k0,(ST0_EXL | KSU_USER | ST0_IE) + andi v0, v0, ST0_IM + or k0, k0, v0 + mtc0 k0,CP0_STATUS + ehb + + + /* Set Guest EPC */ + LONG_L t0, VCPU_PC(k1) + mtc0 t0, CP0_EPC + +FEXPORT(__kvm_mips_load_asid) + /* Set the ASID for the Guest Kernel */ + sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ + /* addresses shift to 0x80000000 */ + bltz t0, 1f /* If kernel */ + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ +1: + /* t1: contains the base of the ASID array, need to get the cpu id */ + LONG_L t2, TI_CPU($28) /* smp_processor_id */ + sll t2, t2, 2 /* x4 */ + addu t3, t1, t2 + LONG_L k0, (t3) + andi k0, k0, 0xff + mtc0 k0,CP0_ENTRYHI + ehb + + /* Disable RDHWR access */ + mtc0 zero, CP0_HWRENA + + /* Now load up the Guest Context from VCPU */ + LONG_L $1, VCPU_R1(k1) + LONG_L $2, VCPU_R2(k1) + LONG_L $3, VCPU_R3(k1) + + LONG_L $4, VCPU_R4(k1) + LONG_L $5, VCPU_R5(k1) + LONG_L $6, VCPU_R6(k1) + LONG_L $7, VCPU_R7(k1) + + LONG_L $8, VCPU_R8(k1) + LONG_L $9, VCPU_R9(k1) + LONG_L $10, VCPU_R10(k1) + LONG_L $11, VCPU_R11(k1) + LONG_L $12, VCPU_R12(k1) + LONG_L $13, VCPU_R13(k1) + LONG_L $14, VCPU_R14(k1) + LONG_L $15, VCPU_R15(k1) + LONG_L $16, VCPU_R16(k1) + LONG_L $17, VCPU_R17(k1) + LONG_L $18, VCPU_R18(k1) + LONG_L $19, VCPU_R19(k1) + 
LONG_L $20, VCPU_R20(k1) + LONG_L $21, VCPU_R21(k1) + LONG_L $22, VCPU_R22(k1) + LONG_L $23, VCPU_R23(k1) + LONG_L $24, VCPU_R24(k1) + LONG_L $25, VCPU_R25(k1) + + /* k0/k1 loaded up later */ + + LONG_L $28, VCPU_R28(k1) + LONG_L $29, VCPU_R29(k1) + LONG_L $30, VCPU_R30(k1) + LONG_L $31, VCPU_R31(k1) + + /* Restore hi/lo */ + LONG_L k0, VCPU_LO(k1) + mtlo k0 + + LONG_L k0, VCPU_HI(k1) + mthi k0 + +FEXPORT(__kvm_mips_load_k0k1) + /* Restore the guest's k0/k1 registers */ + LONG_L k0, VCPU_R26(k1) + LONG_L k1, VCPU_R27(k1) + + /* Jump to guest */ + eret + .set pop + +VECTOR(MIPSX(exception), unknown) +/* + * Find out what mode we came from and jump to the proper handler. + */ + .set push + .set noat + .set noreorder + mtc0 k0, CP0_ERROREPC #01: Save guest k0 + ehb #02: + + mfc0 k0, CP0_EBASE #02: Get EBASE + srl k0, k0, 10 #03: Get rid of CPUNum + sll k0, k0, 10 #04 + LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 + addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000 + j k0 #07: jump to the function + nop #08: branch delay slot + .set push +VECTOR_END(MIPSX(exceptionEnd)) +.end MIPSX(exception) + +/* + * Generic Guest exception handler. We end up here when the guest + * does something that causes a trap to kernel mode. + * + */ +NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) + .set push + .set noat + .set noreorder + + /* Get the VCPU pointer from DDTATA_LO */ + mfc0 k1, CP0_DDATA_LO + addiu k1, k1, VCPU_HOST_ARCH + + /* Start saving Guest context to VCPU */ + LONG_S $0, VCPU_R0(k1) + LONG_S $1, VCPU_R1(k1) + LONG_S $2, VCPU_R2(k1) + LONG_S $3, VCPU_R3(k1) + LONG_S $4, VCPU_R4(k1) + LONG_S $5, VCPU_R5(k1) + LONG_S $6, VCPU_R6(k1) + LONG_S $7, VCPU_R7(k1) + LONG_S $8, VCPU_R8(k1) + LONG_S $9, VCPU_R9(k1) + LONG_S $10, VCPU_R10(k1) + LONG_S $11, VCPU_R11(k1) + LONG_S $12, VCPU_R12(k1) + LONG_S $13, VCPU_R13(k1) + LONG_S $14, VCPU_R14(k1) + LONG_S $15, VCPU_R15(k1) + LONG_S $16, VCPU_R16(k1) + LONG_S $17,VCPU_R17(k1) + LONG_S $18, VCPU_R18(k1) + LONG_S $19, VCPU_R19(k1) + LONG_S $20, VCPU_R20(k1) + LONG_S $21, VCPU_R21(k1) + LONG_S $22, VCPU_R22(k1) + LONG_S $23, VCPU_R23(k1) + LONG_S $24, VCPU_R24(k1) + LONG_S $25, VCPU_R25(k1) + + /* Guest k0/k1 saved later */ + + LONG_S $28, VCPU_R28(k1) + LONG_S $29, VCPU_R29(k1) + LONG_S $30, VCPU_R30(k1) + LONG_S $31, VCPU_R31(k1) + + /* We need to save hi/lo and restore them on + * the way out + */ + mfhi t0 + LONG_S t0, VCPU_HI(k1) + + mflo t0 + LONG_S t0, VCPU_LO(k1) + + /* Finally save guest k0/k1 to VCPU */ + mfc0 t0, CP0_ERROREPC + LONG_S t0, VCPU_R26(k1) + + /* Get GUEST k1 and save it in VCPU */ + la t1, ~0x2ff + mfc0 t0, CP0_EBASE + and t0, t0, t1 + LONG_L t0, 0x3000(t0) + LONG_S t0, VCPU_R27(k1) + + /* Now that context has been saved, we can use other registers */ + + /* Restore vcpu */ + mfc0 a1, CP0_DDATA_LO + move s1, a1 + + /* Restore run (vcpu->run) */ + LONG_L a0, VCPU_RUN(a1) + /* Save pointer to run in s0, will be saved by the compiler */ + move s0, a0 + + + /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */ + mfc0 k0,CP0_EPC + LONG_S k0, VCPU_PC(k1) + + mfc0 k0, CP0_BADVADDR + LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1) + + mfc0 k0, CP0_CAUSE + LONG_S k0, VCPU_HOST_CP0_CAUSE(k1) + + mfc0 k0, CP0_ENTRYHI + LONG_S k0, VCPU_HOST_ENTRYHI(k1) + + /* Now restore the host state just enough to run the handlers */ + + /* Swtich EBASE to the one used by Linux */ + /* load up the host EBASE */ + mfc0 v0, CP0_STATUS + + .set at + or k0, v0, ST0_BEV + .set noat + + mtc0 k0, CP0_STATUS + ehb + 
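+	/*
+	 * With Status.BEV set, exceptions are taken through the fixed
+	 * bootstrap vectors, so it is safe to switch CP0_EBASE from the
+	 * guest value back to the host value below; BEV is cleared again
+	 * once the host EBASE is in place.
+	 */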
+ LONG_L k0, VCPU_HOST_EBASE(k1) + mtc0 k0,CP0_EBASE + + + /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ + .set at + and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) + or v0, v0, ST0_CU0 + .set noat + mtc0 v0, CP0_STATUS + ehb + + /* Load up host GP */ + LONG_L gp, VCPU_HOST_GP(k1) + + /* Need a stack before we can jump to "C" */ + LONG_L sp, VCPU_HOST_STACK(k1) + + /* Saved host state */ + addiu sp,sp, -PT_SIZE + + /* XXXKYMA do we need to load the host ASID, maybe not because the + * kernel entries are marked GLOBAL, need to verify + */ + + /* Restore host DDATA_LO */ + LONG_L k0, PT_HOST_USERLOCAL(sp) + mtc0 k0, CP0_DDATA_LO + + /* Restore RDHWR access */ + la k0, 0x2000000F + mtc0 k0, CP0_HWRENA + + /* Jump to handler */ +FEXPORT(__kvm_mips_jump_to_handler) + /* XXXKYMA: not sure if this is safe, how large is the stack?? */ + /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */ + la t9,kvm_mips_handle_exit + jalr.hb t9 + addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */ + + /* Return from handler Make sure interrupts are disabled */ + di + ehb + + /* XXXKYMA: k0/k1 could have been blown away if we processed an exception + * while we were handling the exception from the guest, reload k1 + */ + move k1, s1 + addiu k1, k1, VCPU_HOST_ARCH + + /* Check return value, should tell us if we are returning to the host (handle I/O etc) + * or resuming the guest + */ + andi t0, v0, RESUME_HOST + bnez t0, __kvm_mips_return_to_host + nop + +__kvm_mips_return_to_guest: + /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */ + mtc0 s1, CP0_DDATA_LO + + /* Load up the Guest EBASE to minimize the window where BEV is set */ + LONG_L t0, VCPU_GUEST_EBASE(k1) + + /* Switch EBASE back to the one used by KVM */ + mfc0 v1, CP0_STATUS + .set at + or k0, v1, ST0_BEV + .set noat + mtc0 k0, CP0_STATUS + ehb + mtc0 t0,CP0_EBASE + + /* Setup status register for running guest in UM */ + .set at + or v1, v1, (ST0_EXL | KSU_USER | ST0_IE) + and v1, v1, ~ST0_CU0 + .set noat + mtc0 v1, CP0_STATUS + ehb + + + /* Set Guest EPC */ + LONG_L t0, VCPU_PC(k1) + mtc0 t0, CP0_EPC + + /* Set the ASID for the Guest Kernel */ + sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ + /* addresses shift to 0x80000000 */ + bltz t0, 1f /* If kernel */ + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ +1: + /* t1: contains the base of the ASID array, need to get the cpu id */ + LONG_L t2, TI_CPU($28) /* smp_processor_id */ + sll t2, t2, 2 /* x4 */ + addu t3, t1, t2 + LONG_L k0, (t3) + andi k0, k0, 0xff + mtc0 k0,CP0_ENTRYHI + ehb + + /* Disable RDHWR access */ + mtc0 zero, CP0_HWRENA + + /* load the guest context from VCPU and return */ + LONG_L $0, VCPU_R0(k1) + LONG_L $1, VCPU_R1(k1) + LONG_L $2, VCPU_R2(k1) + LONG_L $3, VCPU_R3(k1) + LONG_L $4, VCPU_R4(k1) + LONG_L $5, VCPU_R5(k1) + LONG_L $6, VCPU_R6(k1) + LONG_L $7, VCPU_R7(k1) + LONG_L $8, VCPU_R8(k1) + LONG_L $9, VCPU_R9(k1) + LONG_L $10, VCPU_R10(k1) + LONG_L $11, VCPU_R11(k1) + LONG_L $12, VCPU_R12(k1) + LONG_L $13, VCPU_R13(k1) + LONG_L $14, VCPU_R14(k1) + LONG_L $15, VCPU_R15(k1) + LONG_L $16, VCPU_R16(k1) + LONG_L $17, VCPU_R17(k1) + LONG_L $18, VCPU_R18(k1) + LONG_L $19, VCPU_R19(k1) + LONG_L $20, VCPU_R20(k1) + LONG_L $21, VCPU_R21(k1) + LONG_L $22, VCPU_R22(k1) + LONG_L $23, VCPU_R23(k1) + LONG_L $24, VCPU_R24(k1) + LONG_L $25, VCPU_R25(k1) + + /* $/k1 loaded later */ + LONG_L $28, VCPU_R28(k1) + LONG_L $29, VCPU_R29(k1) + LONG_L $30, VCPU_R30(k1) + LONG_L 
$31, VCPU_R31(k1) + +FEXPORT(__kvm_mips_skip_guest_restore) + LONG_L k0, VCPU_HI(k1) + mthi k0 + + LONG_L k0, VCPU_LO(k1) + mtlo k0 + + LONG_L k0, VCPU_R26(k1) + LONG_L k1, VCPU_R27(k1) + + eret + +__kvm_mips_return_to_host: + /* EBASE is already pointing to Linux */ + LONG_L k1, VCPU_HOST_STACK(k1) + addiu k1,k1, -PT_SIZE + + /* Restore host DDATA_LO */ + LONG_L k0, PT_HOST_USERLOCAL(k1) + mtc0 k0, CP0_DDATA_LO + + /* Restore host ASID */ + LONG_L k0, PT_HOST_ASID(sp) + andi k0, 0xff + mtc0 k0,CP0_ENTRYHI + ehb + + /* Load context saved on the host stack */ + LONG_L $0, PT_R0(k1) + LONG_L $1, PT_R1(k1) + + /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */ + sra k0, v0, 2 + move $2, k0 + + LONG_L $3, PT_R3(k1) + LONG_L $4, PT_R4(k1) + LONG_L $5, PT_R5(k1) + LONG_L $6, PT_R6(k1) + LONG_L $7, PT_R7(k1) + LONG_L $8, PT_R8(k1) + LONG_L $9, PT_R9(k1) + LONG_L $10, PT_R10(k1) + LONG_L $11, PT_R11(k1) + LONG_L $12, PT_R12(k1) + LONG_L $13, PT_R13(k1) + LONG_L $14, PT_R14(k1) + LONG_L $15, PT_R15(k1) + LONG_L $16, PT_R16(k1) + LONG_L $17, PT_R17(k1) + LONG_L $18, PT_R18(k1) + LONG_L $19, PT_R19(k1) + LONG_L $20, PT_R20(k1) + LONG_L $21, PT_R21(k1) + LONG_L $22, PT_R22(k1) + LONG_L $23, PT_R23(k1) + LONG_L $24, PT_R24(k1) + LONG_L $25, PT_R25(k1) + + /* Host k0/k1 were not saved */ + + LONG_L $28, PT_R28(k1) + LONG_L $29, PT_R29(k1) + LONG_L $30, PT_R30(k1) + + LONG_L k0, PT_HI(k1) + mthi k0 + + LONG_L k0, PT_LO(k1) + mtlo k0 + + /* Restore RDHWR access */ + la k0, 0x2000000F + mtc0 k0, CP0_HWRENA + + + /* Restore RA, which is the address we will return to */ + LONG_L ra, PT_R31(k1) + j ra + nop + + .set pop +VECTOR_END(MIPSX(GuestExceptionEnd)) +.end MIPSX(GuestException) + +MIPSX(exceptions): + #### + ##### The exception handlers. + ##### + .word _C_LABEL(MIPSX(GuestException)) # 0 + .word _C_LABEL(MIPSX(GuestException)) # 1 + .word _C_LABEL(MIPSX(GuestException)) # 2 + .word _C_LABEL(MIPSX(GuestException)) # 3 + .word _C_LABEL(MIPSX(GuestException)) # 4 + .word _C_LABEL(MIPSX(GuestException)) # 5 + .word _C_LABEL(MIPSX(GuestException)) # 6 + .word _C_LABEL(MIPSX(GuestException)) # 7 + .word _C_LABEL(MIPSX(GuestException)) # 8 + .word _C_LABEL(MIPSX(GuestException)) # 9 + .word _C_LABEL(MIPSX(GuestException)) # 10 + .word _C_LABEL(MIPSX(GuestException)) # 11 + .word _C_LABEL(MIPSX(GuestException)) # 12 + .word _C_LABEL(MIPSX(GuestException)) # 13 + .word _C_LABEL(MIPSX(GuestException)) # 14 + .word _C_LABEL(MIPSX(GuestException)) # 15 + .word _C_LABEL(MIPSX(GuestException)) # 16 + .word _C_LABEL(MIPSX(GuestException)) # 17 + .word _C_LABEL(MIPSX(GuestException)) # 18 + .word _C_LABEL(MIPSX(GuestException)) # 19 + .word _C_LABEL(MIPSX(GuestException)) # 20 + .word _C_LABEL(MIPSX(GuestException)) # 21 + .word _C_LABEL(MIPSX(GuestException)) # 22 + .word _C_LABEL(MIPSX(GuestException)) # 23 + .word _C_LABEL(MIPSX(GuestException)) # 24 + .word _C_LABEL(MIPSX(GuestException)) # 25 + .word _C_LABEL(MIPSX(GuestException)) # 26 + .word _C_LABEL(MIPSX(GuestException)) # 27 + .word _C_LABEL(MIPSX(GuestException)) # 28 + .word _C_LABEL(MIPSX(GuestException)) # 29 + .word _C_LABEL(MIPSX(GuestException)) # 30 + .word _C_LABEL(MIPSX(GuestException)) # 31 + + +/* This routine makes changes to the instruction stream effective to the hardware. + * It should be called after the instruction stream is written. + * On return, the new instructions are effective. 
+ * Inputs: + * a0 = Start address of new instruction stream + * a1 = Size, in bytes, of new instruction stream + */ + +#define HW_SYNCI_Step $1 +LEAF(MIPSX(SyncICache)) + .set push + .set mips32r2 + beq a1, zero, 20f + nop + addu a1, a0, a1 + rdhwr v0, HW_SYNCI_Step + beq v0, zero, 20f + nop + +10: + synci 0(a0) + addu a0, a0, v0 + sltu v1, a0, a1 + bne v1, zero, 10b + nop + sync +20: + jr.hb ra + nop + .set pop +END(MIPSX(SyncICache)) diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c new file mode 100644 index 000000000000..e0dad0289797 --- /dev/null +++ b/arch/mips/kvm/kvm_mips.c @@ -0,0 +1,958 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: MIPS specific KVM APIs + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/bootmem.h> +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <asm/mmu_context.h> + +#include <linux/kvm_host.h> + +#include "kvm_mips_int.h" +#include "kvm_mips_comm.h" + +#define CREATE_TRACE_POINTS +#include "trace.h" + +#ifndef VECTORSPACING +#define VECTORSPACING 0x100 /* for EI/VI mode */ +#endif + +#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU +struct kvm_stats_debugfs_item debugfs_entries[] = { + { "wait", VCPU_STAT(wait_exits) }, + { "cache", VCPU_STAT(cache_exits) }, + { "signal", VCPU_STAT(signal_exits) }, + { "interrupt", VCPU_STAT(int_exits) }, + { "cop_unsuable", VCPU_STAT(cop_unusable_exits) }, + { "tlbmod", VCPU_STAT(tlbmod_exits) }, + { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) }, + { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) }, + { "addrerr_st", VCPU_STAT(addrerr_st_exits) }, + { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) }, + { "syscall", VCPU_STAT(syscall_exits) }, + { "resvd_inst", VCPU_STAT(resvd_inst_exits) }, + { "break_inst", VCPU_STAT(break_inst_exits) }, + { "flush_dcache", VCPU_STAT(flush_dcache_exits) }, + { "halt_wakeup", VCPU_STAT(halt_wakeup) }, + {NULL} +}; + +static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) +{ + int i; + for_each_possible_cpu(i) { + vcpu->arch.guest_kernel_asid[i] = 0; + vcpu->arch.guest_user_asid[i] = 0; + } + return 0; +} + +gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) +{ + return gfn; +} + +/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we + * are "runnable" if interrupts are pending + */ +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return !!(vcpu->arch.pending_exceptions); +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return 1; +} + +int kvm_arch_hardware_enable(void *garbage) +{ + return 0; +} + +void kvm_arch_hardware_disable(void *garbage) +{ +} + +int kvm_arch_hardware_setup(void) +{ + return 0; +} + +void kvm_arch_hardware_unsetup(void) +{ +} + +void kvm_arch_check_processor_compat(void *rtn) +{ + int *r = (int *)rtn; + *r = 0; + return; +} + +static void kvm_mips_init_tlbs(struct kvm *kvm) +{ + unsigned long wired; + + /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */ + wired = read_c0_wired(); + write_c0_wired(wired + 1); + mtc0_tlbw_hazard(); + kvm->arch.commpage_tlb = wired; + + kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(), + kvm->arch.commpage_tlb); +} + +static void 
kvm_mips_init_vm_percpu(void *arg) +{ + struct kvm *kvm = (struct kvm *)arg; + + kvm_mips_init_tlbs(kvm); + kvm_mips_callbacks->vm_init(kvm); + +} + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + if (atomic_inc_return(&kvm_mips_instance) == 1) { + kvm_info("%s: 1st KVM instance, setup host TLB parameters\n", + __func__); + on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); + } + + + return 0; +} + +void kvm_mips_free_vcpus(struct kvm *kvm) +{ + unsigned int i; + struct kvm_vcpu *vcpu; + + /* Put the pages we reserved for the guest pmap */ + for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { + if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) + kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); + } + + if (kvm->arch.guest_pmap) + kfree(kvm->arch.guest_pmap); + + kvm_for_each_vcpu(i, vcpu, kvm) { + kvm_arch_vcpu_free(vcpu); + } + + mutex_lock(&kvm->lock); + + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) + kvm->vcpus[i] = NULL; + + atomic_set(&kvm->online_vcpus, 0); + + mutex_unlock(&kvm->lock); +} + +void kvm_arch_sync_events(struct kvm *kvm) +{ +} + +static void kvm_mips_uninit_tlbs(void *arg) +{ + /* Restore wired count */ + write_c0_wired(0); + mtc0_tlbw_hazard(); + /* Clear out all the TLBs */ + kvm_local_flush_tlb_all(); +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + kvm_mips_free_vcpus(kvm); + + /* If this is the last instance, restore wired count */ + if (atomic_dec_return(&kvm_mips_instance) == 0) { + kvm_info("%s: last KVM instance, restoring TLB parameters\n", + __func__); + on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1); + } +} + +long +kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} + +void kvm_arch_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ +} + +int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +{ + return 0; +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) +{ + return 0; +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + enum kvm_mr_change change) +{ + unsigned long npages = 0; + int i, err = 0; + + kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", + __func__, kvm, mem->slot, mem->guest_phys_addr, + mem->memory_size, mem->userspace_addr); + + /* Setup Guest PMAP table */ + if (!kvm->arch.guest_pmap) { + if (mem->slot == 0) + npages = mem->memory_size >> PAGE_SHIFT; + + if (npages) { + kvm->arch.guest_pmap_npages = npages; + kvm->arch.guest_pmap = + kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); + + if (!kvm->arch.guest_pmap) { + kvm_err("Failed to allocate guest PMAP"); + err = -ENOMEM; + goto out; + } + + kvm_info + ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", + npages, kvm->arch.guest_pmap); + + /* Now setup the page table */ + for (i = 0; i < npages; i++) { + kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; + } + } + } +out: + return; +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ +} + +void kvm_arch_flush_shadow(struct kvm *kvm) +{ +} + +struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) +{ + extern char mips32_exception[], mips32_exceptionEnd[]; + extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; + int err, size, offset; + void *gebase; + int i; + + struct 
kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); + + if (!vcpu) { + err = -ENOMEM; + goto out; + } + + err = kvm_vcpu_init(vcpu, kvm, id); + + if (err) + goto out_free_cpu; + + kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); + + /* Allocate space for host mode exception handlers that handle + * guest mode exits + */ + if (cpu_has_veic || cpu_has_vint) { + size = 0x200 + VECTORSPACING * 64; + } else { + size = 0x200; + } + + /* Save Linux EBASE */ + vcpu->arch.host_ebase = (void *)read_c0_ebase(); + + gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); + + if (!gebase) { + err = -ENOMEM; + goto out_free_cpu; + } + kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n", + ALIGN(size, PAGE_SIZE), gebase); + + /* Save new ebase */ + vcpu->arch.guest_ebase = gebase; + + /* Copy L1 Guest Exception handler to correct offset */ + + /* TLB Refill, EXL = 0 */ + memcpy(gebase, mips32_exception, + mips32_exceptionEnd - mips32_exception); + + /* General Exception Entry point */ + memcpy(gebase + 0x180, mips32_exception, + mips32_exceptionEnd - mips32_exception); + + /* For vectored interrupts poke the exception code @ all offsets 0-7 */ + for (i = 0; i < 8; i++) { + kvm_debug("L1 Vectored handler @ %p\n", + gebase + 0x200 + (i * VECTORSPACING)); + memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception, + mips32_exceptionEnd - mips32_exception); + } + + /* General handler, relocate to unmapped space for sanity's sake */ + offset = 0x2000; + kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n", + gebase + offset, + mips32_GuestExceptionEnd - mips32_GuestException); + + memcpy(gebase + offset, mips32_GuestException, + mips32_GuestExceptionEnd - mips32_GuestException); + + /* Invalidate the icache for these ranges */ + mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE)); + + /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */ + vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); + + if (!vcpu->arch.kseg0_commpage) { + err = -ENOMEM; + goto out_free_gebase; + } + + kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); + kvm_mips_commpage_init(vcpu); + + /* Init */ + vcpu->arch.last_sched_cpu = -1; + + /* Start off the timer */ + kvm_mips_emulate_count(vcpu); + + return vcpu; + +out_free_gebase: + kfree(gebase); + +out_free_cpu: + kfree(vcpu); + +out: + return ERR_PTR(err); +} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +{ + hrtimer_cancel(&vcpu->arch.comparecount_timer); + + kvm_vcpu_uninit(vcpu); + + kvm_mips_dump_stats(vcpu); + + if (vcpu->arch.guest_ebase) + kfree(vcpu->arch.guest_ebase); + + if (vcpu->arch.kseg0_commpage) + kfree(vcpu->arch.kseg0_commpage); + +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_arch_vcpu_free(vcpu); +} + +int +kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + int r = 0; + sigset_t sigsaved; + + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + + if (vcpu->mmio_needed) { + if (!vcpu->mmio_is_write) + kvm_mips_complete_mmio_load(vcpu, run); + vcpu->mmio_needed = 0; + } + + /* Check if we have any exceptions/interrupts pending */ + kvm_mips_deliver_interrupts(vcpu, + kvm_read_c0_guest_cause(vcpu->arch.cop0)); + + local_irq_disable(); + kvm_guest_enter(); + + r = __kvm_mips_vcpu_run(run, vcpu); + + kvm_guest_exit(); + local_irq_enable(); 
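+	/*
+	 * kvm_mips_handle_exit() produces a value of the form
+	 * (errcode << 2 | RESUME_FLAG_HOST); the low-level return path
+	 * (__kvm_mips_return_to_host) strips the flag bits with
+	 * "sra v0, 2", so r here is the plain return code for this ioctl.
+	 */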
+ + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &sigsaved, NULL); + + return r; +} + +int +kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) +{ + int intr = (int)irq->irq; + struct kvm_vcpu *dvcpu = NULL; + + if (intr == 3 || intr == -3 || intr == 4 || intr == -4) + kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, + (int)intr); + + if (irq->cpu == -1) + dvcpu = vcpu; + else + dvcpu = vcpu->kvm->vcpus[irq->cpu]; + + if (intr == 2 || intr == 3 || intr == 4) { + kvm_mips_callbacks->queue_io_int(dvcpu, irq); + + } else if (intr == -2 || intr == -3 || intr == -4) { + kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); + } else { + kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, + irq->cpu, irq->irq); + return -EINVAL; + } + + dvcpu->arch.wait = 0; + + if (waitqueue_active(&dvcpu->wq)) { + wake_up_interruptible(&dvcpu->wq); + } + + return 0; +} + +int +kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -EINVAL; +} + +int +kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -EINVAL; +} + +long +kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + long r; + int intr; + + switch (ioctl) { + case KVM_NMI: + /* Treat the NMI as a CPU reset */ + r = kvm_mips_reset_vcpu(vcpu); + break; + case KVM_INTERRUPT: + { + struct kvm_mips_interrupt irq; + r = -EFAULT; + if (copy_from_user(&irq, argp, sizeof(irq))) + goto out; + + intr = (int)irq.irq; + + kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, + irq.irq); + + r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); + break; + } + default: + r = -EINVAL; + } + +out: + return r; +} + +/* + * Get (and clear) the dirty memory log for a memory slot. + */ +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) +{ + struct kvm_memory_slot *memslot; + unsigned long ga, ga_end; + int is_dirty = 0; + int r; + unsigned long n; + + mutex_lock(&kvm->slots_lock); + + r = kvm_get_dirty_log(kvm, log, &is_dirty); + if (r) + goto out; + + /* If nothing is dirty, don't bother messing with page tables. 
*/ + if (is_dirty) { + memslot = &kvm->memslots->memslots[log->slot]; + + ga = memslot->base_gfn << PAGE_SHIFT; + ga_end = ga + (memslot->npages << PAGE_SHIFT); + + printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, + ga_end); + + n = kvm_dirty_bitmap_bytes(memslot); + memset(memslot->dirty_bitmap, 0, n); + } + + r = 0; +out: + mutex_unlock(&kvm->slots_lock); + return r; + +} + +long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + long r; + + switch (ioctl) { + default: + r = -EINVAL; + } + + return r; +} + +int kvm_arch_init(void *opaque) +{ + int ret; + + if (kvm_mips_callbacks) { + kvm_err("kvm: module already exists\n"); + return -EEXIST; + } + + ret = kvm_mips_emulation_init(&kvm_mips_callbacks); + + return ret; +} + +void kvm_arch_exit(void) +{ + kvm_mips_callbacks = NULL; +} + +int +kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOTSUPP; +} + +int +kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOTSUPP; +} + +int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ + return 0; +} + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -ENOTSUPP; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -ENOTSUPP; +} + +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +int kvm_dev_ioctl_check_extension(long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_COALESCED_MMIO: + r = KVM_COALESCED_MMIO_PAGE_OFFSET; + break; + default: + r = 0; + break; + } + return r; + +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return kvm_mips_pending_timer(vcpu); +} + +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) +{ + int i; + struct mips_coproc *cop0; + + if (!vcpu) + return -1; + + printk("VCPU Register Dump:\n"); + printk("\tpc = 0x%08lx\n", vcpu->arch.pc);; + printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); + + for (i = 0; i < 32; i += 4) { + printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, + vcpu->arch.gprs[i], + vcpu->arch.gprs[i + 1], + vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); + } + printk("\thi: 0x%08lx\n", vcpu->arch.hi); + printk("\tlo: 0x%08lx\n", vcpu->arch.lo); + + cop0 = vcpu->arch.cop0; + printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n", + kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0)); + + printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 0; i < 32; i++) + vcpu->arch.gprs[i] = regs->gprs[i]; + + vcpu->arch.hi = regs->hi; + vcpu->arch.lo = regs->lo; + vcpu->arch.pc = regs->pc; + + return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 0; i < 32; i++) + regs->gprs[i] = vcpu->arch.gprs[i]; + + regs->hi = vcpu->arch.hi; + regs->lo = vcpu->arch.lo; + regs->pc = vcpu->arch.pc; + + return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); +} + +void kvm_mips_comparecount_func(unsigned long data) +{ + struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; + + kvm_mips_callbacks->queue_timer_int(vcpu); + + vcpu->arch.wait = 0; + if (waitqueue_active(&vcpu->wq)) { + wake_up_interruptible(&vcpu->wq); + } +} + +/* + * low level hrtimer wake routine. 
+ */ +enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu; + + vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); + kvm_mips_comparecount_func((unsigned long) vcpu); + hrtimer_forward_now(&vcpu->arch.comparecount_timer, + ktime_set(0, MS_TO_NS(10))); + return HRTIMER_RESTART; +} + +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) +{ + kvm_mips_callbacks->vcpu_init(vcpu); + hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; + kvm_mips_init_shadow_tlb(vcpu); + return 0; +} + +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + return; +} + +int +kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) +{ + return 0; +} + +/* Initial guest state */ +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) +{ + return kvm_mips_callbacks->vcpu_setup(vcpu); +} + +static +void kvm_mips_set_c0_status(void) +{ + uint32_t status = read_c0_status(); + + if (cpu_has_fpu) + status |= (ST0_CU1); + + if (cpu_has_dsp) + status |= (ST0_MX); + + write_c0_status(status); + ehb(); +} + +/* + * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) + */ +int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + uint32_t cause = vcpu->arch.host_cp0_cause; + uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + /* Set a default exit reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + run->ready_for_interrupt_injection = 1; + + /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */ + kvm_mips_set_c0_status(); + + local_irq_enable(); + + kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", + cause, opc, run, vcpu); + + /* Do a privilege check, if in UM most of these exit conditions end up + * causing an exception to be delivered to the Guest Kernel + */ + er = kvm_mips_check_privilege(cause, opc, run, vcpu); + if (er == EMULATE_PRIV_FAIL) { + goto skip_emul; + } else if (er == EMULATE_FAIL) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + goto skip_emul; + } + + switch (exccode) { + case T_INT: + kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc); + + ++vcpu->stat.int_exits; + trace_kvm_exit(vcpu, INT_EXITS); + + if (need_resched()) { + cond_resched(); + } + + ret = RESUME_GUEST; + break; + + case T_COP_UNUSABLE: + kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc); + + ++vcpu->stat.cop_unusable_exits; + trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); + ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); + /* XXXKYMA: Might need to return to user space */ + if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) { + ret = RESUME_HOST; + } + break; + + case T_TLB_MOD: + ++vcpu->stat.tlbmod_exits; + trace_kvm_exit(vcpu, TLBMOD_EXITS); + ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); + break; + + case T_TLB_ST_MISS: + kvm_debug + ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", + cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, + badvaddr); + + ++vcpu->stat.tlbmiss_st_exits; + trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); + ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); + break; + + case T_TLB_LD_MISS: + kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + + 
++vcpu->stat.tlbmiss_ld_exits; + trace_kvm_exit(vcpu, TLBMISS_LD_EXITS); + ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); + break; + + case T_ADDR_ERR_ST: + ++vcpu->stat.addrerr_st_exits; + trace_kvm_exit(vcpu, ADDRERR_ST_EXITS); + ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); + break; + + case T_ADDR_ERR_LD: + ++vcpu->stat.addrerr_ld_exits; + trace_kvm_exit(vcpu, ADDRERR_LD_EXITS); + ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); + break; + + case T_SYSCALL: + ++vcpu->stat.syscall_exits; + trace_kvm_exit(vcpu, SYSCALL_EXITS); + ret = kvm_mips_callbacks->handle_syscall(vcpu); + break; + + case T_RES_INST: + ++vcpu->stat.resvd_inst_exits; + trace_kvm_exit(vcpu, RESVD_INST_EXITS); + ret = kvm_mips_callbacks->handle_res_inst(vcpu); + break; + + case T_BREAK: + ++vcpu->stat.break_inst_exits; + trace_kvm_exit(vcpu, BREAK_INST_EXITS); + ret = kvm_mips_callbacks->handle_break(vcpu); + break; + + default: + kvm_err + ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", + exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, + kvm_read_c0_guest_status(vcpu->arch.cop0)); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + break; + + } + +skip_emul: + local_irq_disable(); + + if (er == EMULATE_DONE && !(ret & RESUME_HOST)) + kvm_mips_deliver_interrupts(vcpu, cause); + + if (!(ret & RESUME_HOST)) { + /* Only check for signals if not already exiting to userspace */ + if (signal_pending(current)) { + run->exit_reason = KVM_EXIT_INTR; + ret = (-EINTR << 2) | RESUME_HOST; + ++vcpu->stat.signal_exits; + trace_kvm_exit(vcpu, SIGNAL_EXITS); + } + } + + return ret; +} + +int __init kvm_mips_init(void) +{ + int ret; + + ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); + + if (ret) + return ret; + + /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs. + * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c) + * to avoid the possibility of double faulting. The issue is that the TLB code + * references routines that are part of the the KVM module, + * which are only available once the module is loaded. + */ + kvm_mips_gfn_to_pfn = gfn_to_pfn; + kvm_mips_release_pfn_clean = kvm_release_pfn_clean; + kvm_mips_is_error_pfn = is_error_pfn; + + pr_info("KVM/MIPS Initialized\n"); + return 0; +} + +void __exit kvm_mips_exit(void) +{ + kvm_exit(); + + kvm_mips_gfn_to_pfn = NULL; + kvm_mips_release_pfn_clean = NULL; + kvm_mips_is_error_pfn = NULL; + + pr_info("KVM/MIPS unloaded\n"); +} + +module_init(kvm_mips_init); +module_exit(kvm_mips_exit); + +EXPORT_TRACEPOINT_SYMBOL(kvm_exit); diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h new file mode 100644 index 000000000000..a4a8c85cc8f7 --- /dev/null +++ b/arch/mips/kvm/kvm_mips_comm.h @@ -0,0 +1,23 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: commpage: mapped into get kernel space +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#ifndef __KVM_MIPS_COMMPAGE_H__ +#define __KVM_MIPS_COMMPAGE_H__ + +struct kvm_mips_commpage { + struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */ +}; + +#define KVM_MIPS_COMM_EIDI_OFFSET 0x0 + +extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); + +#endif /* __KVM_MIPS_COMMPAGE_H__ */ diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c new file mode 100644 index 000000000000..3873b1ecc40f --- /dev/null +++ b/arch/mips/kvm/kvm_mips_commpage.c @@ -0,0 +1,37 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* commpage, currently used for Virtual COP0 registers. +* Mapped into the guest kernel @ 0x0. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/bootmem.h> +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <asm/mmu_context.h> + +#include <linux/kvm_host.h> + +#include "kvm_mips_comm.h" + +void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) +{ + struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; + memset(page, 0, sizeof(struct kvm_mips_commpage)); + + /* Specific init values for fields */ + vcpu->arch.cop0 = &page->cop0; + memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc)); + + return; +} diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c new file mode 100644 index 000000000000..96528e2d1ea6 --- /dev/null +++ b/arch/mips/kvm/kvm_mips_dyntrans.c @@ -0,0 +1,149 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Binary Patching for privileged instructions, reduces traps. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/bootmem.h> + +#include "kvm_mips_comm.h" + +#define SYNCI_TEMPLATE 0x041f0000 +#define SYNCI_BASE(x) (((x) >> 21) & 0x1f) +#define SYNCI_OFFSET ((x) & 0xffff) + +#define LW_TEMPLATE 0x8c000000 +#define CLEAR_TEMPLATE 0x00000020 +#define SW_TEMPLATE 0xac000000 + +int +kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu) +{ + int result = 0; + unsigned long kseg0_opc; + uint32_t synci_inst = 0x0; + + /* Replace the CACHE instruction, with a NOP */ + kseg0_opc = + CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa + (vcpu, (unsigned long) opc)); + memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); + mips32_SyncICache(kseg0_opc, 32); + + return result; +} + +/* + * Address based CACHE instructions are transformed into synci(s). 
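+ * SYNCI writes the D-cache line for its effective address back to memory and
+ * invalidates the corresponding I-cache line; e.g. rewriting a CACHE with
+ * base a0 (register 4) and offset 8 yields
+ * SYNCI_TEMPLATE | (4 << 21) | 8 == 0x049f0008.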
A little heavy + * for just D-cache invalidates, but avoids an expensive trap + */ +int +kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu) +{ + int result = 0; + unsigned long kseg0_opc; + uint32_t synci_inst = SYNCI_TEMPLATE, base, offset; + + base = (inst >> 21) & 0x1f; + offset = inst & 0xffff; + synci_inst |= (base << 21); + synci_inst |= offset; + + kseg0_opc = + CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa + (vcpu, (unsigned long) opc)); + memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); + mips32_SyncICache(kseg0_opc, 32); + + return result; +} + +int +kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) +{ + int32_t rt, rd, sel; + uint32_t mfc0_inst; + unsigned long kseg0_opc, flags; + + rt = (inst >> 16) & 0x1f; + rd = (inst >> 11) & 0x1f; + sel = inst & 0x7; + + if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { + mfc0_inst = CLEAR_TEMPLATE; + mfc0_inst |= ((rt & 0x1f) << 16); + } else { + mfc0_inst = LW_TEMPLATE; + mfc0_inst |= ((rt & 0x1f) << 16); + mfc0_inst |= + offsetof(struct mips_coproc, + reg[rd][sel]) + offsetof(struct kvm_mips_commpage, + cop0); + } + + if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { + kseg0_opc = + CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa + (vcpu, (unsigned long) opc)); + memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t)); + mips32_SyncICache(kseg0_opc, 32); + } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { + local_irq_save(flags); + memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t)); + mips32_SyncICache((unsigned long) opc, 32); + local_irq_restore(flags); + } else { + kvm_err("%s: Invalid address: %p\n", __func__, opc); + return -EFAULT; + } + + return 0; +} + +int +kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) +{ + int32_t rt, rd, sel; + uint32_t mtc0_inst = SW_TEMPLATE; + unsigned long kseg0_opc, flags; + + rt = (inst >> 16) & 0x1f; + rd = (inst >> 11) & 0x1f; + sel = inst & 0x7; + + mtc0_inst |= ((rt & 0x1f) << 16); + mtc0_inst |= + offsetof(struct mips_coproc, + reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0); + + if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { + kseg0_opc = + CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa + (vcpu, (unsigned long) opc)); + memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t)); + mips32_SyncICache(kseg0_opc, 32); + } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { + local_irq_save(flags); + memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t)); + mips32_SyncICache((unsigned long) opc, 32); + local_irq_restore(flags); + } else { + kvm_err("%s: Invalid address: %p\n", __func__, opc); + return -EFAULT; + } + + return 0; +} diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c new file mode 100644 index 000000000000..2b2bac9a40aa --- /dev/null +++ b/arch/mips/kvm/kvm_mips_emul.c @@ -0,0 +1,1826 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Instruction/Exception emulation +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/bootmem.h> +#include <linux/random.h> +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <asm/cpu-info.h> +#include <asm/mmu_context.h> +#include <asm/tlbflush.h> +#include <asm/inst.h> + +#undef CONFIG_MIPS_MT +#include <asm/r4kcache.h> +#define CONFIG_MIPS_MT + +#include "kvm_mips_opcode.h" +#include "kvm_mips_int.h" +#include "kvm_mips_comm.h" + +#include "trace.h" + +/* + * Compute the return address and do emulate branch simulation, if required. + * This function should be called only in branch delay slot active. + */ +unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, + unsigned long instpc) +{ + unsigned int dspcontrol; + union mips_instruction insn; + struct kvm_vcpu_arch *arch = &vcpu->arch; + long epc = instpc; + long nextpc = KVM_INVALID_INST; + + if (epc & 3) + goto unaligned; + + /* + * Read the instruction + */ + insn.word = kvm_get_inst((uint32_t *) epc, vcpu); + + if (insn.word == KVM_INVALID_INST) + return KVM_INVALID_INST; + + switch (insn.i_format.opcode) { + /* + * jr and jalr are in r_format format. + */ + case spec_op: + switch (insn.r_format.func) { + case jalr_op: + arch->gprs[insn.r_format.rd] = epc + 8; + /* Fall through */ + case jr_op: + nextpc = arch->gprs[insn.r_format.rs]; + break; + } + break; + + /* + * This group contains: + * bltz_op, bgez_op, bltzl_op, bgezl_op, + * bltzal_op, bgezal_op, bltzall_op, bgezall_op. + */ + case bcond_op: + switch (insn.i_format.rt) { + case bltz_op: + case bltzl_op: + if ((long)arch->gprs[insn.i_format.rs] < 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bgez_op: + case bgezl_op: + if ((long)arch->gprs[insn.i_format.rs] >= 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bltzal_op: + case bltzall_op: + arch->gprs[31] = epc + 8; + if ((long)arch->gprs[insn.i_format.rs] < 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bgezal_op: + case bgezall_op: + arch->gprs[31] = epc + 8; + if ((long)arch->gprs[insn.i_format.rs] >= 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + case bposge32_op: + if (!cpu_has_dsp) + goto sigill; + + dspcontrol = rddsp(0x01); + + if (dspcontrol >= 32) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + } else + epc += 8; + nextpc = epc; + break; + } + break; + + /* + * These are unconditional and in j_format. + */ + case jal_op: + arch->gprs[31] = instpc + 8; + case j_op: + epc += 4; + epc >>= 28; + epc <<= 28; + epc |= (insn.j_format.target << 2); + nextpc = epc; + break; + + /* + * These are conditional and in i_format. 
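+ * A taken branch resolves to epc + 4 + (sign-extended immediate << 2),
+ * while a not-taken branch falls through to epc + 8, past the delay slot.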
+ */ + case beq_op: + case beql_op: + if (arch->gprs[insn.i_format.rs] == + arch->gprs[insn.i_format.rt]) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bne_op: + case bnel_op: + if (arch->gprs[insn.i_format.rs] != + arch->gprs[insn.i_format.rt]) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case blez_op: /* not really i_format */ + case blezl_op: + /* rt field assumed to be zero */ + if ((long)arch->gprs[insn.i_format.rs] <= 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bgtz_op: + case bgtzl_op: + /* rt field assumed to be zero */ + if ((long)arch->gprs[insn.i_format.rs] > 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + /* + * And now the FPA/cp1 branch instructions. + */ + case cop1_op: + printk("%s: unsupported cop1_op\n", __func__); + break; + } + + return nextpc; + +unaligned: + printk("%s: unaligned epc\n", __func__); + return nextpc; + +sigill: + printk("%s: DSP branch but not DSP ASE\n", __func__); + return nextpc; +} + +enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) +{ + unsigned long branch_pc; + enum emulation_result er = EMULATE_DONE; + + if (cause & CAUSEF_BD) { + branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); + if (branch_pc == KVM_INVALID_INST) { + er = EMULATE_FAIL; + } else { + vcpu->arch.pc = branch_pc; + kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc); + } + } else + vcpu->arch.pc += 4; + + kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); + + return er; +} + +/* Everytime the compare register is written to, we need to decide when to fire + * the timer that represents timer ticks to the GUEST. + * + */ +enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + + /* If COUNT is enabled */ + if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) { + hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); + hrtimer_start(&vcpu->arch.comparecount_timer, + ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL); + } else { + hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); + } + + return er; +} + +enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + + if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { + kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, + kvm_read_c0_guest_epc(cop0)); + kvm_clear_c0_guest_status(cop0, ST0_EXL); + vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); + + } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { + kvm_clear_c0_guest_status(cop0, ST0_ERL); + vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); + } else { + printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", + vcpu->arch.pc); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + + kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, + vcpu->arch.pending_exceptions); + + ++vcpu->stat.wait_exits; + trace_kvm_exit(vcpu, WAIT_EXITS); + if (!vcpu->arch.pending_exceptions) { + vcpu->arch.wait = 1; + kvm_vcpu_block(vcpu); + + /* We we are runnable, then definitely go off to user space to check if any + * I/O interrupts are pending. 
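+ * (KVM_REQ_UNHALT is set by kvm_vcpu_block() only when the vcpu was found
+ * runnable again, not when the wait was interrupted for other reasons.)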
+ */ + if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { + clear_bit(KVM_REQ_UNHALT, &vcpu->requests); + vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; + } + } + + return er; +} + +/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch + * this, if things ever change + */ +enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_FAIL; + uint32_t pc = vcpu->arch.pc; + + printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); + return er; +} + +/* Write Guest TLB Entry @ Index */ +enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + int index = kvm_read_c0_guest_index(cop0); + enum emulation_result er = EMULATE_DONE; + struct kvm_mips_tlb *tlb = NULL; + uint32_t pc = vcpu->arch.pc; + + if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { + printk("%s: illegal index: %d\n", __func__, index); + printk + ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), + kvm_read_c0_guest_entrylo1(cop0), + kvm_read_c0_guest_pagemask(cop0)); + index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; + } + + tlb = &vcpu->arch.guest_tlb[index]; +#if 1 + /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ + kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); +#endif + + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); + tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); + tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); + + kvm_debug + ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), + kvm_read_c0_guest_pagemask(cop0)); + + return er; +} + +/* Write Guest TLB Entry @ Random Index */ +enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + struct kvm_mips_tlb *tlb = NULL; + uint32_t pc = vcpu->arch.pc; + int index; + +#if 1 + get_random_bytes(&index, sizeof(index)); + index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); +#else + index = jiffies % KVM_MIPS_GUEST_TLB_SIZE; +#endif + + if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { + printk("%s: illegal index: %d\n", __func__, index); + return EMULATE_FAIL; + } + + tlb = &vcpu->arch.guest_tlb[index]; + +#if 1 + /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ + kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); +#endif + + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); + tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); + tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); + + kvm_debug + ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), + kvm_read_c0_guest_entrylo1(cop0)); + + return er; +} + +enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + long entryhi = kvm_read_c0_guest_entryhi(cop0); + enum emulation_result er = EMULATE_DONE; + uint32_t pc = vcpu->arch.pc; + int index = -1; + + index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); + + kvm_write_c0_guest_index(cop0, 
index); + + kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, + index); + + return er; +} + +enum emulation_result +kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + int32_t rt, rd, copz, sel, co_bit, op; + uint32_t pc = vcpu->arch.pc; + unsigned long curr_pc; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) { + return er; + } + + copz = (inst >> 21) & 0x1f; + rt = (inst >> 16) & 0x1f; + rd = (inst >> 11) & 0x1f; + sel = inst & 0x7; + co_bit = (inst >> 25) & 1; + + /* Verify that the register is valid */ + if (rd > MIPS_CP0_DESAVE) { + printk("Invalid rd: %d\n", rd); + er = EMULATE_FAIL; + goto done; + } + + if (co_bit) { + op = (inst) & 0xff; + + switch (op) { + case tlbr_op: /* Read indexed TLB entry */ + er = kvm_mips_emul_tlbr(vcpu); + break; + case tlbwi_op: /* Write indexed */ + er = kvm_mips_emul_tlbwi(vcpu); + break; + case tlbwr_op: /* Write random */ + er = kvm_mips_emul_tlbwr(vcpu); + break; + case tlbp_op: /* TLB Probe */ + er = kvm_mips_emul_tlbp(vcpu); + break; + case rfe_op: + printk("!!!COP0_RFE!!!\n"); + break; + case eret_op: + er = kvm_mips_emul_eret(vcpu); + goto dont_update_pc; + break; + case wait_op: + er = kvm_mips_emul_wait(vcpu); + break; + } + } else { + switch (copz) { + case mfc_op: +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS + cop0->stat[rd][sel]++; +#endif + /* Get reg */ + if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { + /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */ + vcpu->arch.gprs[rt] = (read_c0_count() >> 2); + } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { + vcpu->arch.gprs[rt] = 0x0; +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_mfc0(inst, opc, vcpu); +#endif + } + else { + vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_mfc0(inst, opc, vcpu); +#endif + } + + kvm_debug + ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", + pc, rd, sel, rt, vcpu->arch.gprs[rt]); + + break; + + case dmfc_op: + vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; + break; + + case mtc_op: +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS + cop0->stat[rd][sel]++; +#endif + if ((rd == MIPS_CP0_TLB_INDEX) + && (vcpu->arch.gprs[rt] >= + KVM_MIPS_GUEST_TLB_SIZE)) { + printk("Invalid TLB Index: %ld", + vcpu->arch.gprs[rt]); + er = EMULATE_FAIL; + break; + } +#define C0_EBASE_CORE_MASK 0xff + if ((rd == MIPS_CP0_PRID) && (sel == 1)) { + /* Preserve CORE number */ + kvm_change_c0_guest_ebase(cop0, + ~(C0_EBASE_CORE_MASK), + vcpu->arch.gprs[rt]); + printk("MTCz, cop0->reg[EBASE]: %#lx\n", + kvm_read_c0_guest_ebase(cop0)); + } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { + uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]); + if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) + && + (ASID_MASK(kvm_read_c0_guest_entryhi(cop0)) + != nasid)) { + + kvm_debug + ("MTCz, change ASID from %#lx to %#lx\n", + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)), + ASID_MASK(vcpu->arch.gprs[rt])); + + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + } + kvm_write_c0_guest_entryhi(cop0, + vcpu->arch.gprs[rt]); + } + /* Are we writing to COUNT */ + else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { + /* Linux doesn't seem to write into COUNT, we throw an error + * if we notice a write to COUNT + */ + /*er = 
EMULATE_FAIL; */ + goto done; + } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { + kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", + pc, kvm_read_c0_guest_compare(cop0), + vcpu->arch.gprs[rt]); + + /* If we are writing to COMPARE */ + /* Clear pending timer interrupt, if any */ + kvm_mips_callbacks->dequeue_timer_int(vcpu); + kvm_write_c0_guest_compare(cop0, + vcpu->arch.gprs[rt]); + } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { + kvm_write_c0_guest_status(cop0, + vcpu->arch.gprs[rt]); + /* Make sure that CU1 and NMI bits are never set */ + kvm_clear_c0_guest_status(cop0, + (ST0_CU1 | ST0_NMI)); + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_mtc0(inst, opc, vcpu); +#endif + } else { + cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_mtc0(inst, opc, vcpu); +#endif + } + + kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, + rd, sel, cop0->reg[rd][sel]); + break; + + case dmtc_op: + printk + ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", + vcpu->arch.pc, rt, rd, sel); + er = EMULATE_FAIL; + break; + + case mfmcz_op: +#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS + cop0->stat[MIPS_CP0_STATUS][0]++; +#endif + if (rt != 0) { + vcpu->arch.gprs[rt] = + kvm_read_c0_guest_status(cop0); + } + /* EI */ + if (inst & 0x20) { + kvm_debug("[%#lx] mfmcz_op: EI\n", + vcpu->arch.pc); + kvm_set_c0_guest_status(cop0, ST0_IE); + } else { + kvm_debug("[%#lx] mfmcz_op: DI\n", + vcpu->arch.pc); + kvm_clear_c0_guest_status(cop0, ST0_IE); + } + + break; + + case wrpgpr_op: + { + uint32_t css = + cop0->reg[MIPS_CP0_STATUS][2] & 0xf; + uint32_t pss = + (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; + /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */ + if (css || pss) { + er = EMULATE_FAIL; + break; + } + kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, + vcpu->arch.gprs[rt]); + vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; + } + break; + default: + printk + ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", + vcpu->arch.pc, copz); + er = EMULATE_FAIL; + break; + } + } + +done: + /* + * Rollback PC only if emulation was unsuccessful + */ + if (er == EMULATE_FAIL) { + vcpu->arch.pc = curr_pc; + } + +dont_update_pc: + /* + * This is for special instructions whose emulation + * updates the PC, so do not overwrite the PC under + * any circumstances + */ + + return er; +} + +enum emulation_result +kvm_mips_emulate_store(uint32_t inst, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DO_MMIO; + int32_t op, base, rt, offset; + uint32_t bytes; + void *data = run->mmio.data; + unsigned long curr_pc; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + + rt = (inst >> 16) & 0x1f; + base = (inst >> 21) & 0x1f; + offset = inst & 0xffff; + op = (inst >> 26) & 0x3f; + + switch (op) { + case sb_op: + bytes = 1; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 
+ host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + run->mmio.len = bytes; + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + *(u8 *) data = vcpu->arch.gprs[rt]; + kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], + *(uint8_t *) data); + + break; + + case sw_op: + bytes = 4; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. + host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + *(uint32_t *) data = vcpu->arch.gprs[rt]; + + kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(uint32_t *) data); + break; + + case sh_op: + bytes = 2; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. + host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + *(uint16_t *) data = vcpu->arch.gprs[rt]; + + kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(uint32_t *) data); + break; + + default: + printk("Store not yet supported"); + er = EMULATE_FAIL; + break; + } + + /* + * Rollback PC if emulation was unsuccessful + */ + if (er == EMULATE_FAIL) { + vcpu->arch.pc = curr_pc; + } + + return er; +} + +enum emulation_result +kvm_mips_emulate_load(uint32_t inst, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DO_MMIO; + int32_t op, base, rt, offset; + uint32_t bytes; + + rt = (inst >> 16) & 0x1f; + base = (inst >> 21) & 0x1f; + offset = inst & 0xffff; + op = (inst >> 26) & 0x3f; + + vcpu->arch.pending_load_cause = cause; + vcpu->arch.io_gpr = rt; + + switch (op) { + case lw_op: + bytes = 4; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + er = EMULATE_FAIL; + break; + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. + host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 0; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 0; + break; + + case lh_op: + case lhu_op: + bytes = 2; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + er = EMULATE_FAIL; + break; + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 
+ host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 0; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 0; + + if (op == lh_op) + vcpu->mmio_needed = 2; + else + vcpu->mmio_needed = 1; + + break; + + case lbu_op: + case lb_op: + bytes = 1; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + er = EMULATE_FAIL; + break; + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. + host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 0; + vcpu->mmio_is_write = 0; + + if (op == lb_op) + vcpu->mmio_needed = 2; + else + vcpu->mmio_needed = 1; + + break; + + default: + printk("Load not yet supported"); + er = EMULATE_FAIL; + break; + } + + return er; +} + +int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) +{ + unsigned long offset = (va & ~PAGE_MASK); + struct kvm *kvm = vcpu->kvm; + unsigned long pa; + gfn_t gfn; + pfn_t pfn; + + gfn = va >> PAGE_SHIFT; + + if (gfn >= kvm->arch.guest_pmap_npages) { + printk("%s: Invalid gfn: %#llx\n", __func__, gfn); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + return -1; + } + pfn = kvm->arch.guest_pmap[gfn]; + pa = (pfn << PAGE_SHIFT) | offset; + + printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa)); + + mips32_SyncICache(CKSEG0ADDR(pa), 32); + return 0; +} + +#define MIPS_CACHE_OP_INDEX_INV 0x0 +#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1 +#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2 +#define MIPS_CACHE_OP_IMP 0x3 +#define MIPS_CACHE_OP_HIT_INV 0x4 +#define MIPS_CACHE_OP_FILL_WB_INV 0x5 +#define MIPS_CACHE_OP_HIT_HB 0x6 +#define MIPS_CACHE_OP_FETCH_LOCK 0x7 + +#define MIPS_CACHE_ICACHE 0x0 +#define MIPS_CACHE_DCACHE 0x1 +#define MIPS_CACHE_SEC 0x3 + +enum emulation_result +kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + extern void (*r4k_blast_dcache) (void); + extern void (*r4k_blast_icache) (void); + enum emulation_result er = EMULATE_DONE; + int32_t offset, cache, op_inst, op, base; + struct kvm_vcpu_arch *arch = &vcpu->arch; + unsigned long va; + unsigned long curr_pc; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + + base = (inst >> 21) & 0x1f; + op_inst = (inst >> 16) & 0x1f; + offset = inst & 0xffff; + cache = (inst >> 16) & 0x3; + op = (inst >> 18) & 0x7; + + va = arch->gprs[base] + offset; + + kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + cache, op, base, arch->gprs[base], offset); + + /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate + * the caches entirely by stepping through all the ways/indexes + */ + if (op == MIPS_CACHE_OP_INDEX_INV) { + kvm_debug + ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, + arch->gprs[base], offset); + + if (cache == MIPS_CACHE_DCACHE) + r4k_blast_dcache(); + else if (cache == MIPS_CACHE_ICACHE) + r4k_blast_icache(); + else { + printk("%s: unsupported CACHE INDEX operation\n", + __func__); + return EMULATE_FAIL; + } + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_cache_index(inst, opc, vcpu); 
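The field extraction at the top of kvm_mips_emulate_cache() above can be exercised on its own; in this sketch the instruction word is an arbitrary example chosen to encode "cache 0x15, 0(a0)", i.e. Hit_Writeback_Inv on the D-cache:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t inst = 0xbc950000;	/* cache 0x15, 0(a0) */
	int32_t base   = (inst >> 21) & 0x1f;
	int32_t cache  = (inst >> 16) & 0x3;
	int32_t op     = (inst >> 18) & 0x7;
	int32_t offset = inst & 0xffff;

	assert(base == 4);	/* a0 */
	assert(cache == 1);	/* MIPS_CACHE_DCACHE */
	assert(op == 5);	/* MIPS_CACHE_OP_FILL_WB_INV */
	assert(offset == 0);
	return 0;
}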
+#endif + goto done; + } + + preempt_disable(); + if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { + + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) { + kvm_mips_handle_kseg0_tlb_fault(va, vcpu); + } + } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || + KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { + int index; + + /* If an entry already exists then skip */ + if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) { + goto skip_fault; + } + + /* If address not in the guest TLB, then give the guest a fault, the + * resulting handler will do the right thing + */ + index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | + ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); + + if (index < 0) { + vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); + vcpu->arch.host_cp0_badvaddr = va; + er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, + vcpu); + preempt_enable(); + goto dont_update_pc; + } else { + struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; + /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ + if (!TLB_IS_VALID(*tlb, va)) { + er = kvm_mips_emulate_tlbinv_ld(cause, NULL, + run, vcpu); + preempt_enable(); + goto dont_update_pc; + } else { + /* We fault an entry from the guest tlb to the shadow host TLB */ + kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, + NULL, + NULL); + } + } + } else { + printk + ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + cache, op, base, arch->gprs[base], offset); + er = EMULATE_FAIL; + preempt_enable(); + goto dont_update_pc; + + } + +skip_fault: + /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ + if (cache == MIPS_CACHE_DCACHE + && (op == MIPS_CACHE_OP_FILL_WB_INV + || op == MIPS_CACHE_OP_HIT_INV)) { + flush_dcache_line(va); + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */ + kvm_mips_trans_cache_va(inst, opc, vcpu); +#endif + } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { + flush_dcache_line(va); + flush_icache_line(va); + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + /* Replace the CACHE instruction, with a SYNCI */ + kvm_mips_trans_cache_va(inst, opc, vcpu); +#endif + } else { + printk + ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + cache, op, base, arch->gprs[base], offset); + er = EMULATE_FAIL; + preempt_enable(); + goto dont_update_pc; + } + + preempt_enable(); + + dont_update_pc: + /* + * Rollback PC + */ + vcpu->arch.pc = curr_pc; + done: + return er; +} + +enum emulation_result +kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + uint32_t inst; + + /* + * Fetch the instruction. 
+ */ + if (cause & CAUSEF_BD) { + opc += 1; + } + + inst = kvm_get_inst(opc, vcpu); + + switch (((union mips_instruction)inst).r_format.opcode) { + case cop0_op: + er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); + break; + case sb_op: + case sh_op: + case sw_op: + er = kvm_mips_emulate_store(inst, cause, run, vcpu); + break; + case lb_op: + case lbu_op: + case lhu_op: + case lh_op: + case lw_op: + er = kvm_mips_emulate_load(inst, cause, run, vcpu); + break; + + case cache_op: + ++vcpu->stat.cache_exits; + trace_kvm_exit(vcpu, CACHE_EXITS); + er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); + break; + + default: + printk("Instruction emulation not supported (%p/%#x)\n", opc, + inst); + kvm_arch_vcpu_dump_regs(vcpu); + er = EMULATE_FAIL; + break; + } + + return er; +} + +enum emulation_result +kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_SYSCALL << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + printk("Trying to deliver SYSCALL when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", + arch->pc); + + /* set pc to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x0; + + } else { + kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", + arch->pc); + + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TLB_LD_MISS << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? 
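+ * Note that a refill taken with EXL clear is vectored to the guest refill
+ * handler at KVM_GUEST_KSEG0 + 0x0 above, while the other cases use the
+ * general exception vector at + 0x180, matching the MIPS exception model.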
*/ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long entryhi = + (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", + arch->pc); + + /* set pc to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", + arch->pc); + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TLB_LD_MISS << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? */ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", + arch->pc); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x0; + } else { + kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", + arch->pc); + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TLB_ST_MISS << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? 
*/ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", + arch->pc); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } else { + kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", + arch->pc); + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TLB_ST_MISS << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? */ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +/* TLBMOD: store into address matching TLB with Dirty bit off */ +enum emulation_result +kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + +#ifdef DEBUG + /* + * If address not in the guest TLB, then we are in trouble + */ + index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); + if (index < 0) { + /* XXXKYMA Invalidate and retry */ + kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); + kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", + __func__, entryhi); + kvm_mips_dump_guest_tlbs(vcpu); + kvm_mips_dump_host_tlbs(); + return EMULATE_FAIL; + } +#endif + + er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", + arch->pc); + + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } else { + kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", + arch->pc); + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? 
*/ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +enum emulation_result +kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + } + + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_COP_UNUSABLE << CAUSEB_EXCCODE)); + kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); + + return er; +} + +enum emulation_result +kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_RES_INST << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver RI when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result +kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_BREAK << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + printk("Trying to deliver BP when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +/* + * ll/sc, rdhwr, sync emulation + */ + +#define OPCODE 0xfc000000 +#define BASE 0x03e00000 +#define RT 0x001f0000 +#define OFFSET 0x0000ffff +#define LL 0xc0000000 +#define SC 0xe0000000 +#define SPEC0 0x00000000 +#define SPEC3 0x7c000000 +#define RD 0x0000f800 +#define FUNC 0x0000003f +#define SYNC 0x0000000f +#define RDHWR 0x0000003b + +enum emulation_result +kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long curr_pc; + uint32_t inst; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + + /* + * Fetch the 
instruction. + */ + if (cause & CAUSEF_BD) + opc += 1; + + inst = kvm_get_inst(opc, vcpu); + + if (inst == KVM_INVALID_INST) { + printk("%s: Cannot get inst @ %p\n", __func__, opc); + return EMULATE_FAIL; + } + + if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { + int rd = (inst & RD) >> 11; + int rt = (inst & RT) >> 16; + switch (rd) { + case 0: /* CPU number */ + arch->gprs[rt] = 0; + break; + case 1: /* SYNCI length */ + arch->gprs[rt] = min(current_cpu_data.dcache.linesz, + current_cpu_data.icache.linesz); + break; + case 2: /* Read count register */ + printk("RDHWR: Cont register\n"); + arch->gprs[rt] = kvm_read_c0_guest_count(cop0); + break; + case 3: /* Count register resolution */ + switch (current_cpu_data.cputype) { + case CPU_20KC: + case CPU_25KF: + arch->gprs[rt] = 1; + break; + default: + arch->gprs[rt] = 2; + } + break; + case 29: +#if 1 + arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); +#else + /* UserLocal not implemented */ + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); +#endif + break; + + default: + printk("RDHWR not supported\n"); + er = EMULATE_FAIL; + break; + } + } else { + printk("Emulate RI not supported @ %p: %#x\n", opc, inst); + er = EMULATE_FAIL; + } + + /* + * Rollback PC only if emulation was unsuccessful + */ + if (er == EMULATE_FAIL) { + vcpu->arch.pc = curr_pc; + } + return er; +} + +enum emulation_result +kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + enum emulation_result er = EMULATE_DONE; + unsigned long curr_pc; + + if (run->mmio.len > sizeof(*gpr)) { + printk("Bad MMIO length: %d", run->mmio.len); + er = EMULATE_FAIL; + goto done; + } + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, vcpu->arch.pending_load_cause); + if (er == EMULATE_FAIL) + return er; + + switch (run->mmio.len) { + case 4: + *gpr = *(int32_t *) run->mmio.data; + break; + + case 2: + if (vcpu->mmio_needed == 2) + *gpr = *(int16_t *) run->mmio.data; + else + *gpr = *(int16_t *) run->mmio.data; + + break; + case 1: + if (vcpu->mmio_needed == 2) + *gpr = *(int8_t *) run->mmio.data; + else + *gpr = *(u8 *) run->mmio.data; + break; + } + + if (vcpu->arch.pending_load_cause & CAUSEF_BD) + kvm_debug + ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", + vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, + vcpu->mmio_needed); + +done: + return er; +} + +static enum emulation_result +kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_change_c0_guest_cause(cop0, (0xff), + (exccode << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + + kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", + exccode, kvm_read_c0_guest_epc(cop0), + kvm_read_c0_guest_badvaddr(cop0)); + } else { + printk("Trying to deliver 
EXC when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result +kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + + int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); + + if (usermode) { + switch (exccode) { + case T_INT: + case T_SYSCALL: + case T_BREAK: + case T_RES_INST: + break; + + case T_COP_UNUSABLE: + if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) + er = EMULATE_PRIV_FAIL; + break; + + case T_TLB_MOD: + break; + + case T_TLB_LD_MISS: + /* We we are accessing Guest kernel space, then send an address error exception to the guest */ + if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { + printk("%s: LD MISS @ %#lx\n", __func__, + badvaddr); + cause &= ~0xff; + cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); + er = EMULATE_PRIV_FAIL; + } + break; + + case T_TLB_ST_MISS: + /* We we are accessing Guest kernel space, then send an address error exception to the guest */ + if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { + printk("%s: ST MISS @ %#lx\n", __func__, + badvaddr); + cause &= ~0xff; + cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); + er = EMULATE_PRIV_FAIL; + } + break; + + case T_ADDR_ERR_ST: + printk("%s: address error ST @ %#lx\n", __func__, + badvaddr); + if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { + cause &= ~0xff; + cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); + } + er = EMULATE_PRIV_FAIL; + break; + case T_ADDR_ERR_LD: + printk("%s: address error LD @ %#lx\n", __func__, + badvaddr); + if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { + cause &= ~0xff; + cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); + } + er = EMULATE_PRIV_FAIL; + break; + default: + er = EMULATE_PRIV_FAIL; + break; + } + } + + if (er == EMULATE_PRIV_FAIL) { + kvm_mips_emulate_exc(cause, opc, run, vcpu); + } + return er; +} + +/* User Address (UA) fault, this could happen if + * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this + * case we pass on the fault to the guest kernel and let it handle it. + * (2) TLB entry is present in the Guest TLB but not in the shadow, in this + * case we inject the TLB from the Guest TLB into the shadow host TLB + */ +enum emulation_result +kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + unsigned long va = vcpu->arch.host_cp0_badvaddr; + int index; + + kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", + vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); + + /* KVM would not have got the exception if this entry was valid in the shadow host TLB + * Check the Guest TLB, if the entry is not there then send the guest an + * exception. 
The guest exc handler should then inject an entry into the + * guest TLB + */ + index = kvm_mips_guest_tlb_lookup(vcpu, + (va & VPN2_MASK) | + ASID_MASK(kvm_read_c0_guest_entryhi + (vcpu->arch.cop0))); + if (index < 0) { + if (exccode == T_TLB_LD_MISS) { + er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); + } else if (exccode == T_TLB_ST_MISS) { + er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); + } else { + printk("%s: invalid exc code: %d\n", __func__, exccode); + er = EMULATE_FAIL; + } + } else { + struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; + + /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ + if (!TLB_IS_VALID(*tlb, va)) { + if (exccode == T_TLB_LD_MISS) { + er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, + vcpu); + } else if (exccode == T_TLB_ST_MISS) { + er = kvm_mips_emulate_tlbinv_st(cause, opc, run, + vcpu); + } else { + printk("%s: invalid exc code: %d\n", __func__, + exccode); + er = EMULATE_FAIL; + } + } else { +#ifdef DEBUG + kvm_debug + ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", + tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); +#endif + /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ + kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, + NULL); + } + } + + return er; +} diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c new file mode 100644 index 000000000000..1e5de16afe29 --- /dev/null +++ b/arch/mips/kvm/kvm_mips_int.c @@ -0,0 +1,243 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Interrupt delivery +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/bootmem.h> +#include <asm/page.h> +#include <asm/cacheflush.h> + +#include <linux/kvm_host.h> + +#include "kvm_mips_int.h" + +void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) +{ + set_bit(priority, &vcpu->arch.pending_exceptions); +} + +void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority) +{ + clear_bit(priority, &vcpu->arch.pending_exceptions); +} + +void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) +{ + /* Cause bits to reflect the pending timer interrupt, + * the EXC code will be set when we are actually + * delivering the interrupt: + */ + kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); + + /* Queue up an INT exception for the core */ + kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER); + +} + +void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) +{ + kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); + kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); +} + +void +kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) +{ + int intr = (int)irq->irq; + + /* Cause bits to reflect the pending IO interrupt, + * the EXC code will be set when we are actually + * delivering the interrupt: + */ + switch (intr) { + case 2: + kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); + /* Queue up an INT exception for the core */ + kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO); + break; + + case 3: + kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1)); + kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1); + break; + + case 4: + kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2)); + kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2); + break; + + default: + break; + } + +} + +void +kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq) +{ + int intr = (int)irq->irq; + switch (intr) { + case -2: + kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); + kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO); + break; + + case -3: + kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1)); + kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1); + break; + + case -4: + kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2)); + kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2); + break; + + default: + break; + } + +} + +/* Deliver the interrupt of the corresponding priority, if possible. 
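+ * An interrupt can only be taken when the guest has interrupts enabled
+ * (Status.IE set, Status.EXL and Status.ERL clear) and the matching
+ * Status.IM bit (IE_IRQ5 for the timer, IE_IRQ0/1/2 for I/O and IPIs) is set.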
*/ +int +kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause) +{ + int allowed = 0; + uint32_t exccode; + + struct kvm_vcpu_arch *arch = &vcpu->arch; + struct mips_coproc *cop0 = vcpu->arch.cop0; + + switch (priority) { + case MIPS_EXC_INT_TIMER: + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) { + allowed = 1; + exccode = T_INT; + } + break; + + case MIPS_EXC_INT_IO: + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) { + allowed = 1; + exccode = T_INT; + } + break; + + case MIPS_EXC_INT_IPI_1: + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) { + allowed = 1; + exccode = T_INT; + } + break; + + case MIPS_EXC_INT_IPI_2: + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) { + allowed = 1; + exccode = T_INT; + } + break; + + default: + break; + } + + /* Are we allowed to deliver the interrupt ??? */ + if (allowed) { + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering INT @ pc %#lx\n", arch->pc); + + } else + kvm_err("Trying to deliver interrupt when EXL is already set\n"); + + kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE, + (exccode << CAUSEB_EXCCODE)); + + /* XXXSL Set PC to the interrupt exception entry point */ + if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV) + arch->pc = KVM_GUEST_KSEG0 + 0x200; + else + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + clear_bit(priority, &vcpu->arch.pending_exceptions); + } + + return allowed; +} + +int +kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause) +{ + return 1; +} + +void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause) +{ + unsigned long *pending = &vcpu->arch.pending_exceptions; + unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr; + unsigned int priority; + + if (!(*pending) && !(*pending_clr)) + return; + + priority = __ffs(*pending_clr); + while (priority <= MIPS_EXC_MAX) { + if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) { + if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE) + break; + } + + priority = find_next_bit(pending_clr, + BITS_PER_BYTE * sizeof(*pending_clr), + priority + 1); + } + + priority = __ffs(*pending); + while (priority <= MIPS_EXC_MAX) { + if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) { + if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE) + break; + } + + priority = find_next_bit(pending, + BITS_PER_BYTE * sizeof(*pending), + priority + 1); + } + +} + +int kvm_mips_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions); +} diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h new file mode 100644 index 000000000000..20da7d29eede --- /dev/null +++ b/arch/mips/kvm/kvm_mips_int.h @@ -0,0 +1,49 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. 
+* +* KVM/MIPS: Interrupts +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +/* MIPS Exception Priorities, exceptions (including interrupts) are queued up + * for the guest in the order specified by their priorities + */ + +#define MIPS_EXC_RESET 0 +#define MIPS_EXC_SRESET 1 +#define MIPS_EXC_DEBUG_ST 2 +#define MIPS_EXC_DEBUG 3 +#define MIPS_EXC_DDB 4 +#define MIPS_EXC_NMI 5 +#define MIPS_EXC_MCHK 6 +#define MIPS_EXC_INT_TIMER 7 +#define MIPS_EXC_INT_IO 8 +#define MIPS_EXC_EXECUTE 9 +#define MIPS_EXC_INT_IPI_1 10 +#define MIPS_EXC_INT_IPI_2 11 +#define MIPS_EXC_MAX 12 +/* XXXSL More to follow */ + +#define C_TI (_ULCAST_(1) << 30) + +#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) +#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0) + +void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority); +void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority); +int kvm_mips_pending_timer(struct kvm_vcpu *vcpu); + +void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu); +void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu); +void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); +void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); +int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause); +int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause); +void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause); diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h new file mode 100644 index 000000000000..86d3b4cc348b --- /dev/null +++ b/arch/mips/kvm/kvm_mips_opcode.h @@ -0,0 +1,24 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +/* + * Define opcode values not defined in <asm/isnt.h> + */ + +#ifndef __KVM_MIPS_OPCODE_H__ +#define __KVM_MIPS_OPCODE_H__ + +/* COP0 Ops */ +#define mfmcz_op 0x0b /* 01011 */ +#define wrpgpr_op 0x0e /* 01110 */ + +/* COP0 opcodes (only if COP0 and CO=1): */ +#define wait_op 0x20 /* 100000 */ + +#endif /* __KVM_MIPS_OPCODE_H__ */ diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c new file mode 100644 index 000000000000..075904bcac1b --- /dev/null +++ b/arch/mips/kvm/kvm_mips_stats.c @@ -0,0 +1,82 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: COP0 access histogram +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <linux/kvm_host.h> + +char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = { + "WAIT", + "CACHE", + "Signal", + "Interrupt", + "COP0/1 Unusable", + "TLB Mod", + "TLB Miss (LD)", + "TLB Miss (ST)", + "Address Err (ST)", + "Address Error (LD)", + "System Call", + "Reserved Inst", + "Break Inst", + "D-Cache Flushes", +}; + +char *kvm_cop0_str[N_MIPS_COPROC_REGS] = { + "Index", + "Random", + "EntryLo0", + "EntryLo1", + "Context", + "PG Mask", + "Wired", + "HWREna", + "BadVAddr", + "Count", + "EntryHI", + "Compare", + "Status", + "Cause", + "EXC PC", + "PRID", + "Config", + "LLAddr", + "Watch Lo", + "Watch Hi", + "X Context", + "Reserved", + "Impl Dep", + "Debug", + "DEPC", + "PerfCnt", + "ErrCtl", + "CacheErr", + "TagLo", + "TagHi", + "ErrorEPC", + "DESAVE" +}; + +int kvm_mips_dump_stats(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS + int i, j; + + printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); + for (i = 0; i < N_MIPS_COPROC_REGS; i++) { + for (j = 0; j < N_MIPS_COPROC_SEL; j++) { + if (vcpu->arch.cop0->stat[i][j]) + printk("%s[%d]: %lu\n", kvm_cop0_str[i], j, + vcpu->arch.cop0->stat[i][j]); + } + } +#endif + + return 0; +} diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c new file mode 100644 index 000000000000..89511a9258d3 --- /dev/null +++ b/arch/mips/kvm/kvm_tlb.c @@ -0,0 +1,928 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that +* TLB handlers run from KSEG0 +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/mm.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/kvm_host.h> + +#include <asm/cpu.h> +#include <asm/bootinfo.h> +#include <asm/mmu_context.h> +#include <asm/pgtable.h> +#include <asm/cacheflush.h> + +#undef CONFIG_MIPS_MT +#include <asm/r4kcache.h> +#define CONFIG_MIPS_MT + +#define KVM_GUEST_PC_TLB 0 +#define KVM_GUEST_SP_TLB 1 + +#define PRIx64 "llx" + +/* Use VZ EntryHi.EHINV to invalidate TLB entries */ +#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) + +atomic_t kvm_mips_instance; +EXPORT_SYMBOL(kvm_mips_instance); + +/* These function pointers are initialized once the KVM module is loaded */ +pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); +EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); + +void (*kvm_mips_release_pfn_clean) (pfn_t pfn); +EXPORT_SYMBOL(kvm_mips_release_pfn_clean); + +bool(*kvm_mips_is_error_pfn) (pfn_t pfn); +EXPORT_SYMBOL(kvm_mips_is_error_pfn); + +uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) +{ + return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]); +} + + +uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) +{ + return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]); +} + +inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) +{ + return vcpu->kvm->arch.commpage_tlb; +} + + +/* + * Structure defining an tlb entry data set. 
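 *
 * The dump helpers below walk either the hardware TLB (via tlb_read()) or
 * the software copies in vcpu->arch.guest_tlb / vcpu->arch.shadow_tlb and
 * print EntryHi, EntryLo0 and EntryLo1 for each slot, flagging slots whose
 * pages are both invalid with a leading '*'.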
+ */ + +void kvm_mips_dump_host_tlbs(void) +{ + unsigned long old_entryhi; + unsigned long old_pagemask; + struct kvm_mips_tlb tlb; + unsigned long flags; + int i; + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + old_pagemask = read_c0_pagemask(); + + printk("HOST TLBs:\n"); + printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi())); + + for (i = 0; i < current_cpu_data.tlbsize; i++) { + write_c0_index(i); + mtc0_tlbw_hazard(); + + tlb_read(); + tlbw_use_hazard(); + + tlb.tlb_hi = read_c0_entryhi(); + tlb.tlb_lo0 = read_c0_entrylo0(); + tlb.tlb_lo1 = read_c0_entrylo1(); + tlb.tlb_mask = read_c0_pagemask(); + + printk("TLB%c%3d Hi 0x%08lx ", + (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', + i, tlb.tlb_hi); + printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo0 >> 3) & 7); + printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); + } + write_c0_entryhi(old_entryhi); + write_c0_pagemask(old_pagemask); + mtc0_tlbw_hazard(); + local_irq_restore(flags); +} + +void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_mips_tlb tlb; + int i; + + printk("Guest TLBs:\n"); + printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); + + for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { + tlb = vcpu->arch.guest_tlb[i]; + printk("TLB%c%3d Hi 0x%08lx ", + (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', + i, tlb.tlb_hi); + printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo0 >> 3) & 7); + printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); + } +} + +void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu) +{ + int i; + volatile struct kvm_mips_tlb tlb; + + printk("Shadow TLBs:\n"); + for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { + tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i]; + printk("TLB%c%3d Hi 0x%08lx ", + (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', + i, tlb.tlb_hi); + printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo0 >> 3) & 7); + printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo1 & MIPS3_PG_G) ? 
'G' : ' ', + (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); + } +} + +static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) +{ + pfn_t pfn; + + if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) + return; + + pfn = kvm_mips_gfn_to_pfn(kvm, gfn); + + if (kvm_mips_is_error_pfn(pfn)) { + panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); + } + + kvm->arch.guest_pmap[gfn] = pfn; + return; +} + +/* Translate guest KSEG0 addresses to Host PA */ +unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, + unsigned long gva) +{ + gfn_t gfn; + uint32_t offset = gva & ~PAGE_MASK; + struct kvm *kvm = vcpu->kvm; + + if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) { + kvm_err("%s/%p: Invalid gva: %#lx\n", __func__, + __builtin_return_address(0), gva); + return KVM_INVALID_PAGE; + } + + gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT); + + if (gfn >= kvm->arch.guest_pmap_npages) { + kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn, + gva); + return KVM_INVALID_PAGE; + } + kvm_mips_map_page(vcpu->kvm, gfn); + return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; +} + +/* XXXKYMA: Must be called with interrupts disabled */ +/* set flush_dcache_mask == 0 if no dcache flush required */ +int +kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, + unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask) +{ + unsigned long flags; + unsigned long old_entryhi; + volatile int idx; + + local_irq_save(flags); + + + old_entryhi = read_c0_entryhi(); + write_c0_entryhi(entryhi); + mtc0_tlbw_hazard(); + + tlb_probe(); + tlb_probe_hazard(); + idx = read_c0_index(); + + if (idx > current_cpu_data.tlbsize) { + kvm_err("%s: Invalid Index: %d\n", __func__, idx); + kvm_mips_dump_host_tlbs(); + return -1; + } + + if (idx < 0) { + idx = read_c0_random() % current_cpu_data.tlbsize; + write_c0_index(idx); + mtc0_tlbw_hazard(); + } + write_c0_entrylo0(entrylo0); + write_c0_entrylo1(entrylo1); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + tlbw_use_hazard(); + +#ifdef DEBUG + if (debug) { + kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] " + "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n", + vcpu->arch.pc, idx, read_c0_entryhi(), + read_c0_entrylo0(), read_c0_entrylo1()); + } +#endif + + /* Flush D-cache */ + if (flush_dcache_mask) { + if (entrylo0 & MIPS3_PG_V) { + ++vcpu->stat.flush_dcache_exits; + flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask); + } + if (entrylo1 & MIPS3_PG_V) { + ++vcpu->stat.flush_dcache_exits; + flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) | + (0x1 << PAGE_SHIFT)); + } + } + + /* Restore old ASID */ + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + local_irq_restore(flags); + return 0; +} + + +/* XXXKYMA: Must be called with interrupts disabled */ +int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, + struct kvm_vcpu *vcpu) +{ + gfn_t gfn; + pfn_t pfn0, pfn1; + unsigned long vaddr = 0; + unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; + int even; + struct kvm *kvm = vcpu->kvm; + const int flush_dcache_mask = 0; + + + if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { + kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); + kvm_mips_dump_host_tlbs(); + return -1; + } + + gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); + if (gfn >= kvm->arch.guest_pmap_npages) { + kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, + gfn, badvaddr); + kvm_mips_dump_host_tlbs(); + return -1; + } + even = !(gfn & 0x1); + vaddr = badvaddr & (PAGE_MASK << 1); + + 
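	/*
	 * A MIPS TLB entry maps an even/odd pair of pages under one VPN2:
	 * EntryLo0 covers the even page and EntryLo1 the odd one. That is
	 * why both gfn and its neighbour (gfn ^ 0x1) are mapped below and
	 * the virtual address was aligned down to the pair (PAGE_MASK << 1).
	 */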
kvm_mips_map_page(vcpu->kvm, gfn); + kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1); + + if (even) { + pfn0 = kvm->arch.guest_pmap[gfn]; + pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1]; + } else { + pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1]; + pfn1 = kvm->arch.guest_pmap[gfn]; + } + + entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | + (0x1 << 1); + entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | + (0x1 << 1); + + return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, + flush_dcache_mask); +} + +int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, + struct kvm_vcpu *vcpu) +{ + pfn_t pfn0, pfn1; + unsigned long flags, old_entryhi = 0, vaddr = 0; + unsigned long entrylo0 = 0, entrylo1 = 0; + + + pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; + pfn1 = 0; + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | + (0x1 << 1); + entrylo1 = 0; + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + vaddr = badvaddr & (PAGE_MASK << 1); + write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu)); + mtc0_tlbw_hazard(); + write_c0_entrylo0(entrylo0); + mtc0_tlbw_hazard(); + write_c0_entrylo1(entrylo1); + mtc0_tlbw_hazard(); + write_c0_index(kvm_mips_get_commpage_asid(vcpu)); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + +#ifdef DEBUG + kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", + vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), + read_c0_entrylo0(), read_c0_entrylo1()); +#endif + + /* Restore old ASID */ + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + local_irq_restore(flags); + + return 0; +} + +int +kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, + struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1) +{ + unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; + struct kvm *kvm = vcpu->kvm; + pfn_t pfn0, pfn1; + + + if ((tlb->tlb_hi & VPN2_MASK) == 0) { + pfn0 = 0; + pfn1 = 0; + } else { + kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT); + kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT); + + pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; + pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; + } + + if (hpa0) + *hpa0 = pfn0 << PAGE_SHIFT; + + if (hpa1) + *hpa1 = pfn1 << PAGE_SHIFT; + + /* Get attributes from the Guest TLB */ + entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? 
+ kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | + (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); + entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | + (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); + +#ifdef DEBUG + kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, + tlb->tlb_lo0, tlb->tlb_lo1); +#endif + + return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, + tlb->tlb_mask); +} + +int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) +{ + int i; + int index = -1; + struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; + + + for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { + if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && + (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) { + index = i; + break; + } + } + +#ifdef DEBUG + kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n", + __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1); +#endif + + return index; +} + +int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) +{ + unsigned long old_entryhi, flags; + volatile int idx; + + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + + if (KVM_GUEST_KERNEL_MODE(vcpu)) + write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu)); + else { + write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); + } + + mtc0_tlbw_hazard(); + + tlb_probe(); + tlb_probe_hazard(); + idx = read_c0_index(); + + /* Restore old ASID */ + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + local_irq_restore(flags); + +#ifdef DEBUG + kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx); +#endif + + return idx; +} + +int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) +{ + int idx; + unsigned long flags, old_entryhi; + + local_irq_save(flags); + + + old_entryhi = read_c0_entryhi(); + + write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); + mtc0_tlbw_hazard(); + + tlb_probe(); + tlb_probe_hazard(); + idx = read_c0_index(); + + if (idx >= current_cpu_data.tlbsize) + BUG(); + + if (idx > 0) { + write_c0_entryhi(UNIQUE_ENTRYHI(idx)); + mtc0_tlbw_hazard(); + + write_c0_entrylo0(0); + mtc0_tlbw_hazard(); + + write_c0_entrylo1(0); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + mtc0_tlbw_hazard(); + } + + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + local_irq_restore(flags); + +#ifdef DEBUG + if (idx > 0) { + kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__, + (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx); + } +#endif + + return 0; +} + +/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/ +int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) +{ + unsigned long flags, old_entryhi; + + if (index >= current_cpu_data.tlbsize) + BUG(); + + local_irq_save(flags); + + + old_entryhi = read_c0_entryhi(); + + write_c0_entryhi(UNIQUE_ENTRYHI(index)); + mtc0_tlbw_hazard(); + + write_c0_index(index); + mtc0_tlbw_hazard(); + + write_c0_entrylo0(0); + mtc0_tlbw_hazard(); + + write_c0_entrylo1(0); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + local_irq_restore(flags); + + return 0; +} + +void kvm_mips_flush_host_tlb(int skip_kseg0) +{ + unsigned long 
flags; + unsigned long old_entryhi, entryhi; + unsigned long old_pagemask; + int entry = 0; + int maxentry = current_cpu_data.tlbsize; + + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + old_pagemask = read_c0_pagemask(); + + /* Blast 'em all away. */ + for (entry = 0; entry < maxentry; entry++) { + + write_c0_index(entry); + mtc0_tlbw_hazard(); + + if (skip_kseg0) { + tlb_read(); + tlbw_use_hazard(); + + entryhi = read_c0_entryhi(); + + /* Don't blow away guest kernel entries */ + if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) { + continue; + } + } + + /* Make sure all entries differ. */ + write_c0_entryhi(UNIQUE_ENTRYHI(entry)); + mtc0_tlbw_hazard(); + write_c0_entrylo0(0); + mtc0_tlbw_hazard(); + write_c0_entrylo1(0); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + mtc0_tlbw_hazard(); + } + + tlbw_use_hazard(); + + write_c0_entryhi(old_entryhi); + write_c0_pagemask(old_pagemask); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + local_irq_restore(flags); +} + +void +kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, + struct kvm_vcpu *vcpu) +{ + unsigned long asid = asid_cache(cpu); + + if (!(ASID_MASK(ASID_INC(asid)))) { + if (cpu_has_vtag_icache) { + flush_icache_all(); + } + + kvm_local_flush_tlb_all(); /* start new asid cycle */ + + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION; + } + + cpu_context(cpu, mm) = asid_cache(cpu) = asid; +} + +void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + unsigned long old_entryhi; + unsigned long old_pagemask; + int entry = 0; + int cpu = smp_processor_id(); + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + old_pagemask = read_c0_pagemask(); + + for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { + write_c0_index(entry); + mtc0_tlbw_hazard(); + tlb_read(); + tlbw_use_hazard(); + + vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi(); + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0(); + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1(); + vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask(); + } + + write_c0_entryhi(old_entryhi); + write_c0_pagemask(old_pagemask); + mtc0_tlbw_hazard(); + + local_irq_restore(flags); + +} + +void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + unsigned long old_ctx; + int entry; + int cpu = smp_processor_id(); + + local_irq_save(flags); + + old_ctx = read_c0_entryhi(); + + for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { + write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi); + mtc0_tlbw_hazard(); + write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0); + write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1); + + write_c0_index(entry); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + tlbw_use_hazard(); + } + + tlbw_use_hazard(); + write_c0_entryhi(old_ctx); + mtc0_tlbw_hazard(); + local_irq_restore(flags); +} + + +void kvm_local_flush_tlb_all(void) +{ + unsigned long flags; + unsigned long old_ctx; + int entry = 0; + + local_irq_save(flags); + /* Save old context and create impossible VPN2 value */ + old_ctx = read_c0_entryhi(); + write_c0_entrylo0(0); + write_c0_entrylo1(0); + + /* Blast 'em all away. */ + while (entry < current_cpu_data.tlbsize) { + /* Make sure all entries differ. 
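 * UNIQUE_ENTRYHI(entry) parks each invalidated slot at its own
 * CKSEG0-based VPN2, so the flushed entries can never alias one
 * another or match a mapped address.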
*/ + write_c0_entryhi(UNIQUE_ENTRYHI(entry)); + write_c0_index(entry); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + entry++; + } + tlbw_use_hazard(); + write_c0_entryhi(old_ctx); + mtc0_tlbw_hazard(); + + local_irq_restore(flags); +} + +void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu) +{ + int cpu, entry; + + for_each_possible_cpu(cpu) { + for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { + vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = + UNIQUE_ENTRYHI(entry); + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0; + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0; + vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = + read_c0_pagemask(); +#ifdef DEBUG + kvm_debug + ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n", + cpu, entry, + vcpu->arch.shadow_tlb[cpu][entry].tlb_hi, + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0, + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1); +#endif + } + } +} + +/* Restore ASID once we are scheduled back after preemption */ +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long flags; + int newasid = 0; + +#ifdef DEBUG + kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); +#endif + + /* Alocate new kernel and user ASIDs if needed */ + + local_irq_save(flags); + + if (((vcpu->arch. + guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) { + kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu); + vcpu->arch.guest_kernel_asid[cpu] = + vcpu->arch.guest_kernel_mm.context.asid[cpu]; + kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); + vcpu->arch.guest_user_asid[cpu] = + vcpu->arch.guest_user_mm.context.asid[cpu]; + newasid++; + + kvm_info("[%d]: cpu_context: %#lx\n", cpu, + cpu_context(cpu, current->mm)); + kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n", + cpu, vcpu->arch.guest_kernel_asid[cpu]); + kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu, + vcpu->arch.guest_user_asid[cpu]); + } + + if (vcpu->arch.last_sched_cpu != cpu) { + kvm_info("[%d->%d]KVM VCPU[%d] switch\n", + vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); + } + + /* Only reload shadow host TLB if new ASIDs haven't been allocated */ +#if 0 + if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) { + kvm_mips_flush_host_tlb(0); + kvm_shadow_tlb_load(vcpu); + } +#endif + + if (!newasid) { + /* If we preempted while the guest was executing, then reload the pre-empted ASID */ + if (current->flags & PF_VCPU) { + write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi)); + ehb(); + } + } else { + /* New ASIDs were allocated for the VM */ + + /* Were we in guest context? If so then the pre-empted ASID is no longer + * valid, we need to set it to what it should be based on the mode of + * the Guest (Kernel/User) + */ + if (current->flags & PF_VCPU) { + if (KVM_GUEST_KERNEL_MODE(vcpu)) + write_c0_entryhi(ASID_MASK(vcpu->arch. + guest_kernel_asid[cpu])); + else + write_c0_entryhi(ASID_MASK(vcpu->arch. 
+ guest_user_asid[cpu])); + ehb(); + } + } + + local_irq_restore(flags); + +} + +/* ASID can change if another task is scheduled during preemption */ +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + uint32_t cpu; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + + vcpu->arch.preempt_entryhi = read_c0_entryhi(); + vcpu->arch.last_sched_cpu = cpu; + +#if 0 + if ((atomic_read(&kvm_mips_instance) > 1)) { + kvm_shadow_tlb_put(vcpu); + } +#endif + + if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & + ASID_VERSION_MASK)) { + kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, + cpu_context(cpu, current->mm)); + drop_mmu_context(current->mm, cpu); + } + write_c0_entryhi(cpu_asid(cpu, current->mm)); + ehb(); + + local_irq_restore(flags); +} + +uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned long paddr, flags; + uint32_t inst; + int index; + + if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 || + KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { + local_irq_save(flags); + index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc); + if (index >= 0) { + inst = *(opc); + } else { + index = + kvm_mips_guest_tlb_lookup(vcpu, + ((unsigned long) opc & VPN2_MASK) + | + ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); + if (index < 0) { + kvm_err + ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", + __func__, opc, vcpu, read_c0_entryhi()); + kvm_mips_dump_host_tlbs(); + local_irq_restore(flags); + return KVM_INVALID_INST; + } + kvm_mips_handle_mapped_seg_tlb_fault(vcpu, + &vcpu->arch. + guest_tlb[index], + NULL, NULL); + inst = *(opc); + } + local_irq_restore(flags); + } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { + paddr = + kvm_mips_translate_guest_kseg0_to_hpa(vcpu, + (unsigned long) opc); + inst = *(uint32_t *) CKSEG0ADDR(paddr); + } else { + kvm_err("%s: illegal address: %p\n", __func__, opc); + return KVM_INVALID_INST; + } + + return inst; +} + +EXPORT_SYMBOL(kvm_local_flush_tlb_all); +EXPORT_SYMBOL(kvm_shadow_tlb_put); +EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); +EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); +EXPORT_SYMBOL(kvm_mips_init_shadow_tlb); +EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); +EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); +EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); +EXPORT_SYMBOL(kvm_mips_flush_host_tlb); +EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); +EXPORT_SYMBOL(kvm_mips_host_tlb_inv); +EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); +EXPORT_SYMBOL(kvm_shadow_tlb_load); +EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs); +EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); +EXPORT_SYMBOL(kvm_get_inst); +EXPORT_SYMBOL(kvm_arch_vcpu_load); +EXPORT_SYMBOL(kvm_arch_vcpu_put); diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c new file mode 100644 index 000000000000..466aeef044bd --- /dev/null +++ b/arch/mips/kvm/kvm_trap_emul.c @@ -0,0 +1,482 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include <linux/kvm_host.h> + +#include "kvm_mips_opcode.h" +#include "kvm_mips_int.h" + +static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) +{ + gpa_t gpa; + uint32_t kseg = KSEGX(gva); + + if ((kseg == CKSEG0) || (kseg == CKSEG1)) + gpa = CPHYSADDR(gva); + else { + printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); + kvm_mips_dump_host_tlbs(); + gpa = KVM_INVALID_ADDR; + } + +#ifdef DEBUG + kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); +#endif + + return gpa; +} + + +static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { + er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); + } else + er = kvm_mips_emulate_inst(cause, opc, run, vcpu); + + switch (er) { + case EMULATE_DONE: + ret = RESUME_GUEST; + break; + + case EMULATE_FAIL: + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + break; + + case EMULATE_WAIT: + run->exit_reason = KVM_EXIT_INTR; + ret = RESUME_HOST; + break; + + default: + BUG(); + } + return ret; +} + +static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 + || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { +#ifdef DEBUG + kvm_debug + ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); +#endif + er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); + + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { + /* XXXKYMA: The guest kernel does not expect to get this fault when we are not + * using HIGHMEM. 
Need to address this in a HIGHMEM kernel + */ + printk + ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } else { + printk + ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) + && KVM_GUEST_KERNEL_MODE(vcpu)) { + if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 + || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { +#ifdef DEBUG + kvm_debug + ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); +#endif + er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { + /* All KSEG0 faults are handled by KVM, as the guest kernel does not + * expect to ever get them + */ + if (kvm_mips_handle_kseg0_tlb_fault + (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else { + kvm_err + ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) + && KVM_GUEST_KERNEL_MODE(vcpu)) { + if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 + || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { +#ifdef DEBUG + kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", + vcpu->arch.pc, badvaddr); +#endif + + /* User Address (UA) fault, this could happen if + * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this + * case we pass on the fault to the guest kernel and let it handle it. 
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this + * case we inject the TLB from the Guest TLB into the shadow host TLB + */ + + er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { + if (kvm_mips_handle_kseg0_tlb_fault + (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else { + printk + ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (KVM_GUEST_KERNEL_MODE(vcpu) + && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { +#ifdef DEBUG + kvm_debug("Emulate Store to MMIO space\n"); +#endif + er = kvm_mips_emulate_inst(cause, opc, run, vcpu); + if (er == EMULATE_FAIL) { + printk("Emulate Store to MMIO space failed\n"); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } else { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } + } else { + printk + ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { +#ifdef DEBUG + kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); +#endif + er = kvm_mips_emulate_inst(cause, opc, run, vcpu); + if (er == EMULATE_FAIL) { + printk("Emulate Load from MMIO space failed\n"); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } else { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } + } else { + printk + ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + er = EMULATE_FAIL; + } + return ret; +} + +static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_syscall(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = 
RESUME_GUEST; + + er = kvm_mips_handle_ri(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int +kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]); + kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]); + kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]); + kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]); + kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]); + + kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]); + kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]); + kvm_write_c0_guest_pagemask(cop0, + regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]); + kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]); + kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]); + + return 0; +} + +static int +kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0); + regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0); + regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0); + regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0); + regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0); + + regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0); + regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0); + regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] = + kvm_read_c0_guest_pagemask(cop0); + regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0); + regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0); + + regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0); + + return 0; +} + +static int kvm_trap_emul_vm_init(struct kvm *kvm) +{ + return 0; +} + +static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) +{ + return 0; +} + +static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + uint32_t config1; + int vcpu_id = vcpu->vcpu_id; + + /* Arch specific stuff, set up config registers properly so that the + * guest will come up as expected, for now we simulate a + * MIPS 24kc + */ + kvm_write_c0_guest_prid(cop0, 0x00019300); + kvm_write_c0_guest_config(cop0, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT)); + + /* Read the cache characteristics from the host Config1 Register */ + config1 = (read_c0_config1() & ~0x7f); + + /* Set up MMU size */ + config1 
&= ~(0x3f << 25); + config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); + + /* We unset some bits that we aren't emulating */ + config1 &= + ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) | + (1 << CP0C1_WR) | (1 << CP0C1_CA)); + kvm_write_c0_guest_config1(cop0, config1); + + kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); + /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ + kvm_write_c0_guest_config3(cop0, + MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 << + CP0C3_ULRI)); + + /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ + kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); + + /* Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) */ + kvm_write_c0_guest_intctl(cop0, 0xFC000000); + + /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ + kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF)); + + return 0; +} + +static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { + /* exit handlers */ + .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, + .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, + .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, + .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, + .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, + .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, + .handle_syscall = kvm_trap_emul_handle_syscall, + .handle_res_inst = kvm_trap_emul_handle_res_inst, + .handle_break = kvm_trap_emul_handle_break, + + .vm_init = kvm_trap_emul_vm_init, + .vcpu_init = kvm_trap_emul_vcpu_init, + .vcpu_setup = kvm_trap_emul_vcpu_setup, + .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, + .queue_timer_int = kvm_mips_queue_timer_int_cb, + .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, + .queue_io_int = kvm_mips_queue_io_int_cb, + .dequeue_io_int = kvm_mips_dequeue_io_int_cb, + .irq_deliver = kvm_mips_irq_deliver_cb, + .irq_clear = kvm_mips_irq_clear_cb, + .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs, + .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs, +}; + +int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) +{ + *install_callbacks = &kvm_trap_emul_callbacks; + return 0; +} diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h new file mode 100644 index 000000000000..bc9e0f406c08 --- /dev/null +++ b/arch/mips/kvm/trace.h @@ -0,0 +1,46 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal <sanjayl@kymasys.com> +*/ + +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_H + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm +#define TRACE_INCLUDE_PATH . 
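/*
 * TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE tell <trace/define_trace.h>
 * (included at the bottom of this header) to look for the tracepoint
 * definitions in this directory and file rather than under
 * include/trace/events/.
 */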
+#define TRACE_INCLUDE_FILE trace + +/* + * Tracepoints for VM eists + */ +extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; + +TRACE_EVENT(kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason), + TP_STRUCT__entry( + __field(struct kvm_vcpu *, vcpu) + __field(unsigned int, reason) + ), + + TP_fast_assign( + __entry->vcpu = vcpu; + __entry->reason = reason; + ), + + TP_printk("[%s]PC: 0x%08lx", + kvm_mips_exit_types_str[__entry->reason], + __entry->vcpu->arch.pc) +); + +#endif /* _TRACE_KVM_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index a64daee740ee..3b2a1e78a543 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c @@ -19,7 +19,7 @@ */ void __mips_set_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -41,7 +41,7 @@ EXPORT_SYMBOL(__mips_set_bit); */ void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -63,7 +63,7 @@ EXPORT_SYMBOL(__mips_clear_bit); */ void __mips_change_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -86,7 +86,7 @@ EXPORT_SYMBOL(__mips_change_bit); int __mips_test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -112,7 +112,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit); int __mips_test_and_set_bit_lock(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -137,7 +137,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock); */ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -162,7 +162,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit); */ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 32b9f21bfd85..8a12d00908e0 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c @@ -11,6 +11,7 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> +#include <asm/mmu_context.h> static inline const char *msk2str(unsigned int mask) { @@ -55,7 +56,7 @@ static void dump_tlb(int first, int last) s_pagemask = read_c0_pagemask(); s_entryhi = read_c0_entryhi(); s_index = read_c0_index(); - asid = s_entryhi & 0xff; + asid = ASID_MASK(s_entryhi); for (i = first; i <= last; i++) { write_c0_index(i); @@ -85,7 +86,7 @@ static void dump_tlb(int first, int last) printk("va=%0*lx asid=%02lx\n", width, (entryhi & ~0x1fffUL), - entryhi & 0xff); + ASID_MASK(entryhi)); 
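		/*
		 * ASID_MASK() comes from <asm/mmu_context.h>, included above;
		 * it replaces the hard-coded 0xff mask so the dump no longer
		 * assumes a particular ASID field width.
		 */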
printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", width, (entrylo0 << 6) & PAGE_MASK, c0, diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index 053d3b0b0317..0580194e7402 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -5,7 +5,8 @@ * * Copyright (C) 1998, 1999, 2000 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 2007 Maciej W. Rozycki + * Copyright (C) 2007 by Maciej W. Rozycki + * Copyright (C) 2011, 2012 MIPS Technologies, Inc. */ #include <asm/asm.h> #include <asm/asm-offsets.h> @@ -19,6 +20,20 @@ #define LONG_S_R sdr #endif +#ifdef CONFIG_CPU_MICROMIPS +#define STORSIZE (LONGSIZE * 2) +#define STORMASK (STORSIZE - 1) +#define FILL64RG t8 +#define FILLPTRG t7 +#undef LONG_S +#define LONG_S LONG_SP +#else +#define STORSIZE LONGSIZE +#define STORMASK LONGMASK +#define FILL64RG a1 +#define FILLPTRG t0 +#endif + #define EX(insn,reg,addr,handler) \ 9: insn reg, addr; \ .section __ex_table,"a"; \ @@ -26,23 +41,25 @@ .previous .macro f_fill64 dst, offset, val, fixup - EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup) -#if LONGSIZE == 4 - EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup) +#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS)) + EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup) +#endif +#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) + EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup) #endif .endm @@ -71,16 +88,20 @@ LEAF(memset) 1: FEXPORT(__bzero) - sltiu t0, a2, LONGSIZE /* very small region? */ + sltiu t0, a2, STORSIZE /* very small region? */ bnez t0, .Lsmall_memset - andi t0, a0, LONGMASK /* aligned? */ + andi t0, a0, STORMASK /* aligned? 
*/ +#ifdef CONFIG_CPU_MICROMIPS + move t8, a1 /* used by 'swp' instruction */ + move t9, a1 +#endif #ifndef CONFIG_CPU_DADDI_WORKAROUNDS beqz t0, 1f - PTR_SUBU t0, LONGSIZE /* alignment in bytes */ + PTR_SUBU t0, STORSIZE /* alignment in bytes */ #else .set noat - li AT, LONGSIZE + li AT, STORSIZE beqz t0, 1f PTR_SUBU t0, AT /* alignment in bytes */ .set at @@ -99,24 +120,27 @@ FEXPORT(__bzero) 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f beqz t1, .Lmemset_partial /* no block to fill */ - andi t0, a2, 0x40-LONGSIZE + andi t0, a2, 0x40-STORSIZE PTR_ADDU t1, a0 /* end address */ .set reorder 1: PTR_ADDIU a0, 64 R10KCBARRIER(0(ra)) - f_fill64 a0, -64, a1, .Lfwd_fixup + f_fill64 a0, -64, FILL64RG, .Lfwd_fixup bne t1, a0, 1b .set noreorder .Lmemset_partial: R10KCBARRIER(0(ra)) PTR_LA t1, 2f /* where to start */ +#ifdef CONFIG_CPU_MICROMIPS + LONG_SRL t7, t0, 1 +#endif #if LONGSIZE == 4 - PTR_SUBU t1, t0 + PTR_SUBU t1, FILLPTRG #else .set noat - LONG_SRL AT, t0, 1 + LONG_SRL AT, FILLPTRG, 1 PTR_SUBU t1, AT .set at #endif @@ -126,9 +150,9 @@ FEXPORT(__bzero) .set push .set noreorder .set nomacro - f_fill64 a0, -64, a1, .Lpartial_fixup /* ... but first do longs ... */ + f_fill64 a0, -64, FILL64RG, .Lpartial_fixup /* ... but first do longs ... */ 2: .set pop - andi a2, LONGMASK /* At most one long to go */ + andi a2, STORMASK /* At most one long to go */ beqz a2, 1f PTR_ADDU a0, a2 /* What's left */ @@ -169,7 +193,7 @@ FEXPORT(__bzero) .Lpartial_fixup: PTR_L t0, TI_TASK($28) - andi a2, LONGMASK + andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, t1 jr ra @@ -177,4 +201,4 @@ FEXPORT(__bzero) .Llast_fixup: jr ra - andi v1, a2, LONGMASK + andi v1, a2, STORMASK diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index cd160be3ce4d..6807f7172eaf 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c @@ -13,6 +13,7 @@ #include <linux/compiler.h> #include <linux/preempt.h> #include <linux/export.h> +#include <linux/stringify.h> #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) @@ -34,8 +35,11 @@ * * Workaround: mask EXL bit of the result or place a nop before mfc0. 
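 *
 * The irq helpers below used to be generated through hand-written asm
 * macros; they are now ordinary C functions whose inline asm uses named
 * operands and __stringify(__irq_disable_hazard) to emit the required
 * hazard barrier.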
*/ -__asm__( - " .macro arch_local_irq_disable\n" +notrace void arch_local_irq_disable(void) +{ + preempt_disable(); + + __asm__ __volatile__( " .set push \n" " .set noat \n" #ifdef CONFIG_MIPS_MT_SMTC @@ -52,108 +56,98 @@ __asm__( " .set noreorder \n" " mtc0 $1,$12 \n" #endif - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); + : /* no outputs */ + : /* no inputs */ + : "memory"); -notrace void arch_local_irq_disable(void) -{ - preempt_disable(); - __asm__ __volatile__( - "arch_local_irq_disable" - : /* no outputs */ - : /* no inputs */ - : "memory"); preempt_enable(); } EXPORT_SYMBOL(arch_local_irq_disable); -__asm__( - " .macro arch_local_irq_save result \n" +notrace unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + preempt_disable(); + + __asm__ __volatile__( " .set push \n" " .set reorder \n" " .set noat \n" #ifdef CONFIG_MIPS_MT_SMTC - " mfc0 \\result, $2, 1 \n" - " ori $1, \\result, 0x400 \n" + " mfc0 %[flags], $2, 1 \n" + " ori $1, %[flags], 0x400 \n" " .set noreorder \n" " mtc0 $1, $2, 1 \n" - " andi \\result, \\result, 0x400 \n" + " andi %[flags], %[flags], 0x400 \n" #elif defined(CONFIG_CPU_MIPSR2) /* see irqflags.h for inline function */ #else - " mfc0 \\result, $12 \n" - " ori $1, \\result, 0x1f \n" + " mfc0 %[flags], $12 \n" + " ori $1, %[flags], 0x1f \n" " xori $1, 0x1f \n" " .set noreorder \n" " mtc0 $1, $12 \n" #endif - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); + : [flags] "=r" (flags) + : /* no inputs */ + : "memory"); -notrace unsigned long arch_local_irq_save(void) -{ - unsigned long flags; - preempt_disable(); - asm volatile("arch_local_irq_save\t%0" - : "=r" (flags) - : /* no inputs */ - : "memory"); preempt_enable(); + return flags; } EXPORT_SYMBOL(arch_local_irq_save); +notrace void arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of branch and call overhead on each + * local_irq_restore() + */ + if (unlikely(!(flags & 0x0400))) + smtc_ipi_replay(); +#endif + preempt_disable(); -__asm__( - " .macro arch_local_irq_restore flags \n" + __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set noat \n" #ifdef CONFIG_MIPS_MT_SMTC - "mfc0 $1, $2, 1 \n" - "andi \\flags, 0x400 \n" - "ori $1, 0x400 \n" - "xori $1, 0x400 \n" - "or \\flags, $1 \n" - "mtc0 \\flags, $2, 1 \n" + " mfc0 $1, $2, 1 \n" + " andi %[flags], 0x400 \n" + " ori $1, 0x400 \n" + " xori $1, 0x400 \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $2, 1 \n" #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) /* see irqflags.h for inline function */ #elif defined(CONFIG_CPU_MIPSR2) /* see irqflags.h for inline function */ #else " mfc0 $1, $12 \n" - " andi \\flags, 1 \n" + " andi %[flags], 1 \n" " ori $1, 0x1f \n" " xori $1, 0x1f \n" - " or \\flags, $1 \n" - " mtc0 \\flags, $12 \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $12 \n" #endif - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); + : [flags] "=r" (__tmp1) + : "0" (flags) + : "memory"); -notrace void arch_local_irq_restore(unsigned long flags) -{ - unsigned long __tmp1; - -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of branch and call overhead on each - * local_irq_restore() - */ - if (unlikely(!(flags & 0x0400))) - smtc_ipi_replay(); -#endif - 
preempt_disable(); - __asm__ __volatile__( - "arch_local_irq_restore\t%0" - : "=r" (__tmp1) - : "0" (flags) - : "memory"); preempt_enable(); } EXPORT_SYMBOL(arch_local_irq_restore); @@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags) unsigned long __tmp1; preempt_disable(); + __asm__ __volatile__( - "arch_local_irq_restore\t%0" - : "=r" (__tmp1) - : "0" (flags) - : "memory"); + " .set push \n" + " .set noreorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 $1, $2, 1 \n" + " andi %[flags], 0x400 \n" + " ori $1, 0x400 \n" + " xori $1, 0x400 \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) + /* see irqflags.h for inline function */ +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1, $12 \n" + " andi %[flags], 1 \n" + " ori $1, 0x1f \n" + " xori $1, 0x1f \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $12 \n" +#endif + " " __stringify(__irq_disable_hazard) " \n" + " .set pop \n" + : [flags] "=r" (__tmp1) + : "0" (flags) + : "memory"); + preempt_enable(); } EXPORT_SYMBOL(__arch_local_irq_restore); diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c index 91615c2ef0cf..8327698b9937 100644 --- a/arch/mips/lib/r3k_dump_tlb.c +++ b/arch/mips/lib/r3k_dump_tlb.c @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <asm/mipsregs.h> +#include <asm/mmu_context.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> @@ -21,7 +22,7 @@ static void dump_tlb(int first, int last) unsigned int asid; unsigned long entryhi, entrylo0; - asid = read_c0_entryhi() & 0xfc0; + asid = ASID_MASK(read_c0_entryhi()); for (i = first; i <= last; i++) { write_c0_index(i<<8); @@ -35,7 +36,7 @@ static void dump_tlb(int first, int last) /* Unused entries have a virtual address of KSEG0. */ if ((entryhi & 0xffffe000) != 0x80000000 - && (entryhi & 0xfc0) == asid) { + && (ASID_MASK(entryhi) == asid)) { /* * Only print entries in use */ @@ -44,7 +45,7 @@ static void dump_tlb(int first, int last) printk("va=%08lx asid=%08lx" " [pa=%06lx n=%d d=%d v=%d g=%d]", (entryhi & 0xffffe000), - entryhi & 0xfc0, + ASID_MASK(entryhi), entrylo0 & PAGE_MASK, (entrylo0 & (1 << 11)) ? 1 : 0, (entrylo0 & (1 << 10)) ? 1 : 0, diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S index fdbb970f670d..e362dcdc69d1 100644 --- a/arch/mips/lib/strlen_user.S +++ b/arch/mips/lib/strlen_user.S @@ -3,8 +3,9 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle - * Copyright (c) 1999 Silicon Graphics, Inc. + * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2011 MIPS Technologies, Inc. */ #include <asm/asm.h> #include <asm/asm-offsets.h> @@ -28,9 +29,9 @@ LEAF(__strlen_user_asm) FEXPORT(__strlen_user_nocheck_asm) move v0, a0 -1: EX(lb, t0, (v0), .Lfault) +1: EX(lbu, v1, (v0), .Lfault) PTR_ADDIU v0, 1 - bnez t0, 1b + bnez v1, 1b PTR_SUBU v0, a0 jr ra END(__strlen_user_asm) diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index bad539487503..92870b6b53ea 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -3,7 +3,8 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
* - * Copyright (c) 1996, 1999 by Ralf Baechle + * Copyright (C) 1996, 1999 by Ralf Baechle + * Copyright (C) 2011 MIPS Technologies, Inc. */ #include <linux/errno.h> #include <asm/asm.h> @@ -33,26 +34,27 @@ LEAF(__strncpy_from_user_asm) bnez v0, .Lfault FEXPORT(__strncpy_from_user_nocheck_asm) - move v0, zero - move v1, a1 .set noreorder -1: EX(lbu, t0, (v1), .Lfault) + move t0, zero + move v1, a1 +1: EX(lbu, v0, (v1), .Lfault) PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) - beqz t0, 2f - sb t0, (a0) - PTR_ADDIU v0, 1 - .set reorder - PTR_ADDIU a0, 1 - bne v0, a2, 1b -2: PTR_ADDU t0, a1, v0 - xor t0, a1 - bltz t0, .Lfault + beqz v0, 2f + sb v0, (a0) + PTR_ADDIU t0, 1 + bne t0, a2, 1b + PTR_ADDIU a0, 1 +2: PTR_ADDU v0, a1, t0 + xor v0, a1 + bltz v0, .Lfault + nop jr ra # return n + move v0, t0 END(__strncpy_from_user_asm) -.Lfault: li v0, -EFAULT - jr ra +.Lfault: jr ra + li v0, -EFAULT .section __ex_table,"a" PTR 1b, .Lfault diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index beea03c8c0ce..fcacea5e61f1 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -35,7 +35,7 @@ FEXPORT(__strnlen_user_nocheck_asm) PTR_ADDU a1, a0 # stop pointer 1: beq v0, a1, 1f # limit reached? EX(lb, t0, (v0), .Lfault) - PTR_ADDU v0, 1 + PTR_ADDIU v0, 1 bnez t0, 1b 1: PTR_SUBU v0, a0 jr ra diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index afb5a0bcf7a5..f03771900813 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -45,6 +45,7 @@ #include <asm/signal.h> #include <asm/mipsregs.h> #include <asm/fpu_emulator.h> +#include <asm/fpu.h> #include <asm/uaccess.h> #include <asm/branch.h> @@ -81,6 +82,11 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); /* Determine rounding mode from the RM bits of the FCSR */ #define modeindex(v) ((v) & FPU_CSR_RM) +/* microMIPS bitfields */ +#define MM_POOL32A_MINOR_MASK 0x3f +#define MM_POOL32A_MINOR_SHIFT 0x6 +#define MM_MIPS32_COND_FC 0x30 + /* Convert Mips rounding mode (0..3) to IEEE library modes. */ static const unsigned char ieee_rm[4] = { [FPU_CSR_RN] = IEEE754_RN, @@ -110,6 +116,556 @@ static const unsigned int fpucondbit[8] = { }; #endif +/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */ +static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7}; + +/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */ +static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0}; +static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0}; +static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0}; +static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0}; + +/* + * This functions translates a 32-bit microMIPS instruction + * into a 32-bit MIPS32 instruction. Returns 0 on success + * and SIGILL otherwise. 
+ */ +static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr) +{ + union mips_instruction insn = *insn_ptr; + union mips_instruction mips32_insn = insn; + int func, fmt, op; + + switch (insn.mm_i_format.opcode) { + case mm_ldc132_op: + mips32_insn.mm_i_format.opcode = ldc1_op; + mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; + mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; + break; + case mm_lwc132_op: + mips32_insn.mm_i_format.opcode = lwc1_op; + mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; + mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; + break; + case mm_sdc132_op: + mips32_insn.mm_i_format.opcode = sdc1_op; + mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; + mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; + break; + case mm_swc132_op: + mips32_insn.mm_i_format.opcode = swc1_op; + mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; + mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; + break; + case mm_pool32i_op: + /* NOTE: offset is << by 1 if in microMIPS mode. */ + if ((insn.mm_i_format.rt == mm_bc1f_op) || + (insn.mm_i_format.rt == mm_bc1t_op)) { + mips32_insn.fb_format.opcode = cop1_op; + mips32_insn.fb_format.bc = bc_op; + mips32_insn.fb_format.flag = + (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0; + } else + return SIGILL; + break; + case mm_pool32f_op: + switch (insn.mm_fp0_format.func) { + case mm_32f_01_op: + case mm_32f_11_op: + case mm_32f_02_op: + case mm_32f_12_op: + case mm_32f_41_op: + case mm_32f_51_op: + case mm_32f_42_op: + case mm_32f_52_op: + op = insn.mm_fp0_format.func; + if (op == mm_32f_01_op) + func = madd_s_op; + else if (op == mm_32f_11_op) + func = madd_d_op; + else if (op == mm_32f_02_op) + func = nmadd_s_op; + else if (op == mm_32f_12_op) + func = nmadd_d_op; + else if (op == mm_32f_41_op) + func = msub_s_op; + else if (op == mm_32f_51_op) + func = msub_d_op; + else if (op == mm_32f_42_op) + func = nmsub_s_op; + else + func = nmsub_d_op; + mips32_insn.fp6_format.opcode = cop1x_op; + mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr; + mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft; + mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs; + mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd; + mips32_insn.fp6_format.func = func; + break; + case mm_32f_10_op: + func = -1; /* Invalid */ + op = insn.mm_fp5_format.op & 0x7; + if (op == mm_ldxc1_op) + func = ldxc1_op; + else if (op == mm_sdxc1_op) + func = sdxc1_op; + else if (op == mm_lwxc1_op) + func = lwxc1_op; + else if (op == mm_swxc1_op) + func = swxc1_op; + + if (func != -1) { + mips32_insn.r_format.opcode = cop1x_op; + mips32_insn.r_format.rs = + insn.mm_fp5_format.base; + mips32_insn.r_format.rt = + insn.mm_fp5_format.index; + mips32_insn.r_format.rd = 0; + mips32_insn.r_format.re = insn.mm_fp5_format.fd; + mips32_insn.r_format.func = func; + } else + return SIGILL; + break; + case mm_32f_40_op: + op = -1; /* Invalid */ + if (insn.mm_fp2_format.op == mm_fmovt_op) + op = 1; + else if (insn.mm_fp2_format.op == mm_fmovf_op) + op = 0; + if (op != -1) { + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp2_format.fmt]; + mips32_insn.fp0_format.ft = + (insn.mm_fp2_format.cc<<2) + op; + mips32_insn.fp0_format.fs = + insn.mm_fp2_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp2_format.fd; + mips32_insn.fp0_format.func = fmovc_op; + } else + return SIGILL; + break; + case mm_32f_60_op: + func = -1; /* Invalid */ + if (insn.mm_fp0_format.op == mm_fadd_op) + func = fadd_op; + else if (insn.mm_fp0_format.op == mm_fsub_op) + func = fsub_op; 
+ else if (insn.mm_fp0_format.op == mm_fmul_op) + func = fmul_op; + else if (insn.mm_fp0_format.op == mm_fdiv_op) + func = fdiv_op; + if (func != -1) { + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp0_format.fmt]; + mips32_insn.fp0_format.ft = + insn.mm_fp0_format.ft; + mips32_insn.fp0_format.fs = + insn.mm_fp0_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp0_format.fd; + mips32_insn.fp0_format.func = func; + } else + return SIGILL; + break; + case mm_32f_70_op: + func = -1; /* Invalid */ + if (insn.mm_fp0_format.op == mm_fmovn_op) + func = fmovn_op; + else if (insn.mm_fp0_format.op == mm_fmovz_op) + func = fmovz_op; + if (func != -1) { + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp0_format.fmt]; + mips32_insn.fp0_format.ft = + insn.mm_fp0_format.ft; + mips32_insn.fp0_format.fs = + insn.mm_fp0_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp0_format.fd; + mips32_insn.fp0_format.func = func; + } else + return SIGILL; + break; + case mm_32f_73_op: /* POOL32FXF */ + switch (insn.mm_fp1_format.op) { + case mm_movf0_op: + case mm_movf1_op: + case mm_movt0_op: + case mm_movt1_op: + if ((insn.mm_fp1_format.op & 0x7f) == + mm_movf0_op) + op = 0; + else + op = 1; + mips32_insn.r_format.opcode = spec_op; + mips32_insn.r_format.rs = insn.mm_fp4_format.fs; + mips32_insn.r_format.rt = + (insn.mm_fp4_format.cc << 2) + op; + mips32_insn.r_format.rd = insn.mm_fp4_format.rt; + mips32_insn.r_format.re = 0; + mips32_insn.r_format.func = movc_op; + break; + case mm_fcvtd0_op: + case mm_fcvtd1_op: + case mm_fcvts0_op: + case mm_fcvts1_op: + if ((insn.mm_fp1_format.op & 0x7f) == + mm_fcvtd0_op) { + func = fcvtd_op; + fmt = swl_format[insn.mm_fp3_format.fmt]; + } else { + func = fcvts_op; + fmt = dwl_format[insn.mm_fp3_format.fmt]; + } + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = fmt; + mips32_insn.fp0_format.ft = 0; + mips32_insn.fp0_format.fs = + insn.mm_fp3_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp3_format.rt; + mips32_insn.fp0_format.func = func; + break; + case mm_fmov0_op: + case mm_fmov1_op: + case mm_fabs0_op: + case mm_fabs1_op: + case mm_fneg0_op: + case mm_fneg1_op: + if ((insn.mm_fp1_format.op & 0x7f) == + mm_fmov0_op) + func = fmov_op; + else if ((insn.mm_fp1_format.op & 0x7f) == + mm_fabs0_op) + func = fabs_op; + else + func = fneg_op; + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp3_format.fmt]; + mips32_insn.fp0_format.ft = 0; + mips32_insn.fp0_format.fs = + insn.mm_fp3_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp3_format.rt; + mips32_insn.fp0_format.func = func; + break; + case mm_ffloorl_op: + case mm_ffloorw_op: + case mm_fceill_op: + case mm_fceilw_op: + case mm_ftruncl_op: + case mm_ftruncw_op: + case mm_froundl_op: + case mm_froundw_op: + case mm_fcvtl_op: + case mm_fcvtw_op: + if (insn.mm_fp1_format.op == mm_ffloorl_op) + func = ffloorl_op; + else if (insn.mm_fp1_format.op == mm_ffloorw_op) + func = ffloor_op; + else if (insn.mm_fp1_format.op == mm_fceill_op) + func = fceill_op; + else if (insn.mm_fp1_format.op == mm_fceilw_op) + func = fceil_op; + else if (insn.mm_fp1_format.op == mm_ftruncl_op) + func = ftruncl_op; + else if (insn.mm_fp1_format.op == mm_ftruncw_op) + func = ftrunc_op; + else if (insn.mm_fp1_format.op == mm_froundl_op) + func = froundl_op; + else if (insn.mm_fp1_format.op == mm_froundw_op) + func = fround_op; + else if (insn.mm_fp1_format.op == mm_fcvtl_op) + 
func = fcvtl_op; + else + func = fcvtw_op; + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sd_format[insn.mm_fp1_format.fmt]; + mips32_insn.fp0_format.ft = 0; + mips32_insn.fp0_format.fs = + insn.mm_fp1_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp1_format.rt; + mips32_insn.fp0_format.func = func; + break; + case mm_frsqrt_op: + case mm_fsqrt_op: + case mm_frecip_op: + if (insn.mm_fp1_format.op == mm_frsqrt_op) + func = frsqrt_op; + else if (insn.mm_fp1_format.op == mm_fsqrt_op) + func = fsqrt_op; + else + func = frecip_op; + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp1_format.fmt]; + mips32_insn.fp0_format.ft = 0; + mips32_insn.fp0_format.fs = + insn.mm_fp1_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp1_format.rt; + mips32_insn.fp0_format.func = func; + break; + case mm_mfc1_op: + case mm_mtc1_op: + case mm_cfc1_op: + case mm_ctc1_op: + if (insn.mm_fp1_format.op == mm_mfc1_op) + op = mfc_op; + else if (insn.mm_fp1_format.op == mm_mtc1_op) + op = mtc_op; + else if (insn.mm_fp1_format.op == mm_cfc1_op) + op = cfc_op; + else + op = ctc_op; + mips32_insn.fp1_format.opcode = cop1_op; + mips32_insn.fp1_format.op = op; + mips32_insn.fp1_format.rt = + insn.mm_fp1_format.rt; + mips32_insn.fp1_format.fs = + insn.mm_fp1_format.fs; + mips32_insn.fp1_format.fd = 0; + mips32_insn.fp1_format.func = 0; + break; + default: + return SIGILL; + break; + } + break; + case mm_32f_74_op: /* c.cond.fmt */ + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp4_format.fmt]; + mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt; + mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs; + mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2; + mips32_insn.fp0_format.func = + insn.mm_fp4_format.cond | MM_MIPS32_COND_FC; + break; + default: + return SIGILL; + break; + } + break; + default: + return SIGILL; + break; + } + + *insn_ptr = mips32_insn; + return 0; +} + +int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, + unsigned long *contpc) +{ + union mips_instruction insn = (union mips_instruction)dec_insn.insn; + int bc_false = 0; + unsigned int fcr31; + unsigned int bit; + + switch (insn.mm_i_format.opcode) { + case mm_pool32a_op: + if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == + mm_pool32axf_op) { + switch (insn.mm_i_format.simmediate >> + MM_POOL32A_MINOR_SHIFT) { + case mm_jalr_op: + case mm_jalrhb_op: + case mm_jalrs_op: + case mm_jalrshb_op: + if (insn.mm_i_format.rt != 0) /* Not mm_jr */ + regs->regs[insn.mm_i_format.rt] = + regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + *contpc = regs->regs[insn.mm_i_format.rs]; + return 1; + break; + } + } + break; + case mm_pool32i_op: + switch (insn.mm_i_format.rt) { + case mm_bltzals_op: + case mm_bltzal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case mm_bltz_op: + if ((long)regs->regs[insn.mm_i_format.rs] < 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_bgezals_op: + case mm_bgezal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case mm_bgez_op: + if ((long)regs->regs[insn.mm_i_format.rs] >= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + 
dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_blez_op: + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_bgtz_op: + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_bc2f_op: + case mm_bc1f_op: + bc_false = 1; + /* Fall through */ + case mm_bc2t_op: + case mm_bc1t_op: + preempt_disable(); + if (is_fpu_owner()) + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + if (bc_false) + fcr31 = ~fcr31; + + bit = (insn.mm_i_format.rs >> 2); + bit += (bit != 0); + bit += 23; + if (fcr31 & (1 << bit)) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + break; + } + break; + case mm_pool16c_op: + switch (insn.mm_i_format.rt) { + case mm_jalr16_op: + case mm_jalrs16_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + /* Fall through */ + case mm_jr16_op: + *contpc = regs->regs[insn.mm_i_format.rs]; + return 1; + break; + } + break; + case mm_beqz16_op: + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_b1_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + break; + case mm_bnez16_op: + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_b1_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + break; + case mm_b16_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc + + (insn.mm_b0_format.simmediate << 1); + return 1; + break; + case mm_beq32_op: + if (regs->regs[insn.mm_i_format.rs] == + regs->regs[insn.mm_i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_bne32_op: + if (regs->regs[insn.mm_i_format.rs] != + regs->regs[insn.mm_i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + break; + case mm_jalx32_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 28; + *contpc <<= 28; + *contpc |= (insn.j_format.target << 2); + return 1; + break; + case mm_jals32_op: + case mm_jal32_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + /* Fall through */ + case mm_j32_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 27; + *contpc <<= 27; + *contpc |= (insn.j_format.target << 1); + set_isa16_mode(*contpc); + return 1; + break; + } + return 0; +} /* * Redundant with logic already in kernel/branch.c, @@ -117,53 +673,177 @@ static const unsigned int fpucondbit[8] = { * a single subroutine should be used across both * modules. 
*/ -static int isBranchInstr(mips_instruction * i) +static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, + unsigned long *contpc) { - switch (MIPSInst_OPCODE(*i)) { + union mips_instruction insn = (union mips_instruction)dec_insn.insn; + unsigned int fcr31; + unsigned int bit = 0; + + switch (insn.i_format.opcode) { case spec_op: - switch (MIPSInst_FUNC(*i)) { + switch (insn.r_format.func) { case jalr_op: + regs->regs[insn.r_format.rd] = + regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ case jr_op: + *contpc = regs->regs[insn.r_format.rs]; return 1; + break; } break; - case bcond_op: - switch (MIPSInst_RT(*i)) { + switch (insn.i_format.rt) { + case bltzal_op: + case bltzall_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ case bltz_op: - case bgez_op: case bltzl_op: - case bgezl_op: - case bltzal_op: + if ((long)regs->regs[insn.i_format.rs] < 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; case bgezal_op: - case bltzall_op: case bgezall_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case bgez_op: + case bgezl_op: + if ((long)regs->regs[insn.i_format.rs] >= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; return 1; + break; } break; - - case j_op: - case jal_op: case jalx_op: + set_isa16_mode(bit); + case jal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case j_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 28; + *contpc <<= 28; + *contpc |= (insn.j_format.target << 2); + /* Set microMIPS mode bit: XOR for jalx. 
*/ + *contpc ^= bit; + return 1; + break; case beq_op: - case bne_op: - case blez_op: - case bgtz_op: case beql_op: + if (regs->regs[insn.i_format.rs] == + regs->regs[insn.i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case bne_op: case bnel_op: + if (regs->regs[insn.i_format.rs] != + regs->regs[insn.i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case blez_op: case blezl_op: + if ((long)regs->regs[insn.i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case bgtz_op: case bgtzl_op: + if ((long)regs->regs[insn.i_format.rs] > 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; return 1; - + break; case cop0_op: case cop1_op: case cop2_op: case cop1x_op: - if (MIPSInst_RS(*i) == bc_op) - return 1; + if (insn.i_format.rs == bc_op) { + preempt_disable(); + if (is_fpu_owner()) + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + bit = (insn.i_format.rt >> 2); + bit += (bit != 0); + bit += 23; + switch (insn.i_format.rt & 3) { + case 0: /* bc1f */ + case 2: /* bc1fl */ + if (~fcr31 & (1 << bit)) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case 1: /* bc1t */ + case 3: /* bc1tl */ + if (fcr31 & (1 << bit)) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + } + } break; } - return 0; } @@ -210,26 +890,23 @@ static inline int cop1_64bit(struct pt_regs *xcp) */ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, - void *__user *fault_addr) + struct mm_decoded_insn dec_insn, void *__user *fault_addr) { mips_instruction ir; - unsigned long emulpc, contpc; + unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc; unsigned int cond; - - if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGBUS; - } - if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGSEGV; - } + int pc_inc; /* XXX NEC Vr54xx bug workaround */ - if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir)) - xcp->cp0_cause &= ~CAUSEF_BD; + if (xcp->cp0_cause & CAUSEF_BD) { + if (dec_insn.micro_mips_mode) { + if (!mm_isBranchInstr(xcp, dec_insn, &contpc)) + xcp->cp0_cause &= ~CAUSEF_BD; + } else { + if (!isBranchInstr(xcp, dec_insn, &contpc)) + xcp->cp0_cause &= ~CAUSEF_BD; + } + } if (xcp->cp0_cause & CAUSEF_BD) { /* @@ -244,32 +921,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * Linux MIPS branch emulator operates on context, updating the * cp0_epc. 
*/ - emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */ + ir = dec_insn.next_insn; /* process delay slot instr */ + pc_inc = dec_insn.next_pc_inc; + } else { + ir = dec_insn.insn; /* process current instr */ + pc_inc = dec_insn.pc_inc; + } - if (__compute_return_epc(xcp) < 0) { -#ifdef CP1DBG - printk("failed to emulate branch at %p\n", - (void *) (xcp->cp0_epc)); -#endif + /* + * Since microMIPS FPU instructios are a subset of MIPS32 FPU + * instructions, we want to convert microMIPS FPU instructions + * into MIPS32 instructions so that we could reuse all of the + * FPU emulation code. + * + * NOTE: We cannot do this for branch instructions since they + * are not a subset. Example: Cannot emulate a 16-bit + * aligned target address with a MIPS32 instruction. + */ + if (dec_insn.micro_mips_mode) { + /* + * If next instruction is a 16-bit instruction, then it + * it cannot be a FPU instruction. This could happen + * since we can be called for non-FPU instructions. + */ + if ((pc_inc == 2) || + (microMIPS32_to_MIPS32((union mips_instruction *)&ir) + == SIGILL)) return SIGILL; - } - if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)emulpc; - return SIGBUS; - } - if (__get_user(ir, (mips_instruction __user *) emulpc)) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)emulpc; - return SIGSEGV; - } - /* __compute_return_epc() will have updated cp0_epc */ - contpc = xcp->cp0_epc; - /* In order not to confuse ptrace() et al, tweak context */ - xcp->cp0_epc = emulpc - 4; - } else { - emulpc = xcp->cp0_epc; - contpc = xcp->cp0_epc + 4; } emul: @@ -474,22 +1152,35 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, /* branch taken: emulate dslot * instruction */ - xcp->cp0_epc += 4; - contpc = (xcp->cp0_epc + - (MIPSInst_SIMM(ir) << 2)); - - if (!access_ok(VERIFY_READ, xcp->cp0_epc, - sizeof(mips_instruction))) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGBUS; - } - if (__get_user(ir, - (mips_instruction __user *) xcp->cp0_epc)) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGSEGV; - } + xcp->cp0_epc += dec_insn.pc_inc; + + contpc = MIPSInst_SIMM(ir); + ir = dec_insn.next_insn; + if (dec_insn.micro_mips_mode) { + contpc = (xcp->cp0_epc + (contpc << 1)); + + /* If 16-bit instruction, not FPU. */ + if ((dec_insn.next_pc_inc == 2) || + (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) { + + /* + * Since this instruction will + * be put on the stack with + * 32-bit words, get around + * this problem by putting a + * NOP16 as the second one. + */ + if (dec_insn.next_pc_inc == 2) + ir = (ir & (~0xffff)) | MM_NOP16; + + /* + * Single step the non-CP1 + * instruction in the dslot. 
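
/*
 * A minimal sketch (not part of the patch text) of what the translation
 * described in the comment above amounts to for a simple case, modelled
 * on the mm_lwc132_op handling in microMIPS32_to_MIPS32(): only the
 * major opcode and the swapped rs/rt fields differ from the classic
 * LWC1 encoding, so the word can be rewritten in place and handed to
 * the existing MIPS32 FPU decoder. The variable names are illustrative.
 */
	union mips_instruction micromips;	/* a fetched mm_lwc132_op word (illustrative) */
	union mips_instruction mips32 = micromips;

	mips32.mm_i_format.opcode = lwc1_op;
	mips32.mm_i_format.rt = micromips.mm_i_format.rs;	/* register fields are swapped */
	mips32.mm_i_format.rs = micromips.mm_i_format.rt;
	/* the 16-bit immediate field is reused unchanged */
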
+ */ + return mips_dsemul(xcp, ir, contpc); + } + } else + contpc = (xcp->cp0_epc + (contpc << 2)); switch (MIPSInst_OPCODE(ir)) { case lwc1_op: @@ -525,8 +1216,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * branch likely nullifies * dslot if not taken */ - xcp->cp0_epc += 4; - contpc += 4; + xcp->cp0_epc += dec_insn.pc_inc; + contpc += dec_insn.pc_inc; /* * else continue & execute * dslot as normal insn @@ -1313,25 +2004,75 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu, void *__user *fault_addr) { unsigned long oldepc, prevepc; - mips_instruction insn; + struct mm_decoded_insn dec_insn; + u16 instr[4]; + u16 *instr_ptr; int sig = 0; oldepc = xcp->cp0_epc; do { prevepc = xcp->cp0_epc; - if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGBUS; - } - if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGSEGV; + if (get_isa16_mode(prevepc) && cpu_has_mmips) { + /* + * Get next 2 microMIPS instructions and convert them + * into 32-bit instructions. + */ + if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) || + (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) || + (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) || + (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) { + MIPS_FPU_EMU_INC_STATS(errors); + return SIGBUS; + } + instr_ptr = instr; + + /* Get first instruction. */ + if (mm_insn_16bit(*instr_ptr)) { + /* Duplicate the half-word. */ + dec_insn.insn = (*instr_ptr << 16) | + (*instr_ptr); + /* 16-bit instruction. */ + dec_insn.pc_inc = 2; + instr_ptr += 1; + } else { + dec_insn.insn = (*instr_ptr << 16) | + *(instr_ptr+1); + /* 32-bit instruction. */ + dec_insn.pc_inc = 4; + instr_ptr += 2; + } + /* Get second instruction. */ + if (mm_insn_16bit(*instr_ptr)) { + /* Duplicate the half-word. */ + dec_insn.next_insn = (*instr_ptr << 16) | + (*instr_ptr); + /* 16-bit instruction. */ + dec_insn.next_pc_inc = 2; + } else { + dec_insn.next_insn = (*instr_ptr << 16) | + *(instr_ptr+1); + /* 32-bit instruction. 
*/ + dec_insn.next_pc_inc = 4; + } + dec_insn.micro_mips_mode = 1; + } else { + if ((get_user(dec_insn.insn, + (mips_instruction __user *) xcp->cp0_epc)) || + (get_user(dec_insn.next_insn, + (mips_instruction __user *)(xcp->cp0_epc+4)))) { + MIPS_FPU_EMU_INC_STATS(errors); + return SIGBUS; + } + dec_insn.pc_inc = 4; + dec_insn.next_pc_inc = 4; + dec_insn.micro_mips_mode = 0; } - if (insn == 0) - xcp->cp0_epc += 4; /* skip nops */ + + if ((dec_insn.insn == 0) || + ((dec_insn.pc_inc == 2) && + ((dec_insn.insn & 0xffff) == MM_NOP16))) + xcp->cp0_epc += dec_insn.pc_inc; /* Skip NOPs */ else { /* * The 'ieee754_csr' is an alias of @@ -1341,7 +2082,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, */ /* convert to ieee library modes */ ieee754_csr.rm = ieee_rm[ieee754_csr.rm]; - sig = cop1Emulate(xcp, ctx, fault_addr); + sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr); /* revert to mips rounding mode */ ieee754_csr.rm = mips_rm[ieee754_csr.rm]; } diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index 384a3b0091ea..7ea622ab8dad 100644 --- a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c @@ -55,7 +55,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) struct emuframe __user *fr; int err; - if (ir == 0) { /* a nop is easy */ + if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) || + (ir == 0)) { + /* NOP is easy */ regs->cp0_epc = cpc; regs->cp0_cause &= ~CAUSEF_BD; return 0; @@ -91,8 +93,16 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe)))) return SIGBUS; - err = __put_user(ir, &fr->emul); - err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst); + if (get_isa16_mode(regs->cp0_epc)) { + err = __put_user(ir >> 16, (u16 __user *)(&fr->emul)); + err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2)); + err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst)); + err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2)); + } else { + err = __put_user(ir, &fr->emul); + err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst); + } + err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie); err |= __put_user(cpc, &fr->epc); @@ -101,7 +111,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) return SIGBUS; } - regs->cp0_epc = (unsigned long) &fr->emul; + regs->cp0_epc = ((unsigned long) &fr->emul) | + get_isa16_mode(regs->cp0_epc); flush_cache_sigtramp((unsigned long)&fr->badinst); @@ -114,9 +125,10 @@ int do_dsemulret(struct pt_regs *xcp) unsigned long epc; u32 insn, cookie; int err = 0; + u16 instr[2]; fr = (struct emuframe __user *) - (xcp->cp0_epc - sizeof(mips_instruction)); + (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction)); /* * If we can't even access the area, something is very wrong, but we'll @@ -131,7 +143,13 @@ int do_dsemulret(struct pt_regs *xcp) * - Is the instruction pointed to by the EPC an BREAK_MATH? * - Is the following memory word the BD_COOKIE? 
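
/*
 * A minimal sketch (not part of the patch text) of the halfword
 * convention used by the hunks above: microMIPS text is read from and
 * written to user space in 16-bit units, and a 32-bit opcode travels as
 * (high halfword << 16) | low halfword, while a 16-bit opcode is simply
 * duplicated into both halves before being handed to the decoder.
 * The w0/w1/insn32 names are illustrative only.
 */
	u16 w0, w1;		/* consecutive halfwords fetched with get_user() */
	u32 insn32;

	if (mm_insn_16bit(w0))
		insn32 = ((u32)w0 << 16) | w0;	/* 16-bit opcode: duplicate it   */
	else
		insn32 = ((u32)w0 << 16) | w1;	/* 32-bit opcode: high then low  */

	/* The store side splits it back up, as in the mips_dsemul() hunk:     */
	/*   put_user(insn32 >> 16, p); put_user(insn32 & 0xffff, p + 1);      */
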
*/ - err = __get_user(insn, &fr->badinst); + if (get_isa16_mode(xcp->cp0_epc)) { + err = __get_user(instr[0], (u16 __user *)(&fr->badinst)); + err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2)); + insn = (instr[0] << 16) | instr[1]; + } else { + err = __get_user(insn, &fr->badinst); + } err |= __get_user(cookie, &fr->cookie); if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) { diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 1dcec30ad1c4..e87aae1f2e80 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -4,7 +4,7 @@ obj-y += cache.o dma-default.o extable.o fault.o \ gup.o init.o mmap.o page.o page-funcs.o \ - tlbex.o tlbex-fault.o uasm.o + tlbex.o tlbex-fault.o uasm-mips.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o @@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o + +obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 2078915eacb9..21813beec7a5 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -33,6 +33,7 @@ #include <asm/war.h> #include <asm/cacheflush.h> /* for run_uncached() */ #include <asm/traps.h> +#include <asm/dma-coherence.h> /* * Special Variant of smp_call_function for use by cache functions: @@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; } -static void (* r4k_blast_dcache)(void); +void (* r4k_blast_dcache)(void); +EXPORT_SYMBOL(r4k_blast_dcache); static void __cpuinit r4k_blast_dcache_setup(void) { @@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void) r4k_blast_icache_page_indexed = blast_icache64_page_indexed; } -static void (* r4k_blast_icache)(void); +void (* r4k_blast_icache)(void); +EXPORT_SYMBOL(r4k_blast_icache); static void __cpuinit r4k_blast_icache_setup(void) { @@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void) } } -#if defined(CONFIG_DMA_NONCOHERENT) - -static int __cpuinitdata coherentio; - -static int __init setcoherentio(char *str) -{ - coherentio = 1; - - return 0; -} - -early_param("coherentio", setcoherentio); -#endif - static void __cpuinit r4k_cache_error_setup(void) { extern char __weak except_vec2_generic; @@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void) build_clear_page(); build_copy_page(); -#if !defined(CONFIG_MIPS_CMP) + + /* + * We want to run CMP kernels on core with and without coherent + * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether + * or not to flush caches. + */ local_r4k___flush_cache_all(NULL); -#endif + coherency_setup(); board_cache_error_setup = r4k_cache_error_setup; } diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 07cec4407b0c..5aeb3eb0b72f 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -48,6 +48,7 @@ void (*flush_icache_all)(void); EXPORT_SYMBOL_GPL(local_flush_data_cache_page); EXPORT_SYMBOL(flush_data_cache_page); +EXPORT_SYMBOL(flush_icache_all); #ifdef CONFIG_DMA_NONCOHERENT diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index f9ef83829a52..caf92ecb37d6 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -22,6 +22,26 @@ #include <dma-coherence.h> +int coherentio = 0; /* User defined DMA coherency from command line. 
*/ +EXPORT_SYMBOL_GPL(coherentio); +int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */ + +static int __init setcoherentio(char *str) +{ + coherentio = 1; + pr_info("Hardware DMA cache coherency (command line)\n"); + return 0; +} +early_param("coherentio", setcoherentio); + +static int __init setnocoherentio(char *str) +{ + coherentio = 0; + pr_info("Software DMA cache coherency (command line)\n"); + return 0; +} +early_param("nocoherentio", setnocoherentio); + static inline struct page *dma_addr_to_page(struct device *dev, dma_addr_t dma_addr) { @@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, if (!plat_device_is_coherent(dev)) { dma_cache_wback_inv((unsigned long) ret, size); - ret = UNCAC_ADDR(ret); + if (!hw_coherentio) + ret = UNCAC_ADDR(ret); } } @@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); - if (!plat_device_is_coherent(dev)) + if (!plat_device_is_coherent(dev) && !hw_coherentio) addr = CAC_ADDR(addr); free_pages(addr, get_order(size)); diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index a29fba55b53e..4eb8dcfaf1ce 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void) struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; + static atomic_t run_once = ATOMIC_INIT(0); + + if (atomic_xchg(&run_once, 1)) { + return; + } memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void) struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; + static atomic_t run_once = ATOMIC_INIT(0); + + if (atomic_xchg(&run_once, 1)) { + return; + } memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index a63d1ed0827f..4a13c150f31b 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -51,7 +51,7 @@ void local_flush_tlb_all(void) #endif local_irq_save(flags); - old_ctx = read_c0_entryhi() & ASID_MASK; + old_ctx = ASID_MASK(read_c0_entryhi()); write_c0_entrylo0(0); entry = r3k_have_wired_reg ? 
read_c0_wired() : 8; for (; entry < current_cpu_data.tlbsize; entry++) { @@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", - cpu_context(cpu, mm) & ASID_MASK, start, end); + ASID_MASK(cpu_context(cpu, mm)), start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= current_cpu_data.tlbsize) { - int oldpid = read_c0_entryhi() & ASID_MASK; - int newpid = cpu_context(cpu, mm) & ASID_MASK; + int oldpid = ASID_MASK(read_c0_entryhi()); + int newpid = ASID_MASK(cpu_context(cpu, mm)); start &= PAGE_MASK; end += PAGE_SIZE - 1; @@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) #ifdef DEBUG_TLB printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); #endif - newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; + newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm)); page &= PAGE_MASK; local_irq_save(flags); - oldpid = read_c0_entryhi() & ASID_MASK; + oldpid = ASID_MASK(read_c0_entryhi()); write_c0_entryhi(page | newpid); BARRIER; tlb_probe(); @@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) if (current->active_mm != vma->vm_mm) return; - pid = read_c0_entryhi() & ASID_MASK; + pid = ASID_MASK(read_c0_entryhi()); #ifdef DEBUG_TLB - if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { + if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) { printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", (cpu_context(cpu, vma->vm_mm)), pid); } @@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, local_irq_save(flags); /* Save old context and create impossible VPN2 value */ - old_ctx = read_c0_entryhi() & ASID_MASK; + old_ctx = ASID_MASK(read_c0_entryhi()); old_pagemask = read_c0_pagemask(); w = read_c0_wired(); write_c0_wired(w + 1); @@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, #endif local_irq_save(flags); - old_ctx = read_c0_entryhi() & ASID_MASK; + old_ctx = ASID_MASK(read_c0_entryhi()); write_c0_entrylo0(entrylo0); write_c0_entryhi(entryhi); write_c0_index(wired); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 493131c81a29..09653b290d53 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -13,6 +13,7 @@ #include <linux/smp.h> #include <linux/mm.h> #include <linux/hugetlb.h> +#include <linux/module.h> #include <asm/cpu.h> #include <asm/bootinfo.h> @@ -94,6 +95,7 @@ void local_flush_tlb_all(void) FLUSH_ITLB; EXIT_CRITICAL(flags); } +EXPORT_SYMBOL(local_flush_tlb_all); /* All entries common to a mm share an asid. To effectively flush these entries, we just bump the asid. 
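
/*
 * A minimal sketch (not part of the patch text) of the pattern the TLB
 * hunks above switch to: the ASID is extracted through a function-like
 * ASID_MASK() macro rather than an open-coded constant, so the mask can
 * differ by CPU family (this series passes 0xfc0 for R3000-class and
 * 0xff for R4000-class parts to setup_asid() further down). The
 * definition below is illustrative only, not the real one from
 * mmu_context.h.
 */
#define EXAMPLE_ASID_MASK(entryhi)	((entryhi) & 0xff)	/* R4000-style mask, for illustration */

	pid = EXAMPLE_ASID_MASK(read_c0_entryhi());	/* was: read_c0_entryhi() & ASID_MASK */
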
*/ @@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) ENTER_CRITICAL(flags); - pid = read_c0_entryhi() & ASID_MASK; + pid = ASID_MASK(read_c0_entryhi()); address &= (PAGE_MASK << 1); write_c0_entryhi(address | pid); pgdp = pgd_offset(vma->vm_mm, address); diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index 91c2499f806a..122f9207f49e 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) if (current->active_mm != vma->vm_mm) return; - pid = read_c0_entryhi() & ASID_MASK; + pid = ASID_MASK(read_c0_entryhi()); local_irq_save(flags); address &= PAGE_MASK; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 820e6612d744..4d46d3787576 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -29,6 +29,7 @@ #include <linux/init.h> #include <linux/cache.h> +#include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> #include <asm/war.h> @@ -305,6 +306,78 @@ static struct uasm_reloc relocs[128] __cpuinitdata; static int check_for_high_segbits __cpuinitdata; #endif +static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop, + unsigned int i_const) +{ + unsigned int **p; + + for (p = start; p < stop; p++) { +#ifndef CONFIG_CPU_MICROMIPS + unsigned int *ip; + + ip = *p; + *ip = (*ip & 0xffff0000) | i_const; +#else + unsigned short *ip; + + ip = ((unsigned short *)((unsigned int)*p - 1)); + if ((*ip & 0xf000) == 0x4000) { + *ip &= 0xfff1; + *ip |= (i_const << 1); + } else if ((*ip & 0xf000) == 0x6000) { + *ip &= 0xfff1; + *ip |= ((i_const >> 2) << 1); + } else { + ip++; + *ip = i_const; + } +#endif + local_flush_icache_range((unsigned long)ip, + (unsigned long)ip + sizeof(*ip)); + } +} + +#define asid_insn_fixup(section, const) \ +do { \ + extern unsigned int *__start_ ## section; \ + extern unsigned int *__stop_ ## section; \ + insn_fixup(&__start_ ## section, &__stop_ ## section, const); \ +} while(0) + +/* + * Caller is assumed to flush the caches before the first context switch. + */ +static void __cpuinit setup_asid(unsigned int inc, unsigned int mask, + unsigned int version_mask, + unsigned int first_version) +{ + extern asmlinkage void handle_ri_rdhwr_vivt(void); + unsigned long *vivt_exc; + +#ifdef CONFIG_CPU_MICROMIPS + /* + * Worst case optimised microMIPS addiu instructions support + * only a 3-bit immediate value. + */ + if(inc > 7) + panic("Invalid ASID increment value!"); +#endif + asid_insn_fixup(__asid_inc, inc); + asid_insn_fixup(__asid_mask, mask); + asid_insn_fixup(__asid_version_mask, version_mask); + asid_insn_fixup(__asid_first_version, first_version); + + /* Patch up the 'handle_ri_rdhwr_vivt' handler. 
*/ + vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt; +#ifdef CONFIG_CPU_MICROMIPS + vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1); +#endif + vivt_exc++; + *vivt_exc = (*vivt_exc & ~mask) | mask; + + current_cpu_data.asid_cache = first_version; +} + static int check_for_high_segbits __cpuinitdata; static unsigned int kscratch_used_mask __cpuinitdata; @@ -1458,17 +1531,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; #ifdef CONFIG_MIPS_PGD_C0_CONTEXT -u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned; +u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned; static void __cpuinit build_r4000_setup_pgd(void) { const int a0 = 4; const int a1 = 5; - u32 *p = tlbmiss_handler_setup_pgd; + u32 *p = tlbmiss_handler_setup_pgd_array; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd)); + memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1496,15 +1569,15 @@ static void __cpuinit build_r4000_setup_pgd(void) uasm_i_jr(&p, 31); UASM_i_MTC0(&p, a0, 31, pgd_reg); } - if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd)) - panic("tlbmiss_handler_setup_pgd space exceeded"); + if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)) + panic("tlbmiss_handler_setup_pgd_array space exceeded"); uasm_resolve_relocs(relocs, labels); - pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", - (unsigned int)(p - tlbmiss_handler_setup_pgd)); + pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n", + (unsigned int)(p - tlbmiss_handler_setup_pgd_array)); dump_handler("tlbmiss_handler", - tlbmiss_handler_setup_pgd, - ARRAY_SIZE(tlbmiss_handler_setup_pgd)); + tlbmiss_handler_setup_pgd_array, + ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)); } #endif @@ -2030,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_l_nopage_tlbl(&l, p); build_restore_work_registers(&p); +#ifdef CONFIG_CPU_MICROMIPS + if ((unsigned long)tlb_do_page_fault_0 & 1) { + uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0)); + uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0)); + uasm_i_jr(&p, K0); + } else +#endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); @@ -2077,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void) uasm_l_nopage_tlbs(&l, p); build_restore_work_registers(&p); +#ifdef CONFIG_CPU_MICROMIPS + if ((unsigned long)tlb_do_page_fault_1 & 1) { + uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); + uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); + uasm_i_jr(&p, K0); + } else +#endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -2125,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) uasm_l_nopage_tlbm(&l, p); build_restore_work_registers(&p); +#ifdef CONFIG_CPU_MICROMIPS + if ((unsigned long)tlb_do_page_fault_1 & 1) { + uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); + uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); + uasm_i_jr(&p, K0); + } else +#endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -2162,8 +2256,12 @@ void __cpuinit build_tlb_refill_handler(void) case CPU_TX3922: case CPU_TX3927: 
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT - build_r3000_tlb_refill_handler(); + setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000); + if (cpu_has_local_ebase) + build_r3000_tlb_refill_handler(); if (!run_once) { + if (!cpu_has_local_ebase) + build_r3000_tlb_refill_handler(); build_r3000_tlb_load_handler(); build_r3000_tlb_store_handler(); build_r3000_tlb_modify_handler(); @@ -2184,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void) break; default: +#ifndef CONFIG_MIPS_MT_SMTC + setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000); +#else + setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000); +#endif if (!run_once) { scratch_reg = allocate_kscratch(); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT @@ -2192,9 +2295,12 @@ void __cpuinit build_tlb_refill_handler(void) build_r4000_tlb_load_handler(); build_r4000_tlb_store_handler(); build_r4000_tlb_modify_handler(); + if (!cpu_has_local_ebase) + build_r4000_tlb_refill_handler(); run_once++; } - build_r4000_tlb_refill_handler(); + if (cpu_has_local_ebase) + build_r4000_tlb_refill_handler(); } } @@ -2207,7 +2313,7 @@ void __cpuinit flush_tlb_handlers(void) local_flush_icache_range((unsigned long)handle_tlbm, (unsigned long)handle_tlbm + sizeof(handle_tlbm)); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT - local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, - (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm)); + local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array, + (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm)); #endif } diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c new file mode 100644 index 000000000000..162ee6d62788 --- /dev/null +++ b/arch/mips/mm/uasm-micromips.c @@ -0,0 +1,221 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * A small micro-assembler. It is intentionally kept simple, does only + * support a subset of instructions, and does not try to hide pipeline + * effects like branch delay slots. + * + * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer + * Copyright (C) 2005, 2007 Maciej W. Rozycki + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/init.h> + +#include <asm/inst.h> +#include <asm/elf.h> +#include <asm/bugs.h> +#define UASM_ISA _UASM_ISA_MICROMIPS +#include <asm/uasm.h> + +#define RS_MASK 0x1f +#define RS_SH 16 +#define RT_MASK 0x1f +#define RT_SH 21 +#define SCIMM_MASK 0x3ff +#define SCIMM_SH 16 + +/* This macro sets the non-variable bits of an instruction. */ +#define M(a, b, c, d, e, f) \ + ((a) << OP_SH \ + | (b) << RT_SH \ + | (c) << RS_SH \ + | (d) << RD_SH \ + | (e) << RE_SH \ + | (f) << FUNC_SH) + +/* Define these when we are not the ISA the kernel is being compiled with. 
*/ +#ifndef CONFIG_CPU_MICROMIPS +#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) +#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) +#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) +#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) +#endif + +#include "uasm.c" + +static struct insn insn_table_MM[] __uasminitdata = { + { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD }, + { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, + { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD }, + { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, + { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_beql, 0, 0 }, + { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM }, + { insn_bgezl, 0, 0 }, + { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM }, + { insn_bltzl, 0, 0 }, + { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM }, + { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM }, + { insn_daddu, 0, 0 }, + { insn_daddiu, 0, 0 }, + { insn_dmfc0, 0, 0 }, + { insn_dmtc0, 0, 0 }, + { insn_dsll, 0, 0 }, + { insn_dsll32, 0, 0 }, + { insn_dsra, 0, 0 }, + { insn_dsrl, 0, 0 }, + { insn_dsrl32, 0, 0 }, + { insn_drotr, 0, 0 }, + { insn_drotr32, 0, 0 }, + { insn_dsubu, 0, 0 }, + { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 }, + { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE }, + { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE }, + { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS }, + { insn_ld, 0, 0 }, + { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM }, + { insn_lld, 0, 0 }, + { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM }, + { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, + { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD }, + { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD }, + { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD }, + { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, + { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM }, + { insn_rfe, 0, 0 }, + { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM }, + { insn_scd, 0, 0 }, + { insn_sd, 0, 0 }, + { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, + { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD }, + { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD }, + { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD }, + { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD }, + { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, + { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 }, + { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, + { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, + { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, + { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD }, + { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, + { insn_dins, 0, 0 }, + { insn_dinsm, 0, 0 }, + { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, 
mm_pool32axf_op), SCIMM}, + { insn_bbit0, 0, 0 }, + { insn_bbit1, 0, 0 }, + { insn_lwx, 0, 0 }, + { insn_ldx, 0, 0 }, + { insn_invalid, 0, 0 } +}; + +#undef M + +static inline __uasminit u32 build_bimm(s32 arg) +{ + WARN(arg > 0xffff || arg < -0x10000, + KERN_WARNING "Micro-assembler field overflow\n"); + + WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n"); + + return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff); +} + +static inline __uasminit u32 build_jimm(u32 arg) +{ + + WARN(arg & ~((JIMM_MASK << 2) | 1), + KERN_WARNING "Micro-assembler field overflow\n"); + + return (arg >> 1) & JIMM_MASK; +} + +/* + * The order of opcode arguments is implicitly left to right, + * starting with RS and ending with FUNC or IMM. + */ +static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) +{ + struct insn *ip = NULL; + unsigned int i; + va_list ap; + u32 op; + + for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++) + if (insn_table_MM[i].opcode == opc) { + ip = &insn_table_MM[i]; + break; + } + + if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) + panic("Unsupported Micro-assembler instruction %d", opc); + + op = ip->match; + va_start(ap, opc); + if (ip->fields & RS) { + if (opc == insn_mfc0 || opc == insn_mtc0) + op |= build_rt(va_arg(ap, u32)); + else + op |= build_rs(va_arg(ap, u32)); + } + if (ip->fields & RT) { + if (opc == insn_mfc0 || opc == insn_mtc0) + op |= build_rs(va_arg(ap, u32)); + else + op |= build_rt(va_arg(ap, u32)); + } + if (ip->fields & RD) + op |= build_rd(va_arg(ap, u32)); + if (ip->fields & RE) + op |= build_re(va_arg(ap, u32)); + if (ip->fields & SIMM) + op |= build_simm(va_arg(ap, s32)); + if (ip->fields & UIMM) + op |= build_uimm(va_arg(ap, u32)); + if (ip->fields & BIMM) + op |= build_bimm(va_arg(ap, s32)); + if (ip->fields & JIMM) + op |= build_jimm(va_arg(ap, u32)); + if (ip->fields & FUNC) + op |= build_func(va_arg(ap, u32)); + if (ip->fields & SET) + op |= build_set(va_arg(ap, u32)); + if (ip->fields & SCIMM) + op |= build_scimm(va_arg(ap, u32)); + va_end(ap); + +#ifdef CONFIG_CPU_LITTLE_ENDIAN + **buf = ((op & 0xffff) << 16) | (op >> 16); +#else + **buf = op; +#endif + (*buf)++; +} + +static inline void __uasminit +__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) +{ + long laddr = (long)lab->addr; + long raddr = (long)rel->addr; + + switch (rel->type) { + case R_MIPS_PC16: +#ifdef CONFIG_CPU_LITTLE_ENDIAN + *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16); +#else + *rel->addr |= build_bimm(laddr - (raddr + 4)); +#endif + break; + + default: + panic("Unsupported Micro-assembler relocation %d", + rel->type); + } +} diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c new file mode 100644 index 000000000000..5fcdd8fe3e83 --- /dev/null +++ b/arch/mips/mm/uasm-mips.c @@ -0,0 +1,205 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * A small micro-assembler. It is intentionally kept simple, does only + * support a subset of instructions, and does not try to hide pipeline + * effects like branch delay slots. + * + * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer + * Copyright (C) 2005, 2007 Maciej W. Rozycki + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. 
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/init.h> + +#include <asm/inst.h> +#include <asm/elf.h> +#include <asm/bugs.h> +#define UASM_ISA _UASM_ISA_CLASSIC +#include <asm/uasm.h> + +#define RS_MASK 0x1f +#define RS_SH 21 +#define RT_MASK 0x1f +#define RT_SH 16 +#define SCIMM_MASK 0xfffff +#define SCIMM_SH 6 + +/* This macro sets the non-variable bits of an instruction. */ +#define M(a, b, c, d, e, f) \ + ((a) << OP_SH \ + | (b) << RS_SH \ + | (c) << RT_SH \ + | (d) << RD_SH \ + | (e) << RE_SH \ + | (f) << FUNC_SH) + +/* Define these when we are not the ISA the kernel is being compiled with. */ +#ifdef CONFIG_CPU_MICROMIPS +#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) +#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) +#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) +#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) +#endif + +#include "uasm.c" + +static struct insn insn_table[] __uasminitdata = { + { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, + { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, + { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, + { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, + { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, + { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, + { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, + { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, + { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, + { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, + { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, + { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE }, + { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, + { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, + { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, + { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, + { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, + { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, + { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, + { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE }, + { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, + { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, + { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, + { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, + { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, + { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_lwx, 
M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, + { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, + { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, + { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, + { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, + { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, + { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, + { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, + { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, + { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, + { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, + { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, + { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, + { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, + { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, + { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, + { insn_invalid, 0, 0 } +}; + +#undef M + +static inline __uasminit u32 build_bimm(s32 arg) +{ + WARN(arg > 0x1ffff || arg < -0x20000, + KERN_WARNING "Micro-assembler field overflow\n"); + + WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n"); + + return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); +} + +static inline __uasminit u32 build_jimm(u32 arg) +{ + WARN(arg & ~(JIMM_MASK << 2), + KERN_WARNING "Micro-assembler field overflow\n"); + + return (arg >> 2) & JIMM_MASK; +} + +/* + * The order of opcode arguments is implicitly left to right, + * starting with RS and ending with FUNC or IMM. + */ +static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) 
+{ + struct insn *ip = NULL; + unsigned int i; + va_list ap; + u32 op; + + for (i = 0; insn_table[i].opcode != insn_invalid; i++) + if (insn_table[i].opcode == opc) { + ip = &insn_table[i]; + break; + } + + if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) + panic("Unsupported Micro-assembler instruction %d", opc); + + op = ip->match; + va_start(ap, opc); + if (ip->fields & RS) + op |= build_rs(va_arg(ap, u32)); + if (ip->fields & RT) + op |= build_rt(va_arg(ap, u32)); + if (ip->fields & RD) + op |= build_rd(va_arg(ap, u32)); + if (ip->fields & RE) + op |= build_re(va_arg(ap, u32)); + if (ip->fields & SIMM) + op |= build_simm(va_arg(ap, s32)); + if (ip->fields & UIMM) + op |= build_uimm(va_arg(ap, u32)); + if (ip->fields & BIMM) + op |= build_bimm(va_arg(ap, s32)); + if (ip->fields & JIMM) + op |= build_jimm(va_arg(ap, u32)); + if (ip->fields & FUNC) + op |= build_func(va_arg(ap, u32)); + if (ip->fields & SET) + op |= build_set(va_arg(ap, u32)); + if (ip->fields & SCIMM) + op |= build_scimm(va_arg(ap, u32)); + va_end(ap); + + **buf = op; + (*buf)++; +} + +static inline void __uasminit +__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) +{ + long laddr = (long)lab->addr; + long raddr = (long)rel->addr; + + switch (rel->type) { + case R_MIPS_PC16: + *rel->addr |= build_bimm(laddr - (raddr + 4)); + break; + + default: + panic("Unsupported Micro-assembler relocation %d", + rel->type); + } +} diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 942ff6c2eba2..7eb5e4355d25 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -10,17 +10,9 @@ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005, 2007 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. */ -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/init.h> - -#include <asm/inst.h> -#include <asm/elf.h> -#include <asm/bugs.h> -#include <asm/uasm.h> - enum fields { RS = 0x001, RT = 0x002, @@ -37,10 +29,6 @@ enum fields { #define OP_MASK 0x3f #define OP_SH 26 -#define RS_MASK 0x1f -#define RS_SH 21 -#define RT_MASK 0x1f -#define RT_SH 16 #define RD_MASK 0x1f #define RD_SH 11 #define RE_MASK 0x1f @@ -53,8 +41,6 @@ enum fields { #define FUNC_SH 0 #define SET_MASK 0x7 #define SET_SH 0 -#define SCIMM_MASK 0xfffff -#define SCIMM_SH 6 enum opcode { insn_invalid, @@ -77,85 +63,6 @@ struct insn { enum fields fields; }; -/* This macro sets the non-variable bits of an instruction. 
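For the classic ISA above, build_bimm() and __resolve_relocs() implement the usual MIPS PC-relative rule: the displacement is measured from the instruction that follows the branch (the delay slot) and is encoded in 32-bit words. A quick stand-alone check of that arithmetic, with made-up addresses:

#include <stdint.h>
#include <assert.h>

/* same field construction as build_bimm() above */
static uint32_t pc16_field(int32_t byte_off)
{
        return ((byte_off < 0) ? (1u << 15) : 0) | ((byte_off >> 2) & 0x7fff);
}

int main(void)
{
        uint32_t branch_addr = 0x80000100u;     /* example addresses only */
        uint32_t label_addr  = 0x80000120u;
        int32_t  off = (int32_t)(label_addr - (branch_addr + 4));

        assert(off == 0x1c);                    /* 28 bytes past the delay slot */
        assert(pc16_field(off) == 0x0007);      /* i.e. 7 instruction words */
        assert(pc16_field(-4)  == 0xffff);      /* one word backwards */
        return 0;
}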
*/ -#define M(a, b, c, d, e, f) \ - ((a) << OP_SH \ - | (b) << RS_SH \ - | (c) << RT_SH \ - | (d) << RD_SH \ - | (e) << RE_SH \ - | (f) << FUNC_SH) - -static struct insn insn_table[] __uasminitdata = { - { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, - { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, - { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, - { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, - { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, - { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, - { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, - { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, - { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, - { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, - { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE }, - { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, - { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, - { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, - { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, - { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, - { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, - { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, - { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE }, - { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, - { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, - { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, - { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, - { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, - { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, - { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, - { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, - { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, - { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, - { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, - { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, - { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, - { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | 
RD | RE }, - { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, - { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, - { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, - { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, - { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, - { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, - { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, - { insn_invalid, 0, 0 } -}; - -#undef M - static inline __uasminit u32 build_rs(u32 arg) { WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); @@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg) return arg & IMM_MASK; } -static inline __uasminit u32 build_bimm(s32 arg) -{ - WARN(arg > 0x1ffff || arg < -0x20000, - KERN_WARNING "Micro-assembler field overflow\n"); - - WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n"); - - return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); -} - -static inline __uasminit u32 build_jimm(u32 arg) -{ - WARN(arg & ~(JIMM_MASK << 2), - KERN_WARNING "Micro-assembler field overflow\n"); - - return (arg >> 2) & JIMM_MASK; -} - static inline __uasminit u32 build_scimm(u32 arg) { WARN(arg & ~SCIMM_MASK, @@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg) return arg & SET_MASK; } -/* - * The order of opcode arguments is implicitly left to right, - * starting with RS and ending with FUNC or IMM. - */ -static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) -{ - struct insn *ip = NULL; - unsigned int i; - va_list ap; - u32 op; - - for (i = 0; insn_table[i].opcode != insn_invalid; i++) - if (insn_table[i].opcode == opc) { - ip = &insn_table[i]; - break; - } - - if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) - panic("Unsupported Micro-assembler instruction %d", opc); - - op = ip->match; - va_start(ap, opc); - if (ip->fields & RS) - op |= build_rs(va_arg(ap, u32)); - if (ip->fields & RT) - op |= build_rt(va_arg(ap, u32)); - if (ip->fields & RD) - op |= build_rd(va_arg(ap, u32)); - if (ip->fields & RE) - op |= build_re(va_arg(ap, u32)); - if (ip->fields & SIMM) - op |= build_simm(va_arg(ap, s32)); - if (ip->fields & UIMM) - op |= build_uimm(va_arg(ap, u32)); - if (ip->fields & BIMM) - op |= build_bimm(va_arg(ap, s32)); - if (ip->fields & JIMM) - op |= build_jimm(va_arg(ap, u32)); - if (ip->fields & FUNC) - op |= build_func(va_arg(ap, u32)); - if (ip->fields & SET) - op |= build_set(va_arg(ap, u32)); - if (ip->fields & SCIMM) - op |= build_scimm(va_arg(ap, u32)); - va_end(ap); - - **buf = op; - (*buf)++; -} +static void __uasminit build_insn(u32 **buf, enum opcode opc, ...); #define I_u1u2u3(op) \ Ip_u1u2u3(op) \ @@ -445,7 +286,7 @@ I_u3u1u2(_ldx) #ifdef CONFIG_CPU_CAVIUM_OCTEON #include <asm/octeon/octeon.h> -void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, +void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, unsigned int c) { if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) @@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, else build_insn(buf, insn_pref, c, a, b); } -UASM_EXPORT_SYMBOL(uasm_i_pref); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref)); #else I_u2s3u1(_pref) #endif /* Handle labels. 
*/ -void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) +void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid) { (*lab)->addr = addr; (*lab)->lab = lid; (*lab)++; } -UASM_EXPORT_SYMBOL(uasm_build_label); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); -int __uasminit uasm_in_compat_space_p(long addr) +int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) { /* Is this address in 32bit compat space? */ #ifdef CONFIG_64BIT @@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr) return 1; #endif } -UASM_EXPORT_SYMBOL(uasm_in_compat_space_p); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); static int __uasminit uasm_rel_highest(long val) { @@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val) #endif } -int __uasminit uasm_rel_hi(long val) +int __uasminit ISAFUNC(uasm_rel_hi)(long val) { return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; } -UASM_EXPORT_SYMBOL(uasm_rel_hi); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi)); -int __uasminit uasm_rel_lo(long val) +int __uasminit ISAFUNC(uasm_rel_lo)(long val) { return ((val & 0xffff) ^ 0x8000) - 0x8000; } -UASM_EXPORT_SYMBOL(uasm_rel_lo); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo)); -void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) +void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) { - if (!uasm_in_compat_space_p(addr)) { - uasm_i_lui(buf, rs, uasm_rel_highest(addr)); + if (!ISAFUNC(uasm_in_compat_space_p)(addr)) { + ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr)); if (uasm_rel_higher(addr)) - uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr)); - if (uasm_rel_hi(addr)) { - uasm_i_dsll(buf, rs, rs, 16); - uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr)); - uasm_i_dsll(buf, rs, rs, 16); + ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr)); + if (ISAFUNC(uasm_rel_hi(addr))) { + ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16); + ISAFUNC(uasm_i_daddiu)(buf, rs, rs, + ISAFUNC(uasm_rel_hi)(addr)); + ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16); } else - uasm_i_dsll32(buf, rs, rs, 0); + ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0); } else - uasm_i_lui(buf, rs, uasm_rel_hi(addr)); + ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr))); } -UASM_EXPORT_SYMBOL(UASM_i_LA_mostly); +UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly)); -void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr) +void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) { - UASM_i_LA_mostly(buf, rs, addr); - if (uasm_rel_lo(addr)) { - if (!uasm_in_compat_space_p(addr)) - uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr)); + ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr); + if (ISAFUNC(uasm_rel_lo(addr))) { + if (!ISAFUNC(uasm_in_compat_space_p)(addr)) + ISAFUNC(uasm_i_daddiu)(buf, rs, rs, + ISAFUNC(uasm_rel_lo(addr))); else - uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr)); + ISAFUNC(uasm_i_addiu)(buf, rs, rs, + ISAFUNC(uasm_rel_lo(addr))); } } -UASM_EXPORT_SYMBOL(UASM_i_LA); +UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA)); /* Handle relocations. 
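UASM_i_LA_mostly()/UASM_i_LA() above rebuild an arbitrary address with lui/addiu (plus dsll/daddiu chains outside 32-bit compat space). The hi/lo split only works because uasm_rel_hi() rounds the upper half so that the sign-extended lower half added by addiu lands exactly on the target. The check below is plain arithmetic over a few example 32-bit addresses; no kernel interfaces are involved.

#include <stdint.h>
#include <assert.h>

static int rel_hi(long val)     /* mirrors uasm_rel_hi() */
{
        return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int rel_lo(long val)     /* mirrors uasm_rel_lo() */
{
        return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
        uint32_t addrs[] = { 0x80017ff0u, 0x80019234u, 0x00400000u };
        unsigned int i;

        for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
                long a = (int32_t)addrs[i];     /* sign-extended, as the kernel sees it */
                /* lui rs, hi; addiu rs, rs, lo (addiu sign-extends lo) */
                uint32_t rs = ((uint32_t)(rel_hi(a) & 0xffff) << 16)
                              + (uint32_t)rel_lo(a);

                assert(rs == addrs[i]);
        }
        return 0;
}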
*/ void __uasminit -uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) +ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid) { (*rel)->addr = addr; (*rel)->type = R_MIPS_PC16; (*rel)->lab = lid; (*rel)++; } -UASM_EXPORT_SYMBOL(uasm_r_mips_pc16); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16)); static inline void __uasminit -__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) -{ - long laddr = (long)lab->addr; - long raddr = (long)rel->addr; - - switch (rel->type) { - case R_MIPS_PC16: - *rel->addr |= build_bimm(laddr - (raddr + 4)); - break; - - default: - panic("Unsupported Micro-assembler relocation %d", - rel->type); - } -} +__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); void __uasminit -uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) +ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) { struct uasm_label *l; @@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) if (rel->lab == l->lab) __resolve_relocs(rel, l); } -UASM_EXPORT_SYMBOL(uasm_resolve_relocs); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs)); void __uasminit -uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) +ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off) { for (; rel->lab != UASM_LABEL_INVALID; rel++) if (rel->addr >= first && rel->addr < end) rel->addr += off; } -UASM_EXPORT_SYMBOL(uasm_move_relocs); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs)); void __uasminit -uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) +ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off) { for (; lab->lab != UASM_LABEL_INVALID; lab++) if (lab->addr >= first && lab->addr < end) lab->addr += off; } -UASM_EXPORT_SYMBOL(uasm_move_labels); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels)); void __uasminit -uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, +ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, u32 *end, u32 *target) { long off = (long)(target - first); memcpy(target, first, (end - first) * sizeof(u32)); - uasm_move_relocs(rel, first, end, off); - uasm_move_labels(lab, first, end, off); + ISAFUNC(uasm_move_relocs(rel, first, end, off)); + ISAFUNC(uasm_move_labels(lab, first, end, off)); } -UASM_EXPORT_SYMBOL(uasm_copy_handler); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler)); -int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) +int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) { for (; rel->lab != UASM_LABEL_INVALID; rel++) { if (rel->addr == addr @@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) return 0; } -UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay)); /* Convenience functions for labeled branches. 
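Callers drive the routines above in two passes: each branch to a not-yet-emitted address records a (label id, location) pair via uasm_r_mips_pc16(), and uasm_resolve_relocs() later patches every recorded site whose id matches a label. The toy below imitates that bookkeeping with simplified structures; the type and field names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define LABEL_INVALID 0

struct toy_label { uint32_t *addr; int lab; };
struct toy_reloc { uint32_t *addr; int lab; };

static uint32_t pc16(long byte_off)
{
        return ((byte_off < 0) ? (1u << 15) : 0) | ((byte_off >> 2) & 0x7fff);
}

int main(void)
{
        uint32_t code[4] = { 0 };       /* code[0] is a branch with a blank offset */
        struct toy_label labels[] = { { &code[3], 1 }, { NULL, LABEL_INVALID } };
        struct toy_reloc relocs[] = { { &code[0], 1 }, { NULL, LABEL_INVALID } };
        struct toy_reloc *r;
        struct toy_label *l;

        for (r = relocs; r->lab != LABEL_INVALID; r++)
                for (l = labels; l->lab != LABEL_INVALID; l++)
                        if (r->lab == l->lab)
                                *r->addr |= pc16((l->addr - r->addr) * 4 - 4);

        printf("patched offset field: %04x\n",
               (unsigned int)(code[0] & 0xffff));       /* prints 0002 */
        return 0;
}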
*/ void __uasminit -uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bltz(p, reg, 0); + ISAFUNC(uasm_i_bltz)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bltz); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz)); void __uasminit -uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) +ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_b(p, 0); + ISAFUNC(uasm_i_b)(p, 0); } -UASM_EXPORT_SYMBOL(uasm_il_b); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); void __uasminit -uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_beqz(p, reg, 0); + ISAFUNC(uasm_i_beqz)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_beqz); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz)); void __uasminit -uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_beqzl(p, reg, 0); + ISAFUNC(uasm_i_beqzl)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_beqzl); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl)); void __uasminit -uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, +ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1, unsigned int reg2, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bne(p, reg1, reg2, 0); + ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bne); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne)); void __uasminit -uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bnez(p, reg, 0); + ISAFUNC(uasm_i_bnez)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bnez); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez)); void __uasminit -uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bgezl(p, reg, 0); + ISAFUNC(uasm_i_bgezl)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bgezl); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl)); void __uasminit -uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bgez(p, reg, 0); + ISAFUNC(uasm_i_bgez)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bgez); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez)); void __uasminit -uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, +ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bbit0(p, reg, bit, 0); + ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bbit0); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0)); void __uasminit -uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, +ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bbit1(p, reg, bit, 0); + ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bbit1); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1)); diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 
6079ef33b5f0..0388fc8b5613 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile @@ -5,9 +5,8 @@ # Copyright (C) 2008 Wind River Systems, Inc. # written by Ralf Baechle <ralf@linux-mips.org> # -obj-y := malta-amon.o malta-cmdline.o \ - malta-display.o malta-init.o malta-int.o \ - malta-memory.o malta-platform.o \ +obj-y := malta-amon.o malta-display.o malta-init.o \ + malta-int.o malta-memory.o malta-platform.o \ malta-reset.o malta-setup.o malta-time.o obj-$(CONFIG_EARLY_PRINTK) += malta-console.o diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform index 5b548b5a4fcf..2cc72c9b38e3 100644 --- a/arch/mips/mti-malta/Platform +++ b/arch/mips/mti-malta/Platform @@ -3,5 +3,9 @@ # platform-$(CONFIG_MIPS_MALTA) += mti-malta/ cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta -load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 +ifdef CONFIG_KVM_GUEST + load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000 +else + load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 +endif all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c deleted file mode 100644 index 5576a306a145..000000000000 --- a/arch/mips/mti-malta/malta-cmdline.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * Kernel command line creation using the prom monitor (YAMON) argc/argv. - */ -#include <linux/init.h> -#include <linux/string.h> - -#include <asm/bootinfo.h> - -extern int prom_argc; -extern int *_prom_argv; - -/* - * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer. - * This macro take care of sign extension. - */ -#define prom_argv(index) ((char *)(long)_prom_argv[(index)]) - -char * __init prom_getcmdline(void) -{ - return &(arcs_cmdline[0]); -} - - -void __init prom_init_cmdline(void) -{ - char *cp; - int actr; - - actr = 1; /* Always ignore argv[0] */ - - cp = &(arcs_cmdline[0]); - while(actr < prom_argc) { - strcpy(cp, prom_argv(actr)); - cp += strlen(prom_argv(actr)); - *cp++ = ' '; - actr++; - } - if (cp != &(arcs_cmdline[0])) { - /* get rid of trailing space */ - --cp; - *cp = '\0'; - } -} diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c index 9bc58a24e80a..d4f807191ecd 100644 --- a/arch/mips/mti-malta/malta-display.c +++ b/arch/mips/mti-malta/malta-display.c @@ -1,28 +1,20 @@ /* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. * * Display routines for display messages in MIPS boards ascii display. + * + * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard <carstenl@mips.com> + * Steven J. Hill <sjhill@mips.com> */ - #include <linux/compiler.h> #include <linux/timer.h> -#include <asm/io.h> +#include <linux/io.h> + #include <asm/mips-boards/generic.h> -#include <asm/mips-boards/prom.h> extern const char display_string[]; static unsigned int display_count; @@ -36,11 +28,11 @@ void mips_display_message(const char *str) if (unlikely(display == NULL)) display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int)); - for (i = 0; i <= 14; i=i+2) { - if (*str) - __raw_writel(*str++, display + i); - else - __raw_writel(' ', display + i); + for (i = 0; i <= 14; i += 2) { + if (*str) + __raw_writel(*str++, display + i); + else + __raw_writel(' ', display + i); } } diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index c2cbce9e435e..ff8caffd3266 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -1,54 +1,28 @@ /* - * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. - * All rights reserved. - * Authors: Carsten Langgaard <carstenl@mips.com> - * Maciej W. Rozycki <macro@mips.com> - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. * * PROM library initialisation code. + * + * Copyright (C) 1999,2000,2004,2005,2012 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard <carstenl@mips.com> + * Maciej W. Rozycki <macro@mips.com> + * Steven J. 
Hill <sjhill@mips.com> */ #include <linux/init.h> #include <linux/string.h> #include <linux/kernel.h> -#include <asm/bootinfo.h> -#include <asm/gt64120.h> -#include <asm/io.h> #include <asm/cacheflush.h> #include <asm/smp-ops.h> #include <asm/traps.h> - +#include <asm/fw/fw.h> #include <asm/gcmpregs.h> -#include <asm/mips-boards/prom.h> #include <asm/mips-boards/generic.h> -#include <asm/mips-boards/bonito64.h> -#include <asm/mips-boards/msc01_pci.h> - #include <asm/mips-boards/malta.h> -int prom_argc; -int *_prom_argv, *_prom_envp; - -/* - * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer. - * This macro take care of sign extension, if running in 64-bit mode. - */ -#define prom_envp(index) ((char *)(long)_prom_envp[(index)]) - -int init_debug; - static int mips_revision_corid; int mips_revision_sconid; @@ -62,74 +36,6 @@ unsigned long _pcictrl_gt64120; /* MIPS System controller register base */ unsigned long _pcictrl_msc; -char *prom_getenv(char *envname) -{ - /* - * Return a pointer to the given environment variable. - * In 64-bit mode: we're using 64-bit pointers, but all pointers - * in the PROM structures are only 32-bit, so we need some - * workarounds, if we are running in 64-bit mode. - */ - int i, index=0; - - i = strlen(envname); - - while (prom_envp(index)) { - if(strncmp(envname, prom_envp(index), i) == 0) { - return(prom_envp(index+1)); - } - index += 2; - } - - return NULL; -} - -static inline unsigned char str2hexnum(unsigned char c) -{ - if (c >= '0' && c <= '9') - return c - '0'; - if (c >= 'a' && c <= 'f') - return c - 'a' + 10; - return 0; /* foo */ -} - -static inline void str2eaddr(unsigned char *ea, unsigned char *str) -{ - int i; - - for (i = 0; i < 6; i++) { - unsigned char num; - - if((*str == '.') || (*str == ':')) - str++; - num = str2hexnum(*str++) << 4; - num |= (str2hexnum(*str++)); - ea[i] = num; - } -} - -int get_ethernet_addr(char *ethernet_addr) -{ - char *ethaddr_str; - - ethaddr_str = prom_getenv("ethaddr"); - if (!ethaddr_str) { - printk("ethaddr not set in boot prom\n"); - return -1; - } - str2eaddr(ethernet_addr, ethaddr_str); - - if (init_debug > 1) { - int i; - printk("get_ethernet_addr: "); - for (i=0; i<5; i++) - printk("%02x:", (unsigned char)*(ethernet_addr+i)); - printk("%02x\n", *(ethernet_addr+i)); - } - - return 0; -} - #ifdef CONFIG_SERIAL_8250_CONSOLE static void __init console_config(void) { @@ -138,17 +44,23 @@ static void __init console_config(void) char parity = '\0', bits = '\0', flow = '\0'; char *s; - if ((strstr(prom_getcmdline(), "console=")) == NULL) { - s = prom_getenv("modetty0"); + if ((strstr(fw_getcmdline(), "console=")) == NULL) { + s = fw_getenv("modetty0"); if (s) { while (*s >= '0' && *s <= '9') baud = baud*10 + *s++ - '0'; - if (*s == ',') s++; - if (*s) parity = *s++; - if (*s == ',') s++; - if (*s) bits = *s++; - if (*s == ',') s++; - if (*s == 'h') flow = 'r'; + if (*s == ',') + s++; + if (*s) + parity = *s++; + if (*s == ',') + s++; + if (*s) + bits = *s++; + if (*s == ',') + s++; + if (*s == 'h') + flow = 'r'; } if (baud == 0) baud = 38400; @@ -158,8 +70,9 @@ static void __init console_config(void) bits = '8'; if (flow == '\0') flow = 'r'; - sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow); - strcat(prom_getcmdline(), console_string); + sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, + parity, bits, flow); + strcat(fw_getcmdline(), console_string); pr_info("Config serial console:%s\n", console_string); } } @@ -193,10 +106,6 @@ extern struct plat_smp_ops 
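console_config() above only synthesizes a console= argument when the command line lacks one, by parsing YAMON's modetty0 environment value, which is assumed to look like "38400,n,8,hw" (baud, parity, data bits, flow control; only a leading 'h' in the flow field is checked). A user-space rendering of the same parse, with the same fallbacks:

#include <stdio.h>

static void build_console_arg(const char *modetty, char *out, size_t len)
{
        int baud = 0;
        char parity = '\0', bits = '\0', flow = '\0';
        const char *s = modetty;

        while (*s >= '0' && *s <= '9')
                baud = baud * 10 + *s++ - '0';
        if (*s == ',')
                s++;
        if (*s)
                parity = *s++;
        if (*s == ',')
                s++;
        if (*s)
                bits = *s++;
        if (*s == ',')
                s++;
        if (*s == 'h')                  /* "hw" -> RTS/CTS, rendered as 'r' */
                flow = 'r';

        if (baud == 0)
                baud = 38400;
        if (parity != 'n' && parity != 'o' && parity != 'e')
                parity = 'n';
        if (bits != '7' && bits != '8')
                bits = '8';
        if (flow == '\0')
                flow = 'r';
        snprintf(out, len, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
}

int main(void)
{
        char arg[40];

        build_console_arg("115200,n,8,hw", arg, sizeof(arg));
        printf("%s\n", arg);            /* " console=ttyS0,115200n8r" */
        return 0;
}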
msmtc_smp_ops; void __init prom_init(void) { - prom_argc = fw_arg0; - _prom_argv = (int *) fw_arg1; - _prom_envp = (int *) fw_arg2; - mips_display_message("LINUX"); /* @@ -306,7 +215,7 @@ void __init prom_init(void) case MIPS_REVISION_SCON_SOCIT: case MIPS_REVISION_SCON_ROCIT: _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000); - mips_pci_controller: +mips_pci_controller: mb(); MSC_READ(MSC01_PCI_CFG, data); MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT); @@ -348,13 +257,13 @@ void __init prom_init(void) default: /* Unknown system controller */ mips_display_message("SC Error"); - while (1); /* We die here... */ + while (1); /* We die here... */ } board_nmi_handler_setup = mips_nmi_setup; board_ejtag_handler_setup = mips_ejtag_setup; - prom_init_cmdline(); - prom_meminit(); + fw_init_cmdline(); + fw_meminit(); #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index e364af70e6cf..0a1339ac3ec8 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -47,7 +47,6 @@ #include <asm/setup.h> int gcmp_present = -1; -int gic_present; static unsigned long _msc01_biu_base; static unsigned long _gcmp_base; static unsigned int ipi_map[NR_CPUS]; @@ -134,6 +133,9 @@ static void malta_ipi_irqdispatch(void) { int irq; + if (gic_compare_int()) + do_IRQ(MIPS_GIC_IRQ_BASE); + irq = gic_get_int(); if (irq < 0) return; /* interrupt has already been cleared */ diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index f3d43aa023a9..1f73d63e92a7 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c @@ -1,73 +1,45 @@ /* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. * * PROM library functions for acquiring/using memory descriptors given to * us from the YAMON. + * + * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard <carstenl@mips.com> + * Steven J. 
Hill <sjhill@mips.com> */ #include <linux/init.h> -#include <linux/mm.h> #include <linux/bootmem.h> -#include <linux/pfn.h> #include <linux/string.h> #include <asm/bootinfo.h> -#include <asm/page.h> #include <asm/sections.h> +#include <asm/fw/fw.h> -#include <asm/mips-boards/prom.h> - -/*#define DEBUG*/ - -enum yamon_memtypes { - yamon_dontuse, - yamon_prom, - yamon_free, -}; -static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS]; - -#ifdef DEBUG -static char *mtypes[3] = { - "Dont use memory", - "YAMON PROM memory", - "Free memory", -}; -#endif +static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS]; /* determined physical memory size, not overridden by command line args */ unsigned long physical_memsize = 0L; -static struct prom_pmemblock * __init prom_getmdesc(void) +fw_memblock_t * __init fw_getmdesc(void) { - char *memsize_str; + char *memsize_str, *ptr; unsigned int memsize; - char *ptr; static char cmdline[COMMAND_LINE_SIZE] __initdata; + long val; + int tmp; /* otherwise look in the environment */ - memsize_str = prom_getenv("memsize"); + memsize_str = fw_getenv("memsize"); if (!memsize_str) { - printk(KERN_WARNING - "memsize not set in boot prom, set to default (32Mb)\n"); + pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); physical_memsize = 0x02000000; } else { -#ifdef DEBUG - pr_debug("prom_memsize = %s\n", memsize_str); -#endif - physical_memsize = simple_strtol(memsize_str, NULL, 0); + tmp = kstrtol(memsize_str, 0, &val); + physical_memsize = (unsigned long)val; } #ifdef CONFIG_CPU_BIG_ENDIAN @@ -90,11 +62,11 @@ static struct prom_pmemblock * __init prom_getmdesc(void) memset(mdesc, 0, sizeof(mdesc)); - mdesc[0].type = yamon_dontuse; + mdesc[0].type = fw_dontuse; mdesc[0].base = 0x00000000; mdesc[0].size = 0x00001000; - mdesc[1].type = yamon_prom; + mdesc[1].type = fw_code; mdesc[1].base = 0x00001000; mdesc[1].size = 0x000ef000; @@ -105,55 +77,45 @@ static struct prom_pmemblock * __init prom_getmdesc(void) * This mean that this area can't be used as DMA memory for PCI * devices. 
*/ - mdesc[2].type = yamon_dontuse; + mdesc[2].type = fw_dontuse; mdesc[2].base = 0x000f0000; mdesc[2].size = 0x00010000; - mdesc[3].type = yamon_dontuse; + mdesc[3].type = fw_dontuse; mdesc[3].base = 0x00100000; - mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base; + mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - + mdesc[3].base; - mdesc[4].type = yamon_free; + mdesc[4].type = fw_free; mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end)); mdesc[4].size = memsize - mdesc[4].base; return &mdesc[0]; } -static int __init prom_memtype_classify(unsigned int type) +static int __init fw_memtype_classify(unsigned int type) { switch (type) { - case yamon_free: + case fw_free: return BOOT_MEM_RAM; - case yamon_prom: + case fw_code: return BOOT_MEM_ROM_DATA; default: return BOOT_MEM_RESERVED; } } -void __init prom_meminit(void) +void __init fw_meminit(void) { - struct prom_pmemblock *p; + fw_memblock_t *p; -#ifdef DEBUG - pr_debug("YAMON MEMORY DESCRIPTOR dump:\n"); - p = prom_getmdesc(); - while (p->size) { - int i = 0; - pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n", - i, p, p->base, p->size, mtypes[p->type]); - p++; - i++; - } -#endif - p = prom_getmdesc(); + p = fw_getmdesc(); while (p->size) { long type; unsigned long base, size; - type = prom_memtype_classify(p->type); + type = fw_memtype_classify(p->type); base = p->base; size = p->size; @@ -172,7 +134,7 @@ void __init prom_free_prom_memory(void) continue; addr = boot_mem_map.map[i].addr; - free_init_pages("prom memory", + free_init_pages("YAMON memory", addr, addr + boot_mem_map.map[i].size); } } diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index 200f64df2c9b..c72a06936781 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -25,13 +25,13 @@ #include <linux/screen_info.h> #include <linux/time.h> -#include <asm/bootinfo.h> +#include <asm/fw/fw.h> #include <asm/mips-boards/generic.h> -#include <asm/mips-boards/prom.h> #include <asm/mips-boards/malta.h> #include <asm/mips-boards/maltaint.h> #include <asm/dma.h> #include <asm/traps.h> +#include <asm/gcmpregs.h> #ifdef CONFIG_VT #include <linux/console.h> #endif @@ -105,6 +105,66 @@ static void __init fd_activate(void) } #endif +static int __init plat_enable_iocoherency(void) +{ + int supported = 0; + if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) { + if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { + BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; + pr_info("Enabled Bonito CPU coherency\n"); + supported = 1; + } + if (strstr(fw_getcmdline(), "iobcuncached")) { + BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN; + BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG & + ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | + BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); + pr_info("Disabled Bonito IOBC coherency\n"); + } else { + BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN; + BONITO_PCIMEMBASECFG |= + (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | + BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); + pr_info("Enabled Bonito IOBC coherency\n"); + } + } else if (gcmp_niocu() != 0) { + /* Nothing special needs to be done to enable coherency */ + pr_info("CMP IOCU detected\n"); + if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) { + pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); + return 0; + } + supported = 1; + } + hw_coherentio = supported; + return supported; +} + +static void __init plat_setup_iocoherency(void) +{ +#ifdef CONFIG_DMA_NONCOHERENT + /* + * 
Kernel has been configured with software coherency + * but we might choose to turn it off and use hardware + * coherency instead. + */ + if (plat_enable_iocoherency()) { + if (coherentio == 0) + pr_info("Hardware DMA cache coherency disabled\n"); + else + pr_info("Hardware DMA cache coherency enabled\n"); + } else { + if (coherentio == 1) + pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n"); + else + pr_info("Software DMA cache coherency enabled\n"); + } +#else + if (!plat_enable_iocoherency()) + panic("Hardware DMA cache coherency not supported!"); +#endif +} + #ifdef CONFIG_BLK_DEV_IDE static void __init pci_clock_check(void) { @@ -115,16 +175,15 @@ static void __init pci_clock_check(void) 33, 20, 25, 30, 12, 16, 37, 10 }; int pciclock = pciclocks[jmpr]; - char *argptr = prom_getcmdline(); + char *argptr = fw_getcmdline(); if (pciclock != 33 && !strstr(argptr, "idebus=")) { - printk(KERN_WARNING "WARNING: PCI clock is %dMHz, " - "setting idebus\n", pciclock); + pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n", + pciclock); argptr += strlen(argptr); sprintf(argptr, " idebus=%d", pciclock); if (pciclock < 20 || pciclock > 66) - printk(KERN_WARNING "WARNING: IDE timing " - "calculations will be incorrect\n"); + pr_warn("WARNING: IDE timing calculations will be incorrect\n"); } } #endif @@ -153,31 +212,31 @@ static void __init bonito_quirks_setup(void) { char *argptr; - argptr = prom_getcmdline(); + argptr = fw_getcmdline(); if (strstr(argptr, "debug")) { BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE; - printk(KERN_INFO "Enabled Bonito debug mode\n"); + pr_info("Enabled Bonito debug mode\n"); } else BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE; #ifdef CONFIG_DMA_COHERENT if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; - printk(KERN_INFO "Enabled Bonito CPU coherency\n"); + pr_info("Enabled Bonito CPU coherency\n"); - argptr = prom_getcmdline(); + argptr = fw_getcmdline(); if (strstr(argptr, "iobcuncached")) { BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN; BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG & ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); - printk(KERN_INFO "Disabled Bonito IOBC coherency\n"); + pr_info("Disabled Bonito IOBC coherency\n"); } else { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN; BONITO_PCIMEMBASECFG |= (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); - printk(KERN_INFO "Enabled Bonito IOBC coherency\n"); + pr_info("Enabled Bonito IOBC coherency\n"); } } else panic("Hardware DMA cache coherency not supported"); @@ -207,6 +266,8 @@ void __init plat_mem_setup(void) if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) bonito_quirks_setup(); + plat_setup_iocoherency(); + #ifdef CONFIG_BLK_DEV_IDE pci_clock_check(); #endif diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index a144b89cf9ba..0ad305f75802 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -39,12 +39,9 @@ #include <asm/gic.h> #include <asm/mips-boards/generic.h> -#include <asm/mips-boards/prom.h> - #include <asm/mips-boards/maltaint.h> unsigned long cpu_khz; -int gic_frequency; static int mips_cpu_timer_irq; static int mips_cpu_perf_irq; @@ -74,7 +71,24 @@ static void __init estimate_frequencies(void) { unsigned long flags; unsigned int count, start; +#ifdef CONFIG_IRQ_GIC unsigned int giccount = 0, gicstart = 0; +#endif + +#if defined 
(CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ) + unsigned int prid = read_c0_prid() & 0xffff00; + + /* + * XXXKYMA: hardwire the CPU frequency to Host Freq/4 + */ + count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3; + if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) && + (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) + count *= 2; + + mips_hpt_frequency = count; + return; +#endif local_irq_save(flags); @@ -84,26 +98,32 @@ static void __init estimate_frequencies(void) /* Initialize counters. */ start = read_c0_count(); +#ifdef CONFIG_IRQ_GIC if (gic_present) GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart); +#endif /* Read counter exactly on falling edge of update flag. */ while (CMOS_READ(RTC_REG_A) & RTC_UIP); while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); count = read_c0_count(); +#ifdef CONFIG_IRQ_GIC if (gic_present) GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount); +#endif local_irq_restore(flags); count -= start; - if (gic_present) - giccount -= gicstart; - mips_hpt_frequency = count; - if (gic_present) + +#ifdef CONFIG_IRQ_GIC + if (gic_present) { + giccount -= gicstart; gic_frequency = giccount; + } +#endif } void read_persistent_clock(struct timespec *ts) @@ -159,24 +179,27 @@ void __init plat_time_init(void) (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) freq *= 2; freq = freqround(freq, 5000); - pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000, + printk("CPU frequency %d.%02d MHz\n", freq/1000000, (freq%1000000)*100/1000000); cpu_khz = freq / 1000; - if (gic_present) { - freq = freqround(gic_frequency, 5000); - pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000, - (freq%1000000)*100/1000000); - gic_clocksource_init(gic_frequency); - } else - init_r4k_clocksource(); + mips_scroll_message(); #ifdef CONFIG_I8253 /* Only Malta has a PIT. */ setup_pit_timer(); #endif - mips_scroll_message(); +#ifdef CONFIG_IRQ_GIC + if (gic_present) { + freq = freqround(gic_frequency, 5000); + printk("GIC frequency %d.%02d MHz\n", freq/1000000, + (freq%1000000)*100/1000000); +#ifdef CONFIG_CSRC_GIC + gic_clocksource_init(gic_frequency); +#endif + } +#endif plat_perf_setup(); } diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile index 10ec701ce6c7..be114209217c 100644 --- a/arch/mips/mti-sead3/Makefile +++ b/arch/mips/mti-sead3/Makefile @@ -8,10 +8,10 @@ # Copyright (C) 2012 MIPS Technoligies, Inc. All rights reserved. # Steven J. 
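The KVM-guest branch of estimate_frequencies() above skips the RTC-based measurement entirely and derives the timer rate from the configured host frequency: one eighth of the host clock, doubled on cores other than the 20Kc/25Kf, for a net host/4 on most guests. With an assumed 1000 MHz host:

#include <stdio.h>

int main(void)
{
        unsigned int host_hz = 1000 * 1000000;  /* assumes CONFIG_KVM_HOST_FREQ=1000 */
        unsigned int count   = host_hz >> 3;    /* host clock / 8 */
        unsigned int doubled = count * 2;       /* cores other than 20Kc/25Kf */

        printf("20Kc/25Kf guests: %u Hz, others: %u Hz\n", count, doubled);
        return 0;
}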
Hill <sjhill@mips.com> # -obj-y := sead3-lcd.o sead3-cmdline.o \ - sead3-display.o sead3-init.o sead3-int.o \ - sead3-mtd.o sead3-net.o sead3-platform.o \ - sead3-reset.o sead3-setup.o sead3-time.o +obj-y := sead3-lcd.o sead3-display.o sead3-init.o \ + sead3-int.o sead3-mtd.o sead3-net.o \ + sead3-platform.o sead3-reset.o \ + sead3-setup.o sead3-time.o obj-y += sead3-i2c-dev.o sead3-i2c.o \ sead3-pic32-i2c-drv.o sead3-pic32-bus.o \ diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c index 322148c353ed..0a168c948b01 100644 --- a/arch/mips/mti-sead3/leds-sead3.c +++ b/arch/mips/mti-sead3/leds-sead3.c @@ -34,33 +34,15 @@ static void sead3_fled_set(struct led_classdev *led_cdev, static struct led_classdev sead3_pled = { .name = "sead3::pled", .brightness_set = sead3_pled_set, + .flags = LED_CORE_SUSPENDRESUME, }; static struct led_classdev sead3_fled = { .name = "sead3::fled", .brightness_set = sead3_fled_set, + .flags = LED_CORE_SUSPENDRESUME, }; -#ifdef CONFIG_PM -static int sead3_led_suspend(struct platform_device *dev, - pm_message_t state) -{ - led_classdev_suspend(&sead3_pled); - led_classdev_suspend(&sead3_fled); - return 0; -} - -static int sead3_led_resume(struct platform_device *dev) -{ - led_classdev_resume(&sead3_pled); - led_classdev_resume(&sead3_fled); - return 0; -} -#else -#define sead3_led_suspend NULL -#define sead3_led_resume NULL -#endif - static int sead3_led_probe(struct platform_device *pdev) { int ret; @@ -86,8 +68,6 @@ static int sead3_led_remove(struct platform_device *pdev) static struct platform_driver sead3_led_driver = { .probe = sead3_led_probe, .remove = sead3_led_remove, - .suspend = sead3_led_suspend, - .resume = sead3_led_resume, .driver = { .name = DRVNAME, .owner = THIS_MODULE, diff --git a/arch/mips/mti-sead3/sead3-cmdline.c b/arch/mips/mti-sead3/sead3-cmdline.c deleted file mode 100644 index a2e6cec67f57..000000000000 --- a/arch/mips/mti-sead3/sead3-cmdline.c +++ /dev/null @@ -1,46 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - */ -#include <linux/init.h> -#include <linux/string.h> - -#include <asm/bootinfo.h> - -extern int prom_argc; -extern int *_prom_argv; - -/* - * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer. - * This macro take care of sign extension. 
- */ -#define prom_argv(index) ((char *)(long)_prom_argv[(index)]) - -char * __init prom_getcmdline(void) -{ - return &(arcs_cmdline[0]); -} - -void __init prom_init_cmdline(void) -{ - char *cp; - int actr; - - actr = 1; /* Always ignore argv[0] */ - - cp = &(arcs_cmdline[0]); - while (actr < prom_argc) { - strcpy(cp, prom_argv(actr)); - cp += strlen(prom_argv(actr)); - *cp++ = ' '; - actr++; - } - if (cp != &(arcs_cmdline[0])) { - /* get rid of trailing space */ - --cp; - *cp = '\0'; - } -} diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c index 2ddef19a9adc..031f47d69770 100644 --- a/arch/mips/mti-sead3/sead3-console.c +++ b/arch/mips/mti-sead3/sead3-console.c @@ -26,7 +26,7 @@ static inline void serial_out(int offset, int value, unsigned int base_addr) __raw_writel(value, PORT(base_addr, offset)); } -void __init prom_init_early_console(char port) +void __init fw_init_early_console(char port) { console_port = port; } diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c index e389326cfa42..94875991907b 100644 --- a/arch/mips/mti-sead3/sead3-display.c +++ b/arch/mips/mti-sead3/sead3-display.c @@ -8,7 +8,6 @@ #include <linux/timer.h> #include <linux/io.h> #include <asm/mips-boards/generic.h> -#include <asm/mips-boards/prom.h> static unsigned int display_count; static unsigned int max_display_count; diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c index f95abaa1aa5d..bfbd17b120a2 100644 --- a/arch/mips/mti-sead3/sead3-init.c +++ b/arch/mips/mti-sead3/sead3-init.c @@ -12,38 +12,51 @@ #include <asm/cacheflush.h> #include <asm/traps.h> #include <asm/mips-boards/generic.h> -#include <asm/mips-boards/prom.h> - -extern void prom_init_early_console(char port); +#include <asm/fw/fw.h> extern char except_vec_nmi; extern char except_vec_ejtag_debug; -int prom_argc; -int *_prom_argv, *_prom_envp; - -#define prom_envp(index) ((char *)(long)_prom_envp[(index)]) - -char *prom_getenv(char *envname) +#ifdef CONFIG_SERIAL_8250_CONSOLE +static void __init console_config(void) { - /* - * Return a pointer to the given environment variable. - * In 64-bit mode: we're using 64-bit pointers, but all pointers - * in the PROM structures are only 32-bit, so we need some - * workarounds, if we are running in 64-bit mode. - */ - int i, index = 0; - - i = strlen(envname); - - while (prom_envp(index)) { - if (strncmp(envname, prom_envp(index), i) == 0) - return prom_envp(index+1); - index += 2; + char console_string[40]; + int baud = 0; + char parity = '\0', bits = '\0', flow = '\0'; + char *s; + + if ((strstr(fw_getcmdline(), "console=")) == NULL) { + s = fw_getenv("modetty0"); + if (s) { + while (*s >= '0' && *s <= '9') + baud = baud*10 + *s++ - '0'; + if (*s == ',') + s++; + if (*s) + parity = *s++; + if (*s == ',') + s++; + if (*s) + bits = *s++; + if (*s == ',') + s++; + if (*s == 'h') + flow = 'r'; + } + if (baud == 0) + baud = 38400; + if (parity != 'n' && parity != 'o' && parity != 'e') + parity = 'n'; + if (bits != '7' && bits != '8') + bits = '8'; + if (flow == '\0') + flow = 'r'; + sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, + parity, bits, flow); + strcat(fw_getcmdline(), console_string); } - - return NULL; } +#endif static void __init mips_nmi_setup(void) { @@ -52,7 +65,41 @@ static void __init mips_nmi_setup(void) base = cpu_has_veic ? (void *)(CAC_BASE + 0xa80) : (void *)(CAC_BASE + 0x380); +#ifdef CONFIG_CPU_MICROMIPS + /* + * Decrement the exception vector address by one for microMIPS. 
+ */ + memcpy(base, (&except_vec_nmi - 1), 0x80); + + /* + * This is a hack. We do not know if the boot loader was built with + * microMIPS instructions or not. If it was not, the NMI exception + * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded + * assembly below forces us into microMIPS mode if we are a pure + * microMIPS kernel. The assembly instructions are: + * + * 3C1A8000 lui k0,0x8000 + * 375A0381 ori k0,k0,0x381 + * 03400008 jr k0 + * 00000000 nop + * + * The mode switch occurs by jumping to the unaligned exception + * vector address at 0x80000381 which would have been 0x80000380 + * in MIPS32 mode. The jump to the unaligned address transitions + * us into microMIPS mode. + */ + if (!cpu_has_veic) { + void *base2 = (void *)(CAC_BASE + 0xa80); + *((unsigned int *)base2) = 0x3c1a8000; + *((unsigned int *)base2 + 1) = 0x375a0381; + *((unsigned int *)base2 + 2) = 0x03400008; + *((unsigned int *)base2 + 3) = 0x00000000; + flush_icache_range((unsigned long)base2, + (unsigned long)base2 + 0x10); + } +#else memcpy(base, &except_vec_nmi, 0x80); +#endif flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); } @@ -63,29 +110,40 @@ static void __init mips_ejtag_setup(void) base = cpu_has_veic ? (void *)(CAC_BASE + 0xa00) : (void *)(CAC_BASE + 0x300); +#ifdef CONFIG_CPU_MICROMIPS + /* Deja vu... */ + memcpy(base, (&except_vec_ejtag_debug - 1), 0x80); + if (!cpu_has_veic) { + void *base2 = (void *)(CAC_BASE + 0xa00); + *((unsigned int *)base2) = 0x3c1a8000; + *((unsigned int *)base2 + 1) = 0x375a0301; + *((unsigned int *)base2 + 2) = 0x03400008; + *((unsigned int *)base2 + 3) = 0x00000000; + flush_icache_range((unsigned long)base2, + (unsigned long)base2 + 0x10); + } +#else memcpy(base, &except_vec_ejtag_debug, 0x80); +#endif flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); } void __init prom_init(void) { - prom_argc = fw_arg0; - _prom_argv = (int *) fw_arg1; - _prom_envp = (int *) fw_arg2; - board_nmi_handler_setup = mips_nmi_setup; board_ejtag_handler_setup = mips_ejtag_setup; - prom_init_cmdline(); + fw_init_cmdline(); #ifdef CONFIG_EARLY_PRINTK - if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL) - prom_init_early_console(0); - else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL) - prom_init_early_console(1); + if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL) + fw_init_early_console(0); + else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL) + fw_init_early_console(1); #endif #ifdef CONFIG_SERIAL_8250_CONSOLE - if ((strstr(prom_getcmdline(), "console=")) == NULL) - strcat(prom_getcmdline(), " console=ttyS0,38400n8r"); + if ((strstr(fw_getcmdline(), "console=")) == NULL) + strcat(fw_getcmdline(), " console=ttyS0,38400n8r"); + console_config(); #endif } diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c index e26e08274fc5..6a560ac03def 100644 --- a/arch/mips/mti-sead3/sead3-int.c +++ b/arch/mips/mti-sead3/sead3-int.c @@ -20,7 +20,6 @@ #define SEAD_CONFIG_BASE 0x1b100110 #define SEAD_CONFIG_SIZE 4 -int gic_present; static unsigned long sead3_config_reg; /* diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c index f012fd164cee..b5059dc899f4 100644 --- a/arch/mips/mti-sead3/sead3-setup.c +++ b/arch/mips/mti-sead3/sead3-setup.c @@ -11,10 +11,6 @@ #include <linux/bootmem.h> #include <asm/mips-boards/generic.h> -#include <asm/prom.h> - -int coherentio; /* 0 => no DMA cache coherency (may be set by user) */ -int hw_coherentio; /* 0 => no HW DMA cache coherency 
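The NMI/EJTAG trampolines planted above are hand-encoded MIPS32 words; as the comment explains, jumping to the odd address (0x80000380 | 1) is what switches the core into microMIPS mode. The sketch below just recomputes the words from that target to show where the 0x3c1a8000/0x375a0381 constants come from.

#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint32_t target = 0x80000380u | 1;                      /* ISA-mode bit set */
        uint32_t lui_k0 = 0x3c1a0000u | (target >> 16);         /* lui k0, hi(target) */
        uint32_t ori_k0 = 0x375a0000u | (target & 0xffff);      /* ori k0, k0, lo(target) */
        uint32_t jr_k0  = 0x03400008u;                          /* jr k0 */
        uint32_t nop    = 0x00000000u;                          /* delay slot */

        assert(lui_k0 == 0x3c1a8000u);
        assert(ori_k0 == 0x375a0381u);
        (void)jr_k0;
        (void)nop;
        return 0;
}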
(reflects real HW) */ const char *get_system_type(void) { diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index 239e4e32757f..96b42eb9b5e2 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -11,7 +11,6 @@ #include <asm/time.h> #include <asm/irq.h> #include <asm/mips-boards/generic.h> -#include <asm/mips-boards/prom.h> unsigned long cpu_khz; diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig index 3c05bf9e280a..e0873a31ebaa 100644 --- a/arch/mips/netlogic/Kconfig +++ b/arch/mips/netlogic/Kconfig @@ -2,13 +2,22 @@ if NLM_XLP_BOARD || NLM_XLR_BOARD if NLM_XLP_BOARD config DT_XLP_EVP - bool "Built-in device tree for XLP EVP/SVP boards" + bool "Built-in device tree for XLP EVP boards" default y help - Add an FDT blob for XLP EVP and SVP boards into the kernel. + Add an FDT blob for XLP EVP boards into the kernel. This DTB will be used if the firmware does not pass in a DTB - pointer to the kernel. The corresponding DTS file is at - arch/mips/netlogic/dts/xlp_evp.dts + pointer to the kernel. The corresponding DTS file is at + arch/mips/netlogic/dts/xlp_evp.dts + +config DT_XLP_SVP + bool "Built-in device tree for XLP SVP boards" + default y + help + Add an FDT blob for XLP VP boards into the kernel. + This DTB will be used if the firmware does not pass in a DTB + pointer to the kernel. The corresponding DTS file is at + arch/mips/netlogic/dts/xlp_svp.dts config NLM_MULTINODE bool "Support for multi-chip boards" diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index 2bb95dcfe20a..ffba52489bef 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -148,8 +148,7 @@ void nlm_cpus_done(void) int nlm_cpu_ready[NR_CPUS]; unsigned long nlm_next_gp; unsigned long nlm_next_sp; - -cpumask_t phys_cpu_present_map; +static cpumask_t phys_cpu_present_mask; void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) { @@ -169,11 +168,12 @@ void __init nlm_smp_setup(void) { unsigned int boot_cpu; int num_cpus, i, ncore; + char buf[64]; boot_cpu = hard_smp_processor_id(); - cpumask_clear(&phys_cpu_present_map); + cpumask_clear(&phys_cpu_present_mask); - cpumask_set_cpu(boot_cpu, &phys_cpu_present_map); + cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask); __cpu_number_map[boot_cpu] = 0; __cpu_logical_map[0] = boot_cpu; set_cpu_possible(0, true); @@ -185,7 +185,7 @@ void __init nlm_smp_setup(void) * it is only set for ASPs (see smpboot.S) */ if (nlm_cpu_ready[i]) { - cpumask_set_cpu(i, &phys_cpu_present_map); + cpumask_set_cpu(i, &phys_cpu_present_mask); __cpu_number_map[i] = num_cpus; __cpu_logical_map[num_cpus] = i; set_cpu_possible(num_cpus, true); @@ -193,16 +193,19 @@ void __init nlm_smp_setup(void) } } + cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask); + pr_info("Physical CPU mask: %s\n", buf); + cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask); + pr_info("Possible CPU mask: %s\n", buf); + /* check with the cores we have worken up */ for (ncore = 0, i = 0; i < NLM_NR_NODES; i++) ncore += hweight32(nlm_get_node(i)->coremask); - pr_info("Phys CPU present map: %lx, possible map %lx\n", - (unsigned long)cpumask_bits(&phys_cpu_present_map)[0], - (unsigned long)cpumask_bits(cpu_possible_mask)[0]); - pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore, nlm_threads_per_core, num_cpus); + + /* switch NMI handler to boot CPUs */ nlm_set_nmi_handler(nlm_boot_secondary_cpus); } diff --git a/arch/mips/netlogic/dts/Makefile 
b/arch/mips/netlogic/dts/Makefile index d117d46413aa..aecb6fa9a9c3 100644 --- a/arch/mips/netlogic/dts/Makefile +++ b/arch/mips/netlogic/dts/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o +obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts index 7628b5464fc7..e14f42308064 100644 --- a/arch/mips/netlogic/dts/xlp_evp.dts +++ b/arch/mips/netlogic/dts/xlp_evp.dts @@ -20,7 +20,7 @@ #address-cells = <2>; #size-cells = <1>; compatible = "simple-bus"; - ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG + ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG 1 0 0 0x16000000 0x01000000>; // GBU chipselects serial0: serial@30000 { diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts new file mode 100644 index 000000000000..8af4bdbe5d99 --- /dev/null +++ b/arch/mips/netlogic/dts/xlp_svp.dts @@ -0,0 +1,124 @@ +/* + * XLP3XX Device Tree Source for SVP boards + */ + +/dts-v1/; +/ { + model = "netlogic,XLP-SVP"; + compatible = "netlogic,xlp"; + #address-cells = <2>; + #size-cells = <2>; + + memory { + device_type = "memory"; + reg = <0 0x00100000 0 0x0FF00000 // 255M at 1M + 0 0x20000000 0 0xa0000000 // 2560M at 512M + 0 0xe0000000 0 0x40000000>; + }; + + soc { + #address-cells = <2>; + #size-cells = <1>; + compatible = "simple-bus"; + ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG + 1 0 0 0x16000000 0x01000000>; // GBU chipselects + + serial0: serial@30000 { + device_type = "serial"; + compatible = "ns16550"; + reg = <0 0x30100 0xa00>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <133333333>; + interrupt-parent = <&pic>; + interrupts = <17>; + }; + serial1: serial@31000 { + device_type = "serial"; + compatible = "ns16550"; + reg = <0 0x31100 0xa00>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <133333333>; + interrupt-parent = <&pic>; + interrupts = <18>; + }; + i2c0: ocores@32000 { + compatible = "opencores,i2c-ocores"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x32100 0xa00>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <32000000>; + interrupt-parent = <&pic>; + interrupts = <30>; + }; + i2c1: ocores@33000 { + compatible = "opencores,i2c-ocores"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x33100 0xa00>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <32000000>; + interrupt-parent = <&pic>; + interrupts = <31>; + + rtc@68 { + compatible = "dallas,ds1374"; + reg = <0x68>; + }; + + dtt@4c { + compatible = "national,lm90"; + reg = <0x4c>; + }; + }; + pic: pic@4000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + reg = <0 0x4000 0x200>; + }; + + nor_flash@1,0 { + compatible = "cfi-flash"; + #address-cells = <1>; + #size-cells = <1>; + bank-width = <2>; + reg = <1 0 0x1000000>; + + partition@0 { + label = "x-loader"; + reg = <0x0 0x100000>; /* 1M */ + read-only; + }; + + partition@100000 { + label = "u-boot"; + reg = <0x100000 0x100000>; /* 1M */ + }; + + partition@200000 { + label = "kernel"; + reg = <0x200000 0x500000>; /* 5M */ + }; + + partition@700000 { + label = "rootfs"; + reg = <0x700000 0x800000>; /* 8M */ + }; + + partition@f00000 { + label = "env"; + reg = <0xf00000 0x100000>; /* 1M */ + read-only; + }; + }; + }; + + chosen { + bootargs = "console=ttyS0,115200 rdinit=/sbin/init"; + }; +}; diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c index c68fd4026104..87560e4db35f 100644 --- a/arch/mips/netlogic/xlp/nlm_hal.c +++ 
b/arch/mips/netlogic/xlp/nlm_hal.c @@ -61,43 +61,61 @@ void nlm_node_init(int node) int nlm_irq_to_irt(int irq) { - if (!PIC_IRQ_IS_IRT(irq)) - return -1; + uint64_t pcibase; + int devoff, irt; switch (irq) { case PIC_UART_0_IRQ: - return PIC_IRT_UART_0_INDEX; + devoff = XLP_IO_UART0_OFFSET(0); + break; case PIC_UART_1_IRQ: - return PIC_IRT_UART_1_INDEX; - case PIC_PCIE_LINK_0_IRQ: - return PIC_IRT_PCIE_LINK_0_INDEX; - case PIC_PCIE_LINK_1_IRQ: - return PIC_IRT_PCIE_LINK_1_INDEX; - case PIC_PCIE_LINK_2_IRQ: - return PIC_IRT_PCIE_LINK_2_INDEX; - case PIC_PCIE_LINK_3_IRQ: - return PIC_IRT_PCIE_LINK_3_INDEX; + devoff = XLP_IO_UART1_OFFSET(0); + break; case PIC_EHCI_0_IRQ: - return PIC_IRT_EHCI_0_INDEX; + devoff = XLP_IO_USB_EHCI0_OFFSET(0); + break; case PIC_EHCI_1_IRQ: - return PIC_IRT_EHCI_1_INDEX; + devoff = XLP_IO_USB_EHCI1_OFFSET(0); + break; case PIC_OHCI_0_IRQ: - return PIC_IRT_OHCI_0_INDEX; + devoff = XLP_IO_USB_OHCI0_OFFSET(0); + break; case PIC_OHCI_1_IRQ: - return PIC_IRT_OHCI_1_INDEX; + devoff = XLP_IO_USB_OHCI1_OFFSET(0); + break; case PIC_OHCI_2_IRQ: - return PIC_IRT_OHCI_2_INDEX; + devoff = XLP_IO_USB_OHCI2_OFFSET(0); + break; case PIC_OHCI_3_IRQ: - return PIC_IRT_OHCI_3_INDEX; + devoff = XLP_IO_USB_OHCI3_OFFSET(0); + break; case PIC_MMC_IRQ: - return PIC_IRT_MMC_INDEX; + devoff = XLP_IO_SD_OFFSET(0); + break; case PIC_I2C_0_IRQ: - return PIC_IRT_I2C_0_INDEX; + devoff = XLP_IO_I2C0_OFFSET(0); + break; case PIC_I2C_1_IRQ: - return PIC_IRT_I2C_1_INDEX; + devoff = XLP_IO_I2C1_OFFSET(0); + break; default: - return -1; + devoff = 0; + break; } + + if (devoff != 0) { + pcibase = nlm_pcicfg_base(devoff); + irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff; + /* HW bug, I2C 1 irt entry is off by one */ + if (irq == PIC_I2C_1_IRQ) + irt = irt + 1; + } else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) { + /* HW bug, PCI IRT entries are bad on early silicon, fix */ + irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ); + } else { + irt = -1; + } + return irt; } unsigned int nlm_get_core_frequency(int node, int core) diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index 4894d62043ac..af319143b591 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -56,7 +56,7 @@ uint64_t nlm_io_base; struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; cpumask_t nlm_cpumask = CPU_MASK_CPU0; unsigned int nlm_threads_per_core; -extern u32 __dtb_start[]; +extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[]; static void nlm_linux_exit(void) { @@ -82,8 +82,24 @@ void __init plat_mem_setup(void) * 64-bit, so convert pointer. 
*/ fdtp = (void *)(long)fw_arg0; - if (!fdtp) - fdtp = __dtb_start; + if (!fdtp) { + switch (current_cpu_data.processor_id & 0xff00) { +#ifdef CONFIG_DT_XLP_SVP + case PRID_IMP_NETLOGIC_XLP3XX: + fdtp = __dtb_xlp_svp_begin; + break; +#endif +#ifdef CONFIG_DT_XLP_EVP + case PRID_IMP_NETLOGIC_XLP8XX: + fdtp = __dtb_xlp_evp_begin; + break; +#endif + default: + /* Pick a built-in if any, and hope for the best */ + fdtp = __dtb_start; + break; + } + } fdtp = phys_to_virt(__pa(fdtp)); early_init_devtree(fdtp); } diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c index 1d0b66c62fd1..9c401dd78337 100644 --- a/arch/mips/netlogic/xlp/usb-init.c +++ b/arch/mips/netlogic/xlp/usb-init.c @@ -42,7 +42,30 @@ #include <asm/netlogic/haldefs.h> #include <asm/netlogic/xlp-hal/iomap.h> #include <asm/netlogic/xlp-hal/xlp.h> -#include <asm/netlogic/xlp-hal/usb.h> + +/* + * USB glue logic registers, used only during initialization + */ +#define USB_CTL_0 0x01 +#define USB_PHY_0 0x0A +#define USB_PHY_RESET 0x01 +#define USB_PHY_PORT_RESET_0 0x10 +#define USB_PHY_PORT_RESET_1 0x20 +#define USB_CONTROLLER_RESET 0x01 +#define USB_INT_STATUS 0x0E +#define USB_INT_EN 0x0F +#define USB_PHY_INTERRUPT_EN 0x01 +#define USB_OHCI_INTERRUPT_EN 0x02 +#define USB_OHCI_INTERRUPT1_EN 0x04 +#define USB_OHCI_INTERRUPT2_EN 0x08 +#define USB_CTRL_INTERRUPT_EN 0x10 + +#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r) +#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v) +#define nlm_get_usb_pcibase(node, inst) \ + nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst)) +#define nlm_get_usb_regbase(node, inst) \ + (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ) static void nlm_usb_intr_en(int node, int port) { @@ -99,23 +122,23 @@ static void nlm_usb_fixup_final(struct pci_dev *dev) dev->dev.coherent_dma_mask = DMA_BIT_MASK(64); switch (dev->devfn) { case 0x10: - dev->irq = PIC_EHCI_0_IRQ; - break; + dev->irq = PIC_EHCI_0_IRQ; + break; case 0x11: - dev->irq = PIC_OHCI_0_IRQ; - break; + dev->irq = PIC_OHCI_0_IRQ; + break; case 0x12: - dev->irq = PIC_OHCI_1_IRQ; - break; + dev->irq = PIC_OHCI_1_IRQ; + break; case 0x13: - dev->irq = PIC_EHCI_1_IRQ; - break; + dev->irq = PIC_EHCI_1_IRQ; + break; case 0x14: - dev->irq = PIC_OHCI_2_IRQ; - break; + dev->irq = PIC_OHCI_2_IRQ; + break; case 0x15: - dev->irq = PIC_OHCI_3_IRQ; - break; + dev->irq = PIC_OHCI_3_IRQ; + break; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI, diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index 1fd361462c03..e4b1140cdae0 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c @@ -41,7 +41,7 @@ static int (*save_perf_irq)(void); * first hardware thread in the core for setup and init. 
* Skip CPUs with non-zero hardware thread id (4 hwt per core) */ -#ifdef CONFIG_CPU_XLR +#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP) #define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0) #else #define oprofile_skip_cpu(c) 0 diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c index 412ec025cf55..18517dd0f709 100644 --- a/arch/mips/pci/pci-ar71xx.c +++ b/arch/mips/pci/pci-ar71xx.c @@ -366,9 +366,9 @@ static int ar71xx_pci_probe(struct platform_device *pdev) if (!res) return -EINVAL; - apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res); - if (!apc->cfg_base) - return -ENOMEM; + apc->cfg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(apc->cfg_base)) + return PTR_ERR(apc->cfg_base); apc->irq = platform_get_irq(pdev, 0); if (apc->irq < 0) diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c index 8a0700d448fe..65ec032fa0b4 100644 --- a/arch/mips/pci/pci-ar724x.c +++ b/arch/mips/pci/pci-ar724x.c @@ -365,25 +365,25 @@ static int ar724x_pci_probe(struct platform_device *pdev) if (!res) return -EINVAL; - apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res); - if (apc->ctrl_base == NULL) - return -EBUSY; + apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(apc->ctrl_base)) + return PTR_ERR(apc->ctrl_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); if (!res) return -EINVAL; - apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res); - if (!apc->devcfg_base) - return -EBUSY; + apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(apc->devcfg_base)) + return PTR_ERR(apc->devcfg_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base"); if (!res) return -EINVAL; - apc->crp_base = devm_request_and_ioremap(&pdev->dev, res); - if (apc->crp_base == NULL) - return -EBUSY; + apc->crp_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(apc->crp_base)) + return PTR_ERR(apc->crp_base); apc->irq = platform_get_irq(pdev, 0); if (apc->irq < 0) diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c index 88e781c6b5ba..2eb954239bc5 100644 --- a/arch/mips/pci/pci-bcm63xx.c +++ b/arch/mips/pci/pci-bcm63xx.c @@ -121,11 +121,17 @@ void __iomem *pci_iospace_start; static void __init bcm63xx_reset_pcie(void) { u32 val; + u32 reg; /* enable SERDES */ - val = bcm_misc_readl(MISC_SERDES_CTRL_REG); + if (BCMCPU_IS_6328()) + reg = MISC_SERDES_CTRL_6328_REG; + else + reg = MISC_SERDES_CTRL_6362_REG; + + val = bcm_misc_readl(reg); val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN; - bcm_misc_writel(val, MISC_SERDES_CTRL_REG); + bcm_misc_writel(val, reg); /* reset the PCIe core */ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1); @@ -330,6 +336,7 @@ static int __init bcm63xx_pci_init(void) switch (bcm63xx_get_cpu_id()) { case BCM6328_CPU_ID: + case BCM6362_CPU_ID: return bcm63xx_register_pcie(); case BCM6348_CPU_ID: case BCM6358_CPU_ID: diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c index 5bd9d8f468cc..a01baff52cae 100644 --- a/arch/mips/powertv/init.c +++ b/arch/mips/powertv/init.c @@ -29,10 +29,11 @@ #include <asm/cacheflush.h> #include <asm/traps.h> -#include <asm/mips-boards/prom.h> #include <asm/mips-boards/generic.h> #include <asm/mach-powertv/asic.h> +#include "init.h" + static int *_prom_envp; unsigned long _prom_memsize; diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h index b194c34ca966..c1a8bd0dbe4b 100644 --- a/arch/mips/powertv/init.h +++ b/arch/mips/powertv/init.h @@ -23,4 +23,6 @@ #ifndef _POWERTV_INIT_H 
#define _POWERTV_INIT_H extern unsigned long _prom_memsize; +extern void prom_meminit(void); +extern char *prom_getenv(char *name); #endif diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c index 6e5f1bdc59b5..bc2f3ca22b41 100644 --- a/arch/mips/powertv/memory.c +++ b/arch/mips/powertv/memory.c @@ -29,7 +29,6 @@ #include <asm/page.h> #include <asm/sections.h> -#include <asm/mips-boards/prom.h> #include <asm/mach-powertv/asic.h> #include <asm/mach-powertv/ioremap.h> diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c index 820b8480f222..24689bff1039 100644 --- a/arch/mips/powertv/powertv_setup.c +++ b/arch/mips/powertv/powertv_setup.c @@ -31,7 +31,6 @@ #include <asm/bootinfo.h> #include <asm/irq.h> #include <asm/mips-boards/generic.h> -#include <asm/mips-boards/prom.h> #include <asm/dma.h> #include <asm/asm.h> #include <asm/traps.h> diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig index a0b0197cab0a..026e823d871d 100644 --- a/arch/mips/ralink/Kconfig +++ b/arch/mips/ralink/Kconfig @@ -6,12 +6,23 @@ choice help Select Ralink MIPS SoC type. + config SOC_RT288X + bool "RT288x" + config SOC_RT305X bool "RT305x" select USB_ARCH_HAS_HCD select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_EHCI + config SOC_RT3883 + bool "RT3883" + select USB_ARCH_HAS_OHCI + select USB_ARCH_HAS_EHCI + + config SOC_MT7620 + bool "MT7620" + endchoice choice @@ -23,10 +34,22 @@ choice config DTB_RT_NONE bool "None" + config DTB_RT2880_EVAL + bool "RT2880 eval kit" + depends on SOC_RT288X + config DTB_RT305X_EVAL bool "RT305x eval kit" depends on SOC_RT305X + config DTB_RT3883_EVAL + bool "RT3883 eval kit" + depends on SOC_RT3883 + + config DTB_MT7620A_EVAL + bool "MT7620A eval kit" + depends on SOC_MT7620 + endchoice endif diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile index 939757f0e71f..38cf1a880aaa 100644 --- a/arch/mips/ralink/Makefile +++ b/arch/mips/ralink/Makefile @@ -8,7 +8,10 @@ obj-y := prom.o of.o reset.o clk.o irq.o +obj-$(CONFIG_SOC_RT288X) += rt288x.o obj-$(CONFIG_SOC_RT305X) += rt305x.o +obj-$(CONFIG_SOC_RT3883) += rt3883.o +obj-$(CONFIG_SOC_MT7620) += mt7620.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform index 6babd65765e6..cda4b6645c50 100644 --- a/arch/mips/ralink/Platform +++ b/arch/mips/ralink/Platform @@ -5,6 +5,24 @@ core-$(CONFIG_RALINK) += arch/mips/ralink/ cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink # +# Ralink RT288x +# +load-$(CONFIG_SOC_RT288X) += 0xffffffff88000000 +cflags-$(CONFIG_SOC_RT288X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x + +# # Ralink RT305x # load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000 +cflags-$(CONFIG_SOC_RT305X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x + +# +# Ralink RT3883 +# +load-$(CONFIG_SOC_RT3883) += 0xffffffff80000000 +cflags-$(CONFIG_SOC_RT3883) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883 + +# +# Ralink MT7620 +# +load-$(CONFIG_SOC_MT7620) += 0xffffffff80000000 diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h index 300990313e1b..83144c3fc5ac 100644 --- a/arch/mips/ralink/common.h +++ b/arch/mips/ralink/common.h @@ -22,13 +22,22 @@ struct ralink_pinmux { struct ralink_pinmux_grp *mode; struct ralink_pinmux_grp *uart; int uart_shift; + u32 uart_mask; void (*wdt_reset)(void); + struct ralink_pinmux_grp *pci; + int pci_shift; + u32 pci_mask; }; -extern struct ralink_pinmux gpio_pinmux; +extern struct ralink_pinmux rt_gpio_pinmux; 
struct ralink_soc_info { unsigned char sys_type[RAMIPS_SYS_TYPE_LEN]; unsigned char *compatible; + + unsigned long mem_base; + unsigned long mem_size; + unsigned long mem_size_min; + unsigned long mem_size_max; }; extern struct ralink_soc_info soc_info; diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile index 1a69fb300955..18194fa93e80 100644 --- a/arch/mips/ralink/dts/Makefile +++ b/arch/mips/ralink/dts/Makefile @@ -1 +1,4 @@ +obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o +obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o +obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/ralink/dts/mt7620a.dtsi new file mode 100644 index 000000000000..08bf24fefe9f --- /dev/null +++ b/arch/mips/ralink/dts/mt7620a.dtsi @@ -0,0 +1,58 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ralink,mtk7620a-soc"; + + cpus { + cpu@0 { + compatible = "mips,mips24KEc"; + }; + }; + + cpuintc: cpuintc@0 { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + palmbus@10000000 { + compatible = "palmbus"; + reg = <0x10000000 0x200000>; + ranges = <0x0 0x10000000 0x1FFFFF>; + + #address-cells = <1>; + #size-cells = <1>; + + sysc@0 { + compatible = "ralink,mt7620a-sysc"; + reg = <0x0 0x100>; + }; + + intc: intc@200 { + compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc"; + reg = <0x200 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + }; + + memc@300 { + compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc"; + reg = <0x300 0x100>; + }; + + uartlite@c00 { + compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a"; + reg = <0xc00 0x100>; + + interrupt-parent = <&intc>; + interrupts = <12>; + + reg-shift = <2>; + }; + }; +}; diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts new file mode 100644 index 000000000000..35eb874ab7f1 --- /dev/null +++ b/arch/mips/ralink/dts/mt7620a_eval.dts @@ -0,0 +1,16 @@ +/dts-v1/; + +/include/ "mt7620a.dtsi" + +/ { + compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc"; + model = "Ralink MT7620A evaluation board"; + + memory@0 { + reg = <0x0 0x2000000>; + }; + + chosen { + bootargs = "console=ttyS0,57600"; + }; +}; diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/ralink/dts/rt2880.dtsi new file mode 100644 index 000000000000..182afde2f2e1 --- /dev/null +++ b/arch/mips/ralink/dts/rt2880.dtsi @@ -0,0 +1,58 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ralink,rt2880-soc"; + + cpus { + cpu@0 { + compatible = "mips,mips4KEc"; + }; + }; + + cpuintc: cpuintc@0 { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + palmbus@300000 { + compatible = "palmbus"; + reg = <0x300000 0x200000>; + ranges = <0x0 0x300000 0x1FFFFF>; + + #address-cells = <1>; + #size-cells = <1>; + + sysc@0 { + compatible = "ralink,rt2880-sysc"; + reg = <0x0 0x100>; + }; + + intc: intc@200 { + compatible = "ralink,rt2880-intc"; + reg = <0x200 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + }; + + memc@300 { + compatible = "ralink,rt2880-memc"; + reg = <0x300 0x100>; + }; + + uartlite@c00 { + compatible = "ralink,rt2880-uart", "ns16550a"; + reg = <0xc00 0x100>; + + interrupt-parent = <&intc>; 
+ interrupts = <8>; + + reg-shift = <2>; + }; + }; +}; diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts new file mode 100644 index 000000000000..322d7002595b --- /dev/null +++ b/arch/mips/ralink/dts/rt2880_eval.dts @@ -0,0 +1,46 @@ +/dts-v1/; + +/include/ "rt2880.dtsi" + +/ { + compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc"; + model = "Ralink RT2880 evaluation board"; + + memory@0 { + reg = <0x8000000 0x2000000>; + }; + + chosen { + bootargs = "console=ttyS0,57600"; + }; + + cfi@1f000000 { + compatible = "cfi-flash"; + reg = <0x1f000000 0x400000>; + + bank-width = <2>; + device-width = <2>; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "uboot"; + reg = <0x0 0x30000>; + read-only; + }; + partition@30000 { + label = "uboot-env"; + reg = <0x30000 0x10000>; + read-only; + }; + partition@40000 { + label = "calibration"; + reg = <0x40000 0x10000>; + read-only; + }; + partition@50000 { + label = "linux"; + reg = <0x50000 0x3b0000>; + }; + }; +}; diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi index 069d0660e1dd..ef7da1e227e6 100644 --- a/arch/mips/ralink/dts/rt3050.dtsi +++ b/arch/mips/ralink/dts/rt3050.dtsi @@ -1,7 +1,7 @@ / { #address-cells = <1>; #size-cells = <1>; - compatible = "ralink,rt3050-soc", "ralink,rt3052-soc"; + compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc"; cpus { cpu@0 { @@ -9,10 +9,6 @@ }; }; - chosen { - bootargs = "console=ttyS0,57600 init=/init"; - }; - cpuintc: cpuintc@0 { #address-cells = <0>; #interrupt-cells = <1>; @@ -23,7 +19,7 @@ palmbus@10000000 { compatible = "palmbus"; reg = <0x10000000 0x200000>; - ranges = <0x0 0x10000000 0x1FFFFF>; + ranges = <0x0 0x10000000 0x1FFFFF>; #address-cells = <1>; #size-cells = <1>; @@ -33,11 +29,6 @@ reg = <0x0 0x100>; }; - timer@100 { - compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt"; - reg = <0x100 0x100>; - }; - intc: intc@200 { compatible = "ralink,rt3052-intc", "ralink,rt2880-intc"; reg = <0x200 0x100>; @@ -54,45 +45,6 @@ reg = <0x300 0x100>; }; - gpio0: gpio@600 { - compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; - reg = <0x600 0x34>; - - gpio-controller; - #gpio-cells = <2>; - - ralink,ngpio = <24>; - ralink,regs = [ 00 04 08 0c - 20 24 28 2c - 30 34 ]; - }; - - gpio1: gpio@638 { - compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; - reg = <0x638 0x24>; - - gpio-controller; - #gpio-cells = <2>; - - ralink,ngpio = <16>; - ralink,regs = [ 00 04 08 0c - 10 14 18 1c - 20 24 ]; - }; - - gpio2: gpio@660 { - compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; - reg = <0x660 0x24>; - - gpio-controller; - #gpio-cells = <2>; - - ralink,ngpio = <12>; - ralink,regs = [ 00 04 08 0c - 10 14 18 1c - 20 24 ]; - }; - uartlite@c00 { compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a"; reg = <0xc00 0x100>; diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts index 148a590bc419..c18c9a84f4c4 100644 --- a/arch/mips/ralink/dts/rt3052_eval.dts +++ b/arch/mips/ralink/dts/rt3052_eval.dts @@ -1,10 +1,8 @@ /dts-v1/; -/include/ "rt3050.dtsi" +#include "rt3050.dtsi" / { - #address-cells = <1>; - #size-cells = <1>; compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc"; model = "Ralink RT3052 evaluation board"; @@ -12,12 +10,8 @@ reg = <0x0 0x2000000>; }; - palmbus@10000000 { - sysc@0 { - ralink,pinmmux = "uartlite", "spi"; - ralink,uartmux = "gpio"; - ralink,wdtmux = <0>; - }; + chosen { + bootargs = "console=ttyS0,57600"; }; 
cfi@1f000000 { diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/ralink/dts/rt3883.dtsi new file mode 100644 index 000000000000..3b131dd0d5ac --- /dev/null +++ b/arch/mips/ralink/dts/rt3883.dtsi @@ -0,0 +1,58 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ralink,rt3883-soc"; + + cpus { + cpu@0 { + compatible = "mips,mips74Kc"; + }; + }; + + cpuintc: cpuintc@0 { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + palmbus@10000000 { + compatible = "palmbus"; + reg = <0x10000000 0x200000>; + ranges = <0x0 0x10000000 0x1FFFFF>; + + #address-cells = <1>; + #size-cells = <1>; + + sysc@0 { + compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc"; + reg = <0x0 0x100>; + }; + + intc: intc@200 { + compatible = "ralink,rt3883-intc", "ralink,rt2880-intc"; + reg = <0x200 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + }; + + memc@300 { + compatible = "ralink,rt3883-memc", "ralink,rt3050-memc"; + reg = <0x300 0x100>; + }; + + uartlite@c00 { + compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a"; + reg = <0xc00 0x100>; + + interrupt-parent = <&intc>; + interrupts = <12>; + + reg-shift = <2>; + }; + }; +}; diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts new file mode 100644 index 000000000000..2fa6b330bf4f --- /dev/null +++ b/arch/mips/ralink/dts/rt3883_eval.dts @@ -0,0 +1,16 @@ +/dts-v1/; + +/include/ "rt3883.dtsi" + +/ { + compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc"; + model = "Ralink RT3883 evaluation board"; + + memory@0 { + reg = <0x0 0x2000000>; + }; + + chosen { + bootargs = "console=ttyS0,57600"; + }; +}; diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c index c4ae47eb24ab..b46d0419d09b 100644 --- a/arch/mips/ralink/early_printk.c +++ b/arch/mips/ralink/early_printk.c @@ -11,7 +11,11 @@ #include <asm/addrspace.h> +#ifdef CONFIG_SOC_RT288X +#define EARLY_UART_BASE 0x300c00 +#else #define EARLY_UART_BASE 0x10000c00 +#endif #define UART_REG_RX 0x00 #define UART_REG_TX 0x04 diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 6d054c5ec9ab..320b1f1043ff 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -31,6 +31,7 @@ #define INTC_INT_GLOBAL BIT(31) #define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2) +#define RALINK_CPU_IRQ_PCI (MIPS_CPU_IRQ_BASE + 4) #define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5) #define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6) #define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7) @@ -104,6 +105,9 @@ asmlinkage void plat_irq_dispatch(void) else if (pending & STATUSF_IP6) do_IRQ(RALINK_CPU_IRQ_WIFI); + else if (pending & STATUSF_IP4) + do_IRQ(RALINK_CPU_IRQ_PCI); + else if (pending & STATUSF_IP2) do_IRQ(RALINK_CPU_IRQ_INTC); @@ -162,6 +166,7 @@ static int __init intc_of_init(struct device_node *node, irq_set_chained_handler(irq, ralink_intc_irq_handler); irq_set_handler_data(irq, domain); + /* tell the kernel which irq is used for performance monitoring */ cp0_perfcount_irq = irq_create_mapping(domain, 9); return 0; diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c new file mode 100644 index 000000000000..0018b1a661f6 --- /dev/null +++ b/arch/mips/ralink/mt7620.c @@ -0,0 +1,234 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free 
Software Foundation. + * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> + +#include <asm/mipsregs.h> +#include <asm/mach-ralink/ralink_regs.h> +#include <asm/mach-ralink/mt7620.h> + +#include "common.h" + +/* does the board have sdram or ddram */ +static int dram_type; + +/* the pll dividers */ +static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 }; + +static struct ralink_pinmux_grp mode_mux[] = { + { + .name = "i2c", + .mask = MT7620_GPIO_MODE_I2C, + .gpio_first = 1, + .gpio_last = 2, + }, { + .name = "spi", + .mask = MT7620_GPIO_MODE_SPI, + .gpio_first = 3, + .gpio_last = 6, + }, { + .name = "uartlite", + .mask = MT7620_GPIO_MODE_UART1, + .gpio_first = 15, + .gpio_last = 16, + }, { + .name = "wdt", + .mask = MT7620_GPIO_MODE_WDT, + .gpio_first = 17, + .gpio_last = 17, + }, { + .name = "mdio", + .mask = MT7620_GPIO_MODE_MDIO, + .gpio_first = 22, + .gpio_last = 23, + }, { + .name = "rgmii1", + .mask = MT7620_GPIO_MODE_RGMII1, + .gpio_first = 24, + .gpio_last = 35, + }, { + .name = "spi refclk", + .mask = MT7620_GPIO_MODE_SPI_REF_CLK, + .gpio_first = 37, + .gpio_last = 39, + }, { + .name = "jtag", + .mask = MT7620_GPIO_MODE_JTAG, + .gpio_first = 40, + .gpio_last = 44, + }, { + /* shared lines with jtag */ + .name = "ephy", + .mask = MT7620_GPIO_MODE_EPHY, + .gpio_first = 40, + .gpio_last = 44, + }, { + .name = "nand", + .mask = MT7620_GPIO_MODE_JTAG, + .gpio_first = 45, + .gpio_last = 59, + }, { + .name = "rgmii2", + .mask = MT7620_GPIO_MODE_RGMII2, + .gpio_first = 60, + .gpio_last = 71, + }, { + .name = "wled", + .mask = MT7620_GPIO_MODE_WLED, + .gpio_first = 72, + .gpio_last = 72, + }, {0} +}; + +static struct ralink_pinmux_grp uart_mux[] = { + { + .name = "uartf", + .mask = MT7620_GPIO_MODE_UARTF, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "pcm uartf", + .mask = MT7620_GPIO_MODE_PCM_UARTF, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "pcm i2s", + .mask = MT7620_GPIO_MODE_PCM_I2S, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "i2s uartf", + .mask = MT7620_GPIO_MODE_I2S_UARTF, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "pcm gpio", + .mask = MT7620_GPIO_MODE_PCM_GPIO, + .gpio_first = 11, + .gpio_last = 14, + }, { + .name = "gpio uartf", + .mask = MT7620_GPIO_MODE_GPIO_UARTF, + .gpio_first = 7, + .gpio_last = 10, + }, { + .name = "gpio i2s", + .mask = MT7620_GPIO_MODE_GPIO_I2S, + .gpio_first = 7, + .gpio_last = 10, + }, { + .name = "gpio", + .mask = MT7620_GPIO_MODE_GPIO, + }, {0} +}; + +struct ralink_pinmux rt_gpio_pinmux = { + .mode = mode_mux, + .uart = uart_mux, + .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT, + .uart_mask = MT7620_GPIO_MODE_UART0_MASK, +}; + +void __init ralink_clk_init(void) +{ + unsigned long cpu_rate, sys_rate; + u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0); + u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1); + u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK; + u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK; + + if (cpu_clk) { + cpu_rate = 480000000; + } else if (!swconfig) { + cpu_rate = 600000000; + } else { + u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO; + u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO; + + cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000; + } + + if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM) 
+ sys_rate = cpu_rate / 4; + else + sys_rate = cpu_rate / 3; + + ralink_clk_add("cpu", cpu_rate); + ralink_clk_add("10000100.timer", 40000000); + ralink_clk_add("10000500.uart", 40000000); + ralink_clk_add("10000c00.uartlite", 40000000); +} + +void __init ralink_of_remap(void) +{ + rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc"); + rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc"); + + if (!rt_sysc_membase || !rt_memc_membase) + panic("Failed to remap core resources"); +} + +void prom_soc_init(struct ralink_soc_info *soc_info) +{ + void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE); + unsigned char *name = NULL; + u32 n0; + u32 n1; + u32 rev; + u32 cfg0; + + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); + n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); + + if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) { + name = "MT7620N"; + soc_info->compatible = "ralink,mt7620n-soc"; + } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) { + name = "MT7620A"; + soc_info->compatible = "ralink,mt7620a-soc"; + } else { + panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1); + } + + rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); + + snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, + "Ralink %s ver:%u eco:%u", + name, + (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, + (rev & CHIP_REV_ECO_MASK)); + + cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0); + dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK; + + switch (dram_type) { + case SYSCFG0_DRAM_TYPE_SDRAM: + soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN; + soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX; + break; + + case SYSCFG0_DRAM_TYPE_DDR1: + soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN; + soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX; + break; + + case SYSCFG0_DRAM_TYPE_DDR2: + soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN; + soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX; + break; + default: + BUG(); + } + soc_info->mem_base = MT7620_DRAM_BASE; +} diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 4165e70775be..fb1569580def 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -11,6 +11,7 @@ #include <linux/io.h> #include <linux/clk.h> #include <linux/init.h> +#include <linux/sizes.h> #include <linux/of_fdt.h> #include <linux/kernel.h> #include <linux/bootmem.h> @@ -85,6 +86,14 @@ void __init plat_mem_setup(void) * parsed resulting in our memory appearing */ __dt_setup_arch(&__dtb_start); + + if (soc_info.mem_size) + add_memory_region(soc_info.mem_base, soc_info.mem_size, + BOOT_MEM_RAM); + else + detect_memory_region(soc_info.mem_base, + soc_info.mem_size_min * SZ_1M, + soc_info.mem_size_max * SZ_1M); } static int __init plat_of_setup(void) diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c new file mode 100644 index 000000000000..f87de1ab2198 --- /dev/null +++ b/arch/mips/ralink/rt288x.c @@ -0,0 +1,143 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> + +#include <asm/mipsregs.h> +#include <asm/mach-ralink/ralink_regs.h> +#include <asm/mach-ralink/rt288x.h> + +#include "common.h" + +static struct ralink_pinmux_grp mode_mux[] = { + { + .name = "i2c", + .mask = RT2880_GPIO_MODE_I2C, + .gpio_first = 1, + .gpio_last = 2, + }, { + .name = "spi", + .mask = RT2880_GPIO_MODE_SPI, + .gpio_first = 3, + .gpio_last = 6, + }, { + .name = "uartlite", + .mask = RT2880_GPIO_MODE_UART0, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "jtag", + .mask = RT2880_GPIO_MODE_JTAG, + .gpio_first = 17, + .gpio_last = 21, + }, { + .name = "mdio", + .mask = RT2880_GPIO_MODE_MDIO, + .gpio_first = 22, + .gpio_last = 23, + }, { + .name = "sdram", + .mask = RT2880_GPIO_MODE_SDRAM, + .gpio_first = 24, + .gpio_last = 39, + }, { + .name = "pci", + .mask = RT2880_GPIO_MODE_PCI, + .gpio_first = 40, + .gpio_last = 71, + }, {0} +}; + +static void rt288x_wdt_reset(void) +{ + u32 t; + + /* enable WDT reset output on pin SRAM_CS_N */ + t = rt_sysc_r32(SYSC_REG_CLKCFG); + t |= CLKCFG_SRAM_CS_N_WDT; + rt_sysc_w32(t, SYSC_REG_CLKCFG); +} + +struct ralink_pinmux rt_gpio_pinmux = { + .mode = mode_mux, + .wdt_reset = rt288x_wdt_reset, +}; + +void __init ralink_clk_init(void) +{ + unsigned long cpu_rate; + u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); + t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK); + + switch (t) { + case SYSTEM_CONFIG_CPUCLK_250: + cpu_rate = 250000000; + break; + case SYSTEM_CONFIG_CPUCLK_266: + cpu_rate = 266666667; + break; + case SYSTEM_CONFIG_CPUCLK_280: + cpu_rate = 280000000; + break; + case SYSTEM_CONFIG_CPUCLK_300: + cpu_rate = 300000000; + break; + } + + ralink_clk_add("cpu", cpu_rate); + ralink_clk_add("300100.timer", cpu_rate / 2); + ralink_clk_add("300120.watchdog", cpu_rate / 2); + ralink_clk_add("300500.uart", cpu_rate / 2); + ralink_clk_add("300c00.uartlite", cpu_rate / 2); + ralink_clk_add("400000.ethernet", cpu_rate / 2); +} + +void __init ralink_of_remap(void) +{ + rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc"); + rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc"); + + if (!rt_sysc_membase || !rt_memc_membase) + panic("Failed to remap core resources"); +} + +void prom_soc_init(struct ralink_soc_info *soc_info) +{ + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE); + const char *name; + u32 n0; + u32 n1; + u32 id; + + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); + n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); + id = __raw_readl(sysc + SYSC_REG_CHIP_ID); + + if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) { + soc_info->compatible = "ralink,r2880-soc"; + name = "RT2880"; + } else { + panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1); + } + + snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, + "Ralink %s id:%u rev:%u", + name, + (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK, + (id & CHIP_ID_REV_MASK)); + + soc_info->mem_base = RT2880_SDRAM_BASE; + soc_info->mem_size_min = RT2880_MEM_SIZE_MIN; + soc_info->mem_size_max = RT2880_MEM_SIZE_MAX; +} diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c index 0a4bbdcf59d9..ca7ee3a33790 100644 --- a/arch/mips/ralink/rt305x.c +++ b/arch/mips/ralink/rt305x.c @@ -22,7 +22,7 @@ enum rt305x_soc_type rt305x_soc; 
-struct ralink_pinmux_grp mode_mux[] = { +static struct ralink_pinmux_grp mode_mux[] = { { .name = "i2c", .mask = RT305X_GPIO_MODE_I2C, @@ -61,7 +61,7 @@ struct ralink_pinmux_grp mode_mux[] = { }, {0} }; -struct ralink_pinmux_grp uart_mux[] = { +static struct ralink_pinmux_grp uart_mux[] = { { .name = "uartf", .mask = RT305X_GPIO_MODE_UARTF, @@ -91,19 +91,19 @@ struct ralink_pinmux_grp uart_mux[] = { .name = "gpio uartf", .mask = RT305X_GPIO_MODE_GPIO_UARTF, .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_14, + .gpio_last = RT305X_GPIO_10, }, { .name = "gpio i2s", .mask = RT305X_GPIO_MODE_GPIO_I2S, .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_14, + .gpio_last = RT305X_GPIO_10, }, { .name = "gpio", .mask = RT305X_GPIO_MODE_GPIO, }, {0} }; -void rt305x_wdt_reset(void) +static void rt305x_wdt_reset(void) { u32 t; @@ -114,16 +114,53 @@ void rt305x_wdt_reset(void) rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); } -struct ralink_pinmux gpio_pinmux = { +struct ralink_pinmux rt_gpio_pinmux = { .mode = mode_mux, .uart = uart_mux, .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT, + .uart_mask = RT305X_GPIO_MODE_UART0_MASK, .wdt_reset = rt305x_wdt_reset, }; +static unsigned long rt5350_get_mem_size(void) +{ + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); + unsigned long ret; + u32 t; + + t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG); + t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) & + RT5350_SYSCFG0_DRAM_SIZE_MASK; + + switch (t) { + case RT5350_SYSCFG0_DRAM_SIZE_2M: + ret = 2; + break; + case RT5350_SYSCFG0_DRAM_SIZE_8M: + ret = 8; + break; + case RT5350_SYSCFG0_DRAM_SIZE_16M: + ret = 16; + break; + case RT5350_SYSCFG0_DRAM_SIZE_32M: + ret = 32; + break; + case RT5350_SYSCFG0_DRAM_SIZE_64M: + ret = 64; + break; + default: + panic("rt5350: invalid DRAM size: %u", t); + break; + } + + return ret; +} + void __init ralink_clk_init(void) { unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate; + unsigned long wmac_rate = 40000000; + u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); if (soc_is_rt305x() || soc_is_rt3350()) { @@ -176,11 +213,21 @@ void __init ralink_clk_init(void) BUG(); } + if (soc_is_rt3352() || soc_is_rt5350()) { + u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0); + + if (!(val & RT3352_CLKCFG0_XTAL_SEL)) + wmac_rate = 20000000; + } + ralink_clk_add("cpu", cpu_rate); ralink_clk_add("10000b00.spi", sys_rate); ralink_clk_add("10000100.timer", wdt_rate); + ralink_clk_add("10000120.watchdog", wdt_rate); ralink_clk_add("10000500.uart", uart_rate); ralink_clk_add("10000c00.uartlite", uart_rate); + ralink_clk_add("10100000.ethernet", sys_rate); + ralink_clk_add("10180000.wmac", wmac_rate); } void __init ralink_of_remap(void) @@ -239,4 +286,15 @@ void prom_soc_init(struct ralink_soc_info *soc_info) name, (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK, (id & CHIP_ID_REV_MASK)); + + soc_info->mem_base = RT305X_SDRAM_BASE; + if (soc_is_rt5350()) { + soc_info->mem_size = rt5350_get_mem_size(); + } else if (soc_is_rt305x() || soc_is_rt3350()) { + soc_info->mem_size_min = RT305X_MEM_SIZE_MIN; + soc_info->mem_size_max = RT305X_MEM_SIZE_MAX; + } else if (soc_is_rt3352()) { + soc_info->mem_size_min = RT3352_MEM_SIZE_MIN; + soc_info->mem_size_max = RT3352_MEM_SIZE_MAX; + } } diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c new file mode 100644 index 000000000000..b474ac284b83 --- /dev/null +++ b/arch/mips/ralink/rt3883.c @@ -0,0 +1,246 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License 
version 2 as published + * by the Free Software Foundation. + * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> + +#include <asm/mipsregs.h> +#include <asm/mach-ralink/ralink_regs.h> +#include <asm/mach-ralink/rt3883.h> + +#include "common.h" + +static struct ralink_pinmux_grp mode_mux[] = { + { + .name = "i2c", + .mask = RT3883_GPIO_MODE_I2C, + .gpio_first = RT3883_GPIO_I2C_SD, + .gpio_last = RT3883_GPIO_I2C_SCLK, + }, { + .name = "spi", + .mask = RT3883_GPIO_MODE_SPI, + .gpio_first = RT3883_GPIO_SPI_CS0, + .gpio_last = RT3883_GPIO_SPI_MISO, + }, { + .name = "uartlite", + .mask = RT3883_GPIO_MODE_UART1, + .gpio_first = RT3883_GPIO_UART1_TXD, + .gpio_last = RT3883_GPIO_UART1_RXD, + }, { + .name = "jtag", + .mask = RT3883_GPIO_MODE_JTAG, + .gpio_first = RT3883_GPIO_JTAG_TDO, + .gpio_last = RT3883_GPIO_JTAG_TCLK, + }, { + .name = "mdio", + .mask = RT3883_GPIO_MODE_MDIO, + .gpio_first = RT3883_GPIO_MDIO_MDC, + .gpio_last = RT3883_GPIO_MDIO_MDIO, + }, { + .name = "ge1", + .mask = RT3883_GPIO_MODE_GE1, + .gpio_first = RT3883_GPIO_GE1_TXD0, + .gpio_last = RT3883_GPIO_GE1_RXCLK, + }, { + .name = "ge2", + .mask = RT3883_GPIO_MODE_GE2, + .gpio_first = RT3883_GPIO_GE2_TXD0, + .gpio_last = RT3883_GPIO_GE2_RXCLK, + }, { + .name = "pci", + .mask = RT3883_GPIO_MODE_PCI, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "lna a", + .mask = RT3883_GPIO_MODE_LNA_A, + .gpio_first = RT3883_GPIO_LNA_PE_A0, + .gpio_last = RT3883_GPIO_LNA_PE_A2, + }, { + .name = "lna g", + .mask = RT3883_GPIO_MODE_LNA_G, + .gpio_first = RT3883_GPIO_LNA_PE_G0, + .gpio_last = RT3883_GPIO_LNA_PE_G2, + }, {0} +}; + +static struct ralink_pinmux_grp uart_mux[] = { + { + .name = "uartf", + .mask = RT3883_GPIO_MODE_UARTF, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "pcm uartf", + .mask = RT3883_GPIO_MODE_PCM_UARTF, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "pcm i2s", + .mask = RT3883_GPIO_MODE_PCM_I2S, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "i2s uartf", + .mask = RT3883_GPIO_MODE_I2S_UARTF, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "pcm gpio", + .mask = RT3883_GPIO_MODE_PCM_GPIO, + .gpio_first = RT3883_GPIO_11, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "gpio uartf", + .mask = RT3883_GPIO_MODE_GPIO_UARTF, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_10, + }, { + .name = "gpio i2s", + .mask = RT3883_GPIO_MODE_GPIO_I2S, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_10, + }, { + .name = "gpio", + .mask = RT3883_GPIO_MODE_GPIO, + }, {0} +}; + +static struct ralink_pinmux_grp pci_mux[] = { + { + .name = "pci-dev", + .mask = 0, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "pci-host2", + .mask = 1, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "pci-host1", + .mask = 2, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "pci-fnc", + .mask = 3, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "pci-gpio", + .mask = 7, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, 
{0} +}; + +static void rt3883_wdt_reset(void) +{ + u32 t; + + /* enable WDT reset output on GPIO 2 */ + t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1); + t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT; + rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1); +} + +struct ralink_pinmux rt_gpio_pinmux = { + .mode = mode_mux, + .uart = uart_mux, + .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT, + .uart_mask = RT3883_GPIO_MODE_UART0_MASK, + .wdt_reset = rt3883_wdt_reset, + .pci = pci_mux, + .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT, + .pci_mask = RT3883_GPIO_MODE_PCI_MASK, +}; + +void __init ralink_clk_init(void) +{ + unsigned long cpu_rate, sys_rate; + u32 syscfg0; + u32 clksel; + u32 ddr2; + + syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0); + clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) & + RT3883_SYSCFG0_CPUCLK_MASK); + ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2; + + switch (clksel) { + case RT3883_SYSCFG0_CPUCLK_250: + cpu_rate = 250000000; + sys_rate = (ddr2) ? 125000000 : 83000000; + break; + case RT3883_SYSCFG0_CPUCLK_384: + cpu_rate = 384000000; + sys_rate = (ddr2) ? 128000000 : 96000000; + break; + case RT3883_SYSCFG0_CPUCLK_480: + cpu_rate = 480000000; + sys_rate = (ddr2) ? 160000000 : 120000000; + break; + case RT3883_SYSCFG0_CPUCLK_500: + cpu_rate = 500000000; + sys_rate = (ddr2) ? 166000000 : 125000000; + break; + } + + ralink_clk_add("cpu", cpu_rate); + ralink_clk_add("10000100.timer", sys_rate); + ralink_clk_add("10000120.watchdog", sys_rate); + ralink_clk_add("10000500.uart", 40000000); + ralink_clk_add("10000b00.spi", sys_rate); + ralink_clk_add("10000c00.uartlite", 40000000); + ralink_clk_add("10100000.ethernet", sys_rate); +} + +void __init ralink_of_remap(void) +{ + rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc"); + rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc"); + + if (!rt_sysc_membase || !rt_memc_membase) + panic("Failed to remap core resources"); +} + +void prom_soc_init(struct ralink_soc_info *soc_info) +{ + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE); + const char *name; + u32 n0; + u32 n1; + u32 id; + + n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3); + n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7); + id = __raw_readl(sysc + RT3883_SYSC_REG_REVID); + + if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) { + soc_info->compatible = "ralink,rt3883-soc"; + name = "RT3883"; + } else { + panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1); + } + + snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, + "Ralink %s ver:%u eco:%u", + name, + (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK, + (id & RT3883_REVID_ECO_ID_MASK)); + + soc_info->mem_base = RT3883_SDRAM_BASE; + soc_info->mem_size_min = RT3883_MEM_SIZE_MIN; + soc_info->mem_size_max = RT3883_MEM_SIZE_MAX; +} diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index 1d1919a44e88..7a53b1e28a93 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c @@ -114,7 +114,7 @@ void __init replicate_kernel_text() * data structures on the first couple of pages of the first slot of each * node. If this is the case, getfirstfree(node) > getslotstart(node, 0). 
*/ -pfn_t node_getfirstfree(cnodeid_t cnode) +unsigned long node_getfirstfree(cnodeid_t cnode) { unsigned long loadbase = REP_BASE; nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 5f2bddb1860e..1230f56429d7 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -255,14 +255,14 @@ static void __init dump_topology(void) } } -static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot) +static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot) { nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); - return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT); + return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT); } -static pfn_t __init slot_psize_compute(cnodeid_t node, int slot) +static unsigned long __init slot_psize_compute(cnodeid_t node, int slot) { nasid_t nasid; lboard_t *brd; @@ -353,7 +353,7 @@ static void __init mlreset(void) static void __init szmem(void) { - pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */ + unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */ int slot; cnodeid_t node; @@ -390,10 +390,10 @@ static void __init szmem(void) static void __init node_mem_init(cnodeid_t node) { - pfn_t slot_firstpfn = slot_getbasepfn(node, 0); - pfn_t slot_freepfn = node_getfirstfree(node); + unsigned long slot_firstpfn = slot_getbasepfn(node, 0); + unsigned long slot_freepfn = node_getfirstfree(node); unsigned long bootmap_size; - pfn_t start_pfn, end_pfn; + unsigned long start_pfn, end_pfn; get_pfn_range_for_nid(node, &start_pfn, &end_pfn); @@ -467,7 +467,7 @@ void __init paging_init(void) pagetable_init(); for_each_online_node(node) { - pfn_t start_pfn, end_pfn; + unsigned long start_pfn, end_pfn; get_pfn_range_for_nid(node, &start_pfn, &end_pfn); diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index fff58ac176f3..2e21b761cb9c 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -69,7 +69,7 @@ static void rt_set_mode(enum clock_event_mode mode, /* Nothing to do ... */ } -int rt_timer_irq; +unsigned int rt_timer_irq; static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent); static DEFINE_PER_CPU(char [11], hub_rt_name); diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index f517e08e7f0d..a134ff4da12e 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -59,11 +59,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, current->comm, current->pid, r20); return -ENOSYS; } - -asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi, - u32 mask_lo, int fd, - const char __user *pathname) -{ - return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo, - fd, pathname); -} diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 3fe5259e2fea..915fbb4fc2fe 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -150,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) CURRENT_THREAD_INFO(r11, r1) ld r10,TI_FLAGS(r11) andi. 
r11,r10,_TIF_SYSCALL_T_OR_A - bne- syscall_dotrace + bne syscall_dotrace .Lsyscall_dotrace_cont: cmpldi 0,r0,NR_syscalls bge- syscall_enosys diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index cd6e19d263b3..8a285876aef8 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -126,11 +126,3 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags, return sys_sync_file_range(fd, offset, nbytes, flags); } - -asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags, - unsigned mask_hi, unsigned mask_lo, - int dfd, const char __user *pathname) -{ - u64 mask = ((u64)mask_hi << 32) | mask_lo; - return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname); -} diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 2d72d9e96c15..9cb1b975b353 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -793,10 +793,6 @@ ENTRY(sys32_stime_wrapper) llgtr %r2,%r2 # long * jg compat_sys_stime # branch to system call -ENTRY(sys32_sysctl_wrapper) - llgtr %r2,%r2 # struct compat_sysctl_args * - jg compat_sys_sysctl - ENTRY(sys32_fstat64_wrapper) llgfr %r2,%r2 # unsigned long llgtr %r3,%r3 # struct stat64 * @@ -1349,15 +1345,6 @@ ENTRY(sys_fanotify_init_wrapper) llgfr %r3,%r3 # unsigned int jg sys_fanotify_init # branch to system call -ENTRY(sys_fanotify_mark_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # unsigned int - sllg %r4,%r4,32 # get high word of 64bit mask - lr %r4,%r5 # get low word of 64bit mask - llgfr %r5,%r6 # unsigned int - llgt %r6,164(%r15) # char * - jg sys_fanotify_mark # branch to system call - ENTRY(sys_prlimit64_wrapper) lgfr %r2,%r2 # pid_t llgfr %r3,%r3 # unsigned int diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 9f214e992eed..913410bd74a3 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -157,7 +157,7 @@ SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper) /* 145 */ SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper) SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper) SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper) -SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper) +SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl) SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */ SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper) SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper) @@ -341,7 +341,7 @@ SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev) SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper) -SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper) +SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark) SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper) SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at) diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index 2e680b5245c9..f7c72b6efc27 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -239,15 +239,6 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */ nop nop - .globl sys32_fanotify_mark -sys32_fanotify_mark: - sethi %hi(sys_fanotify_mark), %g1 - sllx %o2, 32, %o2 - or %o2, 
%o3, %o2 - mov %o4, %o3 - jmpl %g1 + %lo(sys_fanotify_mark), %g0 - mov %o5, %o4 - .section __ex_table,"a" .align 4 .word 1b, __retl_efault, 2b, __retl_efault diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 8fd932080215..6d81597064b6 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -84,7 +84,7 @@ sys_call_table32: .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init -/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime +/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module diff --git a/arch/unicore32/kernel/sys.c b/arch/unicore32/kernel/sys.c index cfe79c9529b3..f9e862539314 100644 --- a/arch/unicore32/kernel/sys.c +++ b/arch/unicore32/kernel/sys.c @@ -28,19 +28,11 @@ #include <asm/syscalls.h> #include <asm/cacheflush.h> -/* Note: used by the compat code even in 64-bit Linux. */ -SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, - unsigned long, fd, unsigned long, off_4k) -{ - return sys_mmap_pgoff(addr, len, prot, flags, fd, - off_4k); -} - /* Provide the actual syscall number to call mapping. */ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), +#define sys_mmap2 sys_mmap_pgoff /* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */ void *sys_call_table[__NR_syscalls] = { [0 ... 
__NR_syscalls-1] = sys_ni_syscall, diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 4e4907c67d92..8e0ceecdc957 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -243,12 +243,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo, return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo, ((u64)len_hi << 32) | len_lo); } - -asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags, - u32 mask_lo, u32 mask_hi, - int fd, const char __user *pathname) -{ - return sys_fanotify_mark(fanotify_fd, flags, - ((u64)mask_hi << 32) | mask_lo, - fd, pathname); -} diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index 0ef202e232d6..82c34ee25a65 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h @@ -50,9 +50,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned, asmlinkage long sys32_sigreturn(void); asmlinkage long sys32_rt_sigreturn(void); -asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, - const char __user *); - #endif /* CONFIG_COMPAT */ #endif /* _ASM_X86_SYS_IA32_H */ diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 5f87b35fd2ef..2917a6452c49 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -37,8 +37,8 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *); unsigned long sys_sigreturn(void); /* kernel/vm86_32.c */ -int sys_vm86old(struct vm86_struct __user *); -int sys_vm86(unsigned long, unsigned long); +asmlinkage long sys_vm86old(struct vm86_struct __user *); +asmlinkage long sys_vm86(unsigned long, unsigned long); #else /* CONFIG_X86_32 */ diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index b3a4866661c5..2af848dfa754 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -120,6 +120,9 @@ #define MSR_CORE_C6_RESIDENCY 0x000003fd #define MSR_CORE_C7_RESIDENCY 0x000003fe #define MSR_PKG_C2_RESIDENCY 0x0000060d +#define MSR_PKG_C8_RESIDENCY 0x00000630 +#define MSR_PKG_C9_RESIDENCY 0x00000631 +#define MSR_PKG_C10_RESIDENCY 0x00000632 /* Run Time Average Power Limiting (RAPL) Interface */ diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 1cf5766dde16..e8edcf52e069 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -33,6 +33,7 @@ #include <linux/capability.h> #include <linux/errno.h> #include <linux/interrupt.h> +#include <linux/syscalls.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/signal.h> @@ -48,7 +49,6 @@ #include <asm/io.h> #include <asm/tlbflush.h> #include <asm/irq.h> -#include <asm/syscalls.h> /* * Known problems: @@ -202,36 +202,32 @@ out: static int do_vm86_irq_handling(int subfunction, int irqnumber); static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); -int sys_vm86old(struct vm86_struct __user *v86) +SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. * This remains on the stack until we * return to 32 bit user space. 
*/ - struct task_struct *tsk; - int tmp, ret = -EPERM; + struct task_struct *tsk = current; + int tmp; - tsk = current; if (tsk->thread.saved_sp0) - goto out; + return -EPERM; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, vm86plus) - sizeof(info.regs)); - ret = -EFAULT; if (tmp) - goto out; + return -EFAULT; memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); info.regs32 = current_pt_regs(); tsk->thread.vm86_info = v86; do_sys_vm86(&info, tsk); - ret = 0; /* we never return here */ -out: - return ret; + return 0; /* we never return here */ } -int sys_vm86(unsigned long cmd, unsigned long arg) +SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. @@ -239,7 +235,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg) * return to 32 bit user space. */ struct task_struct *tsk; - int tmp, ret; + int tmp; struct vm86plus_struct __user *v86; tsk = current; @@ -248,8 +244,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg) case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: - ret = do_vm86_irq_handling(cmd, (int)arg); - goto out; + return do_vm86_irq_handling(cmd, (int)arg); case VM86_PLUS_INSTALL_CHECK: /* * NOTE: on old vm86 stuff this will return the error @@ -257,28 +252,23 @@ int sys_vm86(unsigned long cmd, unsigned long arg) * interpreted as (invalid) address to vm86_struct. * So the installation check works. */ - ret = 0; - goto out; + return 0; } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ - ret = -EPERM; if (tsk->thread.saved_sp0) - goto out; + return -EPERM; v86 = (struct vm86plus_struct __user *)arg; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, regs32) - sizeof(info.regs)); - ret = -EFAULT; if (tmp) - goto out; + return -EFAULT; info.regs32 = current_pt_regs(); info.vm86plus.is_vm86pus = 1; tsk->thread.vm86_info = (struct vm86_struct __user *)v86; do_sys_vm86(&info, tsk); - ret = 0; /* we never return here */ -out: - return ret; + return 0; /* we never return here */ } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 8e517bba6a7c..8db0010ed150 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -60,6 +60,7 @@ #define OpGS 25ull /* GS */ #define OpMem8 26ull /* 8-bit zero extended memory operand */ #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ +#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) @@ -99,6 +100,7 @@ #define SrcImmUByte (OpImmUByte << SrcShift) #define SrcImmU (OpImmU << SrcShift) #define SrcSI (OpSI << SrcShift) +#define SrcXLat (OpXLat << SrcShift) #define SrcImmFAddr (OpImmFAddr << SrcShift) #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) @@ -533,6 +535,9 @@ FOP_SETCC(setle) FOP_SETCC(setnle) FOP_END; +FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET +FOP_END; + #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \ do { \ unsigned long _tmp; \ @@ -2996,6 +3001,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_aam(struct x86_emulate_ctxt *ctxt) +{ + u8 al, ah; + + if (ctxt->src.val == 0) + return emulate_de(ctxt); + + al = ctxt->dst.val & 0xff; + ah = al / ctxt->src.val; + al %= ctxt->src.val; + + ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); + + /* Set PF, 
ZF, SF */ + ctxt->src.type = OP_IMM; + ctxt->src.val = 0; + ctxt->src.bytes = 1; + fastop(ctxt, em_or); + + return X86EMUL_CONTINUE; +} + static int em_aad(struct x86_emulate_ctxt *ctxt) { u8 al = ctxt->dst.val & 0xff; @@ -3936,7 +3963,10 @@ static const struct opcode opcode_table[256] = { /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), - N, I(DstAcc | SrcImmByte | No64, em_aad), N, N, + I(DstAcc | SrcImmUByte | No64, em_aam), + I(DstAcc | SrcImmUByte | No64, em_aad), + F(DstAcc | ByteOp | No64, em_salc), + I(DstAcc | SrcXLat | ByteOp, em_mov), /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ @@ -4198,6 +4228,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, op->val = 0; op->count = 1; break; + case OpXLat: + op->type = OP_MEM; + op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; + op->addr.mem.ea = + register_address(ctxt, + reg_read(ctxt, VCPU_REGS_RBX) + + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); + op->addr.mem.seg = seg_override(ctxt); + op->val = 0; + break; case OpImmFAddr: op->type = OP_IMM; op->addr.mem.ea = ctxt->_eip; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 25a791ed21c8..260a91939555 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5434,6 +5434,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) return 0; } + if (vcpu->arch.halt_request) { + vcpu->arch.halt_request = 0; + ret = kvm_emulate_halt(vcpu); + goto out; + } + if (signal_pending(current)) goto out; if (need_resched()) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 05a8b1a2300d..094b5d96ab14 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -555,6 +555,25 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) } EXPORT_SYMBOL_GPL(kvm_lmsw); +static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) +{ + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + !vcpu->guest_xcr0_loaded) { + /* kvm_set_xcr() also depends on this */ + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); + vcpu->guest_xcr0_loaded = 1; + } +} + +static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) +{ + if (vcpu->guest_xcr0_loaded) { + if (vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); + vcpu->guest_xcr0_loaded = 0; + } +} + int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0; @@ -571,8 +590,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) return 1; if (xcr0 & ~host_xcr0) return 1; + kvm_put_guest_xcr0(vcpu); vcpu->arch.xcr0 = xcr0; - vcpu->guest_xcr0_loaded = 0; return 0; } @@ -5614,25 +5633,6 @@ static void inject_pending_event(struct kvm_vcpu *vcpu) } } -static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) -{ - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - !vcpu->guest_xcr0_loaded) { - /* kvm_set_xcr() also depends on this */ - xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); - vcpu->guest_xcr0_loaded = 1; - } -} - -static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) -{ - if (vcpu->guest_xcr0_loaded) { - if (vcpu->arch.xcr0 != host_xcr0) - xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); - vcpu->guest_xcr0_loaded = 0; - } -} - static void process_nmi(struct kvm_vcpu *vcpu) { unsigned limit = 2; diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 4a9be6ddf054..48e8461057ba 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -295,11 +295,10 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) int pos; u32 
table_offset, bir; - pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); - + pos = dev->msix_cap; pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, &table_offset); - bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); + bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); map_irq.table_base = pci_resource_start(dev, bir); map_irq.entry_nr = msidesc->msi_attrib.entry_nr; diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index d0d59bfbccce..aabfb8380a1c 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -345,7 +345,7 @@ 336 i386 perf_event_open sys_perf_event_open 337 i386 recvmmsg sys_recvmmsg compat_sys_recvmmsg 338 i386 fanotify_init sys_fanotify_init -339 i386 fanotify_mark sys_fanotify_mark sys32_fanotify_mark +339 i386 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 340 i386 prlimit64 sys_prlimit64 341 i386 name_to_handle_at sys_name_to_handle_at 342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 53d4f680c9b5..a492be2635ac 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -85,7 +85,29 @@ EXPORT_SYMBOL_GPL(hypercall_page); +/* + * Pointer to the xen_vcpu_info structure or + * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info + * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info + * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point + * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to + * acknowledge pending events. + * Also more subtly it is used by the patched version of irq enable/disable + * e.g. xen_irq_enable_direct and xen_iret in PV mode. + * + * The desire to be able to do those mask/unmask operations as a single + * instruction by using the per-cpu offset held in %gs is the real reason + * vcpu info is in a per-cpu pointer and the original reason for this + * hypercall. + * + */ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); + +/* + * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info + * hypercall. This can be used both in PV and PVHVM mode. The structure + * overrides the default per_cpu(xen_vcpu, cpu) value. + */ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); enum xen_domain_type xen_domain_type = XEN_NATIVE; @@ -157,6 +179,21 @@ static void xen_vcpu_setup(int cpu) BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); + /* + * This path is called twice on PVHVM - first during bootup via + * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being + * hotplugged: cpu_up -> xen_hvm_cpu_notify. + * As we can only do the VCPUOP_register_vcpu_info once lets + * not over-write its result. + * + * For PV it is called during restore (xen_vcpu_restore) and bootup + * (xen_setup_vcpu_info_placement). The hotplug mechanism does not + * use this function. + */ + if (xen_hvm_domain()) { + if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) + return; + } if (cpu < MAX_VIRT_CPUS) per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; @@ -172,7 +209,12 @@ static void xen_vcpu_setup(int cpu) /* Check to see if the hypervisor will put the vcpu_info structure where we want it, which allows direct access via - a percpu-variable. */ + a percpu-variable. + N.B. This hypercall can _only_ be called once per CPU. Subsequent + calls will error out with -EINVAL. 
This is due to the fact that + hypervisor has no unregister variant and this hypercall does not + allow to over-write info.mfn and info.offset. + */ err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); if (err) { @@ -387,6 +429,9 @@ static void __init xen_init_cpuid_mask(void) cpuid_leaf1_edx_mask &= ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ (1 << X86_FEATURE_ACPI)); /* disable ACPI */ + + cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32)); + ax = 1; cx = 0; xen_cpuid(&ax, &bx, &cx, &dx); @@ -1603,6 +1648,9 @@ void __ref xen_hvm_init_shared_info(void) * online but xen_hvm_init_shared_info is run at resume time too and * in that case multiple vcpus might be online. */ for_each_online_cpu(cpu) { + /* Leave it to be NULL. */ + if (cpu >= MAX_VIRT_CPUS) + continue; per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; } } diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 8b54603ce816..3002ec1bb71a 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -364,7 +364,7 @@ void __cpuinit xen_init_lock_cpu(int cpu) int irq; const char *name; - WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n", + WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", cpu, per_cpu(lock_kicker_irq, cpu)); /* diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index 8934298a638d..de15b4f4b237 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -72,12 +72,12 @@ fail: * R/W ops. **************************************************/ -static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom) +static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom, + size_t words) { int i; - for (i = 0; i < SSB_SPROMSIZE_WORDS_R4; i++) - sprom[i] = bcma_read16(bus->drv_cc.core, - offset + (i * 2)); + for (i = 0; i < words; i++) + sprom[i] = bcma_read16(bus->drv_cc.core, offset + (i * 2)); } /************************************************** @@ -124,29 +124,29 @@ static inline u8 bcma_crc8(u8 crc, u8 data) return t[crc ^ data]; } -static u8 bcma_sprom_crc(const u16 *sprom) +static u8 bcma_sprom_crc(const u16 *sprom, size_t words) { int word; u8 crc = 0xFF; - for (word = 0; word < SSB_SPROMSIZE_WORDS_R4 - 1; word++) { + for (word = 0; word < words - 1; word++) { crc = bcma_crc8(crc, sprom[word] & 0x00FF); crc = bcma_crc8(crc, (sprom[word] & 0xFF00) >> 8); } - crc = bcma_crc8(crc, sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & 0x00FF); + crc = bcma_crc8(crc, sprom[words - 1] & 0x00FF); crc ^= 0xFF; return crc; } -static int bcma_sprom_check_crc(const u16 *sprom) +static int bcma_sprom_check_crc(const u16 *sprom, size_t words) { u8 crc; u8 expected_crc; u16 tmp; - crc = bcma_sprom_crc(sprom); - tmp = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_CRC; + crc = bcma_sprom_crc(sprom, words); + tmp = sprom[words - 1] & SSB_SPROM_REVISION_CRC; expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT; if (crc != expected_crc) return -EPROTO; @@ -154,21 +154,25 @@ static int bcma_sprom_check_crc(const u16 *sprom) return 0; } -static int bcma_sprom_valid(const u16 *sprom) +static int bcma_sprom_valid(struct bcma_bus *bus, const u16 *sprom, + size_t words) { u16 revision; int err; - err = bcma_sprom_check_crc(sprom); + err = bcma_sprom_check_crc(sprom, words); if (err) return err; - revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_REV; - if (revision != 8 && revision != 9) { + revision = sprom[words - 1] & SSB_SPROM_REVISION_REV; + if (revision != 8 && revision != 9 && 
revision != 10) { pr_err("Unsupported SPROM revision: %d\n", revision); return -ENOENT; } + bus->sprom.revision = revision; + bcma_debug(bus, "Found SPROM revision %d\n", revision); + return 0; } @@ -208,9 +212,6 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom) BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) != ARRAY_SIZE(bus->sprom.core_pwr_info)); - bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & - SSB_SPROM_REVISION_REV; - for (i = 0; i < 3; i++) { v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i]; *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v); @@ -502,7 +503,6 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus) case BCMA_CHIP_ID_BCM4331: present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT; break; - case BCMA_CHIP_ID_BCM43224: case BCMA_CHIP_ID_BCM43225: /* for these chips OTP is always available */ @@ -550,7 +550,9 @@ int bcma_sprom_get(struct bcma_bus *bus) { u16 offset = BCMA_CC_SPROM; u16 *sprom; - int err = 0; + size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4, + SSB_SPROMSIZE_WORDS_R10, }; + int i, err = 0; if (!bus->drv_cc.core) return -EOPNOTSUPP; @@ -579,32 +581,37 @@ int bcma_sprom_get(struct bcma_bus *bus) } } - sprom = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16), - GFP_KERNEL); - if (!sprom) - return -ENOMEM; - if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 || bus->chipinfo.id == BCMA_CHIP_ID_BCM43431) bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); bcma_debug(bus, "SPROM offset 0x%x\n", offset); - bcma_sprom_read(bus, offset, sprom); + for (i = 0; i < ARRAY_SIZE(sprom_sizes); i++) { + size_t words = sprom_sizes[i]; + + sprom = kcalloc(words, sizeof(u16), GFP_KERNEL); + if (!sprom) + return -ENOMEM; + + bcma_sprom_read(bus, offset, sprom, words); + err = bcma_sprom_valid(bus, sprom, words); + if (!err) + break; + + kfree(sprom); + } if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 || bus->chipinfo.id == BCMA_CHIP_ID_BCM43431) bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); - err = bcma_sprom_valid(sprom); if (err) { - bcma_warn(bus, "invalid sprom read from the PCIe card, try to use fallback sprom\n"); + bcma_warn(bus, "Invalid SPROM read from the PCIe card, trying to use fallback SPROM\n"); err = bcma_fill_sprom_with_fallback(bus, &bus->sprom); - goto out; + } else { + bcma_sprom_extract_r8(bus, sprom); + kfree(sprom); } - bcma_sprom_extract_r8(bus, sprom); - -out: - kfree(sprom); return err; } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 6961bbeab3ed..264f55099940 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1685,6 +1685,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, @@ -2341,7 +2342,7 @@ struct hid_device *hid_allocate_device(void) init_waitqueue_head(&hdev->debug_wait); INIT_LIST_HEAD(&hdev->debug_list); - mutex_init(&hdev->debug_list_lock); + spin_lock_init(&hdev->debug_list_lock); sema_init(&hdev->driver_lock, 1); sema_init(&hdev->driver_input_lock, 1); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c 
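/*
 * Illustrative sketch, not part of the bcma patch above: the reworked
 * bcma_sprom_get() loop tries each candidate SPROM size in turn,
 * validates what it read by CRC, and keeps the first buffer that checks
 * out.  The helpers below (read_words, crc_ok) are hypothetical
 * stand-ins for bcma_sprom_read()/bcma_sprom_valid().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static uint16_t *try_sprom_sizes(const size_t *sizes, int n,
				 void (*read_words)(uint16_t *buf, size_t words),
				 bool (*crc_ok)(const uint16_t *buf, size_t words),
				 size_t *out_words)
{
	int i;

	for (i = 0; i < n; i++) {
		uint16_t *buf = calloc(sizes[i], sizeof(*buf));

		if (!buf)
			return NULL;		/* -ENOMEM in the real code */

		read_words(buf, sizes[i]);
		if (crc_ok(buf, sizes[i])) {
			*out_words = sizes[i];	/* caller frees buf */
			return buf;
		}
		free(buf);			/* wrong size/revision: try the next candidate */
	}
	return NULL;				/* upstream falls back to a fallback SPROM here */
}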
index 7e56cb3855e3..8453214ec376 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -579,15 +579,16 @@ void hid_debug_event(struct hid_device *hdev, char *buf) { int i; struct hid_debug_list *list; + unsigned long flags; - mutex_lock(&hdev->debug_list_lock); + spin_lock_irqsave(&hdev->debug_list_lock, flags); list_for_each_entry(list, &hdev->debug_list, node) { for (i = 0; i < strlen(buf); i++) list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = buf[i]; list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; } - mutex_unlock(&hdev->debug_list_lock); + spin_unlock_irqrestore(&hdev->debug_list_lock, flags); wake_up_interruptible(&hdev->debug_wait); } @@ -977,6 +978,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) { int err = 0; struct hid_debug_list *list; + unsigned long flags; if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) { err = -ENOMEM; @@ -992,9 +994,9 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) file->private_data = list; mutex_init(&list->read_mutex); - mutex_lock(&list->hdev->debug_list_lock); + spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_add_tail(&list->node, &list->hdev->debug_list); - mutex_unlock(&list->hdev->debug_list_lock); + spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); out: return err; @@ -1088,10 +1090,11 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait) static int hid_debug_events_release(struct inode *inode, struct file *file) { struct hid_debug_list *list = file->private_data; + unsigned long flags; - mutex_lock(&list->hdev->debug_list_lock); + spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_del(&list->node); - mutex_unlock(&list->hdev->debug_list_lock); + spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); kfree(list->hid_debug_buf); kfree(list); diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c index 9b0efb0083fe..d16491192112 100644 --- a/drivers/hid/hid-steelseries.c +++ b/drivers/hid/hid-steelseries.c @@ -18,7 +18,8 @@ #include "hid-ids.h" -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) #define SRWS1_NUMBER_LEDS 15 struct steelseries_srws1_data { __u16 led_state; @@ -107,7 +108,8 @@ static __u8 steelseries_srws1_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds) { struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list; @@ -370,7 +372,8 @@ MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices); static struct hid_driver steelseries_srws1_driver = { .name = "steelseries_srws1", .id_table = steelseries_srws1_devices, -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) .probe = steelseries_srws1_probe, .remove = steelseries_srws1_remove, #endif diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 0e8fab1913df..fa6964d8681a 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -273,6 +273,27 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = { .target_residency = 500, .enter = &intel_idle 
}, { + .name = "C8-HSW", + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, + .target_residency = 900, + .enter = &intel_idle }, + { + .name = "C9-HSW", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, + .target_residency = 1800, + .enter = &intel_idle }, + { + .name = "C10-HSW", + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, + .target_residency = 7700, + .enter = &intel_idle }, + { .enter = NULL } }; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index c6083132c4b8..0387e05cdb98 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -319,6 +319,9 @@ static void __cache_size_refresh(void) static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, enum data_mode *data_mode) { + unsigned noio_flag; + void *ptr; + if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { *data_mode = DATA_MODE_SLAB; return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); @@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, } *data_mode = DATA_MODE_VMALLOC; - return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); + + /* + * __vmalloc allocates the data pages and auxiliary structures with + * gfp_flags that were specified, but pagetables are always allocated + * with GFP_KERNEL, no matter what was specified as gfp_mask. + * + * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that + * all allocations done by this process (including pagetables) are done + * as if GFP_NOIO was specified. + */ + + if (gfp_mask & __GFP_NORETRY) + noio_flag = memalloc_noio_save(); + + ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); + + if (gfp_mask & __GFP_NORETRY) + memalloc_noio_restore(noio_flag); + + return ptr; } /* diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 83e995fece88..1af7255bbffb 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd, struct dm_cache_statistics *stats) { down_read(&cmd->root_lock); - memcpy(stats, &cmd->stats, sizeof(*stats)); + *stats = cmd->stats; up_read(&cmd->root_lock); } @@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd, struct dm_cache_statistics *stats) { down_write(&cmd->root_lock); - memcpy(&cmd->stats, stats, sizeof(*stats)); + cmd->stats = *stats; up_write(&cmd->root_lock); } diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index 558bdfdabf5f..33369ca9614f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -130,8 +130,8 @@ struct dm_cache_policy { * * Must not block. * - * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK - * would be typical). + * Returns 0 if in cache, -ENOENT if not, < 0 for other errors + * (-EWOULDBLOCK would be typical). */ int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 10744091e6ca..df44b60e66f2 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -205,7 +205,7 @@ struct per_bio_data { /* * writethrough fields. 
These MUST remain at the end of this * structure and the 'cache' member must be the first as it - * is used to determine the offsetof the writethrough fields. + * is used to determine the offset of the writethrough fields. */ struct cache *cache; dm_cblock_t cblock; @@ -393,7 +393,7 @@ static int get_cell(struct cache *cache, return r; } - /*----------------------------------------------------------------*/ +/*----------------------------------------------------------------*/ static bool is_dirty(struct cache *cache, dm_cblock_t b) { @@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl } /*----------------------------------------------------------------*/ + static bool block_size_is_power_of_two(struct cache *cache) { return cache->sectors_per_block_shift >= 0; @@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err) /* * We can't issue this bio directly, since we're in interrupt - * context. So it get's put on a bio list for processing by the + * context. So it gets put on a bio list for processing by the * worker thread. */ defer_writethrough_bio(pb->cache, bio); @@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws) static void do_waker(struct work_struct *ws) { struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); + policy_tick(cache->policy); wake_worker(cache); queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); } @@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv, static struct kmem_cache *migration_cache; -static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv) +#define NOT_CORE_OPTION 1 + +static int process_config_option(struct cache *cache, const char *key, const char *value) +{ + unsigned long tmp; + + if (!strcasecmp(key, "migration_threshold")) { + if (kstrtoul(value, 10, &tmp)) + return -EINVAL; + + cache->migration_threshold = tmp; + return 0; + } + + return NOT_CORE_OPTION; +} + +static int set_config_value(struct cache *cache, const char *key, const char *value) +{ + int r = process_config_option(cache, key, value); + + if (r == NOT_CORE_OPTION) + r = policy_set_config_value(cache->policy, key, value); + + if (r) + DMWARN("bad config value for %s: %s", key, value); + + return r; +} + +static int set_config_values(struct cache *cache, int argc, const char **argv) { int r = 0; @@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a } while (argc) { - r = policy_set_config_value(p, argv[0], argv[1]); - if (r) { - DMWARN("policy_set_config_value failed: key = '%s', value = '%s'", - argv[0], argv[1]); - return r; - } + r = set_config_value(cache, argv[0], argv[1]); + if (r) + break; argc -= 2; argv += 2; @@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a static int create_cache_policy(struct cache *cache, struct cache_args *ca, char **error) { - int r; - cache->policy = dm_cache_policy_create(ca->policy_name, cache->cache_size, cache->origin_sectors, @@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, return -ENOMEM; } - r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); - if (r) { - *error = "Error setting cache policy's config values"; - dm_cache_policy_destroy(cache->policy); - cache->policy = NULL; - } - - return r; + return 0; } /* @@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size, 
return discard_block_size; } -#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) +#define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) { @@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->discards_supported = true; ti->discard_zeroes_data_unsupported = true; - memcpy(&cache->features, &ca->features, sizeof(cache->features)); + cache->features = ca->features; ti->per_bio_data_size = get_per_bio_data_size(cache); cache->callbacks.congested_fn = cache_is_congested; @@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result) r = create_cache_policy(cache, ca, error); if (r) goto bad; + cache->policy_nr_args = ca->policy_argc; + cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; + + r = set_config_values(cache, ca->policy_argc, ca->policy_argv); + if (r) { + *error = "Error setting cache policy's config values"; + goto bad; + } cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, ca->block_size, may_format, @@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result) INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); - cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; atomic_set(&cache->nr_migrations, 0); init_waitqueue_head(&cache->migration_wait); + r = -ENOMEM; cache->nr_dirty = 0; cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); if (!cache->dirty_bitset) { @@ -2517,23 +2545,6 @@ err: DMEMIT("Error"); } -#define NOT_CORE_OPTION 1 - -static int process_config_option(struct cache *cache, char **argv) -{ - unsigned long tmp; - - if (!strcasecmp(argv[0], "migration_threshold")) { - if (kstrtoul(argv[1], 10, &tmp)) - return -EINVAL; - - cache->migration_threshold = tmp; - return 0; - } - - return NOT_CORE_OPTION; -} - /* * Supports <key> <value>. 
* @@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv) */ static int cache_message(struct dm_target *ti, unsigned argc, char **argv) { - int r; struct cache *cache = ti->private; if (argc != 2) return -EINVAL; - r = process_config_option(cache, argv); - if (r == NOT_CORE_OPTION) - return policy_set_config_value(cache->policy, argv[0], argv[1]); - - return r; + return set_config_value(cache, argv[0], argv[1]); } static int cache_iterate_devices(struct dm_target *ti, @@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 1, 0}, + .version = {1, 1, 1}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 51bb81676be3..bdf26f5bd326 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, ti->num_flush_bios = 1; ti->num_discard_bios = 1; + ti->num_write_same_bios = 1; return 0; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c0e07026a8d1..c434e5aab2df 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); if (!s->pending_pool) { ti->error = "Could not allocate mempool for pending exceptions"; + r = -ENOMEM; goto bad_pending_pool; } diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index ea5e878a30b9..d907ca6227ce 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc, static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct stripe_c *sc; - sector_t width; + sector_t width, tmp_len; uint32_t stripes; uint32_t chunk_size; int r; @@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) } width = ti->len; - if (sector_div(width, chunk_size)) { + if (sector_div(width, stripes)) { ti->error = "Target length not divisible by " - "chunk size"; + "number of stripes"; return -EINVAL; } - if (sector_div(width, stripes)) { + tmp_len = width; + if (sector_div(tmp_len, chunk_size)) { ti->error = "Target length not divisible by " - "number of stripes"; + "chunk size"; return -EINVAL; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e50dad0c65f4..1ff252ab7d46 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t) return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) + ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) return false; } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 00cee02f8fc9..60bce435f4fa 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, return r; } -static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) +static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count) { int r; dm_block_t old_count; - r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count); + r = dm_sm_get_nr_blocks(sm, &old_count); if (r) return r; @@ -1658,11 +1658,11 @@ static int 
__resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) return 0; if (new_count < old_count) { - DMERR("cannot reduce size of data device"); + DMERR("cannot reduce size of space map"); return -EINVAL; } - return dm_sm_extend(pmd->data_sm, new_count - old_count); + return dm_sm_extend(sm, new_count - old_count); } int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) @@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) down_write(&pmd->root_lock); if (!pmd->fail_io) - r = __resize_data_dev(pmd, new_count); + r = __resize_space_map(pmd->data_sm, new_count); + up_write(&pmd->root_lock); + + return r; +} + +int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) +{ + int r = -EINVAL; + + down_write(&pmd->root_lock); + if (!pmd->fail_io) + r = __resize_space_map(pmd->metadata_sm, new_count); up_write(&pmd->root_lock); return r; @@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) dm_bm_set_read_only(pmd->bm); up_write(&pmd->root_lock); } + +int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + int r; + + down_write(&pmd->root_lock); + r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); + up_write(&pmd->root_lock); + + return r; +} diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 0cecc3702885..845ebbe589a9 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -8,6 +8,7 @@ #define DM_THIN_METADATA_H #include "persistent-data/dm-block-manager.h" +#include "persistent-data/dm-space-map.h" #define THIN_METADATA_BLOCK_SIZE 4096 @@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); * blocks would be lost. */ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); +int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); /* * Flicks the underlying block manager into read only mode, so you know @@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); */ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); +int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context); + /*----------------------------------------------------------------*/ #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 004ad1652b73..759cffc45cab 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) return r; if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { - DMWARN("%s: reached low water mark, sending event.", + DMWARN("%s: reached low water mark for data device: sending event.", dm_device_name(pool->pool_md)); spin_lock_irqsave(&pool->lock, flags); pool->low_water_triggered = 1; @@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio) bio_io_error(bio); } +/* + * FIXME: should we also commit due to size of transaction, measured in + * metadata blocks? 
+ */ static int need_commit_due_to_time(struct pool *pool) { return jiffies < pool->last_commit_jiffies || @@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, return r; } +static void metadata_low_callback(void *context) +{ + struct pool *pool = context; + + DMWARN("%s: reached low water mark for metadata device: sending event.", + dm_device_name(pool->pool_md)); + + dm_table_event(pool->ti->table); +} + +static sector_t get_metadata_dev_size(struct block_device *bdev) +{ + sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; + char buffer[BDEVNAME_SIZE]; + + if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) { + DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", + bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); + metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING; + } + + return metadata_dev_size; +} + +static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev) +{ + sector_t metadata_dev_size = get_metadata_dev_size(bdev); + + sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); + + return metadata_dev_size; +} + +/* + * When a metadata threshold is crossed a dm event is triggered, and + * userland should respond by growing the metadata device. We could let + * userland set the threshold, like we do with the data threshold, but I'm + * not sure they know enough to do this well. + */ +static dm_block_t calc_metadata_threshold(struct pool_c *pt) +{ + /* + * 4M is ample for all ops with the possible exception of thin + * device deletion which is harmless if it fails (just retry the + * delete after you've grown the device). + */ + dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4; + return min((dm_block_t)1024ULL /* 4M */, quarter); +} + /* * thin-pool <metadata dev> <data dev> * <data block size (sectors)> @@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) unsigned long block_size; dm_block_t low_water_blocks; struct dm_dev *metadata_dev; - sector_t metadata_dev_size; - char b[BDEVNAME_SIZE]; + fmode_t metadata_mode; /* * FIXME Remove validation from scope of lock. @@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -EINVAL; goto out_unlock; } + as.argc = argc; as.argv = argv; - r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev); + /* + * Set default pool features. + */ + pool_features_init(&pf); + + dm_consume_args(&as, 4); + r = parse_pool_features(&as, &pf, ti); + if (r) + goto out_unlock; + + metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE); + r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev); if (r) { ti->error = "Error opening metadata block device"; goto out_unlock; } - metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT; - if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) - DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", - bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS); + /* + * Run for the side-effect of possibly issuing a warning if the + * device is too big. + */ + (void) get_metadata_dev_size(metadata_dev->bdev); r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); if (r) { @@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) goto out; } - /* - * Set default pool features. 
- */ - pool_features_init(&pf); - - dm_consume_args(&as, 4); - r = parse_pool_features(&as, &pf, ti); - if (r) - goto out; - pt = kzalloc(sizeof(*pt), GFP_KERNEL); if (!pt) { r = -ENOMEM; @@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) } ti->private = pt; + r = dm_pool_register_metadata_threshold(pt->pool->pmd, + calc_metadata_threshold(pt), + metadata_low_callback, + pool); + if (r) + goto out_free_pt; + pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); @@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio) return r; } -/* - * Retrieves the number of blocks of the data device from - * the superblock and compares it to the actual device size, - * thus resizing the data device in case it has grown. - * - * This both copes with opening preallocated data devices in the ctr - * being followed by a resume - * -and- - * calling the resume method individually after userspace has - * grown the data device in reaction to a table event. - */ -static int pool_preresume(struct dm_target *ti) +static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) { int r; struct pool_c *pt = ti->private; @@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti) sector_t data_size = ti->len; dm_block_t sb_data_size; - /* - * Take control of the pool object. - */ - r = bind_control_target(pool, ti); - if (r) - return r; + *need_commit = false; (void) sector_div(data_size, pool->sectors_per_block); @@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti) } if (data_size < sb_data_size) { - DMERR("pool target too small, is %llu blocks (expected %llu)", + DMERR("pool target (%llu blocks) too small: expected %llu", (unsigned long long)data_size, sb_data_size); return -EINVAL; @@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti) r = dm_pool_resize_data_dev(pool->pmd, data_size); if (r) { DMERR("failed to resize data device"); - /* FIXME Stricter than necessary: Rollback transaction instead here */ set_pool_mode(pool, PM_READ_ONLY); return r; } - (void) commit_or_fallback(pool); + *need_commit = true; } return 0; } +static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) +{ + int r; + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + dm_block_t metadata_dev_size, sb_metadata_dev_size; + + *need_commit = false; + + metadata_dev_size = get_metadata_dev_size(pool->md_dev); + + r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); + if (r) { + DMERR("failed to retrieve data device size"); + return r; + } + + if (metadata_dev_size < sb_metadata_dev_size) { + DMERR("metadata device (%llu sectors) too small: expected %llu", + metadata_dev_size, sb_metadata_dev_size); + return -EINVAL; + + } else if (metadata_dev_size > sb_metadata_dev_size) { + r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); + if (r) { + DMERR("failed to resize metadata device"); + return r; + } + + *need_commit = true; + } + + return 0; +} + +/* + * Retrieves the number of blocks of the data device from + * the superblock and compares it to the actual device size, + * thus resizing the data device in case it has grown. + * + * This both copes with opening preallocated data devices in the ctr + * being followed by a resume + * -and- + * calling the resume method individually after userspace has + * grown the data device in reaction to a table event. 
+ */ +static int pool_preresume(struct dm_target *ti) +{ + int r; + bool need_commit1, need_commit2; + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + + /* + * Take control of the pool object. + */ + r = bind_control_target(pool, ti); + if (r) + return r; + + r = maybe_resize_data_dev(ti, &need_commit1); + if (r) + return r; + + r = maybe_resize_metadata_dev(ti, &need_commit2); + if (r) + return r; + + if (need_commit1 || need_commit2) + (void) commit_or_fallback(pool); + + return 0; +} + static void pool_resume(struct dm_target *ti) { struct pool_c *pt = ti->private; @@ -2549,7 +2669,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 7, 0}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index f6d29e614ab7..e735a6d5a793 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -248,7 +248,8 @@ static struct dm_space_map ops = { .new_block = sm_disk_new_block, .commit = sm_disk_commit, .root_size = sm_disk_root_size, - .copy_root = sm_disk_copy_root + .copy_root = sm_disk_copy_root, + .register_threshold_callback = NULL }; struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 906cf3df71af..1c959684caef 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -17,6 +17,55 @@ /*----------------------------------------------------------------*/ /* + * An edge triggered threshold. + */ +struct threshold { + bool threshold_set; + bool value_set; + dm_block_t threshold; + dm_block_t current_value; + dm_sm_threshold_fn fn; + void *context; +}; + +static void threshold_init(struct threshold *t) +{ + t->threshold_set = false; + t->value_set = false; +} + +static void set_threshold(struct threshold *t, dm_block_t value, + dm_sm_threshold_fn fn, void *context) +{ + t->threshold_set = true; + t->threshold = value; + t->fn = fn; + t->context = context; +} + +static bool below_threshold(struct threshold *t, dm_block_t value) +{ + return t->threshold_set && value <= t->threshold; +} + +static bool threshold_already_triggered(struct threshold *t) +{ + return t->value_set && below_threshold(t, t->current_value); +} + +static void check_threshold(struct threshold *t, dm_block_t value) +{ + if (below_threshold(t, value) && + !threshold_already_triggered(t)) + t->fn(t->context); + + t->value_set = true; + t->current_value = value; +} + +/*----------------------------------------------------------------*/ + +/* * Space map interface. 
* * The low level disk format is written using the standard btree and @@ -54,6 +103,8 @@ struct sm_metadata { unsigned allocated_this_transaction; unsigned nr_uncommitted; struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; + + struct threshold threshold; }; static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) @@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm) kfree(smm); } -static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) -{ - DMERR("doesn't support extend"); - return -EINVAL; -} - static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count) { struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); @@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b) { + dm_block_t count; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + int r = sm_metadata_new_block_(sm, b); if (r) DMERR("unable to allocate new metadata block"); + + r = sm_metadata_get_nr_free(sm, &count); + if (r) + DMERR("couldn't get free block count"); + + check_threshold(&smm->threshold, count); + return r; } @@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm) return 0; } +static int sm_metadata_register_threshold_callback(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + + set_threshold(&smm->threshold, threshold, fn, context); + + return 0; +} + static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result) { *result = sizeof(struct disk_sm_root); @@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t return 0; } +static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks); + static struct dm_space_map ops = { .destroy = sm_metadata_destroy, .extend = sm_metadata_extend, @@ -395,7 +464,8 @@ static struct dm_space_map ops = { .new_block = sm_metadata_new_block, .commit = sm_metadata_commit, .root_size = sm_metadata_root_size, - .copy_root = sm_metadata_copy_root + .copy_root = sm_metadata_copy_root, + .register_threshold_callback = sm_metadata_register_threshold_callback }; /*----------------------------------------------------------------*/ @@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm) static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks) { - DMERR("boostrap doesn't support extend"); + DMERR("bootstrap doesn't support extend"); return -EINVAL; } @@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm, static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b, uint32_t count) { - DMERR("boostrap doesn't support set_count"); + DMERR("bootstrap doesn't support set_count"); return -EINVAL; } @@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm) static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) { - DMERR("boostrap doesn't support root_size"); + DMERR("bootstrap doesn't support root_size"); return -EINVAL; } @@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where, size_t max) { - DMERR("boostrap doesn't support copy_root"); + DMERR("bootstrap doesn't support copy_root"); return -EINVAL; } @@ -517,11 +587,42 
@@ static struct dm_space_map bootstrap_ops = { .new_block = sm_bootstrap_new_block, .commit = sm_bootstrap_commit, .root_size = sm_bootstrap_root_size, - .copy_root = sm_bootstrap_copy_root + .copy_root = sm_bootstrap_copy_root, + .register_threshold_callback = NULL }; /*----------------------------------------------------------------*/ +static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) +{ + int r, i; + enum allocation_event ev; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + dm_block_t old_len = smm->ll.nr_blocks; + + /* + * Flick into a mode where all blocks get allocated in the new area. + */ + smm->begin = old_len; + memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); + + /* + * Extend. + */ + r = sm_ll_extend(&smm->ll, extra_blocks); + + /* + * Switch back to normal behaviour. + */ + memcpy(&smm->sm, &ops, sizeof(smm->sm)); + for (i = old_len; !r && i < smm->begin; i++) + r = sm_ll_inc(&smm->ll, i, &ev); + + return r; +} + +/*----------------------------------------------------------------*/ + struct dm_space_map *dm_sm_metadata_init(void) { struct sm_metadata *smm; @@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm, smm->recursion_count = 0; smm->allocated_this_transaction = 0; smm->nr_uncommitted = 0; + threshold_init(&smm->threshold); memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); @@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm, smm->recursion_count = 0; smm->allocated_this_transaction = 0; smm->nr_uncommitted = 0; + threshold_init(&smm->threshold); memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); return 0; diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h index 1cbfc6b1638a..3e6d1153b7c4 100644 --- a/drivers/md/persistent-data/dm-space-map.h +++ b/drivers/md/persistent-data/dm-space-map.h @@ -9,6 +9,8 @@ #include "dm-block-manager.h" +typedef void (*dm_sm_threshold_fn)(void *context); + /* * struct dm_space_map keeps a record of how many times each block in a device * is referenced. It needs to be fixed on disk as part of the transaction. @@ -59,6 +61,15 @@ struct dm_space_map { */ int (*root_size)(struct dm_space_map *sm, size_t *result); int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len); + + /* + * You can register one threshold callback which is edge-triggered + * when the free space in the space map drops below the threshold. 
+ */ + int (*register_threshold_callback)(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context); }; /*----------------------------------------------------------------*/ @@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le return sm->copy_root(sm, copy_to_here_le, len); } +static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + if (sm->register_threshold_callback) + return sm->register_threshold_callback(sm, threshold, fn, context); + + return -EINVAL; +} + + #endif /* _LINUX_DM_SPACE_MAP_H */ diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index f8f0156dff4e..200020eb3005 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -280,5 +280,6 @@ source "drivers/net/wireless/rtlwifi/Kconfig" source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" source "drivers/net/wireless/mwifiex/Kconfig" +source "drivers/net/wireless/cw1200/Kconfig" endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 67156efe14c4..0fab227025be 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -57,3 +57,5 @@ obj-$(CONFIG_MWIFIEX) += mwifiex/ obj-$(CONFIG_BRCMFMAC) += brcm80211/ obj-$(CONFIG_BRCMSMAC) += brcm80211/ + +obj-$(CONFIG_CW1200) += cw1200/ diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 5c9736a94e54..2437ad26949d 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -3175,10 +3175,21 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, { struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); struct ath6kl *ar = ath6kl_priv(vif->ndev); - u32 id; + u32 id, freq; const struct ieee80211_mgmt *mgmt; bool more_data, queued; + /* default to the current channel, but use the one specified as argument + * if any + */ + freq = vif->ch_hint; + if (chan) + freq = chan->center_freq; + + /* never send freq zero to the firmware */ + if (WARN_ON(freq == 0)) + return -EINVAL; + mgmt = (const struct ieee80211_mgmt *) buf; if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && ieee80211_is_probe_resp(mgmt->frame_control) && @@ -3188,8 +3199,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, * command to allow the target to fill in the generic IEs. 
*/ *cookie = 0; /* TX status not supported */ - return ath6kl_send_go_probe_resp(vif, buf, len, - chan->center_freq); + return ath6kl_send_go_probe_resp(vif, buf, len, freq); } id = vif->send_action_id++; @@ -3205,17 +3215,14 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, /* AP mode Power saving processing */ if (vif->nw_type == AP_NETWORK) { - queued = ath6kl_mgmt_powersave_ap(vif, - id, chan->center_freq, - wait, buf, - len, &more_data, no_cck); + queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len, + &more_data, no_cck); if (queued) return 0; } - return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, - chan->center_freq, wait, - buf, len, no_cck); + return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq, + wait, buf, len, no_cck); } static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, @@ -3679,6 +3686,20 @@ err: return NULL; } +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support ath6kl_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_4WAY_HANDSHAKE, + .n_patterns = WOW_MAX_FILTERS_PER_LIST, + .pattern_min_len = 1, + .pattern_max_len = WOW_PATTERN_SIZE, +}; +#endif + int ath6kl_cfg80211_init(struct ath6kl *ar) { struct wiphy *wiphy = ar->wiphy; @@ -3772,15 +3793,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); #ifdef CONFIG_PM - wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_4WAY_HANDSHAKE; - wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST; - wiphy->wowlan.pattern_min_len = 1; - wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE; + wiphy->wowlan = &ath6kl_wowlan_support; #endif wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS; diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 17507dc8a1e7..ec33c8007b29 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -84,14 +84,6 @@ config ATH9K_DFS_CERTIFIED developed. At this point enabling this option won't do anything except increase code size. 
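/*
 * Illustrative sketch, not part of the ath6kl patch above: that hunk
 * replaces several per-wiphy wowlan field assignments with a single
 * pointer to a shared static const capability descriptor.  The types
 * and names below are simplified stand-ins, not the real cfg80211
 * structures.
 */
struct wowlan_caps {
	unsigned int flags;
	int n_patterns;
	int pattern_min_len;
	int pattern_max_len;
};

struct demo_wiphy {
	const struct wowlan_caps *wowlan;	/* shared, read-only */
};

static const struct wowlan_caps demo_wowlan_caps = {
	.flags		 = 0x3f,	/* placeholder capability bits */
	.n_patterns	 = 16,
	.pattern_min_len = 1,
	.pattern_max_len = 64,
};

static void demo_wiphy_init(struct demo_wiphy *w)
{
	/* one pointer assignment replaces the per-field copies */
	w->wowlan = &demo_wowlan_caps;
}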
-config ATH9K_MAC_DEBUG - bool "Atheros MAC statistics" - depends on ATH9K_DEBUGFS - default y - ---help--- - This option enables collection of statistics for Rx/Tx status - data and some other MAC related statistics - config ATH9K_RATE_CONTROL bool "Atheros ath9k rate control" depends on ATH9K diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 7ecd40f07a74..e91725bf401c 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -118,10 +118,10 @@ static void ath9k_ani_restart(struct ath_hw *ah) { struct ar5416AniState *aniState; - if (!DO_ANI(ah)) + if (!ah->curchan) return; - aniState = &ah->curchan->ani; + aniState = &ah->ani; aniState->listenTime = 0; ENABLE_REGWRITE_BUFFER(ah); @@ -143,7 +143,7 @@ static void ath9k_ani_restart(struct ath_hw *ah) static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, bool scan) { - struct ar5416AniState *aniState = &ah->curchan->ani; + struct ar5416AniState *aniState = &ah->ani; struct ath_common *common = ath9k_hw_common(ah); const struct ani_ofdm_level_entry *entry_ofdm; const struct ani_cck_level_entry *entry_cck; @@ -195,10 +195,10 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) { struct ar5416AniState *aniState; - if (!DO_ANI(ah)) + if (!ah->curchan) return; - aniState = &ah->curchan->ani; + aniState = &ah->ani; if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false); @@ -210,7 +210,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel, bool scan) { - struct ar5416AniState *aniState = &ah->curchan->ani; + struct ar5416AniState *aniState = &ah->ani; struct ath_common *common = ath9k_hw_common(ah); const struct ani_ofdm_level_entry *entry_ofdm; const struct ani_cck_level_entry *entry_cck; @@ -251,10 +251,10 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) { struct ar5416AniState *aniState; - if (!DO_ANI(ah)) + if (!ah->curchan) return; - aniState = &ah->curchan->ani; + aniState = &ah->ani; if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1, @@ -269,7 +269,7 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) { struct ar5416AniState *aniState; - aniState = &ah->curchan->ani; + aniState = &ah->ani; /* lower OFDM noise immunity */ if (aniState->ofdmNoiseImmunityLevel > 0 && @@ -292,12 +292,12 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) */ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) { - struct ar5416AniState *aniState = &ah->curchan->ani; + struct ar5416AniState *aniState = &ah->ani; struct ath9k_channel *chan = ah->curchan; struct ath_common *common = ath9k_hw_common(ah); int ofdm_nil, cck_nil; - if (!DO_ANI(ah)) + if (!ah->curchan) return; BUG_ON(aniState == NULL); @@ -380,7 +380,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - struct ar5416AniState *aniState = &ah->curchan->ani; + struct ar5416AniState *aniState = &ah->ani; u32 phyCnt1, phyCnt2; int32_t listenTime; @@ -415,10 +415,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) struct ath_common *common = ath9k_hw_common(ah); u32 ofdmPhyErrRate, cckPhyErrRate; - if (!DO_ANI(ah)) + if (!ah->curchan) return; - aniState = &ah->curchan->ani; + 
aniState = &ah->ani; if (!ath9k_hw_ani_read_counters(ah)) return; @@ -490,32 +490,22 @@ EXPORT_SYMBOL(ath9k_hw_disable_mib_counters); void ath9k_hw_ani_init(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - int i; + struct ar5416AniState *ani = &ah->ani; ath_dbg(common, ANI, "Initialize ANI\n"); ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW; - ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH; ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW; - for (i = 0; i < ARRAY_SIZE(ah->channels); i++) { - struct ath9k_channel *chan = &ah->channels[i]; - struct ar5416AniState *ani = &chan->ani; - - ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; - - ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; - - ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false; - - ani->ofdmsTurn = true; - - ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; - ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; - ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; - } + ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; + ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; + ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false; + ani->ofdmsTurn = true; + ani->ofdmWeakSigDetect = true; + ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; + ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; /* * since we expect some ongoing maintenance on the tables, let's sanity @@ -524,9 +514,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah) ah->aniperiod = ATH9K_ANI_PERIOD; ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL; - if (ah->config.enable_ani) - ah->proc_phyerr |= HAL_PROCESS_ANI; - ath9k_ani_restart(ah); ath9k_enable_mib_counters(ah); } diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h index dddb1361039a..78b9fa9f6455 100644 --- a/drivers/net/wireless/ath/ath9k/ani.h +++ b/drivers/net/wireless/ath/ath9k/ani.h @@ -17,10 +17,6 @@ #ifndef ANI_H #define ANI_H -#define HAL_PROCESS_ANI 0x00000001 - -#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI) && ah->curchan) - #define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) /* units are errors per second */ @@ -38,11 +34,7 @@ #define ATH9K_ANI_CCK_TRIG_LOW 300 #define ATH9K_ANI_NOISE_IMMUNE_LVL 4 -#define ATH9K_ANI_USE_OFDM_WEAK_SIG true -#define ATH9K_ANI_CCK_WEAK_SIG_THR false - #define ATH9K_ANI_SPUR_IMMUNE_LVL 3 - #define ATH9K_ANI_FIRSTEP_LVL 2 #define ATH9K_ANI_RSSI_THR_HIGH 40 @@ -111,7 +103,7 @@ struct ar5416AniState { u8 mrcCCK; u8 spurImmunityLevel; u8 firstepLevel; - u8 ofdmWeakSigDetect; + bool ofdmWeakSigDetect; u32 listenTime; u32 ofdmPhyErrCount; u32 cckPhyErrCount; @@ -119,8 +111,6 @@ struct ar5416AniState { }; struct ar5416Stats { - u32 ast_ani_niup; - u32 ast_ani_nidown; u32 ast_ani_spurup; u32 ast_ani_spurdown; u32 ast_ani_ofdmon; diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 391da5ad6a99..d1acfe98918a 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -931,7 +931,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_channel *chan = ah->curchan; - struct ar5416AniState *aniState = &chan->ani; + struct ar5416AniState *aniState = &ah->ani; s32 value, value2; switch (cmd & ah->ani_function) { @@ -1207,7 +1207,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_channel 
*chan = ah->curchan; - struct ar5416AniState *aniState = &chan->ani; + struct ar5416AniState *aniState = &ah->ani; struct ath9k_ani_default *iniDef; u32 val; @@ -1251,7 +1251,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah) /* these levels just got reset to defaults by the INI */ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; - aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; + aniState->ofdmWeakSigDetect = true; aniState->mrcCCK = false; /* not available on pre AR9003 */ } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index 830daa12feb6..8dc2d089cdef 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -38,10 +38,6 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah) else INIT_INI_ARRAY(&ah->iniPcieSerdes, ar9280PciePhy_clkreq_always_on_L1_9280); -#ifdef CONFIG_PM_SLEEP - INIT_INI_ARRAY(&ah->iniPcieSerdesWow, - ar9280PciePhy_awow); -#endif if (AR_SREV_9287_11_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h index beb6162cf97c..4d18c66a6790 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h @@ -925,20 +925,6 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = { {0x00004044, 0x00000000}, }; -static const u32 ar9280PciePhy_awow[][2] = { - /* Addr allmodes */ - {0x00004040, 0x9248fd00}, - {0x00004040, 0x24924924}, - {0x00004040, 0xa8000019}, - {0x00004040, 0x13160820}, - {0x00004040, 0xe5980560}, - {0x00004040, 0xc01dcffd}, - {0x00004040, 0x1aaabe41}, - {0x00004040, 0xbe105554}, - {0x00004040, 0x00043007}, - {0x00004044, 0x00000000}, -}; - static const u32 ar9285Modes_9285_1_2[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 301bf72c53bf..5163abd3937c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -469,6 +469,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_status = 0; rxs->rs_flags = 0; + rxs->flag = 0; rxs->rs_datalen = rxsp->status2 & AR_DataLen; rxs->rs_tstamp = rxsp->status3; @@ -493,8 +494,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0; rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0; rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7); - rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0; - rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0; + rxs->flag |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0; + rxs->flag |= (rxsp->status4 & AR_2040) ? 
RX_FLAG_40MHZ : 0; rxs->evm0 = rxsp->status6; rxs->evm1 = rxsp->status7; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index 09c1f9da67a0..6343cc91953e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -454,6 +454,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain) if (accum_cnt <= thresh_accum_cnt) continue; + max_index++; + /* sum(tx amplitude) */ accum_tx = ((data_L[i] >> 16) & 0xffff) | ((data_U[i] & 0x7ff) << 16); @@ -468,20 +470,21 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain) accum_tx <<= scale_factor; accum_rx <<= scale_factor; - x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >> - scale_factor; + x_est[max_index] = + (((accum_tx + accum_cnt) / accum_cnt) + 32) >> + scale_factor; - Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >> + Y[max_index] = + ((((accum_rx + accum_cnt) / accum_cnt) + 32) >> scale_factor) + - (1 << scale_factor) * max_index + 16; + (1 << scale_factor) * i + 16; if (accum_ang >= (1 << 26)) accum_ang -= 1 << 27; - theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) / - accum_cnt; - - max_index++; + theta[max_index] = + ((accum_ang * (1 << scale_factor)) + accum_cnt) / + accum_cnt; } /* diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 2bf6548dd143..0d053503b8bf 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -904,7 +904,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_channel *chan = ah->curchan; - struct ar5416AniState *aniState = &chan->ani; + struct ar5416AniState *aniState = &ah->ani; s32 value, value2; switch (cmd & ah->ani_function) { @@ -1172,7 +1172,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah) struct ath9k_ani_default *iniDef; u32 val; - aniState = &ah->curchan->ani; + aniState = &ah->ani; iniDef = &aniState->iniDef; ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n", @@ -1213,7 +1213,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah) /* these levels just got reset to defaults by the INI */ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; - aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; + aniState->ofdmWeakSigDetect = true; aniState->mrcCCK = true; } diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 8a1888d02070..579ed9c40b3f 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -646,6 +646,7 @@ enum sc_op_flags { SC_OP_ANI_RUN, SC_OP_PRIM_STA_VIF, SC_OP_HW_RESET, + SC_OP_SCANNING, }; /* Powersave flags */ @@ -759,7 +760,6 @@ struct ath_softc { struct rchan *rfs_chan_spec_scan; enum spectral_mode spectral_mode; struct ath_spec_scan spec_config; - int scanning; #ifdef CONFIG_PM_SLEEP atomic_t wow_got_bmiss_intr; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 2ff570f7f8ff..fd1eebab8647 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -39,7 +39,8 @@ static void ath9k_beaconq_config(struct ath_softc *sc) ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); - if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { + if (sc->sc_ah->opmode == 
NL80211_IFTYPE_AP || + sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) { /* Always burst out beacon and CAB traffic. */ qi.tqi_aifs = 1; qi.tqi_cwmin = 0; @@ -273,7 +274,8 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc) u64 tsf; int slot; - if (sc->sc_ah->opmode != NL80211_IFTYPE_AP) { + if (sc->sc_ah->opmode != NL80211_IFTYPE_AP && + sc->sc_ah->opmode != NL80211_IFTYPE_MESH_POINT) { ath_dbg(common, BEACON, "slot 0, tsf: %llu\n", ath9k_hw_gettsf64(sc->sc_ah)); return 0; @@ -765,10 +767,10 @@ void ath9k_set_beacon(struct ath_softc *sc) switch (sc->sc_ah->opmode) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: ath9k_beacon_config_ap(sc, cur_conf); break; case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_MESH_POINT: ath9k_beacon_config_adhoc(sc, cur_conf); break; case NL80211_IFTYPE_STATION: diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index e6307b86363a..7852f6e59964 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -173,25 +173,69 @@ static const struct file_operations fops_rx_chainmask = { .llseek = default_llseek, }; -static ssize_t read_file_disable_ani(struct file *file, char __user *user_buf, +static ssize_t read_file_ani(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - char buf[32]; - unsigned int len; + struct ath_hw *ah = sc->sc_ah; + unsigned int len = 0, size = 1024; + ssize_t retval = 0; + char *buf; - len = sprintf(buf, "%d\n", common->disable_ani); - return simple_read_from_buffer(user_buf, count, ppos, buf, len); + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + if (common->disable_ani) { + len += snprintf(buf + len, size - len, "%s: %s\n", + "ANI", "DISABLED"); + goto exit; + } + + len += snprintf(buf + len, size - len, "%15s: %s\n", + "ANI", "ENABLED"); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "ANI RESET", ah->stats.ast_ani_reset); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "SPUR UP", ah->stats.ast_ani_spurup); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "SPUR DOWN", ah->stats.ast_ani_spurdown); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "MRC-CCK ON", ah->stats.ast_ani_ccklow); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "MRC-CCK OFF", ah->stats.ast_ani_cckhigh); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "FIR-STEP UP", ah->stats.ast_ani_stepup); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "FIR-STEP DOWN", ah->stats.ast_ani_stepdown); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs); + len += snprintf(buf + len, size - len, "%15s: %u\n", + "CCK ERRORS", ah->stats.ast_ani_cckerrs); +exit: + if (len > size) + len = size; + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; } -static ssize_t write_file_disable_ani(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t write_file_ani(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) {
struct ath_softc *sc = file->private_data; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - unsigned long disable_ani; + unsigned long ani; char buf[32]; ssize_t len; @@ -200,12 +244,15 @@ static ssize_t write_file_disable_ani(struct file *file, return -EFAULT; buf[len] = '\0'; - if (strict_strtoul(buf, 0, &disable_ani)) + if (strict_strtoul(buf, 0, &ani)) return -EINVAL; - common->disable_ani = !!disable_ani; + if (ani < 0 || ani > 1) + return -EINVAL; + + common->disable_ani = !ani; - if (disable_ani) { + if (common->disable_ani) { clear_bit(SC_OP_ANI_RUN, &sc->sc_flags); ath_stop_ani(sc); } else { @@ -215,9 +262,9 @@ static ssize_t write_file_disable_ani(struct file *file, return count; } -static const struct file_operations fops_disable_ani = { - .read = read_file_disable_ani, - .write = write_file_disable_ani, +static const struct file_operations fops_ani = { + .read = read_file_ani, + .write = write_file_ani, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, @@ -738,8 +785,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, struct ath_txq *txq, unsigned int flags) { -#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\ - [sc->debug.tsidx].c) int qnum = txq->axq_qnum; TX_STAT_INC(qnum, tx_pkts_all); @@ -771,37 +816,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, TX_STAT_INC(qnum, data_underrun); if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) TX_STAT_INC(qnum, delim_underrun); - -#ifdef CONFIG_ATH9K_MAC_DEBUG - spin_lock(&sc->debug.samp_lock); - TX_SAMP_DBG(jiffies) = jiffies; - TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0; - TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1; - TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2; - TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0; - TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1; - TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2; - TX_SAMP_DBG(rateindex) = ts->ts_rateindex; - TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK); - TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry; - TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry; - TX_SAMP_DBG(rssi) = ts->ts_rssi; - TX_SAMP_DBG(tid) = ts->tid; - TX_SAMP_DBG(qid) = ts->qid; - - if (ts->ts_flags & ATH9K_TX_BA) { - TX_SAMP_DBG(ba_low) = ts->ba_low; - TX_SAMP_DBG(ba_high) = ts->ba_high; - } else { - TX_SAMP_DBG(ba_low) = 0; - TX_SAMP_DBG(ba_high) = 0; - } - - sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES; - spin_unlock(&sc->debug.samp_lock); -#endif - -#undef TX_SAMP_DBG } static const struct file_operations fops_xmit = { @@ -915,8 +929,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) { #define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ -#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\ - [sc->debug.rsidx].c) RX_STAT_INC(rx_pkts_all); sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen; @@ -940,27 +952,7 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) RX_PHY_ERR_INC(rs->rs_phyerr); } -#ifdef CONFIG_ATH9K_MAC_DEBUG - spin_lock(&sc->debug.samp_lock); - RX_SAMP_DBG(jiffies) = jiffies; - RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0; - RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1; - RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2; - RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0; - RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1; - RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2; - RX_SAMP_DBG(antenna) = rs->rs_antenna; - RX_SAMP_DBG(rssi) = rs->rs_rssi; - RX_SAMP_DBG(rate) = 
rs->rs_rate; - RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon; - - sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES; - spin_unlock(&sc->debug.samp_lock); - -#endif - #undef RX_PHY_ERR_INC -#undef RX_SAMP_DBG } static const struct file_operations fops_recv = { @@ -1485,283 +1477,6 @@ static const struct file_operations fops_modal_eeprom = { .llseek = default_llseek, }; -#ifdef CONFIG_ATH9K_MAC_DEBUG - -void ath9k_debug_samp_bb_mac(struct ath_softc *sc) -{ -#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c) - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - unsigned long flags; - int i; - - ath9k_ps_wakeup(sc); - - spin_lock_bh(&sc->debug.samp_lock); - - spin_lock_irqsave(&common->cc_lock, flags); - ath_hw_cycle_counters_update(common); - - ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles; - ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy; - ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame; - ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame; - spin_unlock_irqrestore(&common->cc_lock, flags); - - ATH_SAMP_DBG(noise) = ah->noise; - - REG_WRITE_D(ah, AR_MACMISC, - ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | - (AR_MACMISC_MISC_OBS_BUS_1 << - AR_MACMISC_MISC_OBS_BUS_MSB_S))); - - for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) - ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah, - AR_DMADBG_0 + (i * sizeof(u32))); - - ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1); - ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR); - - memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist, - sizeof(ATH_SAMP_DBG(nfCalHist))); - - sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES; - spin_unlock_bh(&sc->debug.samp_lock); - ath9k_ps_restore(sc); - -#undef ATH_SAMP_DBG -} - -static int open_file_bb_mac_samps(struct inode *inode, struct file *file) -{ -#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c - struct ath_softc *sc = inode->i_private; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ieee80211_conf *conf = &common->hw->conf; - struct ath_dbg_bb_mac_samp *bb_mac_samp; - struct ath9k_nfcal_hist *h; - int i, j, qcuOffset = 0, dcuOffset = 0; - u32 *qcuBase, *dcuBase, size = 30000, len = 0; - u32 sampidx = 0; - u8 *buf; - u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; - u8 nread; - - if (test_bit(SC_OP_INVALID, &sc->sc_flags)) - return -EAGAIN; - - buf = vmalloc(size); - if (!buf) - return -ENOMEM; - bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES); - if (!bb_mac_samp) { - vfree(buf); - return -ENOMEM; - } - /* Account the current state too */ - ath9k_debug_samp_bb_mac(sc); - - spin_lock_bh(&sc->debug.samp_lock); - memcpy(bb_mac_samp, sc->debug.bb_mac_samp, - sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES); - len += snprintf(buf + len, size - len, - "Current Sample Index: %d\n", sc->debug.sampidx); - spin_unlock_bh(&sc->debug.samp_lock); - - len += snprintf(buf + len, size - len, - "Raw DMA Debug Dump:\n"); - len += snprintf(buf + len, size - len, "Sample |\t"); - for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) - len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i); - len += snprintf(buf + len, size - len, "\n"); - - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - len += snprintf(buf + len, size - len, "%d\t", sampidx); - - for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) - len += snprintf(buf + len, size - len, " %08x\t", - ATH_SAMP_DBG(dma_dbg_reg_vals[i])); - len += snprintf(buf + len, size - len, "\n"); - } - len += snprintf(buf + 
len, size - len, "\n"); - - len += snprintf(buf + len, size - len, - "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]); - dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]); - - for (i = 0; i < ATH9K_NUM_QUEUES; i++, - qcuOffset += 4, dcuOffset += 5) { - if (i == 8) { - qcuOffset = 0; - qcuBase++; - } - - if (i == 6) { - dcuOffset = 0; - dcuBase++; - } - if (!sc->debug.stats.txstats[i].queued) - continue; - - len += snprintf(buf + len, size - len, - "%4d %7d %2x %1x %2x %2x\n", - sampidx, i, - (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, - (*qcuBase & (0x8 << qcuOffset)) >> - (qcuOffset + 3), - ATH_SAMP_DBG(dma_dbg_reg_vals[2]) & - (0x7 << (i * 3)) >> (i * 3), - (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); - } - len += snprintf(buf + len, size - len, "\n"); - } - len += snprintf(buf + len, size - len, - "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp " - "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 " - "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n"); - - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]); - dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]); - - len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx, - (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18, - (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22); - len += snprintf(buf + len, size - len, "%7x %8x ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26, - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3)); - len += snprintf(buf + len, size - len, "%7x %7x ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25, - (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27); - len += snprintf(buf + len, size - len, "%7d %12d ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2, - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10); - len += snprintf(buf + len, size - len, "%12d %12d ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11, - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12); - len += snprintf(buf + len, size - len, "%12d %12d ", - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13, - (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17); - len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n", - ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr)); - } - - len += snprintf(buf + len, size - len, - "Sample ChNoise Chain privNF #Reading Readings\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - h = ATH_SAMP_DBG(nfCalHist); - if (!ATH_SAMP_DBG(noise)) - continue; - - for (i = 0; i < NUM_NF_READINGS; i++) { - if (!(chainmask & (1 << i)) || - ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))) - continue; - - nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - - h[i].invalidNFcount; - len += snprintf(buf + len, size - len, - "%4d %5d %4d\t %d\t %d\t", - sampidx, ATH_SAMP_DBG(noise), - i, h[i].privNF, nread); - for (j = 0; j < nread; j++) - len += snprintf(buf + len, size - len, - " %d", h[i].nfCalBuffer[j]); - len += snprintf(buf + len, size - len, "\n"); - } - } - len += snprintf(buf + len, size - len, "\nCycle counters:\n" - "Sample Total Rxbusy Rxframes Txframes\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - if (!ATH_SAMP_DBG(cc.cycles)) - continue; - len += snprintf(buf + len, size - len, - "%4d %08x %08x %08x %08x\n", - sampidx, ATH_SAMP_DBG(cc.cycles), - ATH_SAMP_DBG(cc.rx_busy), - ATH_SAMP_DBG(cc.rx_frame), - ATH_SAMP_DBG(cc.tx_frame)); - } - - len += 
snprintf(buf + len, size - len, "Tx status Dump :\n"); - len += snprintf(buf + len, size - len, - "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb " - "isok rts_fail data_fail rate tid qid " - "ba_low ba_high tx_before(ms)\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) { - if (!ATH_SAMP_DBG(ts[i].jiffies)) - continue; - len += snprintf(buf + len, size - len, "%-14d" - "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-4d %-8d " - "%-9d %-4d %-3d %-3d %08x %08x %-11d\n", - sampidx, - ATH_SAMP_DBG(ts[i].rssi_ctl0), - ATH_SAMP_DBG(ts[i].rssi_ctl1), - ATH_SAMP_DBG(ts[i].rssi_ctl2), - ATH_SAMP_DBG(ts[i].rssi_ext0), - ATH_SAMP_DBG(ts[i].rssi_ext1), - ATH_SAMP_DBG(ts[i].rssi_ext2), - ATH_SAMP_DBG(ts[i].rssi), - ATH_SAMP_DBG(ts[i].isok), - ATH_SAMP_DBG(ts[i].rts_fail_cnt), - ATH_SAMP_DBG(ts[i].data_fail_cnt), - ATH_SAMP_DBG(ts[i].rateindex), - ATH_SAMP_DBG(ts[i].tid), - ATH_SAMP_DBG(ts[i].qid), - ATH_SAMP_DBG(ts[i].ba_low), - ATH_SAMP_DBG(ts[i].ba_high), - jiffies_to_msecs(jiffies - - ATH_SAMP_DBG(ts[i].jiffies))); - } - } - - len += snprintf(buf + len, size - len, "Rx status Dump :\n"); - len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 " - "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n"); - for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) { - for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) { - if (!ATH_SAMP_DBG(rs[i].jiffies)) - continue; - len += snprintf(buf + len, size - len, "%-14d" - "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-9s %-2d %02x %-13d\n", - sampidx, - ATH_SAMP_DBG(rs[i].rssi_ctl0), - ATH_SAMP_DBG(rs[i].rssi_ctl1), - ATH_SAMP_DBG(rs[i].rssi_ctl2), - ATH_SAMP_DBG(rs[i].rssi_ext0), - ATH_SAMP_DBG(rs[i].rssi_ext1), - ATH_SAMP_DBG(rs[i].rssi_ext2), - ATH_SAMP_DBG(rs[i].rssi), - ATH_SAMP_DBG(rs[i].is_mybeacon) ? 
- "True" : "False", - ATH_SAMP_DBG(rs[i].antenna), - ATH_SAMP_DBG(rs[i].rate), - jiffies_to_msecs(jiffies - - ATH_SAMP_DBG(rs[i].jiffies))); - } - } - - vfree(bb_mac_samp); - file->private_data = buf; - - return 0; -#undef ATH_SAMP_DBG -} - -static const struct file_operations fops_samps = { - .open = open_file_bb_mac_samps, - .read = ath9k_debugfs_read_buf, - .release = ath9k_debugfs_release_buf, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - -#endif - #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT static ssize_t read_file_btcoex(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -2051,8 +1766,8 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, sc, &fops_rx_chainmask); debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_tx_chainmask); - debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR, - sc->debug.debugfs_phy, sc, &fops_disable_ani); + debugfs_create_file("ani", S_IRUSR | S_IWUSR, + sc->debug.debugfs_phy, sc, &fops_ani); debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->sc_ah->config.enable_paprd); debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, @@ -2087,11 +1802,6 @@ int ath9k_init_debug(struct ath_hw *ah) debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_spectral_fft_period); - -#ifdef CONFIG_ATH9K_MAC_DEBUG - debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc, - &fops_samps); -#endif debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR, diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 794a7ec83a24..9719378f7c73 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -251,56 +251,10 @@ struct ath_stats { u32 reset[__RESET_TYPE_MAX]; }; -#define ATH_DBG_MAX_SAMPLES 10 -struct ath_dbg_bb_mac_samp { - u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS]; - u32 pcu_obs, pcu_cr, noise; - struct { - u64 jiffies; - int8_t rssi_ctl0; - int8_t rssi_ctl1; - int8_t rssi_ctl2; - int8_t rssi_ext0; - int8_t rssi_ext1; - int8_t rssi_ext2; - int8_t rssi; - bool isok; - u8 rts_fail_cnt; - u8 data_fail_cnt; - u8 rateindex; - u8 qid; - u8 tid; - u32 ba_low; - u32 ba_high; - } ts[ATH_DBG_MAX_SAMPLES]; - struct { - u64 jiffies; - int8_t rssi_ctl0; - int8_t rssi_ctl1; - int8_t rssi_ctl2; - int8_t rssi_ext0; - int8_t rssi_ext1; - int8_t rssi_ext2; - int8_t rssi; - bool is_mybeacon; - u8 antenna; - u8 rate; - } rs[ATH_DBG_MAX_SAMPLES]; - struct ath_cycle_counters cc; - struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; -}; - struct ath9k_debug { struct dentry *debugfs_phy; u32 regidx; struct ath_stats stats; -#ifdef CONFIG_ATH9K_MAC_DEBUG - spinlock_t samp_lock; - struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES]; - u8 sampidx; - u8 tsidx; - u8 rsidx; -#endif }; int ath9k_init_debug(struct ath_hw *ah); @@ -359,17 +313,4 @@ static inline void ath_debug_stat_rx(struct ath_softc *sc, #endif /* CONFIG_ATH9K_DEBUGFS */ -#ifdef CONFIG_ATH9K_MAC_DEBUG - -void ath9k_debug_samp_bb_mac(struct ath_softc *sc); - -#else - -static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc) -{ -} - -#endif - - #endif /* DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index d3b099d7898b..0085e643132f 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -208,6 
+208,9 @@ struct ath9k_htc_target_rx_stats { case NL80211_IFTYPE_AP: \ _priv->num_ap_vif++; \ break; \ + case NL80211_IFTYPE_MESH_POINT: \ + _priv->num_mbss_vif++; \ + break; \ default: \ break; \ } \ @@ -224,6 +227,9 @@ struct ath9k_htc_target_rx_stats { case NL80211_IFTYPE_AP: \ _priv->num_ap_vif--; \ break; \ + case NL80211_IFTYPE_MESH_POINT: \ + _priv->num_mbss_vif--; \ + break; \ default: \ break; \ } \ @@ -450,6 +456,7 @@ struct ath9k_htc_priv { u8 sta_slot; u8 vif_sta_pos[ATH9K_HTC_MAX_VIF]; u8 num_ibss_vif; + u8 num_mbss_vif; u8 num_sta_vif; u8 num_sta_assoc_vif; u8 num_ap_vif; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index f13f458dd656..e0c03bd64182 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -28,7 +28,8 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv) ath9k_hw_get_txq_props(ah, priv->beaconq, &qi); - if (priv->ah->opmode == NL80211_IFTYPE_AP) { + if (priv->ah->opmode == NL80211_IFTYPE_AP || + priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) { qi.tqi_aifs = 1; qi.tqi_cwmin = 0; qi.tqi_cwmax = 0; @@ -628,6 +629,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, case NL80211_IFTYPE_ADHOC: ath9k_htc_beacon_config_adhoc(priv, cur_conf); break; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: ath9k_htc_beacon_config_ap(priv, cur_conf); break; @@ -649,6 +651,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv) case NL80211_IFTYPE_ADHOC: ath9k_htc_beacon_config_adhoc(priv, cur_conf); break; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: ath9k_htc_beacon_config_ap(priv, cur_conf); break; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index a47f5e05fc04..59f64367e8ca 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -698,7 +698,8 @@ static const struct ieee80211_iface_limit if_limits[] = { { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) }, { .max = 2, .types = BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO) }, + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_MESH_POINT) }, }; static const struct ieee80211_iface_combination if_comb = { @@ -721,6 +722,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_MFP_CAPABLE | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; hw->wiphy->interface_modes = @@ -728,7 +730,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_CLIENT); + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_MESH_POINT); hw->wiphy->iface_combinations = &if_comb; hw->wiphy->n_iface_combinations = 1; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 0743a47cef8f..34869c2405aa 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -113,7 +113,9 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) struct ath9k_htc_priv *priv = data; struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; - if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon) + if ((vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT) && + 
bss_conf->enable_beacon) priv->reconfig_beacon = true; if (bss_conf->assoc) { @@ -180,6 +182,8 @@ static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv) priv->ah->opmode = NL80211_IFTYPE_ADHOC; else if (priv->num_ap_vif) priv->ah->opmode = NL80211_IFTYPE_AP; + else if (priv->num_mbss_vif) + priv->ah->opmode = NL80211_IFTYPE_MESH_POINT; else priv->ah->opmode = NL80211_IFTYPE_STATION; @@ -810,8 +814,7 @@ void ath9k_htc_ani_work(struct work_struct *work) } /* Verify whether we must check ANI */ - if (ah->config.enable_ani && - (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { + if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -841,8 +844,7 @@ set_timer: * short calibration and long calibration. */ cal_interval = ATH_LONG_CALINTERVAL; - if (ah->config.enable_ani) - cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); + cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); @@ -1052,6 +1054,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, case NL80211_IFTYPE_AP: hvif.opmode = HTC_M_HOSTAP; break; + case NL80211_IFTYPE_MESH_POINT: + hvif.opmode = HTC_M_WDS; /* close enough */ + break; default: ath_err(common, "Interface type %d not yet supported\n", vif->type); @@ -1084,6 +1089,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, INC_VIF(priv, vif->type); if ((vif->type == NL80211_IFTYPE_AP) || + (vif->type == NL80211_IFTYPE_MESH_POINT) || (vif->type == NL80211_IFTYPE_ADHOC)) ath9k_htc_assign_bslot(priv, vif); @@ -1134,6 +1140,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, DEC_VIF(priv, vif->type); if ((vif->type == NL80211_IFTYPE_AP) || + vif->type == NL80211_IFTYPE_MESH_POINT || (vif->type == NL80211_IFTYPE_ADHOC)) ath9k_htc_remove_bslot(priv, vif); @@ -1525,9 +1532,10 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) { /* * Disable SWBA interrupt only if there are no - * AP/IBSS interfaces. + * concurrent AP/mesh or IBSS interfaces. */ - if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) { + if ((priv->num_ap_vif + priv->num_mbss_vif <= 1) || + priv->num_ibss_vif) { ath_dbg(common, CONFIG, "Beacon disabled for BSS: %pM\n", bss_conf->bssid); @@ -1538,12 +1546,15 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON_INT) { /* - * Reset the HW TSF for the first AP interface. + * Reset the HW TSF for the first AP or mesh interface. 
*/ - if ((priv->ah->opmode == NL80211_IFTYPE_AP) && - (priv->nvifs == 1) && - (priv->num_ap_vif == 1) && - (vif->type == NL80211_IFTYPE_AP)) { + if (priv->nvifs == 1 && + ((priv->ah->opmode == NL80211_IFTYPE_AP && + vif->type == NL80211_IFTYPE_AP && + priv->num_ap_vif == 1) || + (priv->ah->opmode == NL80211_IFTYPE_MESH_POINT && + vif->type == NL80211_IFTYPE_MESH_POINT && + priv->num_mbss_vif == 1))) { set_bit(OP_TSF_RESET, &priv->op_flags); } ath_dbg(common, CONFIG, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 6bd0e92ea2aa..e602c9519709 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -887,7 +887,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv) if (priv->rxfilter & FIF_PSPOLL) rfilt |= ATH9K_RX_FILTER_PSPOLL; - if (priv->nvifs > 1) + if (priv->nvifs > 1 || priv->rxfilter & FIF_OTHER_BSS) rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; return rfilt; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 7f25da8444fe..a13f6cea2ba7 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -452,7 +452,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah) ah->config.pcie_clock_req = 0; ah->config.pcie_waen = 0; ah->config.analog_shiftreg = 1; - ah->config.enable_ani = true; for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { ah->config.spurchans[i][0] = AR_NO_SPUR; @@ -549,8 +548,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah) ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); - if (ah->config.enable_ani) - ath9k_hw_ani_init(ah); + ath9k_hw_ani_init(ah); return 0; } @@ -1245,10 +1243,10 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) switch (opmode) { case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_MESH_POINT: set |= AR_STA_ID1_ADHOC; REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); break; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: set |= AR_STA_ID1_STA_AP; /* fall through */ @@ -2246,12 +2244,12 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) switch (ah->opmode) { case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_MESH_POINT: REG_SET_BIT(ah, AR_TXCFG, AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon + TU_TO_USEC(ah->atim_window ? 
ah->atim_window : 1)); flags |= AR_NDP_TIMER_EN; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon); REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon - @@ -2595,13 +2593,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->hw_caps |= ATH9K_HW_CAP_RTT; } - if (AR_SREV_9280_20_OR_LATER(ah)) { - pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE | - ATH9K_HW_WOW_PATTERN_MATCH_EXACT; - - if (AR_SREV_9280(ah)) - pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD; - } + if (AR_SREV_9462(ah)) + pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE; if (AR_SREV_9300_20_OR_LATER(ah) && ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index ae3034374bc4..7d259b7dc254 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -246,9 +246,7 @@ enum ath9k_hw_caps { ATH9K_HW_CAP_MCI = BIT(15), ATH9K_HW_CAP_DFS = BIT(16), ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17), - ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18), - ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19), - ATH9K_HW_CAP_PAPRD = BIT(20), + ATH9K_HW_CAP_PAPRD = BIT(18), }; /* @@ -291,7 +289,6 @@ struct ath9k_ops_config { u32 ofdm_trig_high; u32 cck_trig_high; u32 cck_trig_low; - u32 enable_ani; u32 enable_paprd; int serialize_regmode; bool rx_intr_mitigation; @@ -423,7 +420,6 @@ struct ath9k_hw_cal_data { struct ath9k_channel { struct ieee80211_channel *chan; - struct ar5416AniState ani; u16 channel; u32 channelFlags; u32 chanmode; @@ -854,10 +850,10 @@ struct ath_hw { u32 globaltxtimeout; /* ANI */ - u32 proc_phyerr; u32 aniperiod; enum ath9k_ani_cmd ani_function; u32 ani_skip_count; + struct ar5416AniState ani; #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT struct ath_btcoex_hw btcoex_hw; @@ -882,9 +878,6 @@ struct ath_hw { struct ar5416IniArray iniBank6; struct ar5416IniArray iniAddac; struct ar5416IniArray iniPcieSerdes; -#ifdef CONFIG_PM_SLEEP - struct ar5416IniArray iniPcieSerdesWow; -#endif struct ar5416IniArray iniPcieSerdesLowPower; struct ar5416IniArray iniModesFastClock; struct ar5416IniArray iniAdditional; @@ -1165,8 +1158,6 @@ static inline void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) } #endif - - #define ATH9K_CLOCK_RATE_CCK 22 #define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 #define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 0237b2868961..f359b0d7078d 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -21,6 +21,7 @@ #include <linux/ath9k_platform.h> #include <linux/module.h> #include <linux/relay.h> +#include <net/ieee80211_radiotap.h> #include "ath9k.h" @@ -613,9 +614,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, spin_lock_init(&sc->sc_serial_rw); spin_lock_init(&sc->sc_pm_lock); mutex_init(&sc->mutex); -#ifdef CONFIG_ATH9K_MAC_DEBUG - spin_lock_init(&sc->debug.samp_lock); -#endif tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet, (unsigned long)sc); @@ -755,6 +753,15 @@ static const struct ieee80211_iface_combination if_comb[] = { } }; +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support ath9k_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .n_patterns = MAX_NUM_USER_PATTERN, + .pattern_min_len = 1, + .pattern_max_len = MAX_PATTERN_SIZE, +}; +#endif + void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) { struct ath_hw *ah 
= sc->sc_ah; @@ -769,12 +776,19 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_RC_TABLE; - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) - hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { + hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + + if (AR_SREV_9280_20_OR_LATER(ah)) + hw->radiotap_mcs_details |= + IEEE80211_RADIOTAP_MCS_HAVE_STBC; + } if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt) hw->flags |= IEEE80211_HW_MFP_CAPABLE; + hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@ -795,21 +809,12 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; #ifdef CONFIG_PM_SLEEP - if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && - device_can_wakeup(sc->dev)) { - - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT; - hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN; - hw->wiphy->wowlan.pattern_min_len = 1; - hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE; - - } + device_can_wakeup(sc->dev)) + hw->wiphy->wowlan = &ath9k_wowlan_support; atomic_set(&sc->wow_sleep_proc_intr, -1); atomic_set(&sc->wow_got_bmiss_intr, -1); - #endif hw->queues = 4; diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 849259b07370..fff5d3ccc663 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -390,9 +390,7 @@ void ath_ani_calibrate(unsigned long data) } /* Verify whether we must check ANI */ - if (sc->sc_ah->config.enable_ani - && (timestamp - common->ani.checkani_timer) >= - ah->config.ani_poll_interval) { + if ((timestamp - common->ani.checkani_timer) >= ah->config.ani_poll_interval) { aniflag = true; common->ani.checkani_timer = timestamp; } @@ -418,7 +416,6 @@ void ath_ani_calibrate(unsigned long data) longcal ? "long" : "", shortcal ? "short" : "", aniflag ? "ani" : "", common->ani.caldone ? "true" : "false"); - ath9k_debug_samp_bb_mac(sc); ath9k_ps_restore(sc); set_timer: @@ -428,9 +425,7 @@ set_timer: * short calibration and long calibration. */ cal_interval = ATH_LONG_CALINTERVAL; - if (sc->sc_ah->config.enable_ani) - cal_interval = min(cal_interval, - (u32)ah->config.ani_poll_interval); + cal_interval = min(cal_interval, (u32)ah->config.ani_poll_interval); if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 498fee04afa0..d055e389abfc 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -547,6 +547,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_status = 0; rs->rs_flags = 0; + rs->flag = 0; rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen; rs->rs_tstamp = ads.AR_RcvTimestamp; @@ -586,10 +587,17 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); - rs->rs_flags = - (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; - rs->rs_flags |= - (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; + + /* directly mapped flags for ieee80211_rx_status */ + rs->flag |= + (ads.ds_rxstatus3 & AR_GI) ? 
RX_FLAG_SHORT_GI : 0; + rs->flag |= + (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0; + if (AR_SREV_9280_20_OR_LATER(ah)) + rs->flag |= + (ads.ds_rxstatus3 & AR_STBC) ? + /* we can only Nss=1 STBC */ + (1 << RX_FLAG_STBC_SHIFT) : 0; if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE; diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 5865f92998e1..b02dfce964b4 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -149,6 +149,7 @@ struct ath_rx_status { u32 evm2; u32 evm3; u32 evm4; + u32 flag; /* see enum mac80211_rx_flags */ }; struct ath_htc_rx_status { @@ -533,7 +534,8 @@ struct ar5416_desc { #define AR_2040 0x00000002 #define AR_Parallel40 0x00000004 #define AR_Parallel40_S 2 -#define AR_RxStatusRsvd30 0x000000f8 +#define AR_STBC 0x00000008 /* on ar9280 and later */ +#define AR_RxStatusRsvd30 0x000000f0 #define AR_RxAntenna 0xffffff00 #define AR_RxAntenna_S 8 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a18414b5948b..cc5a98b8d187 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -193,7 +193,6 @@ static bool ath_prepare_reset(struct ath_softc *sc) ath_stop_ani(sc); del_timer_sync(&sc->rx_poll_timer); - ath9k_debug_samp_bb_mac(sc); ath9k_hw_disable_interrupts(ah); if (!ath_drain_all_txq(sc)) @@ -1273,7 +1272,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) curchan->center_freq); } else { /* perform spectral scan if requested. */ - if (sc->scanning && + if (test_bit(SC_OP_SCANNING, &sc->sc_flags) && sc->spectral_mode == SPECTRAL_CHANSCAN) ath9k_spectral_scan_trigger(hw); } @@ -1689,7 +1688,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; int ret = 0; - local_bh_disable(); + mutex_lock(&sc->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: @@ -1720,7 +1719,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n"); } - local_bh_enable(); + mutex_unlock(&sc->mutex); return ret; } @@ -2004,7 +2003,6 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_capabilities *pcaps = &ah->caps; int pattern_count = 0; int i, byte_cnt; u8 dis_deauth_pattern[MAX_PATTERN_SIZE]; @@ -2074,36 +2072,9 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc) /* Create Disassociate pattern mask */ - if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_EXACT) { - - if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_DWORD) { - /* - * for AR9280, because of hardware limitation, the - * first 4 bytes have to be matched for all patterns. - * the mask for disassociation and de-auth pattern - * matching need to enable the first 4 bytes. - * also the duration field needs to be filled. - */ - dis_deauth_mask[0] = 0xf0; - - /* - * fill in duration field - FIXME: what is the exact value ? 
- */ - dis_deauth_pattern[2] = 0xff; - dis_deauth_pattern[3] = 0xff; - } else { - dis_deauth_mask[0] = 0xfe; - } - - dis_deauth_mask[1] = 0x03; - dis_deauth_mask[2] = 0xc0; - } else { - dis_deauth_mask[0] = 0xef; - dis_deauth_mask[1] = 0x3f; - dis_deauth_mask[2] = 0x00; - dis_deauth_mask[3] = 0xfc; - } + dis_deauth_mask[0] = 0xfe; + dis_deauth_mask[1] = 0x03; + dis_deauth_mask[2] = 0xc0; ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n"); @@ -2339,15 +2310,13 @@ static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled) static void ath9k_sw_scan_start(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; - - sc->scanning = 1; + set_bit(SC_OP_SCANNING, &sc->sc_flags); } static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; - - sc->scanning = 0; + clear_bit(SC_OP_SCANNING, &sc->sc_flags); } struct ieee80211_ops ath9k_ops = { diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 8be2b5d8c155..865e043e8aa6 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -868,10 +868,7 @@ static int ath9k_process_rate(struct ath_common *common, if (rx_stats->rs_rate & 0x80) { /* HT rate */ rxs->flag |= RX_FLAG_HT; - if (rx_stats->rs_flags & ATH9K_RX_2040) - rxs->flag |= RX_FLAG_40MHZ; - if (rx_stats->rs_flags & ATH9K_RX_GI) - rxs->flag |= RX_FLAG_SHORT_GI; + rxs->flag |= rx_stats->flag; rxs->rate_idx = rx_stats->rs_rate & 0x7f; return 0; } @@ -958,11 +955,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (rx_stats->rs_more) return 0; - ath9k_process_rssi(common, hw, hdr, rx_stats); - if (ath9k_process_rate(common, hw, rx_stats, rx_status)) return -EINVAL; + ath9k_process_rssi(common, hw, hdr, rx_stats); + rx_status->band = hw->conf.chandef.chan->band; rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->signal = ah->noise + rx_stats->rs_rssi; diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c index 9f8563091bea..81c88dd606dc 100644 --- a/drivers/net/wireless/ath/ath9k/wow.c +++ b/drivers/net/wireless/ath/ath9k/wow.c @@ -34,17 +34,6 @@ const char *ath9k_hw_wow_event_to_string(u32 wow_event) } EXPORT_SYMBOL(ath9k_hw_wow_event_to_string); -static void ath9k_hw_config_serdes_wow_sleep(struct ath_hw *ah) -{ - int i; - - for (i = 0; i < ah->iniPcieSerdesWow.ia_rows; i++) - REG_WRITE(ah, INI_RA(&ah->iniPcieSerdesWow, i, 0), - INI_RA(&ah->iniPcieSerdesWow, i, 1)); - - usleep_range(1000, 1500); -} - static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); @@ -58,15 +47,8 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW)); return; - } else { - if (!AR_SREV_9300_20_OR_LATER(ah)) - REG_WRITE(ah, AR_RXDP, 0x0); } - /* AR9280 WoW has sleep issue, do not set it to sleep */ - if (AR_SREV_9280_20(ah)) - return; - REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT); } @@ -84,27 +66,16 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah) /* set the transmit buffer */ ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16)); - - if (!(AR_SREV_9300_20_OR_LATER(ah))) - ctl[0] += (KAL_ANTENNA_MODE << 25); - ctl[1] = 0; ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */ ctl[4] = 0; ctl[7] = (ah->txchainmask) << 2; - - if (AR_SREV_9300_20_OR_LATER(ah)) - ctl[2] = 0xf << 16; /* tx_tries 0 
*/ - else - ctl[2] = 0x7 << 16; /* tx_tries 0 */ - + ctl[2] = 0xf << 16; /* tx_tries 0 */ for (i = 0; i < KAL_NUM_DESC_WORDS; i++) REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); - /* for AR9300 family 13 descriptor words */ - if (AR_SREV_9300_20_OR_LATER(ah)) - REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); + REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) | (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16); @@ -183,9 +154,6 @@ void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern, ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT); - if (!AR_SREV_9285_12_OR_LATER(ah)) - return; - if (pattern_count < 4) { /* Pattern 0-3 uses AR_WOW_LENGTH1 register */ set = (pattern_len & AR_WOW_LENGTH_MAX) << @@ -207,6 +175,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) { u32 wow_status = 0; u32 val = 0, rval; + /* * read the WoW status register to know * the wakeup reason @@ -223,19 +192,14 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) val &= ah->wow_event_mask; if (val) { - if (val & AR_WOW_MAGIC_PAT_FOUND) wow_status |= AH_WOW_MAGIC_PATTERN_EN; - if (AR_WOW_PATTERN_FOUND(val)) wow_status |= AH_WOW_USER_PATTERN_EN; - if (val & AR_WOW_KEEP_ALIVE_FAIL) wow_status |= AH_WOW_LINK_CHANGE; - if (val & AR_WOW_BEACON_FAIL) wow_status |= AH_WOW_BEACON_MISS; - } /* @@ -255,17 +219,6 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN))); /* - * tie reset register for AR9002 family of chipsets - * NB: not tieing it back might have some repurcussions. - */ - - if (!AR_SREV_9300_20_OR_LATER(ah)) { - REG_SET_BIT(ah, AR_WA, AR_WA_UNTIE_RESET_EN | - AR_WA_POR_SHORT | AR_WA_RESET_EN); - } - - - /* * restore the beacon threshold to init value */ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); @@ -277,8 +230,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) * reset to our Chip's Power On Reset so that any PCI-E * reset from the bus will not reset our chip */ - - if (AR_SREV_9280_20_OR_LATER(ah) && ah->is_pciexpress) + if (ah->is_pciexpress) ath9k_hw_configpcipowersave(ah, false); ah->wow_event_mask = 0; @@ -298,7 +250,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) * are from the 'pattern_enable' in this function and * 'pattern_count' of ath9k_hw_wow_apply_pattern() */ - wow_event_mask = ah->wow_event_mask; /* @@ -306,50 +257,15 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) * WOW sleep, we do want the Reset from the PCI-E to disturb * our hw state */ - if (ah->is_pciexpress) { - /* * we need to untie the internal POR (power-on-reset) * to the external PCI-E reset. We also need to tie * the PCI-E Phy reset to the PCI-E reset. */ - - if (AR_SREV_9300_20_OR_LATER(ah)) { - set = AR_WA_RESET_EN | AR_WA_POR_SHORT; - clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE; - REG_RMW(ah, AR_WA, set, clr); - } else { - if (AR_SREV_9285(ah) || AR_SREV_9287(ah)) - set = AR9285_WA_DEFAULT; - else - set = AR9280_WA_DEFAULT; - - /* - * In AR9280 and AR9285, bit 14 in WA register - * (disable L1) should only be set when device - * enters D3 state and be cleared when device - * comes back to D0 - */ - - if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) - set |= AR_WA_D3_L1_DISABLE; - - clr = AR_WA_UNTIE_RESET_EN; - set |= AR_WA_RESET_EN | AR_WA_POR_SHORT; - REG_RMW(ah, AR_WA, set, clr); - - /* - * for WoW sleep, we reprogram the SerDes so that the - * PLL and CLK REQ are both enabled. 
This uses more - * power but otherwise WoW sleep is unstable and the - * chip may disappear. - */ - - if (AR_SREV_9285_12_OR_LATER(ah)) - ath9k_hw_config_serdes_wow_sleep(ah); - - } + set = AR_WA_RESET_EN | AR_WA_POR_SHORT; + clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE; + REG_RMW(ah, AR_WA, set, clr); } /* @@ -378,7 +294,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) * Program default values for pattern backoff, aifs/slot/KAL count, * beacon miss timeout, KAL timeout, etc. */ - set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF); REG_SET_BIT(ah, AR_WOW_PATTERN, set); @@ -398,7 +313,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) /* * Keep alive timo in ms except AR9280 */ - if (!pattern_enable || AR_SREV_9280(ah)) + if (!pattern_enable) set = AR_WOW_KEEP_ALIVE_NEVER; else set = KAL_TIMEOUT * 32; @@ -420,7 +335,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) /* * Configure MAC WoW Registers */ - set = 0; /* Send keep alive timeouts anyway */ clr = AR_WOW_KEEP_ALIVE_AUTO_DIS; @@ -430,16 +344,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) else set = AR_WOW_KEEP_ALIVE_FAIL_DIS; - /* - * FIXME: For now disable keep alive frame - * failure. This seems to sometimes trigger - * unnecessary wake up with AR9485 chipsets. - */ set = AR_WOW_KEEP_ALIVE_FAIL_DIS; - REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr); - /* * we are relying on a bmiss failure. ensure we have * enough threshold to prevent false positives @@ -473,14 +380,8 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) set |= AR_WOW_MAC_INTR_EN; REG_RMW(ah, AR_WOW_PATTERN, set, clr); - /* - * For AR9285 and later version of chipsets - * enable WoW pattern match for packets less - * than 256 bytes for all patterns - */ - if (AR_SREV_9285_12_OR_LATER(ah)) - REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B, - AR_WOW_PATTERN_SUPPORTED); + REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B, + AR_WOW_PATTERN_SUPPORTED); /* * Set the power states appropriately and enable PME @@ -488,43 +389,32 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) clr = 0; set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA; - /* - * This is needed for AR9300 chipsets to wake-up - * the host. - */ - if (AR_SREV_9300_20_OR_LATER(ah)) - clr = AR_PCIE_PM_CTRL_ENA; + clr = AR_PCIE_PM_CTRL_ENA; REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr); - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { - /* - * this is needed to prevent the chip waking up - * the host within 3-4 seconds with certain - * platform/BIOS. The fix is to enable - * D1 & D3 to match original definition and - * also match the OTP value. Anyway this - * is more related to SW WOW. - */ - clr = AR_PMCTRL_PWR_STATE_D1D3; - REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr); - - set = AR_PMCTRL_PWR_STATE_D1D3_REAL; - REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set); - } - + /* + * this is needed to prevent the chip waking up + * the host within 3-4 seconds with certain + * platform/BIOS. The fix is to enable + * D1 & D3 to match original definition and + * also match the OTP value. Anyway this + * is more related to SW WOW. 
+ */ + clr = AR_PMCTRL_PWR_STATE_D1D3; + REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr); + set = AR_PMCTRL_PWR_STATE_D1D3_REAL; + REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set); REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM); - if (AR_SREV_9300_20_OR_LATER(ah)) { - /* to bring down WOW power low margin */ - set = BIT(13); - REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set); - /* HW WoW */ - clr = BIT(5); - REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr); - } + /* to bring down WOW power low margin */ + set = BIT(13); + REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set); + /* HW WoW */ + clr = BIT(5); + REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr); ath9k_hw_set_powermode_wow_sleep(ah); ah->wow_event_mask = wow_event_mask; diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index bac3d98a0cfb..5644ac54facc 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -27,3 +27,15 @@ config WIL6210_ISR_COR self-clear when accessed for debug purposes, it makes such monitoring impossible. Say y unless you debug interrupts + +config ATH6KL_TRACING + bool "wil6210 tracing support" + depends on WIL6210 + depends on EVENT_TRACING + default y + ---help--- + Say Y here to enable tracepoints for the wil6210 driver + using the kernel tracing infrastructure. Select this + option if you are interested in debugging the driver. + + If unsure, say Y to make it easier to debug problems. diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index d288eea0a26a..f891d514d881 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -1,15 +1,20 @@ obj-$(CONFIG_WIL6210) += wil6210.o -wil6210-objs := main.o -wil6210-objs += netdev.o -wil6210-objs += cfg80211.o -wil6210-objs += pcie_bus.o -wil6210-objs += debugfs.o -wil6210-objs += wmi.o -wil6210-objs += interrupt.o -wil6210-objs += txrx.o +wil6210-y := main.o +wil6210-y += netdev.o +wil6210-y += cfg80211.o +wil6210-y += pcie_bus.o +wil6210-y += debugfs.o +wil6210-y += wmi.o +wil6210-y += interrupt.o +wil6210-y += txrx.o +wil6210-y += debug.o +wil6210-$(CONFIG_WIL6210_TRACING) += trace.o ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) subdir-ccflags-y += -Werror endif +# for tracing framework to find trace.h +CFLAGS_trace.o := -I$(src) + subdir-ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index c5d4a87abaaf..4eb05d0818c3 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -322,12 +322,16 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, * FW don't support scan after connection attempt */ set_bit(wil_status_dontscan, &wil->status); + set_bit(wil_status_fwconnecting, &wil->status); rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); if (rc == 0) { /* Connect can take lots of time */ mod_timer(&wil->connect_timer, jiffies + msecs_to_jiffies(2000)); + } else { + clear_bit(wil_status_dontscan, &wil->status); + clear_bit(wil_status_fwconnecting, &wil->status); } out: diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c new file mode 100644 index 000000000000..9eeabf4a5879 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/debug.c @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wil6210.h" +#include "trace.h" + +int wil_err(struct wil6210_priv *wil, const char *fmt, ...) +{ + struct net_device *ndev = wil_to_ndev(wil); + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + int ret; + + va_start(args, fmt); + vaf.va = &args; + ret = netdev_err(ndev, "%pV", &vaf); + trace_wil6210_log_err(&vaf); + va_end(args); + + return ret; +} + +int wil_info(struct wil6210_priv *wil, const char *fmt, ...) +{ + struct net_device *ndev = wil_to_ndev(wil); + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + int ret; + + va_start(args, fmt); + vaf.va = &args; + ret = netdev_info(ndev, "%pV", &vaf); + trace_wil6210_log_info(&vaf); + va_end(args); + + return ret; +} + +int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + trace_wil6210_log_dbg(&vaf); + va_end(args); + + return 0; +} diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 727b1f53e6ad..e8308ec30970 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -418,9 +418,15 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) if (skb) { unsigned char printbuf[16 * 3 + 2]; int i = 0; - int len = skb_headlen(skb); + int len = le16_to_cpu(d->dma.length); void *p = skb->data; + if (len != skb_headlen(skb)) { + seq_printf(s, "!!! 
len: desc = %d skb = %d\n", + len, skb_headlen(skb)); + len = min_t(int, len, skb_headlen(skb)); + } + seq_printf(s, " len = %d\n", len); while (i < len) { diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index e3c1e7684f9c..8205d3e4ab66 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -17,6 +17,7 @@ #include <linux/interrupt.h> #include "wil6210.h" +#include "trace.h" /** * Theory of operation: @@ -103,14 +104,14 @@ static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) clear_bit(wil_status_irqen, &wil->status); } -static void wil6210_unmask_irq_tx(struct wil6210_priv *wil) +void wil6210_unmask_irq_tx(struct wil6210_priv *wil) { iowrite32(WIL6210_IMC_TX, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, IMC)); } -static void wil6210_unmask_irq_rx(struct wil6210_priv *wil) +void wil6210_unmask_irq_rx(struct wil6210_priv *wil) { iowrite32(WIL6210_IMC_RX, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + @@ -168,6 +169,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_RX_ICR) + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_rx(isr); wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); if (!isr) { @@ -180,13 +182,14 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) { wil_dbg_irq(wil, "RX done\n"); isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE; - wil_rx_handle(wil); + wil_dbg_txrx(wil, "NAPI schedule\n"); + napi_schedule(&wil->napi_rx); } if (isr) wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); - wil6210_unmask_irq_rx(wil); + /* Rx IRQ will be enabled when NAPI processing finished */ return IRQ_HANDLED; } @@ -198,6 +201,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_tx(isr); wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); if (!isr) { @@ -208,23 +212,17 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) wil6210_mask_irq_tx(wil); if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { - uint i; wil_dbg_irq(wil, "TX done\n"); + napi_schedule(&wil->napi_tx); isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; - for (i = 0; i < 24; i++) { - u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i); - if (isr & mask) { - isr &= ~mask; - wil_dbg_irq(wil, "TX done(%i)\n", i); - wil_tx_complete(wil, i); - } - } + /* clear also all VRING interrupts */ + isr &= ~(BIT(25) - 1UL); } if (isr) wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); - wil6210_unmask_irq_tx(wil); + /* Tx IRQ will be enabled when NAPI processing finished */ return IRQ_HANDLED; } @@ -256,6 +254,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICR)); + trace_wil6210_irq_misc(isr); wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr); if (!isr) { @@ -301,6 +300,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) struct wil6210_priv *wil = cookie; u32 isr = wil->isr_misc; + trace_wil6210_irq_misc_thread(isr); wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); if (isr & ISR_MISC_FW_ERROR) { @@ -408,6 +408,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) if (wil6210_debug_irq_mask(wil, pseudo_cause)) return IRQ_NONE; + trace_wil6210_irq_pseudo(pseudo_cause); wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause); wil6210_mask_irq_pseudo(wil); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index a0478e2f6868..c97b864667c5 100644 --- 
a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -56,27 +56,21 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid) { uint i; struct net_device *ndev = wil_to_ndev(wil); - struct wireless_dev *wdev = wil->wdev; wil_dbg_misc(wil, "%s()\n", __func__); wil_link_off(wil); - clear_bit(wil_status_fwconnected, &wil->status); - - switch (wdev->sme_state) { - case CFG80211_SME_CONNECTED: - cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE, + if (test_bit(wil_status_fwconnected, &wil->status)) { + clear_bit(wil_status_fwconnected, &wil->status); + cfg80211_disconnected(ndev, + WLAN_STATUS_UNSPECIFIED_FAILURE, NULL, 0, GFP_KERNEL); - break; - case CFG80211_SME_CONNECTING: + } else if (test_bit(wil_status_fwconnecting, &wil->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); - break; - default: - break; } - + clear_bit(wil_status_fwconnecting, &wil->status); for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) wil_vring_fini_tx(wil, i); @@ -365,6 +359,9 @@ static int __wil_up(struct wil6210_priv *wil) /* Rx VRING. After MAC and beacon */ wil_rx_init(wil); + napi_enable(&wil->napi_rx); + napi_enable(&wil->napi_tx); + return 0; } @@ -381,6 +378,9 @@ int wil_up(struct wil6210_priv *wil) static int __wil_down(struct wil6210_priv *wil) { + napi_disable(&wil->napi_rx); + napi_disable(&wil->napi_tx); + if (wil->scan_request) { cfg80211_scan_done(wil->scan_request, true); wil->scan_request = NULL; diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 098a8ec6b841..29dd1e58cb17 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -40,6 +40,55 @@ static const struct net_device_ops wil_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget) +{ + struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, + napi_rx); + int quota = budget; + int done; + + wil_rx_handle(wil, "a); + done = budget - quota; + + if (done <= 1) { /* burst ends - only one packet processed */ + napi_complete(napi); + wil6210_unmask_irq_rx(wil); + wil_dbg_txrx(wil, "NAPI RX complete\n"); + } + + wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done); + + return done; +} + +static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) +{ + struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, + napi_tx); + int tx_done = 0; + uint i; + + /* always process ALL Tx complete, regardless budget - it is fast */ + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { + struct vring *vring = &wil->vring_tx[i]; + + if (!vring->va) + continue; + + tx_done += wil_tx_complete(wil, i); + } + + if (tx_done <= 1) { /* burst ends - only one packet processed */ + napi_complete(napi); + wil6210_unmask_irq_tx(wil); + wil_dbg_txrx(wil, "NAPI TX complete\n"); + } + + wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done); + + return min(tx_done, budget); +} + void *wil_if_alloc(struct device *dev, void __iomem *csr) { struct net_device *ndev; @@ -81,6 +130,11 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr) SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; + netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx, + WIL6210_NAPI_BUDGET); + netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx, + WIL6210_NAPI_BUDGET); + wil_link_off(wil); return wil; diff --git 
a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c new file mode 100644 index 000000000000..cd2534b9c5aa --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/trace.c @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h new file mode 100644 index 000000000000..eff1239be53a --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/trace.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM wil6210 +#if !defined(WIL6210_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define WIL6210_TRACE_H + +#include <linux/tracepoint.h> +#include "wil6210.h" +#include "txrx.h" + +/* create empty functions when tracing is disabled */ +#if !defined(CONFIG_WIL6210_TRACING) || defined(__CHECKER__) + +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) +#undef DEFINE_EVENT +#define DEFINE_EVENT(evt_class, name, proto, ...) 
\ +static inline void trace_ ## name(proto) {} +#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */ + +DECLARE_EVENT_CLASS(wil6210_wmi, + TP_PROTO(u16 id, void *buf, u16 buf_len), + + TP_ARGS(id, buf, buf_len), + + TP_STRUCT__entry( + __field(u16, id) + __field(u16, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->id = id; + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "id 0x%04x len %d", + __entry->id, __entry->buf_len + ) +); + +DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd, + TP_PROTO(u16 id, void *buf, u16 buf_len), + TP_ARGS(id, buf, buf_len) +); + +DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event, + TP_PROTO(u16 id, void *buf, u16 buf_len), + TP_ARGS(id, buf, buf_len) +); + +#define WIL6210_MSG_MAX (200) + +DECLARE_EVENT_CLASS(wil6210_log_event, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf), + TP_STRUCT__entry( + __dynamic_array(char, msg, WIL6210_MSG_MAX) + ), + TP_fast_assign( + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), + WIL6210_MSG_MAX, + vaf->fmt, + *vaf->va) >= WIL6210_MSG_MAX); + ), + TP_printk("%s", __get_str(msg)) +); + +DEFINE_EVENT(wil6210_log_event, wil6210_log_err, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(wil6210_log_event, wil6210_log_info, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +DEFINE_EVENT(wil6210_log_event, wil6210_log_dbg, + TP_PROTO(struct va_format *vaf), + TP_ARGS(vaf) +); + +#define wil_pseudo_irq_cause(x) __print_flags(x, "|", \ + {BIT_DMA_PSEUDO_CAUSE_RX, "Rx" }, \ + {BIT_DMA_PSEUDO_CAUSE_TX, "Tx" }, \ + {BIT_DMA_PSEUDO_CAUSE_MISC, "Misc" }) + +TRACE_EVENT(wil6210_irq_pseudo, + TP_PROTO(u32 x), + TP_ARGS(x), + TP_STRUCT__entry( + __field(u32, x) + ), + TP_fast_assign( + __entry->x = x; + ), + TP_printk("cause 0x%08x : %s", __entry->x, + wil_pseudo_irq_cause(__entry->x)) +); + +DECLARE_EVENT_CLASS(wil6210_irq, + TP_PROTO(u32 x), + TP_ARGS(x), + TP_STRUCT__entry( + __field(u32, x) + ), + TP_fast_assign( + __entry->x = x; + ), + TP_printk("cause 0x%08x", __entry->x) +); + +DEFINE_EVENT(wil6210_irq, wil6210_irq_rx, + TP_PROTO(u32 x), + TP_ARGS(x) +); + +DEFINE_EVENT(wil6210_irq, wil6210_irq_tx, + TP_PROTO(u32 x), + TP_ARGS(x) +); + +DEFINE_EVENT(wil6210_irq, wil6210_irq_misc, + TP_PROTO(u32 x), + TP_ARGS(x) +); + +DEFINE_EVENT(wil6210_irq, wil6210_irq_misc_thread, + TP_PROTO(u32 x), + TP_ARGS(x) +); + +TRACE_EVENT(wil6210_rx, + TP_PROTO(u16 index, struct vring_rx_desc *d), + TP_ARGS(index, d), + TP_STRUCT__entry( + __field(u16, index) + __field(unsigned int, len) + __field(u8, mid) + __field(u8, cid) + __field(u8, tid) + __field(u8, type) + __field(u8, subtype) + __field(u16, seq) + __field(u8, mcs) + ), + TP_fast_assign( + __entry->index = index; + __entry->len = d->dma.length; + __entry->mid = wil_rxdesc_mid(d); + __entry->cid = wil_rxdesc_cid(d); + __entry->tid = wil_rxdesc_tid(d); + __entry->type = wil_rxdesc_ftype(d); + __entry->subtype = wil_rxdesc_subtype(d); + __entry->seq = wil_rxdesc_seq(d); + __entry->mcs = wil_rxdesc_mcs(d); + ), + TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x" + " type 0x%1x subtype 0x%1x", __entry->index, __entry->len, + __entry->mid, __entry->cid, __entry->tid, __entry->mcs, + __entry->seq, __entry->type, __entry->subtype) +); + +TRACE_EVENT(wil6210_tx, + TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags), + TP_ARGS(vring, index, len, frags), + TP_STRUCT__entry( + __field(u8, vring) + __field(u8, frags) + __field(u16, index) + __field(unsigned int, len) + ), + 
TP_fast_assign( + __entry->vring = vring; + __entry->frags = frags; + __entry->index = index; + __entry->len = len; + ), + TP_printk("vring %d index %d len %d frags %d", + __entry->vring, __entry->index, __entry->len, __entry->frags) +); + +TRACE_EVENT(wil6210_tx_done, + TP_PROTO(u8 vring, u16 index, unsigned int len, u8 err), + TP_ARGS(vring, index, len, err), + TP_STRUCT__entry( + __field(u8, vring) + __field(u8, err) + __field(u16, index) + __field(unsigned int, len) + ), + TP_fast_assign( + __entry->vring = vring; + __entry->index = index; + __entry->len = len; + __entry->err = err; + ), + TP_printk("vring %d index %d len %d err 0x%02x", + __entry->vring, __entry->index, __entry->len, + __entry->err) +); + +#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/ + +#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) +/* we don't want to use include/trace/events */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> +#endif /* defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) */ diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 797024507c71..00dffeda983e 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -22,6 +22,7 @@ #include "wil6210.h" #include "wmi.h" #include "txrx.h" +#include "trace.h" static bool rtap_include_phy_info; module_param(rtap_include_phy_info, bool, S_IRUGO); @@ -89,8 +90,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring) * we can use any */ for (i = 0; i < vring->size; i++) { - volatile struct vring_tx_desc *d = &(vring->va[i].tx); - d->dma.status = TX_DMA_STATUS_DU; + volatile struct vring_tx_desc *_d = &(vring->va[i].tx); + _d->dma.status = TX_DMA_STATUS_DU; } wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size, @@ -106,30 +107,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, size_t sz = vring->size * sizeof(vring->va[0]); while (!wil_vring_is_empty(vring)) { + dma_addr_t pa; + struct sk_buff *skb; + u16 dmalen; + if (tx) { - volatile struct vring_tx_desc *d = + struct vring_tx_desc dd, *d = ⅆ + volatile struct vring_tx_desc *_d = &vring->va[vring->swtail].tx; - dma_addr_t pa = d->dma.addr_low | - ((u64)d->dma.addr_high << 32); - struct sk_buff *skb = vring->ctx[vring->swtail]; + + *d = *_d; + pa = wil_desc_addr(&d->dma.addr); + dmalen = le16_to_cpu(d->dma.length); + skb = vring->ctx[vring->swtail]; if (skb) { - dma_unmap_single(dev, pa, d->dma.length, + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); dev_kfree_skb_any(skb); vring->ctx[vring->swtail] = NULL; } else { - dma_unmap_page(dev, pa, d->dma.length, + dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); } vring->swtail = wil_vring_next_tail(vring); } else { /* rx */ - volatile struct vring_rx_desc *d = + struct vring_rx_desc dd, *d = ⅆ + volatile struct vring_rx_desc *_d = &vring->va[vring->swtail].rx; - dma_addr_t pa = d->dma.addr_low | - ((u64)d->dma.addr_high << 32); - struct sk_buff *skb = vring->ctx[vring->swhead]; - dma_unmap_single(dev, pa, d->dma.length, - DMA_FROM_DEVICE); + + *d = *_d; + pa = wil_desc_addr(&d->dma.addr); + dmalen = le16_to_cpu(d->dma.length); + skb = vring->ctx[vring->swhead]; + dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); kfree_skb(skb); wil_vring_advance_head(vring, 1); } @@ -151,7 +161,8 @@ static int wil_vring_alloc_skb(struct wil6210_priv 
*wil, struct vring *vring, { struct device *dev = wil_to_dev(wil); unsigned int sz = RX_BUF_LEN; - volatile struct vring_rx_desc *d = &(vring->va[i].rx); + struct vring_rx_desc dd, *d = ⅆ + volatile struct vring_rx_desc *_d = &(vring->va[i].rx); dma_addr_t pa; /* TODO align */ @@ -169,13 +180,13 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, } d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT; - d->dma.addr_low = lower_32_bits(pa); - d->dma.addr_high = (u16)upper_32_bits(pa); + wil_desc_addr_set(&d->dma.addr, pa); /* ip_length don't care */ /* b11 don't care */ /* error don't care */ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ - d->dma.length = sz; + d->dma.length = cpu_to_le16(sz); + *_d = *d; vring->ctx[i] = skb; return 0; @@ -321,11 +332,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, { struct device *dev = wil_to_dev(wil); struct net_device *ndev = wil_to_ndev(wil); - volatile struct vring_rx_desc *d; - struct vring_rx_desc *d1; + volatile struct vring_rx_desc *_d; + struct vring_rx_desc *d; struct sk_buff *skb; dma_addr_t pa; unsigned int sz = RX_BUF_LEN; + u16 dmalen; u8 ftype; u8 ds_bits; @@ -334,32 +346,44 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, if (wil_vring_is_empty(vring)) return NULL; - d = &(vring->va[vring->swhead].rx); - if (!(d->dma.status & RX_DMA_STATUS_DU)) { + _d = &(vring->va[vring->swhead].rx); + if (!(_d->dma.status & RX_DMA_STATUS_DU)) { /* it is not error, we just reached end of Rx done area */ return NULL; } - pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); skb = vring->ctx[vring->swhead]; + d = wil_skb_rxdesc(skb); + *d = *_d; + pa = wil_desc_addr(&d->dma.addr); + vring->ctx[vring->swhead] = NULL; + wil_vring_advance_head(vring, 1); + dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); - skb_trim(skb, d->dma.length); + dmalen = le16_to_cpu(d->dma.length); - d1 = wil_skb_rxdesc(skb); - *d1 = *d; + trace_wil6210_rx(vring->swhead, d); + wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen); + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, + (const void *)d, sizeof(*d), false); - wil->stats.last_mcs_rx = wil_rxdesc_mcs(d1); + if (dmalen > sz) { + wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); + kfree_skb(skb); + return NULL; + } + skb_trim(skb, dmalen); + + wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb_headlen(skb), false); + + + wil->stats.last_mcs_rx = wil_rxdesc_mcs(d); /* use radiotap header only if required */ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) wil_rx_add_radiotap_header(wil, skb); - wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length); - wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, - (const void *)d, sizeof(*d), false); - - wil_vring_advance_head(vring, 1); - /* no extra checks if in sniffer mode */ if (ndev->type != ARPHRD_ETHER) return skb; @@ -368,7 +392,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, * Driver should recognize it by frame type, that is found * in Rx descriptor. If type is not data, it is 802.11 frame as is */ - ftype = wil_rxdesc_ftype(d1) << 2; + ftype = wil_rxdesc_ftype(d) << 2; if (ftype != IEEE80211_FTYPE_DATA) { wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); /* TODO: process it */ @@ -383,7 +407,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, return NULL; } - ds_bits = wil_rxdesc_ds_bits(d1); + ds_bits = wil_rxdesc_ds_bits(d); if (ds_bits == 1) { /* * HW bug - in ToDS mode, i.e. 
Rx on AP side, @@ -425,6 +449,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) /* * Pass Rx packet to the netif. Update statistics. + * Called in softirq context (NAPI poll). */ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) { @@ -433,10 +458,7 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) skb_orphan(skb); - if (in_interrupt()) - rc = netif_rx(skb); - else - rc = netif_rx_ni(skb); + rc = netif_receive_skb(skb); if (likely(rc == NET_RX_SUCCESS)) { ndev->stats.rx_packets++; @@ -450,9 +472,9 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) /** * Proceed all completed skb's from Rx VRING * - * Safe to call from IRQ + * Safe to call from NAPI poll, i.e. softirq with interrupts enabled */ -void wil_rx_handle(struct wil6210_priv *wil) +void wil_rx_handle(struct wil6210_priv *wil, int *quota) { struct net_device *ndev = wil_to_ndev(wil); struct vring *v = &wil->vring_rx; @@ -463,9 +485,8 @@ void wil_rx_handle(struct wil6210_priv *wil) return; } wil_dbg_txrx(wil, "%s()\n", __func__); - while (NULL != (skb = wil_vring_reap_rx(wil, v))) { - wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb_headlen(skb), false); + while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) { + (*quota)--; if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { skb->dev = ndev; @@ -600,17 +621,15 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil, return NULL; } -static int wil_tx_desc_map(volatile struct vring_tx_desc *d, - dma_addr_t pa, u32 len) +static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len) { - d->dma.addr_low = lower_32_bits(pa); - d->dma.addr_high = (u16)upper_32_bits(pa); + wil_desc_addr_set(&d->dma.addr, pa); d->dma.ip_length = 0; /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/ d->dma.b11 = 0/*14 | BIT(7)*/; d->dma.error = 0; d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */ - d->dma.length = len; + d->dma.length = cpu_to_le16((u16)len); d->dma.d0 = 0; d->mac.d[0] = 0; d->mac.d[1] = 0; @@ -630,7 +649,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, struct sk_buff *skb) { struct device *dev = wil_to_dev(wil); - volatile struct vring_tx_desc *d; + struct vring_tx_desc dd, *d = ⅆ + volatile struct vring_tx_desc *_d; u32 swhead = vring->swhead; int avail = wil_vring_avail_tx(vring); int nr_frags = skb_shinfo(skb)->nr_frags; @@ -648,7 +668,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 1 + nr_frags); return -ENOMEM; } - d = &(vring->va[i].tx); + _d = &(vring->va[i].tx); /* FIXME FW can accept only unicast frames for the peer */ memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN); @@ -667,25 +687,30 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, wil_tx_desc_map(d, pa, skb_headlen(skb)); d->mac.d[2] |= ((nr_frags + 1) << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); + if (nr_frags) + *_d = *d; + /* middle segments */ for (f = 0; f < nr_frags; f++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f]; int len = skb_frag_size(frag); i = (swhead + f + 1) % vring->size; - d = &(vring->va[i].tx); + _d = &(vring->va[i].tx); pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, pa))) goto dma_error; wil_tx_desc_map(d, pa, len); vring->ctx[i] = NULL; + *_d = *d; } /* for the last seg only */ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); d->dma.d0 |= BIT(9); /* BUG: undocumented bit */ d->dma.d0 
|= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS); + *_d = *d; wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); @@ -693,6 +718,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* advance swhead */ wil_vring_advance_head(vring, nr_frags + 1); wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead); + trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); /* hold reference to skb * to prevent skb release before accounting @@ -705,14 +731,18 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* unmap what we have mapped */ /* Note: increment @f to operate with positive index */ for (f++; f > 0; f--) { + u16 dmalen; + i = (swhead + f) % vring->size; - d = &(vring->va[i].tx); - d->dma.status = TX_DMA_STATUS_DU; - pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); + _d = &(vring->va[i].tx); + *d = *_d; + _d->dma.status = TX_DMA_STATUS_DU; + pa = wil_desc_addr(&d->dma.addr); + dmalen = le16_to_cpu(d->dma.length); if (vring->ctx[i]) - dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE); + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); else - dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE); + dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); } return -EINVAL; @@ -761,7 +791,6 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) break; /* goto drop; */ } drop: - netif_tx_stop_all_queues(ndev); ndev->stats.tx_dropped++; dev_kfree_skb_any(skb); @@ -771,41 +800,48 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) /** * Clean up transmitted skb's from the Tx VRING * + * Return number of descriptors cleared + * * Safe to call from IRQ */ -void wil_tx_complete(struct wil6210_priv *wil, int ringid) +int wil_tx_complete(struct wil6210_priv *wil, int ringid) { struct net_device *ndev = wil_to_ndev(wil); struct device *dev = wil_to_dev(wil); struct vring *vring = &wil->vring_tx[ringid]; + int done = 0; if (!vring->va) { wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); - return; + return 0; } wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); while (!wil_vring_is_empty(vring)) { - volatile struct vring_tx_desc *d1 = + volatile struct vring_tx_desc *_d = &vring->va[vring->swtail].tx; struct vring_tx_desc dd, *d = ⅆ dma_addr_t pa; struct sk_buff *skb; + u16 dmalen; - dd = *d1; + *d = *_d; if (!(d->dma.status & TX_DMA_STATUS_DU)) break; + dmalen = le16_to_cpu(d->dma.length); + trace_wil6210_tx_done(ringid, vring->swtail, dmalen, + d->dma.error); wil_dbg_txrx(wil, "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n", - vring->swtail, d->dma.length, d->dma.status, + vring->swtail, dmalen, d->dma.status, d->dma.error); wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); - pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32); + pa = wil_desc_addr(&d->dma.addr); skb = vring->ctx[vring->swtail]; if (skb) { if (d->dma.error == 0) { @@ -815,18 +851,21 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid) ndev->stats.tx_errors++; } - dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE); + dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE); dev_kfree_skb_any(skb); vring->ctx[vring->swtail] = NULL; } else { - dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE); + dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE); } - d->dma.addr_low = 0; - d->dma.addr_high = 0; + d->dma.addr.addr_low = 0; + 
d->dma.addr.addr_high = 0; d->dma.length = 0; d->dma.status = TX_DMA_STATUS_DU; vring->swtail = wil_vring_next_tail(vring); + done++; } if (wil_vring_avail_tx(vring) > vring->size/4) netif_tx_wake_all_queues(wil_to_ndev(wil)); + + return done; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index adef12fb2aee..23c0781cdb93 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -27,6 +27,28 @@ #define WIL6210_RTAP_SIZE (128) /* Tx/Rx path */ + +/* + * Common representation of physical address in Vring + */ +struct vring_dma_addr { + __le32 addr_low; + __le16 addr_high; +} __packed; + +static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr) +{ + return le32_to_cpu(addr->addr_low) | + ((u64)le16_to_cpu(addr->addr_high) << 32); +} + +static inline void wil_desc_addr_set(struct vring_dma_addr *addr, + dma_addr_t pa) +{ + addr->addr_low = cpu_to_le32(lower_32_bits(pa)); + addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa)); +} + /* * Tx descriptor - MAC part * [dword 0] @@ -216,13 +238,12 @@ struct vring_tx_mac { struct vring_tx_dma { u32 d0; - u32 addr_low; - u16 addr_high; + struct vring_dma_addr addr; u8 ip_length; u8 b11; /* 0..6: mac_length; 7:ip_version */ u8 error; /* 0..2: err; 3..7: reserved; */ u8 status; /* 0: used; 1..7; reserved */ - u16 length; + __le16 length; } __packed; /* @@ -315,13 +336,12 @@ struct vring_rx_mac { struct vring_rx_dma { u32 d0; - u32 addr_low; - u16 addr_high; + struct vring_dma_addr addr; u8 ip_length; u8 b11; u8 error; u8 status; - u16 length; + __le16 length; } __packed; struct vring_tx_desc { diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 8f76ecd8a7e5..373cf656f5b0 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -34,9 +34,11 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL6210_MEM_SIZE (2*1024*1024UL) -#define WIL6210_RX_RING_SIZE (128) -#define WIL6210_TX_RING_SIZE (128) -#define WIL6210_MAX_TX_RINGS (24) +#define WIL6210_RX_RING_SIZE (128) +#define WIL6210_TX_RING_SIZE (128) +#define WIL6210_MAX_TX_RINGS (24) /* HW limit */ +#define WIL6210_MAX_CID (8) /* HW limit */ +#define WIL6210_NAPI_BUDGET (16) /* arbitrary */ /* Hardware definitions begin */ @@ -184,6 +186,7 @@ struct vring { enum { /* for wil6210_priv.status */ wil_status_fwready = 0, + wil_status_fwconnecting, wil_status_fwconnected, wil_status_dontscan, wil_status_reset_done, @@ -239,6 +242,8 @@ struct wil6210_priv { * - consumed in thread by wmi_event_worker */ spinlock_t wmi_ev_lock; + struct napi_struct napi_rx; + struct napi_struct napi_tx; /* DMA related */ struct vring vring_rx; struct vring vring_tx[WIL6210_MAX_TX_RINGS]; @@ -267,9 +272,13 @@ struct wil6210_priv { #define wil_to_ndev(i) (wil_to_wdev(i)->netdev) #define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr)) -#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg) -#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg) -#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg) +int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...); +int wil_err(struct wil6210_priv *wil, const char *fmt, ...); +int wil_info(struct wil6210_priv *wil, const char *fmt, ...); +#define wil_dbg(wil, fmt, arg...) 
do { \ + netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \ + wil_dbg_trace(wil, fmt, ##arg); \ +} while (0) #define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg) #define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg) @@ -356,10 +365,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, void wil_vring_fini_tx(struct wil6210_priv *wil, int id); netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); -void wil_tx_complete(struct wil6210_priv *wil, int ringid); +int wil_tx_complete(struct wil6210_priv *wil, int ringid); +void wil6210_unmask_irq_tx(struct wil6210_priv *wil); /* RX API */ -void wil_rx_handle(struct wil6210_priv *wil); +void wil_rx_handle(struct wil6210_priv *wil, int *quota); +void wil6210_unmask_irq_rx(struct wil6210_priv *wil); int wil_iftype_nl2wmi(enum nl80211_iftype type); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 45b04e383f9a..527ffb543821 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -20,6 +20,7 @@ #include "wil6210.h" #include "txrx.h" #include "wmi.h" +#include "trace.h" /** * WMI event receiving - theory of operations @@ -246,6 +247,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) iowrite32(r->head = next_head, wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head)); + trace_wil6210_wmi_cmd(cmdid, buf, len); + /* interrupt to FW */ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); @@ -406,7 +409,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { - if (wdev->sme_state != CFG80211_SME_CONNECTING) { + if (!test_bit(wil_status_fwconnecting, &wil->status)) { wil_err(wil, "Not in connecting state\n"); return; } @@ -430,6 +433,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); } + clear_bit(wil_status_fwconnecting, &wil->status); set_bit(wil_status_fwconnected, &wil->status); /* FIXME FW can transmit only ucast frames to peer */ @@ -635,8 +639,9 @@ void wmi_recv_cmd(struct wil6210_priv *wil) hdr.flags); if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { - wil_dbg_wmi(wil, "WMI event 0x%04x\n", - evt->event.wmi.id); + u16 id = le16_to_cpu(evt->event.wmi.id); + wil_dbg_wmi(wil, "WMI event 0x%04x\n", id); + trace_wil6210_wmi_event(id, &evt->event.wmi, len); } wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, &evt->event.hdr, sizeof(hdr) + len, true); @@ -724,7 +729,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) .bcon_interval = cpu_to_le16(bi), .network_type = wmi_nettype, .disable_sec_offload = 1, - .channel = chan, + .channel = chan - 1, }; struct { struct wil6210_mbox_hdr_wmi wmi; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 44fa0cdbf97b..11400b39cf0b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -606,7 +606,8 @@ static int brcmf_sdio_pd_remove(struct platform_device *pdev) static struct platform_driver brcmf_sdio_pd = { .remove = brcmf_sdio_pd_remove, .driver = { - .name = BRCMFMAC_SDIO_PDATA_NAME + .name = BRCMFMAC_SDIO_PDATA_NAME, + .owner = THIS_MODULE, } }; diff --git 
a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 6d758f285352..8bd256ba66ab 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -3917,6 +3917,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct brcmf_fil_af_params_le *af_params; bool ack; s32 chan_nr; + u32 freq; brcmf_dbg(TRACE, "Enter\n"); @@ -3929,6 +3930,8 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, return -EPERM; } + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + if (ieee80211_is_probe_resp(mgmt->frame_control)) { /* Right now the only reason to get a probe response */ /* is for p2p listen response or for p2p GO from */ @@ -3944,7 +3947,6 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; ie_len = len - ie_offset; - vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; err = brcmf_vif_set_mgmt_ie(vif, @@ -3968,8 +3970,15 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); /* Add the length exepted for 802.11 header */ action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); - /* Add the channel */ - chan_nr = ieee80211_frequency_to_channel(chan->center_freq); + /* Add the channel. Use the one specified as parameter if any or + * the current one (got from the firmware) otherwise + */ + if (chan) + freq = chan->center_freq; + else + brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL, + &freq); + chan_nr = ieee80211_frequency_to_channel(freq); af_params->channel = cpu_to_le32(chan_nr); memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c index 1585cc5bf866..bd982856d385 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c @@ -900,7 +900,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, if (supr_status) { update_rate = false; if (supr_status == TX_STATUS_SUPR_BADCH) { - brcms_err(wlc->hw->d11core, + brcms_dbg_ht(wlc->hw->d11core, "%s: Pkt tx suppressed, illegal channel possibly %d\n", __func__, CHSPEC_CHANNEL( wlc->default_bss->chanspec)); diff --git a/drivers/net/wireless/cw1200/Kconfig b/drivers/net/wireless/cw1200/Kconfig new file mode 100644 index 000000000000..7a585d4e9c8e --- /dev/null +++ b/drivers/net/wireless/cw1200/Kconfig @@ -0,0 +1,45 @@ +config CW1200 + tristate "CW1200 WLAN support" + depends on MAC80211 && CFG80211 + help + This is a driver for the ST-E CW1100 & CW1200 WLAN chipsets. + This option just enables the driver core, see below for + specific bus support. + +if CW1200 + +config CW1200_WLAN_SDIO + tristate "Support SDIO platforms" + depends on CW1200 && MMC + help + Enable support for the CW1200 connected via an SDIO bus. + By default this driver only supports the Sagrad SG901-1091/1098 EVK + and similar designs that utilize a hardware reset circuit. To + support different CW1200 SDIO designs you will need to override + the default platform data by calling cw1200_sdio_set_platform_data() + in your board setup file. 
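For boards that do not match the Sagrad reference wiring, the override described above happens in platform code at init time. A rough sketch of such a board hook follows; only cw1200_sdio_set_platform_data() itself is named by this help text, while the header path, the struct name cw1200_platform_data_sdio and all field names below are assumptions about the driver's platform-data interface and should be checked against the headers shipped with the driver.

/*
 * Hypothetical board-setup sketch: register CW1200 SDIO platform data
 * before the cw1200_wlan_sdio module probes the card.  Header path,
 * struct layout and field names are assumed, not taken from this patch.
 */
#include <linux/init.h>
#include <linux/platform_data/net-cw1200.h>	/* assumed header location */

static struct cw1200_platform_data_sdio myboard_cw1200_pdata = {
	.ref_clk	= 38400,	/* reference clock in kHz (assumed field) */
	.have_5ghz	= false,
	.reset		= 170,		/* hypothetical GPIO driving the RSTn line */
	.powerup	= 171,		/* hypothetical GPIO driving the POWERUP line */
};

static void __init myboard_cw1200_init(void)
{
	/* Call from the board's machine-init path, before MMC/SDIO probing. */
	cw1200_sdio_set_platform_data(&myboard_cw1200_pdata);
}

The point of the help text is simply that this registration is done from board code at init time; designs with different reset circuitry supply their own hooks here, which is also the "platform data glue" the SPI option below refers to.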
+ +config CW1200_WLAN_SPI + tristate "Support SPI platforms" + depends on CW1200 && SPI + help + Enables support for the CW1200 connected via a SPI bus. You will + need to add appropriate platform data glue in your board setup + file. + +menu "Driver debug features" + depends on CW1200 && DEBUG_FS + +config CW1200_ETF + bool "Enable CW1200 Engineering Test Framework hooks" + help + If you don't know what this is, just say N. + +config CW1200_ITP + bool "Enable ITP access" + help + If you don't know what this is, just say N. + +endmenu + +endif diff --git a/drivers/net/wireless/cw1200/Makefile b/drivers/net/wireless/cw1200/Makefile new file mode 100644 index 000000000000..bc6cbf91f26e --- /dev/null +++ b/drivers/net/wireless/cw1200/Makefile @@ -0,0 +1,22 @@ +cw1200_core-y := \ + fwio.o \ + txrx.o \ + main.o \ + queue.o \ + hwio.o \ + bh.o \ + wsm.o \ + sta.o \ + scan.o \ + debug.o +cw1200_core-$(CONFIG_CW1200_ITP) += itp.o +cw1200_core-$(CONFIG_PM) += pm.o + +# CFLAGS_sta.o += -DDEBUG + +cw1200_wlan_sdio-y := cw1200_sdio.o +cw1200_wlan_spi-y := cw1200_spi.o + +obj-$(CONFIG_CW1200) += cw1200_core.o +obj-$(CONFIG_CW1200_WLAN_SDIO) += cw1200_wlan_sdio.o +obj-$(CONFIG_CW1200_WLAN_SPI) += cw1200_wlan_spi.o diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c new file mode 100644 index 000000000000..324b57001da0 --- /dev/null +++ b/drivers/net/wireless/cw1200/bh.c @@ -0,0 +1,616 @@ +/* + * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * ST-Ericsson UMAC CW1200 driver, which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh <ajitpal.singh@stericsson.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <net/mac80211.h> +#include <linux/kthread.h> +#include <linux/timer.h> + +#include "cw1200.h" +#include "bh.h" +#include "hwio.h" +#include "wsm.h" +#include "hwbus.h" +#include "debug.h" +#include "fwio.h" + +static int cw1200_bh(void *arg); + +#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4) +/* an SPI message cannot be bigger than (2"12-1)*2 bytes + * "*2" to cvt to bytes */ +#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2) +#define PIGGYBACK_CTRL_REG (2) +#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG) + +/* Suspend state privates */ +enum cw1200_bh_pm_state { + CW1200_BH_RESUMED = 0, + CW1200_BH_SUSPEND, + CW1200_BH_SUSPENDED, + CW1200_BH_RESUME, +}; + +typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv, + u8 *data, size_t size); + +static void cw1200_bh_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, bh_work); + cw1200_bh(priv); +} + +int cw1200_register_bh(struct cw1200_common *priv) +{ + int err = 0; + /* Realtime workqueue */ + priv->bh_workqueue = alloc_workqueue("cw1200_bh", + WQ_MEM_RECLAIM | WQ_HIGHPRI + | WQ_CPU_INTENSIVE, 1); + + if (!priv->bh_workqueue) + return -ENOMEM; + + INIT_WORK(&priv->bh_work, cw1200_bh_work); + + pr_debug("[BH] register.\n"); + + atomic_set(&priv->bh_rx, 0); + atomic_set(&priv->bh_tx, 0); + atomic_set(&priv->bh_term, 0); + atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); + priv->bh_error = 0; + priv->hw_bufs_used = 0; + priv->buf_id_tx = 0; + priv->buf_id_rx = 0; + init_waitqueue_head(&priv->bh_wq); + init_waitqueue_head(&priv->bh_evt_wq); + + err = !queue_work(priv->bh_workqueue, &priv->bh_work); + WARN_ON(err); + return err; +} + +void cw1200_unregister_bh(struct cw1200_common *priv) +{ + atomic_add(1, &priv->bh_term); + wake_up(&priv->bh_wq); + + flush_workqueue(priv->bh_workqueue); + + destroy_workqueue(priv->bh_workqueue); + priv->bh_workqueue = NULL; + + pr_debug("[BH] unregistered.\n"); +} + +void cw1200_irq_handler(struct cw1200_common *priv) +{ + pr_debug("[BH] irq.\n"); + + /* Disable Interrupts! */ + /* NOTE: hwbus_ops->lock already held */ + __cw1200_irq_enable(priv, 0); + + if (/* WARN_ON */(priv->bh_error)) + return; + + if (atomic_add_return(1, &priv->bh_rx) == 1) + wake_up(&priv->bh_wq); +} +EXPORT_SYMBOL_GPL(cw1200_irq_handler); + +void cw1200_bh_wakeup(struct cw1200_common *priv) +{ + pr_debug("[BH] wakeup.\n"); + if (priv->bh_error) { + pr_err("[BH] wakeup failed (BH error)\n"); + return; + } + + if (atomic_add_return(1, &priv->bh_tx) == 1) + wake_up(&priv->bh_wq); +} + +int cw1200_bh_suspend(struct cw1200_common *priv) +{ + pr_debug("[BH] suspend.\n"); + if (priv->bh_error) { + wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n"); + return -EINVAL; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND); + wake_up(&priv->bh_wq); + return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || + (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)), + 1 * HZ) ? 0 : -ETIMEDOUT; +} + +int cw1200_bh_resume(struct cw1200_common *priv) +{ + pr_debug("[BH] resume.\n"); + if (priv->bh_error) { + wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n"); + return -EINVAL; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_RESUME); + wake_up(&priv->bh_wq); + return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || + (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)), + 1 * HZ) ? 
0 : -ETIMEDOUT; +} + +static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv) +{ + ++priv->hw_bufs_used; +} + +int wsm_release_tx_buffer(struct cw1200_common *priv, int count) +{ + int ret = 0; + int hw_bufs_used = priv->hw_bufs_used; + + priv->hw_bufs_used -= count; + if (WARN_ON(priv->hw_bufs_used < 0)) + ret = -1; + else if (hw_bufs_used >= priv->wsm_caps.input_buffers) + ret = 1; + if (!priv->hw_bufs_used) + wake_up(&priv->bh_evt_wq); + return ret; +} + +static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv, + u16 *ctrl_reg) +{ + int ret; + + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, ctrl_reg); + if (ret) { + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, ctrl_reg); + if (ret) + pr_err("[BH] Failed to read control register.\n"); + } + + return ret; +} + +static int cw1200_device_wakeup(struct cw1200_common *priv) +{ + u16 ctrl_reg; + int ret; + + pr_debug("[BH] Device wakeup.\n"); + + /* First, set the dpll register */ + ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, + cw1200_dpll_from_clk(priv->hw_refclk)); + if (WARN_ON(ret)) + return ret; + + /* To force the device to be always-on, the host sets WLAN_UP to 1 */ + ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, + ST90TDS_CONT_WUP_BIT); + if (WARN_ON(ret)) + return ret; + + ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg); + if (WARN_ON(ret)) + return ret; + + /* If the device returns WLAN_RDY as 1, the device is active and will + * remain active. */ + if (ctrl_reg & ST90TDS_CONT_RDY_BIT) { + pr_debug("[BH] Device awake.\n"); + return 1; + } + + return 0; +} + +/* Must be called from BH thraed. */ +void cw1200_enable_powersave(struct cw1200_common *priv, + bool enable) +{ + pr_debug("[BH] Powerave is %s.\n", + enable ? "enabled" : "disabled"); + priv->powersave_enabled = enable; +} + +static int cw1200_bh_rx_helper(struct cw1200_common *priv, + uint16_t *ctrl_reg, + int *tx) +{ + size_t read_len = 0; + struct sk_buff *skb_rx = NULL; + struct wsm_hdr *wsm; + size_t wsm_len; + u16 wsm_id; + u8 wsm_seq; + int rx_resync = 1; + + size_t alloc_len; + u8 *data; + + read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2; + if (!read_len) + return 0; /* No more work */ + + if (WARN_ON((read_len < sizeof(struct wsm_hdr)) || + (read_len > EFFECTIVE_BUF_SIZE))) { + pr_debug("Invalid read len: %zu (%04x)", + read_len, *ctrl_reg); + goto err; + } + + /* Add SIZE of PIGGYBACK reg (CONTROL Reg) + * to the NEXT Message length + 2 Bytes for SKB */ + read_len = read_len + 2; + + alloc_len = priv->hwbus_ops->align_size( + priv->hwbus_priv, read_len); + + /* Check if not exceeding CW1200 capabilities */ + if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) { + pr_debug("Read aligned len: %zu\n", + alloc_len); + } + + skb_rx = dev_alloc_skb(alloc_len); + if (WARN_ON(!skb_rx)) + goto err; + + skb_trim(skb_rx, 0); + skb_put(skb_rx, read_len); + data = skb_rx->data; + if (WARN_ON(!data)) + goto err; + + if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) { + pr_err("rx blew up, len %zu\n", alloc_len); + goto err; + } + + /* Piggyback */ + *ctrl_reg = __le16_to_cpu( + ((__le16 *)data)[alloc_len / 2 - 1]); + + wsm = (struct wsm_hdr *)data; + wsm_len = __le16_to_cpu(wsm->len); + if (WARN_ON(wsm_len > read_len)) + goto err; + + if (priv->wsm_enable_wsm_dumps) + print_hex_dump_bytes("<-- ", + DUMP_PREFIX_NONE, + data, wsm_len); + + wsm_id = __le16_to_cpu(wsm->id) & 0xFFF; + wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7; + + skb_trim(skb_rx, wsm_len); + + if (wsm_id == 0x0800) { + 
wsm_handle_exception(priv, + &data[sizeof(*wsm)], + wsm_len - sizeof(*wsm)); + goto err; + } else if (!rx_resync) { + if (WARN_ON(wsm_seq != priv->wsm_rx_seq)) + goto err; + } + priv->wsm_rx_seq = (wsm_seq + 1) & 7; + rx_resync = 0; + + if (wsm_id & 0x0400) { + int rc = wsm_release_tx_buffer(priv, 1); + if (WARN_ON(rc < 0)) + return rc; + else if (rc > 0) + *tx = 1; + } + + /* cw1200_wsm_rx takes care on SKB livetime */ + if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx))) + goto err; + + if (skb_rx) { + dev_kfree_skb(skb_rx); + skb_rx = NULL; + } + + return 0; + +err: + if (skb_rx) { + dev_kfree_skb(skb_rx); + skb_rx = NULL; + } + return -1; +} + +static int cw1200_bh_tx_helper(struct cw1200_common *priv, + int *pending_tx, + int *tx_burst) +{ + size_t tx_len; + u8 *data; + int ret; + struct wsm_hdr *wsm; + + if (priv->device_can_sleep) { + ret = cw1200_device_wakeup(priv); + if (WARN_ON(ret < 0)) { /* Error in wakeup */ + *pending_tx = 1; + return 0; + } else if (ret) { /* Woke up */ + priv->device_can_sleep = false; + } else { /* Did not awake */ + *pending_tx = 1; + return 0; + } + } + + wsm_alloc_tx_buffer(priv); + ret = wsm_get_tx(priv, &data, &tx_len, tx_burst); + if (ret <= 0) { + wsm_release_tx_buffer(priv, 1); + if (WARN_ON(ret < 0)) + return ret; /* Error */ + return 0; /* No work */ + } + + wsm = (struct wsm_hdr *)data; + BUG_ON(tx_len < sizeof(*wsm)); + BUG_ON(__le16_to_cpu(wsm->len) != tx_len); + + atomic_add(1, &priv->bh_tx); + + tx_len = priv->hwbus_ops->align_size( + priv->hwbus_priv, tx_len); + + /* Check if not exceeding CW1200 capabilities */ + if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE)) + pr_debug("Write aligned len: %zu\n", tx_len); + + wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX)); + wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq)); + + if (WARN_ON(cw1200_data_write(priv, data, tx_len))) { + pr_err("tx blew up, len %zu\n", tx_len); + wsm_release_tx_buffer(priv, 1); + return -1; /* Error */ + } + + if (priv->wsm_enable_wsm_dumps) + print_hex_dump_bytes("--> ", + DUMP_PREFIX_NONE, + data, + __le16_to_cpu(wsm->len)); + + wsm_txed(priv, data); + priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX; + + if (*tx_burst > 1) { + cw1200_debug_tx_burst(priv); + return 1; /* Work remains */ + } + + return 0; +} + +static int cw1200_bh(void *arg) +{ + struct cw1200_common *priv = arg; + int rx, tx, term, suspend; + u16 ctrl_reg = 0; + int tx_allowed; + int pending_tx = 0; + int tx_burst; + long status; + u32 dummy; + int ret; + + for (;;) { + if (!priv->hw_bufs_used && + priv->powersave_enabled && + !priv->device_can_sleep && + !atomic_read(&priv->recent_scan)) { + status = 1 * HZ; + pr_debug("[BH] Device wakedown. No data.\n"); + cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } else if (priv->hw_bufs_used) { + /* Interrupt loss detection */ + status = 1 * HZ; + } else { + status = MAX_SCHEDULE_TIMEOUT; + } + + /* Dummy Read for SDIO retry mechanism*/ + if ((priv->hw_type != -1) && + (atomic_read(&priv->bh_rx) == 0) && + (atomic_read(&priv->bh_tx) == 0)) + cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID, + &dummy, sizeof(dummy)); + + pr_debug("[BH] waiting ...\n"); + status = wait_event_interruptible_timeout(priv->bh_wq, ({ + rx = atomic_xchg(&priv->bh_rx, 0); + tx = atomic_xchg(&priv->bh_tx, 0); + term = atomic_xchg(&priv->bh_term, 0); + suspend = pending_tx ? 
+ 0 : atomic_read(&priv->bh_suspend); + (rx || tx || term || suspend || priv->bh_error); + }), status); + + pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n", + rx, tx, term, suspend, status); + + /* Did an error occur? */ + if ((status < 0 && status != -ERESTARTSYS) || + term || priv->bh_error) { + break; + } + if (!status) { /* wait_event timed out */ + unsigned long timestamp = jiffies; + long timeout; + int pending = 0; + int i; + + /* Check to see if we have any outstanding frames */ + if (priv->hw_bufs_used && (!rx || !tx)) { + wiphy_warn(priv->hw->wiphy, + "Missed interrupt? (%d frames outstanding)\n", + priv->hw_bufs_used); + rx = 1; + + /* Get a timestamp of "oldest" frame */ + for (i = 0; i < 4; ++i) + pending += cw1200_queue_get_xmit_timestamp( + &priv->tx_queue[i], + ×tamp, + priv->pending_frame_id); + + /* Check if frame transmission is timed out. + * Add an extra second with respect to possible + * interrupt loss. + */ + timeout = timestamp + + WSM_CMD_LAST_CHANCE_TIMEOUT + + 1 * HZ - + jiffies; + + /* And terminate BH thread if the frame is "stuck" */ + if (pending && timeout < 0) { + wiphy_warn(priv->hw->wiphy, + "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n", + priv->hw_bufs_used, pending, + timestamp, jiffies); + break; + } + } else if (!priv->device_can_sleep && + !atomic_read(&priv->recent_scan)) { + pr_debug("[BH] Device wakedown. Timeout.\n"); + cw1200_reg_write_16(priv, + ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } + goto done; + } else if (suspend) { + pr_debug("[BH] Device suspend.\n"); + if (priv->powersave_enabled) { + pr_debug("[BH] Device wakedown. Suspend.\n"); + cw1200_reg_write_16(priv, + ST90TDS_CONTROL_REG_ID, 0); + priv->device_can_sleep = true; + } + + atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED); + wake_up(&priv->bh_evt_wq); + status = wait_event_interruptible(priv->bh_wq, + CW1200_BH_RESUME == atomic_read(&priv->bh_suspend)); + if (status < 0) { + wiphy_err(priv->hw->wiphy, + "Failed to wait for resume: %ld.\n", + status); + break; + } + pr_debug("[BH] Device resume.\n"); + atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); + wake_up(&priv->bh_evt_wq); + atomic_add(1, &priv->bh_rx); + goto done; + } + + rx: + tx += pending_tx; + pending_tx = 0; + + if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) + break; + + /* Don't bother trying to rx unless we have data to read */ + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { + ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); + if (ret < 0) + break; + /* Double up here if there's more data.. */ + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { + ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); + if (ret < 0) + break; + } + } + + tx: + if (tx) { + tx = 0; + + BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers); + tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used; + tx_allowed = tx_burst > 0; + + if (!tx_allowed) { + /* Buffers full. Ensure we process tx + * after we handle rx.. 
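+				 * (the rx path releases hardware buffers, so
+				 * tx may become possible again afterwards).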
+ */ + pending_tx = tx; + goto done_rx; + } + ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst); + if (ret < 0) + break; + if (ret > 0) /* More to transmit */ + tx = ret; + + /* Re-read ctrl reg */ + if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) + break; + } + + done_rx: + if (priv->bh_error) + break; + if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) + goto rx; + if (tx) + goto tx; + + done: + /* Re-enable device interrupts */ + priv->hwbus_ops->lock(priv->hwbus_priv); + __cw1200_irq_enable(priv, 1); + priv->hwbus_ops->unlock(priv->hwbus_priv); + } + + /* Explicitly disable device interrupts */ + priv->hwbus_ops->lock(priv->hwbus_priv); + __cw1200_irq_enable(priv, 0); + priv->hwbus_ops->unlock(priv->hwbus_priv); + + if (!term) { + pr_err("[BH] Fatal error, exiting.\n"); + priv->bh_error = 1; + /* TODO: schedule_work(recovery) */ + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/bh.h b/drivers/net/wireless/cw1200/bh.h new file mode 100644 index 000000000000..af6a4853728f --- /dev/null +++ b/drivers/net/wireless/cw1200/bh.h @@ -0,0 +1,28 @@ +/* + * Device handling thread interface for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_BH_H +#define CW1200_BH_H + +/* extern */ struct cw1200_common; + +int cw1200_register_bh(struct cw1200_common *priv); +void cw1200_unregister_bh(struct cw1200_common *priv); +void cw1200_irq_handler(struct cw1200_common *priv); +void cw1200_bh_wakeup(struct cw1200_common *priv); +int cw1200_bh_suspend(struct cw1200_common *priv); +int cw1200_bh_resume(struct cw1200_common *priv); +/* Must be called from BH thread. */ +void cw1200_enable_powersave(struct cw1200_common *priv, + bool enable); +int wsm_release_tx_buffer(struct cw1200_common *priv, int count); + +#endif /* CW1200_BH_H */ diff --git a/drivers/net/wireless/cw1200/cw1200.h b/drivers/net/wireless/cw1200/cw1200.h new file mode 100644 index 000000000000..95320f2b25eb --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200.h @@ -0,0 +1,331 @@ +/* + * Common private data for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on the mac80211 Prism54 code, which is + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef CW1200_H +#define CW1200_H + +#include <linux/wait.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <net/mac80211.h> + +#include "queue.h" +#include "wsm.h" +#include "scan.h" +#include "txrx.h" +#include "pm.h" + +/* Forward declarations */ +struct hwbus_ops; +struct task_struct; +struct cw1200_debug_priv; +struct firmware; + +#ifdef CONFIG_CW1200_ETF +extern int etf_mode; +extern char *etf_firmware; +#endif + +#define CW1200_MAX_CTRL_FRAME_LEN (0x1000) + +#define CW1200_MAX_STA_IN_AP_MODE (5) +#define CW1200_LINK_ID_AFTER_DTIM (CW1200_MAX_STA_IN_AP_MODE + 1) +#define CW1200_LINK_ID_UAPSD (CW1200_MAX_STA_IN_AP_MODE + 2) +#define CW1200_LINK_ID_MAX (CW1200_MAX_STA_IN_AP_MODE + 3) +#define CW1200_MAX_REQUEUE_ATTEMPTS (5) + +#define CW1200_MAX_TID (8) + +#define CW1200_BLOCK_ACK_CNT (30) +#define CW1200_BLOCK_ACK_THLD (800) +#define CW1200_BLOCK_ACK_HIST (3) +#define CW1200_BLOCK_ACK_INTERVAL (1 * HZ / CW1200_BLOCK_ACK_HIST) + +#define CW1200_JOIN_TIMEOUT (1 * HZ) +#define CW1200_AUTH_TIMEOUT (5 * HZ) + +struct cw1200_ht_info { + struct ieee80211_sta_ht_cap ht_cap; + enum nl80211_channel_type channel_type; + u16 operation_mode; +}; + +/* Please keep order */ +enum cw1200_join_status { + CW1200_JOIN_STATUS_PASSIVE = 0, + CW1200_JOIN_STATUS_MONITOR, + CW1200_JOIN_STATUS_JOINING, + CW1200_JOIN_STATUS_PRE_STA, + CW1200_JOIN_STATUS_STA, + CW1200_JOIN_STATUS_IBSS, + CW1200_JOIN_STATUS_AP, +}; + +enum cw1200_link_status { + CW1200_LINK_OFF, + CW1200_LINK_RESERVE, + CW1200_LINK_SOFT, + CW1200_LINK_HARD, + CW1200_LINK_RESET, + CW1200_LINK_RESET_REMAP, +}; + +extern int cw1200_power_mode; +extern const char * const cw1200_fw_types[]; + +struct cw1200_link_entry { + unsigned long timestamp; + enum cw1200_link_status status; + enum cw1200_link_status prev_status; + u8 mac[ETH_ALEN]; + u8 buffered[CW1200_MAX_TID]; + struct sk_buff_head rx_queue; +}; + +struct cw1200_common { + /* interfaces to the rest of the stack */ + struct ieee80211_hw *hw; + struct ieee80211_vif *vif; + struct device *pdev; + + /* Statistics */ + struct ieee80211_low_level_stats stats; + + /* Our macaddr */ + u8 mac_addr[ETH_ALEN]; + + /* Hardware interface */ + const struct hwbus_ops *hwbus_ops; + struct hwbus_priv *hwbus_priv; + + /* Hardware information */ + enum { + HIF_9000_SILICON_VERSATILE = 0, + HIF_8601_VERSATILE, + HIF_8601_SILICON, + } hw_type; + enum { + CW1200_HW_REV_CUT10 = 10, + CW1200_HW_REV_CUT11 = 11, + CW1200_HW_REV_CUT20 = 20, + CW1200_HW_REV_CUT22 = 22, + CW1X60_HW_REV = 40, + } hw_revision; + int hw_refclk; + bool hw_have_5ghz; + const struct firmware *sdd; + char *sdd_path; + + struct cw1200_debug_priv *debug; + + struct workqueue_struct *workqueue; + struct mutex conf_mutex; + + struct cw1200_queue tx_queue[4]; + struct cw1200_queue_stats tx_queue_stats; + int tx_burst_idx; + + /* firmware/hardware info */ + unsigned int tx_hdr_len; + + /* Radio data */ + int output_power; + + /* BBP/MAC state */ + struct ieee80211_rate *rates; + struct ieee80211_rate *mcs_rates; + struct ieee80211_channel *channel; + struct wsm_edca_params edca; + struct wsm_tx_queue_params tx_queue_params; + struct wsm_mib_association_mode association_mode; + struct wsm_set_bss_params bss_params; + struct cw1200_ht_info ht_info; + struct wsm_set_pm powersave_mode; + struct wsm_set_pm firmware_ps_mode; + int cqm_rssi_thold; + unsigned cqm_rssi_hyst; + bool cqm_use_rssi; + int cqm_beacon_loss_count; + int channel_switch_in_progress; + wait_queue_head_t channel_switch_done; + u8 long_frame_max_tx_count; + u8 
short_frame_max_tx_count; + int mode; + bool enable_beacon; + int beacon_int; + bool listening; + struct wsm_rx_filter rx_filter; + struct wsm_mib_multicast_filter multicast_filter; + bool has_multicast_subscription; + bool disable_beacon_filter; + struct work_struct update_filtering_work; + struct work_struct set_beacon_wakeup_period_work; + + u8 ba_rx_tid_mask; + u8 ba_tx_tid_mask; + + struct cw1200_pm_state pm_state; + + struct wsm_p2p_ps_modeinfo p2p_ps_modeinfo; + struct wsm_uapsd_info uapsd_info; + bool setbssparams_done; + bool bt_present; + u8 conf_listen_interval; + u32 listen_interval; + u32 erp_info; + u32 rts_threshold; + + /* BH */ + atomic_t bh_rx; + atomic_t bh_tx; + atomic_t bh_term; + atomic_t bh_suspend; + + struct workqueue_struct *bh_workqueue; + struct work_struct bh_work; + + int bh_error; + wait_queue_head_t bh_wq; + wait_queue_head_t bh_evt_wq; + u8 buf_id_tx; + u8 buf_id_rx; + u8 wsm_rx_seq; + u8 wsm_tx_seq; + int hw_bufs_used; + bool powersave_enabled; + bool device_can_sleep; + + /* Scan status */ + struct cw1200_scan scan; + /* Keep cw1200 awake (WUP = 1) 1 second after each scan to avoid + * FW issue with sleeping/waking up. */ + atomic_t recent_scan; + struct delayed_work clear_recent_scan_work; + + /* WSM */ + struct wsm_startup_ind wsm_caps; + struct mutex wsm_cmd_mux; + struct wsm_buf wsm_cmd_buf; + struct wsm_cmd wsm_cmd; + wait_queue_head_t wsm_cmd_wq; + wait_queue_head_t wsm_startup_done; + int firmware_ready; + atomic_t tx_lock; + + /* WSM debug */ + int wsm_enable_wsm_dumps; + + /* WSM Join */ + enum cw1200_join_status join_status; + u32 pending_frame_id; + bool join_pending; + struct delayed_work join_timeout; + struct work_struct unjoin_work; + struct work_struct join_complete_work; + int join_complete_status; + int join_dtim_period; + bool delayed_unjoin; + + /* TX/RX and security */ + s8 wep_default_key_id; + struct work_struct wep_key_work; + u32 key_map; + struct wsm_add_key keys[WSM_KEY_MAX_INDEX + 1]; + + /* AP powersave */ + u32 link_id_map; + struct cw1200_link_entry link_id_db[CW1200_MAX_STA_IN_AP_MODE]; + struct work_struct link_id_work; + struct delayed_work link_id_gc_work; + u32 sta_asleep_mask; + u32 pspoll_mask; + bool aid0_bit_set; + spinlock_t ps_state_lock; /* Protect power save state */ + bool buffered_multicasts; + bool tx_multicast; + struct work_struct set_tim_work; + struct work_struct set_cts_work; + struct work_struct multicast_start_work; + struct work_struct multicast_stop_work; + struct timer_list mcast_timeout; + + /* WSM events and CQM implementation */ + spinlock_t event_queue_lock; /* Protect event queue */ + struct list_head event_queue; + struct work_struct event_handler; + + struct delayed_work bss_loss_work; + spinlock_t bss_loss_lock; /* Protect BSS loss state */ + int bss_loss_state; + int bss_loss_confirm_id; + int delayed_link_loss; + struct work_struct bss_params_work; + + /* TX rate policy cache */ + struct tx_policy_cache tx_policy_cache; + struct work_struct tx_policy_upload_work; + + /* legacy PS mode switch in suspend */ + int ps_mode_switch_in_progress; + wait_queue_head_t ps_mode_switch_done; + + /* Workaround for WFD testcase 6.1.10*/ + struct work_struct linkid_reset_work; + u8 action_frame_sa[ETH_ALEN]; + u8 action_linkid; + +#ifdef CONFIG_CW1200_ETF + struct sk_buff_head etf_q; +#endif +}; + +struct cw1200_sta_priv { + int link_id; +}; + +/* interfaces for the drivers */ +int cw1200_core_probe(const struct hwbus_ops *hwbus_ops, + struct hwbus_priv *hwbus, + struct device *pdev, + struct 
cw1200_common **pself, + int ref_clk, const u8 *macaddr, + const char *sdd_path, bool have_5ghz); +void cw1200_core_release(struct cw1200_common *self); + +#define FWLOAD_BLOCK_SIZE (1024) + +static inline int cw1200_is_ht(const struct cw1200_ht_info *ht_info) +{ + return ht_info->channel_type != NL80211_CHAN_NO_HT; +} + +static inline int cw1200_ht_greenfield(const struct cw1200_ht_info *ht_info) +{ + return cw1200_is_ht(ht_info) && + (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + !(ht_info->operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); +} + +static inline int cw1200_ht_ampdu_density(const struct cw1200_ht_info *ht_info) +{ + if (!cw1200_is_ht(ht_info)) + return 0; + return ht_info->ht_cap.ampdu_density; +} + +#endif /* CW1200_H */ diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c new file mode 100644 index 000000000000..bb1f405315e4 --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200_sdio.c @@ -0,0 +1,424 @@ +/* + * Mac80211 SDIO driver for ST-Ericsson CW1200 device + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio.h> +#include <net/mac80211.h> + +#include "cw1200.h" +#include "hwbus.h" +#include <linux/platform_data/net-cw1200.h> +#include "hwio.h" + +MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>"); +MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SDIO driver"); +MODULE_LICENSE("GPL"); + +#define SDIO_BLOCK_SIZE (512) + +/* Default platform data for Sagrad modules */ +static struct cw1200_platform_data_sdio sagrad_109x_evk_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, + .sdd_file = "sdd_sagrad_1091_1098.bin", +}; + +/* Allow platform data to be overridden */ +static struct cw1200_platform_data_sdio *global_plat_data = &sagrad_109x_evk_platform_data; + +void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata) +{ + global_plat_data = pdata; +} + +struct hwbus_priv { + struct sdio_func *func; + struct cw1200_common *core; + const struct cw1200_platform_data_sdio *pdata; +}; + +#ifndef SDIO_VENDOR_ID_STE +#define SDIO_VENDOR_ID_STE 0x0020 +#endif + +#ifndef SDIO_DEVICE_ID_STE_CW1200 +#define SDIO_DEVICE_ID_STE_CW1200 0x2280 +#endif + +static const struct sdio_device_id cw1200_sdio_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) }, + { /* end: all zeroes */ }, +}; + +/* hwbus_ops implemetation */ + +static int cw1200_sdio_memcpy_fromio(struct hwbus_priv *self, + unsigned int addr, + void *dst, int count) +{ + return sdio_memcpy_fromio(self->func, dst, addr, count); +} + +static int cw1200_sdio_memcpy_toio(struct hwbus_priv *self, + unsigned int addr, + const void *src, int count) +{ + return sdio_memcpy_toio(self->func, addr, (void *)src, count); +} + +static void cw1200_sdio_lock(struct hwbus_priv *self) +{ + sdio_claim_host(self->func); +} + +static void cw1200_sdio_unlock(struct hwbus_priv *self) +{ + sdio_release_host(self->func); +} + +static void cw1200_sdio_irq_handler(struct sdio_func *func) +{ + struct hwbus_priv *self = sdio_get_drvdata(func); + + /* note: sdio_host already claimed here. 
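+	 * The SDIO core claims the host around its irq dispatch, so the
+	 * handler may touch the bus here without claiming it again.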
*/ + if (self->core) + cw1200_irq_handler(self->core); +} + +static irqreturn_t cw1200_gpio_hardirq(int irq, void *dev_id) +{ + return IRQ_WAKE_THREAD; +} + +static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id) +{ + struct hwbus_priv *self = dev_id; + + if (self->core) { + sdio_claim_host(self->func); + cw1200_irq_handler(self->core); + sdio_release_host(self->func); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + +static int cw1200_request_irq(struct hwbus_priv *self) +{ + int ret; + u8 cccr; + + cccr = sdio_f0_readb(self->func, SDIO_CCCR_IENx, &ret); + if (WARN_ON(ret)) + goto err; + + /* Master interrupt enable ... */ + cccr |= BIT(0); + + /* ... for our function */ + cccr |= BIT(self->func->num); + + sdio_f0_writeb(self->func, cccr, SDIO_CCCR_IENx, &ret); + if (WARN_ON(ret)) + goto err; + + ret = enable_irq_wake(self->pdata->irq); + if (WARN_ON(ret)) + goto err; + + /* Request the IRQ */ + ret = request_threaded_irq(self->pdata->irq, cw1200_gpio_hardirq, + cw1200_gpio_irq, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "cw1200_wlan_irq", self); + if (WARN_ON(ret)) + goto err; + + return 0; + +err: + return ret; +} + +static int cw1200_sdio_irq_subscribe(struct hwbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ subscribe\n"); + sdio_claim_host(self->func); + if (self->pdata->irq) + ret = cw1200_request_irq(self); + else + ret = sdio_claim_irq(self->func, cw1200_sdio_irq_handler); + + sdio_release_host(self->func); + return ret; +} + +static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ unsubscribe\n"); + + if (self->pdata->irq) { + disable_irq_wake(self->pdata->irq); + free_irq(self->pdata->irq, self); + } else { + sdio_claim_host(self->func); + ret = sdio_release_irq(self->func); + sdio_release_host(self->func); + } + return ret; +} + +static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata) +{ + if (pdata->reset) { + gpio_set_value(pdata->reset, 0); + msleep(30); /* Min is 2 * CLK32K cycles */ + gpio_free(pdata->reset); + } + + if (pdata->power_ctrl) + pdata->power_ctrl(pdata, false); + if (pdata->clk_ctrl) + pdata->clk_ctrl(pdata, false); + + return 0; +} + +static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata) +{ + /* Ensure I/Os are pulled low */ + if (pdata->reset) { + gpio_request(pdata->reset, "cw1200_wlan_reset"); + gpio_direction_output(pdata->reset, 0); + } + if (pdata->powerup) { + gpio_request(pdata->powerup, "cw1200_wlan_powerup"); + gpio_direction_output(pdata->powerup, 0); + } + if (pdata->reset || pdata->powerup) + msleep(10); /* Settle time? */ + + /* Enable 3v3 and 1v8 to hardware */ + if (pdata->power_ctrl) { + if (pdata->power_ctrl(pdata, true)) { + pr_err("power_ctrl() failed!\n"); + return -1; + } + } + + /* Enable CLK32K */ + if (pdata->clk_ctrl) { + if (pdata->clk_ctrl(pdata, true)) { + pr_err("clk_ctrl() failed!\n"); + return -1; + } + msleep(10); /* Delay until clock is stable for 2 cycles */ + } + + /* Enable POWERUP signal */ + if (pdata->powerup) { + gpio_set_value(pdata->powerup, 1); + msleep(250); /* or more..? */ + } + /* Enable RSTn signal */ + if (pdata->reset) { + gpio_set_value(pdata->reset, 1); + msleep(50); /* Or more..? 
*/ + } + return 0; +} + +static size_t cw1200_sdio_align_size(struct hwbus_priv *self, size_t size) +{ + if (self->pdata->no_nptb) + size = round_up(size, SDIO_BLOCK_SIZE); + else + size = sdio_align_size(self->func, size); + + return size; +} + +static int cw1200_sdio_pm(struct hwbus_priv *self, bool suspend) +{ + int ret = 0; + + if (self->pdata->irq) + ret = irq_set_irq_wake(self->pdata->irq, suspend); + return ret; +} + +static struct hwbus_ops cw1200_sdio_hwbus_ops = { + .hwbus_memcpy_fromio = cw1200_sdio_memcpy_fromio, + .hwbus_memcpy_toio = cw1200_sdio_memcpy_toio, + .lock = cw1200_sdio_lock, + .unlock = cw1200_sdio_unlock, + .align_size = cw1200_sdio_align_size, + .power_mgmt = cw1200_sdio_pm, +}; + +/* Probe Function to be called by SDIO stack when device is discovered */ +static int cw1200_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct hwbus_priv *self; + int status; + + pr_info("cw1200_wlan_sdio: Probe called\n"); + + /* We are only able to handle the wlan function */ + if (func->num != 0x01) + return -ENODEV; + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + pr_err("Can't allocate SDIO hwbus_priv.\n"); + return -ENOMEM; + } + + func->card->quirks |= MMC_QUIRK_LENIENT_FN0; + + self->pdata = global_plat_data; /* FIXME */ + self->func = func; + sdio_set_drvdata(func, self); + sdio_claim_host(func); + sdio_enable_func(func); + sdio_release_host(func); + + status = cw1200_sdio_irq_subscribe(self); + + status = cw1200_core_probe(&cw1200_sdio_hwbus_ops, + self, &func->dev, &self->core, + self->pdata->ref_clk, + self->pdata->macaddr, + self->pdata->sdd_file, + self->pdata->have_5ghz); + if (status) { + cw1200_sdio_irq_unsubscribe(self); + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + sdio_set_drvdata(func, NULL); + kfree(self); + } + + return status; +} + +/* Disconnect Function to be called by SDIO stack when + * device is disconnected */ +static void cw1200_sdio_disconnect(struct sdio_func *func) +{ + struct hwbus_priv *self = sdio_get_drvdata(func); + + if (self) { + cw1200_sdio_irq_unsubscribe(self); + if (self->core) { + cw1200_core_release(self->core); + self->core = NULL; + } + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + sdio_set_drvdata(func, NULL); + kfree(self); + } +} + +#ifdef CONFIG_PM +static int cw1200_sdio_suspend(struct device *dev) +{ + int ret; + struct sdio_func *func = dev_to_sdio_func(dev); + struct hwbus_priv *self = sdio_get_drvdata(func); + + if (!cw1200_can_suspend(self->core)) + return -EAGAIN; + + /* Notify SDIO that CW1200 will remain powered during suspend */ + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) + pr_err("Error setting SDIO pm flags: %i\n", ret); + + return ret; +} + +static int cw1200_sdio_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops cw1200_pm_ops = { + .suspend = cw1200_sdio_suspend, + .resume = cw1200_sdio_resume, +}; +#endif + +static struct sdio_driver sdio_driver = { + .name = "cw1200_wlan_sdio", + .id_table = cw1200_sdio_ids, + .probe = cw1200_sdio_probe, + .remove = cw1200_sdio_disconnect, +#ifdef CONFIG_PM + .drv = { + .pm = &cw1200_pm_ops, + } +#endif +}; + +/* Init Module function -> Called by insmod */ +static int __init cw1200_sdio_init(void) +{ + const struct cw1200_platform_data_sdio *pdata; + int ret; + + /* FIXME -- this won't support multiple devices */ + pdata = global_plat_data; + + if (cw1200_sdio_on(pdata)) { + ret = -1; + goto err; + } + + ret = 
sdio_register_driver(&sdio_driver); + if (ret) + goto err; + + return 0; + +err: + cw1200_sdio_off(pdata); + return ret; +} + +/* Called at Driver Unloading */ +static void __exit cw1200_sdio_exit(void) +{ + const struct cw1200_platform_data_sdio *pdata; + + /* FIXME -- this won't support multiple devices */ + pdata = global_plat_data; + sdio_unregister_driver(&sdio_driver); + cw1200_sdio_off(pdata); +} + + +module_init(cw1200_sdio_init); +module_exit(cw1200_sdio_exit); diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c new file mode 100644 index 000000000000..e58f0a5bafa9 --- /dev/null +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -0,0 +1,465 @@ +/* + * Mac80211 SPI driver for ST-Ericsson CW1200 device + * + * Copyright (c) 2011, Sagrad Inc. + * Author: Solomon Peachy <speachy@sagrad.com> + * + * Based on cw1200_sdio.c + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <net/mac80211.h> + +#include <linux/spi/spi.h> +#include <linux/device.h> + +#include "cw1200.h" +#include "hwbus.h" +#include <linux/platform_data/net-cw1200.h> +#include "hwio.h" + +MODULE_AUTHOR("Solomon Peachy <speachy@sagrad.com>"); +MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SPI driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:cw1200_wlan_spi"); + +/* #define SPI_DEBUG */ + +struct hwbus_priv { + struct spi_device *func; + struct cw1200_common *core; + const struct cw1200_platform_data_spi *pdata; + spinlock_t lock; /* Serialize all bus operations */ + int claimed; +}; + +#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) +#define SET_WRITE 0x7FFF /* usage: and operation */ +#define SET_READ 0x8000 /* usage: or operation */ + +/* + Notes on byte ordering: + LE: B0 B1 B2 B3 + BE: B3 B2 B1 B0 + + Hardware expects 32-bit data to be written as 16-bit BE words: + + B1 B0 B3 B2 + +*/ + +static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self, + unsigned int addr, + void *dst, int count) +{ + int ret, i; + uint16_t regaddr; + struct spi_message m; + + struct spi_transfer t_addr = { + .tx_buf = ®addr, + .len = sizeof(regaddr), + }; + struct spi_transfer t_msg = { + .rx_buf = dst, + .len = count, + }; + + regaddr = (SDIO_TO_SPI_ADDR(addr))<<12; + regaddr |= SET_READ; + regaddr |= (count>>1); + regaddr = cpu_to_le16(regaddr); + +#ifdef SPI_DEBUG + pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, + le16_to_cpu(regaddr)); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + regaddr = swab16(regaddr); + + spi_message_init(&m); + spi_message_add_tail(&t_addr, &m); + spi_message_add_tail(&t_msg, &m); + ret = spi_sync(self->func, &m); + +#ifdef SPI_DEBUG + pr_info("READ : "); + for (i = 0; i < t_addr.len; i++) + printk("%02x ", ((u8 *)t_addr.tx_buf)[i]); + printk(" : "); + for (i = 0; i < t_msg.len; i++) + printk("%02x ", ((u8 *)t_msg.rx_buf)[i]); + printk("\n"); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)dst; + for (i = 0; i < 
((count + 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + + return ret; +} + +static int cw1200_spi_memcpy_toio(struct hwbus_priv *self, + unsigned int addr, + const void *src, int count) +{ + int rval, i; + uint16_t regaddr; + struct spi_transfer t_addr = { + .tx_buf = ®addr, + .len = sizeof(regaddr), + }; + struct spi_transfer t_msg = { + .tx_buf = src, + .len = count, + }; + struct spi_message m; + + regaddr = (SDIO_TO_SPI_ADDR(addr))<<12; + regaddr &= SET_WRITE; + regaddr |= (count>>1); + regaddr = cpu_to_le16(regaddr); + +#ifdef SPI_DEBUG + pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr, + le16_to_cpu(regaddr)); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)src; + regaddr = swab16(regaddr); + for (i = 0; i < ((count + 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + +#ifdef SPI_DEBUG + pr_info("WRITE: "); + for (i = 0; i < t_addr.len; i++) + printk("%02x ", ((u8 *)t_addr.tx_buf)[i]); + printk(" : "); + for (i = 0; i < t_msg.len; i++) + printk("%02x ", ((u8 *)t_msg.tx_buf)[i]); + printk("\n"); +#endif + + spi_message_init(&m); + spi_message_add_tail(&t_addr, &m); + spi_message_add_tail(&t_msg, &m); + rval = spi_sync(self->func, &m); + +#ifdef SPI_DEBUG + pr_info("WROTE: %d\n", m.actual_length); +#endif + +#if defined(__LITTLE_ENDIAN) + /* We have to byteswap if the SPI bus is limited to 8b operation */ + if (self->func->bits_per_word == 8) +#endif + { + uint16_t *buf = (uint16_t *)src; + for (i = 0; i < ((count + 1) >> 1); i++) + buf[i] = swab16(buf[i]); + } + return rval; +} + +static void cw1200_spi_lock(struct hwbus_priv *self) +{ + unsigned long flags; + + might_sleep(); + + spin_lock_irqsave(&self->lock, flags); + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (!self->claimed) + break; + spin_unlock_irqrestore(&self->lock, flags); + schedule(); + spin_lock_irqsave(&self->lock, flags); + } + set_current_state(TASK_RUNNING); + self->claimed = 1; + spin_unlock_irqrestore(&self->lock, flags); + + return; +} + +static void cw1200_spi_unlock(struct hwbus_priv *self) +{ + unsigned long flags; + + spin_lock_irqsave(&self->lock, flags); + self->claimed = 0; + spin_unlock_irqrestore(&self->lock, flags); + return; +} + +static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id) +{ + struct hwbus_priv *self = dev_id; + + if (self->core) { + cw1200_irq_handler(self->core); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + +static int cw1200_spi_irq_subscribe(struct hwbus_priv *self) +{ + int ret; + + pr_debug("SW IRQ subscribe\n"); + + ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, + IRQF_TRIGGER_HIGH, + "cw1200_wlan_irq", self); + if (WARN_ON(ret < 0)) + goto exit; + + ret = enable_irq_wake(self->func->irq); + if (WARN_ON(ret)) + goto free_irq; + + return 0; + +free_irq: + free_irq(self->func->irq, self); +exit: + return ret; +} + +static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) +{ + int ret = 0; + + pr_debug("SW IRQ unsubscribe\n"); + disable_irq_wake(self->func->irq); + free_irq(self->func->irq, self); + + return ret; +} + +static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) +{ + if (pdata->reset) { + gpio_set_value(pdata->reset, 0); + msleep(30); /* Min is 2 * CLK32K cycles */ + gpio_free(pdata->reset); + } + + if (pdata->power_ctrl) + pdata->power_ctrl(pdata, false); + if (pdata->clk_ctrl) + pdata->clk_ctrl(pdata, false); + + return 0; +} + +static int 
cw1200_spi_on(const struct cw1200_platform_data_spi *pdata) +{ + /* Ensure I/Os are pulled low */ + if (pdata->reset) { + gpio_request(pdata->reset, "cw1200_wlan_reset"); + gpio_direction_output(pdata->reset, 0); + } + if (pdata->powerup) { + gpio_request(pdata->powerup, "cw1200_wlan_powerup"); + gpio_direction_output(pdata->powerup, 0); + } + if (pdata->reset || pdata->powerup) + msleep(10); /* Settle time? */ + + /* Enable 3v3 and 1v8 to hardware */ + if (pdata->power_ctrl) { + if (pdata->power_ctrl(pdata, true)) { + pr_err("power_ctrl() failed!\n"); + return -1; + } + } + + /* Enable CLK32K */ + if (pdata->clk_ctrl) { + if (pdata->clk_ctrl(pdata, true)) { + pr_err("clk_ctrl() failed!\n"); + return -1; + } + msleep(10); /* Delay until clock is stable for 2 cycles */ + } + + /* Enable POWERUP signal */ + if (pdata->powerup) { + gpio_set_value(pdata->powerup, 1); + msleep(250); /* or more..? */ + } + /* Enable RSTn signal */ + if (pdata->reset) { + gpio_set_value(pdata->reset, 1); + msleep(50); /* Or more..? */ + } + return 0; +} + +static size_t cw1200_spi_align_size(struct hwbus_priv *self, size_t size) +{ + return size & 1 ? size + 1 : size; +} + +static int cw1200_spi_pm(struct hwbus_priv *self, bool suspend) +{ + return irq_set_irq_wake(self->func->irq, suspend); +} + +static struct hwbus_ops cw1200_spi_hwbus_ops = { + .hwbus_memcpy_fromio = cw1200_spi_memcpy_fromio, + .hwbus_memcpy_toio = cw1200_spi_memcpy_toio, + .lock = cw1200_spi_lock, + .unlock = cw1200_spi_unlock, + .align_size = cw1200_spi_align_size, + .power_mgmt = cw1200_spi_pm, +}; + +/* Probe Function to be called by SPI stack when device is discovered */ +static int cw1200_spi_probe(struct spi_device *func) +{ + const struct cw1200_platform_data_spi *plat_data = + func->dev.platform_data; + struct hwbus_priv *self; + int status; + + /* Sanity check speed */ + if (func->max_speed_hz > 52000000) + func->max_speed_hz = 52000000; + if (func->max_speed_hz < 1000000) + func->max_speed_hz = 1000000; + + /* Fix up transfer size */ + if (plat_data->spi_bits_per_word) + func->bits_per_word = plat_data->spi_bits_per_word; + if (!func->bits_per_word) + func->bits_per_word = 16; + + /* And finally.. 
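+	 * pin the transfer mode: the link is always driven in SPI mode 0.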
*/ + func->mode = SPI_MODE_0; + + pr_info("cw1200_wlan_spi: Probe called (CS %d M %d BPW %d CLK %d)\n", + func->chip_select, func->mode, func->bits_per_word, + func->max_speed_hz); + + if (cw1200_spi_on(plat_data)) { + pr_err("spi_on() failed!\n"); + return -1; + } + + if (spi_setup(func)) { + pr_err("spi_setup() failed!\n"); + return -1; + } + + self = kzalloc(sizeof(*self), GFP_KERNEL); + if (!self) { + pr_err("Can't allocate SPI hwbus_priv."); + return -ENOMEM; + } + + self->pdata = plat_data; + self->func = func; + spin_lock_init(&self->lock); + + spi_set_drvdata(func, self); + + status = cw1200_spi_irq_subscribe(self); + + status = cw1200_core_probe(&cw1200_spi_hwbus_ops, + self, &func->dev, &self->core, + self->pdata->ref_clk, + self->pdata->macaddr, + self->pdata->sdd_file, + self->pdata->have_5ghz); + + if (status) { + cw1200_spi_irq_unsubscribe(self); + cw1200_spi_off(plat_data); + kfree(self); + } + + return status; +} + +/* Disconnect Function to be called by SPI stack when device is disconnected */ +static int cw1200_spi_disconnect(struct spi_device *func) +{ + struct hwbus_priv *self = spi_get_drvdata(func); + + if (self) { + cw1200_spi_irq_unsubscribe(self); + if (self->core) { + cw1200_core_release(self->core); + self->core = NULL; + } + kfree(self); + } + cw1200_spi_off(func->dev.platform_data); + + return 0; +} + +#ifdef CONFIG_PM +static int cw1200_spi_suspend(struct device *dev, pm_message_t state) +{ + struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev)); + + if (!cw1200_can_suspend(self->core)) + return -EAGAIN; + + /* XXX notify host that we have to keep CW1200 powered on? */ + return 0; +} + +static int cw1200_spi_resume(struct device *dev) +{ + return 0; +} +#endif + +static struct spi_driver spi_driver = { + .probe = cw1200_spi_probe, + .remove = cw1200_spi_disconnect, + .driver = { + .name = "cw1200_wlan_spi", + .bus = &spi_bus_type, + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .suspend = cw1200_spi_suspend, + .resume = cw1200_spi_resume, +#endif + }, +}; + +module_spi_driver(spi_driver); diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c new file mode 100644 index 000000000000..eb40c9c61a51 --- /dev/null +++ b/drivers/net/wireless/cw1200/debug.c @@ -0,0 +1,658 @@ +/* + * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers + * DebugFS code + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include "cw1200.h" +#include "debug.h" +#include "fwio.h" + +/* join_status */ +static const char * const cw1200_debug_join_status[] = { + "passive", + "monitor", + "station (joining)", + "station (not authenticated yet)", + "station", + "adhoc", + "access point", +}; + +/* WSM_JOIN_PREAMBLE_... 
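+ * (strings indexed by the WSM_JOIN_PREAMBLE_* preamble value)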
*/ +static const char * const cw1200_debug_preamble[] = { + "long", + "short", + "long on 1 and 2 Mbps", +}; + + +static const char * const cw1200_debug_link_id[] = { + "OFF", + "REQ", + "SOFT", + "HARD", +}; + +static const char *cw1200_debug_mode(int mode) +{ + switch (mode) { + case NL80211_IFTYPE_UNSPECIFIED: + return "unspecified"; + case NL80211_IFTYPE_MONITOR: + return "monitor"; + case NL80211_IFTYPE_STATION: + return "station"; + case NL80211_IFTYPE_ADHOC: + return "adhoc"; + case NL80211_IFTYPE_MESH_POINT: + return "mesh point"; + case NL80211_IFTYPE_AP: + return "access point"; + case NL80211_IFTYPE_P2P_CLIENT: + return "p2p client"; + case NL80211_IFTYPE_P2P_GO: + return "p2p go"; + default: + return "unsupported"; + } +} + +static void cw1200_queue_status_show(struct seq_file *seq, + struct cw1200_queue *q) +{ + int i; + seq_printf(seq, "Queue %d:\n", q->queue_id); + seq_printf(seq, " capacity: %zu\n", q->capacity); + seq_printf(seq, " queued: %zu\n", q->num_queued); + seq_printf(seq, " pending: %zu\n", q->num_pending); + seq_printf(seq, " sent: %zu\n", q->num_sent); + seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no"); + seq_printf(seq, " overfull: %s\n", q->overfull ? "yes" : "no"); + seq_puts(seq, " link map: 0-> "); + for (i = 0; i < q->stats->map_capacity; ++i) + seq_printf(seq, "%.2d ", q->link_map_cache[i]); + seq_printf(seq, "<-%zu\n", q->stats->map_capacity); +} + +static void cw1200_debug_print_map(struct seq_file *seq, + struct cw1200_common *priv, + const char *label, + u32 map) +{ + int i; + seq_printf(seq, "%s0-> ", label); + for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i) + seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : ".."); + seq_printf(seq, "<-%zu\n", priv->tx_queue_stats.map_capacity - 1); +} + +static int cw1200_status_show(struct seq_file *seq, void *v) +{ + int i; + struct list_head *item; + struct cw1200_common *priv = seq->private; + struct cw1200_debug_priv *d = priv->debug; + + seq_puts(seq, "CW1200 Wireless LAN driver status\n"); + seq_printf(seq, "Hardware: %d.%d\n", + priv->wsm_caps.hw_id, + priv->wsm_caps.hw_subid); + seq_printf(seq, "Firmware: %s %d.%d\n", + cw1200_fw_types[priv->wsm_caps.fw_type], + priv->wsm_caps.fw_ver, + priv->wsm_caps.fw_build); + seq_printf(seq, "FW API: %d\n", + priv->wsm_caps.fw_api); + seq_printf(seq, "FW caps: 0x%.4X\n", + priv->wsm_caps.fw_cap); + seq_printf(seq, "FW label: '%s'\n", + priv->wsm_caps.fw_label); + seq_printf(seq, "Mode: %s%s\n", + cw1200_debug_mode(priv->mode), + priv->listening ? " (listening)" : ""); + seq_printf(seq, "Join state: %s\n", + cw1200_debug_join_status[priv->join_status]); + if (priv->channel) + seq_printf(seq, "Channel: %d%s\n", + priv->channel->hw_value, + priv->channel_switch_in_progress ? + " (switching)" : ""); + if (priv->rx_filter.promiscuous) + seq_puts(seq, "Filter: promisc\n"); + else if (priv->rx_filter.fcs) + seq_puts(seq, "Filter: fcs\n"); + if (priv->rx_filter.bssid) + seq_puts(seq, "Filter: bssid\n"); + if (!priv->disable_beacon_filter) + seq_puts(seq, "Filter: beacons\n"); + + if (priv->enable_beacon || + priv->mode == NL80211_IFTYPE_AP || + priv->mode == NL80211_IFTYPE_ADHOC || + priv->mode == NL80211_IFTYPE_MESH_POINT || + priv->mode == NL80211_IFTYPE_P2P_GO) + seq_printf(seq, "Beaconing: %s\n", + priv->enable_beacon ? 
+ "enabled" : "disabled"); + + for (i = 0; i < 4; ++i) + seq_printf(seq, "EDCA(%d): %d, %d, %d, %d, %d\n", i, + priv->edca.params[i].cwmin, + priv->edca.params[i].cwmax, + priv->edca.params[i].aifns, + priv->edca.params[i].txop_limit, + priv->edca.params[i].max_rx_lifetime); + + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + static const char *pm_mode = "unknown"; + switch (priv->powersave_mode.mode) { + case WSM_PSM_ACTIVE: + pm_mode = "off"; + break; + case WSM_PSM_PS: + pm_mode = "on"; + break; + case WSM_PSM_FAST_PS: + pm_mode = "dynamic"; + break; + } + seq_printf(seq, "Preamble: %s\n", + cw1200_debug_preamble[priv->association_mode.preamble]); + seq_printf(seq, "AMPDU spcn: %d\n", + priv->association_mode.mpdu_start_spacing); + seq_printf(seq, "Basic rate: 0x%.8X\n", + le32_to_cpu(priv->association_mode.basic_rate_set)); + seq_printf(seq, "Bss lost: %d beacons\n", + priv->bss_params.beacon_lost_count); + seq_printf(seq, "AID: %d\n", + priv->bss_params.aid); + seq_printf(seq, "Rates: 0x%.8X\n", + priv->bss_params.operational_rate_set); + seq_printf(seq, "Powersave: %s\n", pm_mode); + } + seq_printf(seq, "HT: %s\n", + cw1200_is_ht(&priv->ht_info) ? "on" : "off"); + if (cw1200_is_ht(&priv->ht_info)) { + seq_printf(seq, "Greenfield: %s\n", + cw1200_ht_greenfield(&priv->ht_info) ? "yes" : "no"); + seq_printf(seq, "AMPDU dens: %d\n", + cw1200_ht_ampdu_density(&priv->ht_info)); + } + seq_printf(seq, "RSSI thold: %d\n", + priv->cqm_rssi_thold); + seq_printf(seq, "RSSI hyst: %d\n", + priv->cqm_rssi_hyst); + seq_printf(seq, "Long retr: %d\n", + priv->long_frame_max_tx_count); + seq_printf(seq, "Short retr: %d\n", + priv->short_frame_max_tx_count); + spin_lock_bh(&priv->tx_policy_cache.lock); + i = 0; + list_for_each(item, &priv->tx_policy_cache.used) + ++i; + spin_unlock_bh(&priv->tx_policy_cache.lock); + seq_printf(seq, "RC in use: %d\n", i); + + seq_puts(seq, "\n"); + for (i = 0; i < 4; ++i) { + cw1200_queue_status_show(seq, &priv->tx_queue[i]); + seq_puts(seq, "\n"); + } + + cw1200_debug_print_map(seq, priv, "Link map: ", + priv->link_id_map); + cw1200_debug_print_map(seq, priv, "Asleep map: ", + priv->sta_asleep_mask); + cw1200_debug_print_map(seq, priv, "PSPOLL map: ", + priv->pspoll_mask); + + seq_puts(seq, "\n"); + + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (priv->link_id_db[i].status) { + seq_printf(seq, "Link %d: %s, %pM\n", + i + 1, + cw1200_debug_link_id[priv->link_id_db[i].status], + priv->link_id_db[i].mac); + } + } + + seq_puts(seq, "\n"); + + seq_printf(seq, "BH status: %s\n", + atomic_read(&priv->bh_term) ? "terminated" : "alive"); + seq_printf(seq, "Pending RX: %d\n", + atomic_read(&priv->bh_rx)); + seq_printf(seq, "Pending TX: %d\n", + atomic_read(&priv->bh_tx)); + if (priv->bh_error) + seq_printf(seq, "BH errcode: %d\n", + priv->bh_error); + seq_printf(seq, "TX bufs: %d x %d bytes\n", + priv->wsm_caps.input_buffers, + priv->wsm_caps.input_buffer_size); + seq_printf(seq, "Used bufs: %d\n", + priv->hw_bufs_used); + seq_printf(seq, "Powermgmt: %s\n", + priv->powersave_enabled ? "on" : "off"); + seq_printf(seq, "Device: %s\n", + priv->device_can_sleep ? "asleep" : "awake"); + + spin_lock(&priv->wsm_cmd.lock); + seq_printf(seq, "WSM status: %s\n", + priv->wsm_cmd.done ? "idle" : "active"); + seq_printf(seq, "WSM cmd: 0x%.4X (%td bytes)\n", + priv->wsm_cmd.cmd, priv->wsm_cmd.len); + seq_printf(seq, "WSM retval: %d\n", + priv->wsm_cmd.ret); + spin_unlock(&priv->wsm_cmd.lock); + + seq_printf(seq, "Datapath: %s\n", + atomic_read(&priv->tx_lock) ? 
"locked" : "unlocked"); + if (atomic_read(&priv->tx_lock)) + seq_printf(seq, "TXlock cnt: %d\n", + atomic_read(&priv->tx_lock)); + + seq_printf(seq, "TXed: %d\n", + d->tx); + seq_printf(seq, "AGG TXed: %d\n", + d->tx_agg); + seq_printf(seq, "MULTI TXed: %d (%d)\n", + d->tx_multi, d->tx_multi_frames); + seq_printf(seq, "RXed: %d\n", + d->rx); + seq_printf(seq, "AGG RXed: %d\n", + d->rx_agg); + seq_printf(seq, "TX miss: %d\n", + d->tx_cache_miss); + seq_printf(seq, "TX align: %d\n", + d->tx_align); + seq_printf(seq, "TX burst: %d\n", + d->tx_burst); + seq_printf(seq, "TX TTL: %d\n", + d->tx_ttl); + seq_printf(seq, "Scan: %s\n", + atomic_read(&priv->scan.in_progress) ? "active" : "idle"); + + return 0; +} + +static int cw1200_status_open(struct inode *inode, struct file *file) +{ + return single_open(file, &cw1200_status_show, + inode->i_private); +} + +static const struct file_operations fops_status = { + .open = cw1200_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int cw1200_counters_show(struct seq_file *seq, void *v) +{ + int ret; + struct cw1200_common *priv = seq->private; + struct wsm_mib_counters_table counters; + + ret = wsm_get_counters_table(priv, &counters); + if (ret) + return ret; + +#define PUT_COUNTER(tab, name) \ + seq_printf(seq, "%s:" tab "%d\n", #name, \ + __le32_to_cpu(counters.name)) + + PUT_COUNTER("\t\t", plcp_errors); + PUT_COUNTER("\t\t", fcs_errors); + PUT_COUNTER("\t\t", tx_packets); + PUT_COUNTER("\t\t", rx_packets); + PUT_COUNTER("\t\t", rx_packet_errors); + PUT_COUNTER("\t", rx_decryption_failures); + PUT_COUNTER("\t\t", rx_mic_failures); + PUT_COUNTER("\t", rx_no_key_failures); + PUT_COUNTER("\t", tx_multicast_frames); + PUT_COUNTER("\t", tx_frames_success); + PUT_COUNTER("\t", tx_frame_failures); + PUT_COUNTER("\t", tx_frames_retried); + PUT_COUNTER("\t", tx_frames_multi_retried); + PUT_COUNTER("\t", rx_frame_duplicates); + PUT_COUNTER("\t\t", rts_success); + PUT_COUNTER("\t\t", rts_failures); + PUT_COUNTER("\t\t", ack_failures); + PUT_COUNTER("\t", rx_multicast_frames); + PUT_COUNTER("\t", rx_frames_success); + PUT_COUNTER("\t", rx_cmac_icv_errors); + PUT_COUNTER("\t\t", rx_cmac_replays); + PUT_COUNTER("\t", rx_mgmt_ccmp_replays); + +#undef PUT_COUNTER + + return 0; +} + +static int cw1200_counters_open(struct inode *inode, struct file *file) +{ + return single_open(file, &cw1200_counters_show, + inode->i_private); +} + +static const struct file_operations fops_counters = { + .open = cw1200_counters_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +#ifdef CONFIG_CW1200_ETF +static int cw1200_etf_out_show(struct seq_file *seq, void *v) +{ + struct cw1200_common *priv = seq->private; + struct sk_buff *skb; + u32 len = 0; + + skb = skb_dequeue(&priv->etf_q); + + if (skb) + len = skb->len; + + seq_write(seq, &len, sizeof(len)); + + if (skb) { + seq_write(seq, skb->data, len); + kfree_skb(skb); + } + + return 0; +} + +static int cw1200_etf_out_open(struct inode *inode, struct file *file) +{ + return single_open(file, &cw1200_etf_out_show, + inode->i_private); +} + +static const struct file_operations fops_etf_out = { + .open = cw1200_etf_out_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +struct etf_req_msg; +static int etf_request(struct cw1200_common *priv, + struct etf_req_msg *msg, u32 len); + +#define MAX_RX_SZE 2600 + +struct etf_in_state { + struct cw1200_common 
*priv; + u32 total_len; + u8 buf[MAX_RX_SZE]; + u32 written; +}; + +static int cw1200_etf_in_open(struct inode *inode, struct file *file) +{ + struct etf_in_state *etf = kmalloc(sizeof(struct etf_in_state), + GFP_KERNEL); + + if (!etf) + return -ENOMEM; + + etf->written = 0; + etf->total_len = 0; + etf->priv = inode->i_private; + + file->private_data = etf; + + return 0; +} + +static int cw1200_etf_in_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static ssize_t cw1200_etf_in_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct etf_in_state *etf = file->private_data; + + ssize_t written = 0; + + if (!etf->total_len) { + if (count < sizeof(etf->total_len)) { + pr_err("count < sizeof(total_len)\n"); + return -EINVAL; + } + + if (copy_from_user(&etf->total_len, user_buf, + sizeof(etf->total_len))) { + pr_err("copy_from_user (len) failed\n"); + return -EFAULT; + } + + written += sizeof(etf->total_len); + count -= sizeof(etf->total_len); + } + + if (!count) + goto done; + + if (copy_from_user(etf->buf + etf->written, user_buf + written, + count)) { + pr_err("copy_from_user (payload %zu) failed\n", count); + return -EFAULT; + } + + written += count; + etf->written += count; + + if (etf->written >= etf->total_len) { + if (etf_request(etf->priv, (struct etf_req_msg *)etf->buf, + etf->total_len)) { + pr_err("etf_request failed\n"); + return -EIO; + } + } + +done: + return written; +} + +static const struct file_operations fops_etf_in = { + .open = cw1200_etf_in_open, + .release = cw1200_etf_in_release, + .write = cw1200_etf_in_write, + .llseek = default_llseek, + .owner = THIS_MODULE, +}; +#endif /* CONFIG_CW1200_ETF */ + +static ssize_t cw1200_wsm_dumps(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct cw1200_common *priv = file->private_data; + char buf[1]; + + if (!count) + return -EINVAL; + if (copy_from_user(buf, user_buf, 1)) + return -EFAULT; + + if (buf[0] == '1') + priv->wsm_enable_wsm_dumps = 1; + else + priv->wsm_enable_wsm_dumps = 0; + + return count; +} + +static const struct file_operations fops_wsm_dumps = { + .open = simple_open, + .write = cw1200_wsm_dumps, + .llseek = default_llseek, +}; + +int cw1200_debug_init(struct cw1200_common *priv) +{ + int ret = -ENOMEM; + struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv), + GFP_KERNEL); + priv->debug = d; + if (!d) + return ret; + + d->debugfs_phy = debugfs_create_dir("cw1200", + priv->hw->wiphy->debugfsdir); + if (!d->debugfs_phy) + goto err; + + if (!debugfs_create_file("status", S_IRUSR, d->debugfs_phy, + priv, &fops_status)) + goto err; + + if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy, + priv, &fops_counters)) + goto err; + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) { + skb_queue_head_init(&priv->etf_q); + + if (!debugfs_create_file("etf_out", S_IRUSR, d->debugfs_phy, + priv, &fops_etf_out)) + goto err; + if (!debugfs_create_file("etf_in", S_IWUSR, d->debugfs_phy, + priv, &fops_etf_in)) + goto err; + } +#endif /* CONFIG_CW1200_ETF */ + + if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy, + priv, &fops_wsm_dumps)) + goto err; + + ret = cw1200_itp_init(priv); + if (ret) + goto err; + + return 0; + +err: + priv->debug = NULL; + debugfs_remove_recursive(d->debugfs_phy); + kfree(d); + return ret; +} + +void cw1200_debug_release(struct cw1200_common *priv) +{ + struct cw1200_debug_priv *d = priv->debug; + if (d) { + cw1200_itp_release(priv); + priv->debug = 
NULL; + kfree(d); + } +} + +#ifdef CONFIG_CW1200_ETF +struct cw1200_sdd { + u8 id; + u8 len; + u8 data[]; +}; + +struct etf_req_msg { + u32 id; + u32 len; + u8 data[]; +}; + +static int parse_sdd_file(struct cw1200_common *priv, u8 *data, u32 length) +{ + struct cw1200_sdd *ie; + + while (length > 0) { + ie = (struct cw1200_sdd *)data; + if (ie->id == SDD_REFERENCE_FREQUENCY_ELT_ID) { + priv->hw_refclk = cpu_to_le16(*((u16 *)ie->data)); + pr_info("Using Reference clock frequency %d KHz\n", + priv->hw_refclk); + break; + } + + length -= ie->len + sizeof(*ie); + data += ie->len + sizeof(*ie); + } + return 0; +} + +char *etf_firmware; + +#define ST90TDS_START_ADAPTER 0x09 /* Loads firmware too */ +#define ST90TDS_STOP_ADAPTER 0x0A +#define ST90TDS_CONFIG_ADAPTER 0x0E /* Send configuration params */ +#define ST90TDS_SBUS_READ 0x13 +#define ST90TDS_SBUS_WRITE 0x14 +#define ST90TDS_GET_DEVICE_OPTION 0x19 +#define ST90TDS_SET_DEVICE_OPTION 0x1A +#define ST90TDS_SEND_SDD 0x1D /* SDD File used to find DPLL */ + +#include "fwio.h" + +static int etf_request(struct cw1200_common *priv, + struct etf_req_msg *msg, + u32 len) +{ + int rval = -1; + switch (msg->id) { + case ST90TDS_START_ADAPTER: + etf_firmware = "cw1200_etf.bin"; + pr_info("ETF_START (len %d, '%s')\n", len, etf_firmware); + rval = cw1200_load_firmware(priv); + break; + case ST90TDS_STOP_ADAPTER: + pr_info("ETF_STOP (unhandled)\n"); + break; + case ST90TDS_SEND_SDD: + pr_info("ETF_SDD\n"); + rval = parse_sdd_file(priv, msg->data, msg->len); + break; + case ST90TDS_CONFIG_ADAPTER: + pr_info("ETF_CONFIG_ADAP (unhandled)\n"); + break; + case ST90TDS_SBUS_READ: + pr_info("ETF_SBUS_READ (unhandled)\n"); + break; + case ST90TDS_SBUS_WRITE: + pr_info("ETF_SBUS_WRITE (unhandled)\n"); + break; + case ST90TDS_SET_DEVICE_OPTION: + pr_info("ETF_SET_DEV_OPT (unhandled)\n"); + break; + default: + pr_info("ETF_PASSTHRU (0x%08x)\n", msg->id); + rval = wsm_raw_cmd(priv, (u8 *)msg, len); + break; + } + + return rval; +} +#endif /* CONFIG_CW1200_ETF */ diff --git a/drivers/net/wireless/cw1200/debug.h b/drivers/net/wireless/cw1200/debug.h new file mode 100644 index 000000000000..1fea5b29a819 --- /dev/null +++ b/drivers/net/wireless/cw1200/debug.h @@ -0,0 +1,98 @@ +/* + * DebugFS code for ST-Ericsson CW1200 mac80211 driver + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef CW1200_DEBUG_H_INCLUDED +#define CW1200_DEBUG_H_INCLUDED + +#include "itp.h" + +struct cw1200_debug_priv { + struct dentry *debugfs_phy; + int tx; + int tx_agg; + int rx; + int rx_agg; + int tx_multi; + int tx_multi_frames; + int tx_cache_miss; + int tx_align; + int tx_ttl; + int tx_burst; + int ba_cnt; + int ba_acc; + int ba_cnt_rx; + int ba_acc_rx; +#ifdef CONFIG_CW1200_ITP + struct cw1200_itp itp; +#endif /* CONFIG_CW1200_ITP */ +}; + +int cw1200_debug_init(struct cw1200_common *priv); +void cw1200_debug_release(struct cw1200_common *priv); + +static inline void cw1200_debug_txed(struct cw1200_common *priv) +{ + ++priv->debug->tx; +} + +static inline void cw1200_debug_txed_agg(struct cw1200_common *priv) +{ + ++priv->debug->tx_agg; +} + +static inline void cw1200_debug_txed_multi(struct cw1200_common *priv, + int count) +{ + ++priv->debug->tx_multi; + priv->debug->tx_multi_frames += count; +} + +static inline void cw1200_debug_rxed(struct cw1200_common *priv) +{ + ++priv->debug->rx; +} + +static inline void cw1200_debug_rxed_agg(struct cw1200_common *priv) +{ + ++priv->debug->rx_agg; +} + +static inline void cw1200_debug_tx_cache_miss(struct cw1200_common *priv) +{ + ++priv->debug->tx_cache_miss; +} + +static inline void cw1200_debug_tx_align(struct cw1200_common *priv) +{ + ++priv->debug->tx_align; +} + +static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv) +{ + ++priv->debug->tx_ttl; +} + +static inline void cw1200_debug_tx_burst(struct cw1200_common *priv) +{ + ++priv->debug->tx_burst; +} + +static inline void cw1200_debug_ba(struct cw1200_common *priv, + int ba_cnt, int ba_acc, + int ba_cnt_rx, int ba_acc_rx) +{ + priv->debug->ba_cnt = ba_cnt; + priv->debug->ba_acc = ba_acc; + priv->debug->ba_cnt_rx = ba_cnt_rx; + priv->debug->ba_acc_rx = ba_acc_rx; +} + +#endif /* CW1200_DEBUG_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c new file mode 100644 index 000000000000..427c9f24b94e --- /dev/null +++ b/drivers/net/wireless/cw1200/fwio.c @@ -0,0 +1,525 @@ +/* + * Firmware I/O code for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * ST-Ericsson UMAC CW1200 driver which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh <ajitpal.singh@stericsson.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/init.h> +#include <linux/vmalloc.h> +#include <linux/sched.h> +#include <linux/firmware.h> + +#include "cw1200.h" +#include "fwio.h" +#include "hwio.h" +#include "hwbus.h" +#include "bh.h" + +static int cw1200_get_hw_type(u32 config_reg_val, int *major_revision) +{ + int hw_type = -1; + u32 silicon_type = (config_reg_val >> 24) & 0x7; + u32 silicon_vers = (config_reg_val >> 31) & 0x1; + + switch (silicon_type) { + case 0x00: + *major_revision = 1; + hw_type = HIF_9000_SILICON_VERSATILE; + break; + case 0x01: + case 0x02: /* CW1x00 */ + case 0x04: /* CW1x60 */ + *major_revision = silicon_type; + if (silicon_vers) + hw_type = HIF_8601_VERSATILE; + else + hw_type = HIF_8601_SILICON; + break; + default: + break; + } + + return hw_type; +} + +static int cw1200_load_firmware_cw1200(struct cw1200_common *priv) +{ + int ret, block, num_blocks; + unsigned i; + u32 val32; + u32 put = 0, get = 0; + u8 *buf = NULL; + const char *fw_path; + const struct firmware *firmware = NULL; + + /* Macroses are local. */ +#define APB_WRITE(reg, val) \ + do { \ + ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define APB_READ(reg, val) \ + do { \ + ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define REG_WRITE(reg, val) \ + do { \ + ret = cw1200_reg_write_32(priv, (reg), (val)); \ + if (ret < 0) \ + goto error; \ + } while (0) +#define REG_READ(reg, val) \ + do { \ + ret = cw1200_reg_read_32(priv, (reg), &(val)); \ + if (ret < 0) \ + goto error; \ + } while (0) + + switch (priv->hw_revision) { + case CW1200_HW_REV_CUT10: + fw_path = FIRMWARE_CUT10; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_10; + break; + case CW1200_HW_REV_CUT11: + fw_path = FIRMWARE_CUT11; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_11; + break; + case CW1200_HW_REV_CUT20: + fw_path = FIRMWARE_CUT20; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_20; + break; + case CW1200_HW_REV_CUT22: + fw_path = FIRMWARE_CUT22; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_22; + break; + case CW1X60_HW_REV: + fw_path = FIRMWARE_CW1X60; + if (!priv->sdd_path) + priv->sdd_path = SDD_FILE_CW1X60; + break; + default: + pr_err("Invalid silicon revision %d.\n", priv->hw_revision); + return -EINVAL; + } + + /* Initialize common registers */ + APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, DOWNLOAD_ARE_YOU_HERE); + APB_WRITE(DOWNLOAD_PUT_REG, 0); + APB_WRITE(DOWNLOAD_GET_REG, 0); + APB_WRITE(DOWNLOAD_STATUS_REG, DOWNLOAD_PENDING); + APB_WRITE(DOWNLOAD_FLAGS_REG, 0); + + /* Write the NOP Instruction */ + REG_WRITE(ST90TDS_SRAM_BASE_ADDR_REG_ID, 0xFFF20000); + REG_WRITE(ST90TDS_AHB_DPORT_REG_ID, 0xEAFFFFFE); + + /* Release CPU from RESET */ + REG_READ(ST90TDS_CONFIG_REG_ID, val32); + val32 &= ~ST90TDS_CONFIG_CPU_RESET_BIT; + REG_WRITE(ST90TDS_CONFIG_REG_ID, val32); + + /* Enable Clock */ + val32 &= ~ST90TDS_CONFIG_CPU_CLK_DIS_BIT; + REG_WRITE(ST90TDS_CONFIG_REG_ID, val32); + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) + fw_path = etf_firmware; +#endif + + /* Load a firmware file */ + ret = request_firmware(&firmware, fw_path, priv->pdev); + if (ret) { + pr_err("Can't load firmware file %s.\n", fw_path); + goto error; + } + + buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); + if (!buf) { + pr_err("Can't allocate firmware load buffer.\n"); + ret = -ENOMEM; + goto error; + } + + /* Check if the bootloader is ready */ + for (i = 0; i < 100; i += 1 + i / 2) { + APB_READ(DOWNLOAD_IMAGE_SIZE_REG, val32); + if 
(val32 == DOWNLOAD_I_AM_HERE)
+			break;
+		mdelay(i);
+	} /* End of for loop */
+
+	if (val32 != DOWNLOAD_I_AM_HERE) {
+		pr_err("Bootloader is not ready.\n");
+		ret = -ETIMEDOUT;
+		goto error;
+	}
+
+	/* Calculate number of download blocks */
+	num_blocks = (firmware->size - 1) / DOWNLOAD_BLOCK_SIZE + 1;
+
+	/* Updating the length in Download Ctrl Area */
+	val32 = firmware->size; /* size_t to u32 conversion */
+	APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32);
+
+	/* Firmware downloading loop */
+	for (block = 0; block < num_blocks; block++) {
+		size_t tx_size;
+		size_t block_size;
+
+		/* check the download status */
+		APB_READ(DOWNLOAD_STATUS_REG, val32);
+		if (val32 != DOWNLOAD_PENDING) {
+			pr_err("Bootloader reported error %d.\n", val32);
+			ret = -EIO;
+			goto error;
+		}
+
+		/* loop until put - get <= (FIFO size - block size) */
+		for (i = 0; i < 100; i++) {
+			APB_READ(DOWNLOAD_GET_REG, get);
+			if ((put - get) <=
+			    (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE))
+				break;
+			mdelay(i);
+		}
+
+		if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) {
+			pr_err("Timeout waiting for FIFO.\n");
+			ret = -ETIMEDOUT;
+			goto error;
+		}
+
+		/* calculate the block size */
+		tx_size = block_size = min((size_t)(firmware->size - put),
+					   (size_t)DOWNLOAD_BLOCK_SIZE);
+
+		memcpy(buf, &firmware->data[put], block_size);
+		if (block_size < DOWNLOAD_BLOCK_SIZE) {
+			memset(&buf[block_size], 0,
+			       DOWNLOAD_BLOCK_SIZE - block_size);
+			tx_size = DOWNLOAD_BLOCK_SIZE;
+		}
+
+		/* send the block to sram */
+		ret = cw1200_apb_write(priv,
+			CW1200_APB(DOWNLOAD_FIFO_OFFSET +
+				   (put & (DOWNLOAD_FIFO_SIZE - 1))),
+			buf, tx_size);
+		if (ret < 0) {
+			pr_err("Can't write firmware block @ %d!\n",
+			       put & (DOWNLOAD_FIFO_SIZE - 1));
+			goto error;
+		}
+
+		/* update the put register */
+		put += block_size;
+		APB_WRITE(DOWNLOAD_PUT_REG, put);
+	} /* End of firmware download loop */
+
+	/* Wait for the download completion */
+	for (i = 0; i < 300; i += 1 + i / 2) {
+		APB_READ(DOWNLOAD_STATUS_REG, val32);
+		if (val32 != DOWNLOAD_PENDING)
+			break;
+		mdelay(i);
+	}
+	if (val32 != DOWNLOAD_SUCCESS) {
+		pr_err("Wait for download completion failed: 0x%.8X\n", val32);
+		ret = -ETIMEDOUT;
+		goto error;
+	} else {
+		pr_info("Firmware download completed.\n");
+		ret = 0;
+	}
+
+error:
+	kfree(buf);
+	if (firmware)
+		release_firmware(firmware);
+	return ret;
+
+#undef APB_WRITE
+#undef APB_READ
+#undef REG_WRITE
+#undef REG_READ
+}
+
+
+static int config_reg_read(struct cw1200_common *priv, u32 *val)
+{
+	switch (priv->hw_type) {
+	case HIF_9000_SILICON_VERSATILE: {
+		u16 val16;
+		int ret = cw1200_reg_read_16(priv,
+					     ST90TDS_CONFIG_REG_ID,
+					     &val16);
+		if (ret < 0)
+			return ret;
+		*val = val16;
+		return 0;
+	}
+	case HIF_8601_VERSATILE:
+	case HIF_8601_SILICON:
+	default:
+		cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, val);
+		break;
+	}
+	return 0;
+}
+
+static int config_reg_write(struct cw1200_common *priv, u32 val)
+{
+	switch (priv->hw_type) {
+	case HIF_9000_SILICON_VERSATILE:
+		return cw1200_reg_write_16(priv,
+					   ST90TDS_CONFIG_REG_ID,
+					   (u16)val);
+	case HIF_8601_VERSATILE:
+	case HIF_8601_SILICON:
+	default:
+		return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val);
+		break;
+	}
+	return 0;
+}
+
+int cw1200_load_firmware(struct cw1200_common *priv)
+{
+	int ret;
+	int i;
+	u32 val32;
+	u16 val16;
+	int major_revision = -1;
+
+	/* Read CONFIG Register */
+	ret = cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+	if (ret < 0) {
+		pr_err("Can't read config register.\n");
+		goto out;
+	}
+
+	if (val32 == 0 || val32 == 0xffffffff) {
+
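+		/* All-zeroes or all-ones here usually means the bus or the device is not responding */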
pr_err("Bad config register value (0x%08x)\n", val32); + ret = -EIO; + goto out; + } + + priv->hw_type = cw1200_get_hw_type(val32, &major_revision); + if (priv->hw_type < 0) { + pr_err("Can't deduce hardware type.\n"); + ret = -ENOTSUPP; + goto out; + } + + /* Set DPLL Reg value, and read back to confirm writes work */ + ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, + cw1200_dpll_from_clk(priv->hw_refclk)); + if (ret < 0) { + pr_err("Can't write DPLL register.\n"); + goto out; + } + + msleep(20); + + ret = cw1200_reg_read_32(priv, + ST90TDS_TSET_GEN_R_W_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read DPLL register.\n"); + goto out; + } + + if (val32 != cw1200_dpll_from_clk(priv->hw_refclk)) { + pr_err("Unable to initialise DPLL register. Wrote 0x%.8X, Read 0x%.8X.\n", + cw1200_dpll_from_clk(priv->hw_refclk), val32); + ret = -EIO; + goto out; + } + + /* Set wakeup bit in device */ + ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16); + if (ret < 0) { + pr_err("set_wakeup: can't read control register.\n"); + goto out; + } + + ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, + val16 | ST90TDS_CONT_WUP_BIT); + if (ret < 0) { + pr_err("set_wakeup: can't write control register.\n"); + goto out; + } + + /* Wait for wakeup */ + for (i = 0; i < 300; i += (1 + i / 2)) { + ret = cw1200_reg_read_16(priv, + ST90TDS_CONTROL_REG_ID, &val16); + if (ret < 0) { + pr_err("wait_for_wakeup: can't read control register.\n"); + goto out; + } + + if (val16 & ST90TDS_CONT_RDY_BIT) + break; + + msleep(i); + } + + if ((val16 & ST90TDS_CONT_RDY_BIT) == 0) { + pr_err("wait_for_wakeup: device is not responding.\n"); + ret = -ETIMEDOUT; + goto out; + } + + switch (major_revision) { + case 1: + /* CW1200 Hardware detection logic : Check for CUT1.1 */ + ret = cw1200_ahb_read_32(priv, CW1200_CUT_ID_ADDR, &val32); + if (ret) { + pr_err("HW detection: can't read CUT ID.\n"); + goto out; + } + + switch (val32) { + case CW1200_CUT_11_ID_STR: + pr_info("CW1x00 Cut 1.1 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT11; + break; + default: + pr_info("CW1x00 Cut 1.0 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT10; + break; + } + + /* According to ST-E, CUT<2.0 has busted BA TID0-3. + Just disable it entirely... 
+ */ + priv->ba_rx_tid_mask = 0; + priv->ba_tx_tid_mask = 0; + break; + case 2: { + u32 ar1, ar2, ar3; + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR, &ar1); + if (ret) { + pr_err("(1) HW detection: can't read CUT ID\n"); + goto out; + } + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 4, &ar2); + if (ret) { + pr_err("(2) HW detection: can't read CUT ID.\n"); + goto out; + } + + ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 8, &ar3); + if (ret) { + pr_err("(3) HW detection: can't read CUT ID.\n"); + goto out; + } + + if (ar1 == CW1200_CUT_22_ID_STR1 && + ar2 == CW1200_CUT_22_ID_STR2 && + ar3 == CW1200_CUT_22_ID_STR3) { + pr_info("CW1x00 Cut 2.2 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT22; + } else { + pr_info("CW1x00 Cut 2.0 silicon detected.\n"); + priv->hw_revision = CW1200_HW_REV_CUT20; + } + break; + } + case 4: + pr_info("CW1x60 silicon detected.\n"); + priv->hw_revision = CW1X60_HW_REV; + break; + default: + pr_err("Unsupported silicon major revision %d.\n", + major_revision); + ret = -ENOTSUPP; + goto out; + } + + /* Checking for access mode */ + ret = config_reg_read(priv, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto out; + } + + if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) { + pr_err("Device is already in QUEUE mode!\n"); + ret = -EINVAL; + goto out; + } + + switch (priv->hw_type) { + case HIF_8601_SILICON: + if (priv->hw_revision == CW1X60_HW_REV) { + pr_err("Can't handle CW1160/1260 firmware load yet.\n"); + ret = -ENOTSUPP; + goto out; + } + ret = cw1200_load_firmware_cw1200(priv); + break; + default: + pr_err("Can't perform firmware load for hw type %d.\n", + priv->hw_type); + ret = -ENOTSUPP; + goto out; + } + if (ret < 0) { + pr_err("Firmware load error.\n"); + goto out; + } + + /* Enable interrupt signalling */ + priv->hwbus_ops->lock(priv->hwbus_priv); + ret = __cw1200_irq_enable(priv, 1); + priv->hwbus_ops->unlock(priv->hwbus_priv); + if (ret < 0) + goto unsubscribe; + + /* Configure device for MESSSAGE MODE */ + ret = config_reg_read(priv, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto unsubscribe; + } + ret = config_reg_write(priv, val32 & ~ST90TDS_CONFIG_ACCESS_MODE_BIT); + if (ret < 0) { + pr_err("Can't write config register.\n"); + goto unsubscribe; + } + + /* Unless we read the CONFIG Register we are + * not able to get an interrupt + */ + mdelay(10); + config_reg_read(priv, &val32); + +out: + return ret; + +unsubscribe: + /* Disable interrupt signalling */ + priv->hwbus_ops->lock(priv->hwbus_priv); + ret = __cw1200_irq_enable(priv, 0); + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} diff --git a/drivers/net/wireless/cw1200/fwio.h b/drivers/net/wireless/cw1200/fwio.h new file mode 100644 index 000000000000..ea3099362cdf --- /dev/null +++ b/drivers/net/wireless/cw1200/fwio.h @@ -0,0 +1,39 @@ +/* + * Firmware API for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * ST-Ericsson UMAC CW1200 driver which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh <ajitpal.singh@stericsson.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef FWIO_H_INCLUDED +#define FWIO_H_INCLUDED + +#define BOOTLOADER_CW1X60 "boot_cw1x60.bin" +#define FIRMWARE_CW1X60 "wsm_cw1x60.bin" +#define FIRMWARE_CUT22 "wsm_22.bin" +#define FIRMWARE_CUT20 "wsm_20.bin" +#define FIRMWARE_CUT11 "wsm_11.bin" +#define FIRMWARE_CUT10 "wsm_10.bin" +#define SDD_FILE_CW1X60 "sdd_cw1x60.bin" +#define SDD_FILE_22 "sdd_22.bin" +#define SDD_FILE_20 "sdd_20.bin" +#define SDD_FILE_11 "sdd_11.bin" +#define SDD_FILE_10 "sdd_10.bin" + +int cw1200_load_firmware(struct cw1200_common *priv); + +/* SDD definitions */ +#define SDD_PTA_CFG_ELT_ID 0xEB +#define SDD_REFERENCE_FREQUENCY_ELT_ID 0xc5 +u32 cw1200_dpll_from_clk(u16 clk); + +#endif diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h new file mode 100644 index 000000000000..8b2fc831c3de --- /dev/null +++ b/drivers/net/wireless/cw1200/hwbus.h @@ -0,0 +1,33 @@ +/* + * Common hwbus abstraction layer interface for cw1200 wireless driver + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_HWBUS_H +#define CW1200_HWBUS_H + +struct hwbus_priv; + +void cw1200_irq_handler(struct cw1200_common *priv); + +/* This MUST be wrapped with hwbus_ops->lock/unlock! */ +int __cw1200_irq_enable(struct cw1200_common *priv, int enable); + +struct hwbus_ops { + int (*hwbus_memcpy_fromio)(struct hwbus_priv *self, unsigned int addr, + void *dst, int count); + int (*hwbus_memcpy_toio)(struct hwbus_priv *self, unsigned int addr, + const void *src, int count); + void (*lock)(struct hwbus_priv *self); + void (*unlock)(struct hwbus_priv *self); + size_t (*align_size)(struct hwbus_priv *self, size_t size); + int (*power_mgmt)(struct hwbus_priv *self, bool suspend); +}; + +#endif /* CW1200_HWBUS_H */ diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c new file mode 100644 index 000000000000..142f45efa204 --- /dev/null +++ b/drivers/net/wireless/cw1200/hwio.c @@ -0,0 +1,311 @@ +/* + * Low-level device IO routines for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * ST-Ericsson UMAC CW1200 driver, which is + * Copyright (c) 2010, ST-Ericsson + * Author: Ajitpal Singh <ajitpal.singh@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/types.h> + +#include "cw1200.h" +#include "hwio.h" +#include "hwbus.h" + + /* Sdio addr is 4*spi_addr */ +#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2) +#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \ + ((((buf_id) & 0x1F) << 7) \ + | (((mpf) & 1) << 6) \ + | (((rfu) & 1) << 5) \ + | (((reg_id_ofs) & 0x1F) << 0)) +#define MAX_RETRY 3 + + +static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr, + void *buf, size_t buf_len, int buf_id) +{ + u16 addr_sdio; + u32 sdio_reg_addr_17bit; + + /* Check if buffer is aligned to 4 byte boundary */ + if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) { + pr_err("buffer is not aligned.\n"); + return -EINVAL; + } + + /* Convert to SDIO Register Address */ + addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); + sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); + + return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv, + sdio_reg_addr_17bit, + buf, buf_len); +} + +static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr, + const void *buf, size_t buf_len, int buf_id) +{ + u16 addr_sdio; + u32 sdio_reg_addr_17bit; + + /* Convert to SDIO Register Address */ + addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); + sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); + + return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv, + sdio_reg_addr_17bit, + buf, buf_len); +} + +static inline int __cw1200_reg_read_32(struct cw1200_common *priv, + u16 addr, u32 *val) +{ + int i = __cw1200_reg_read(priv, addr, val, sizeof(*val), 0); + *val = le32_to_cpu(*val); + return i; +} + +static inline int __cw1200_reg_write_32(struct cw1200_common *priv, + u16 addr, u32 val) +{ + val = cpu_to_le32(val); + return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0); +} + +static inline int __cw1200_reg_read_16(struct cw1200_common *priv, + u16 addr, u16 *val) +{ + int i = __cw1200_reg_read(priv, addr, val, sizeof(*val), 0); + *val = le16_to_cpu(*val); + return i; +} + +static inline int __cw1200_reg_write_16(struct cw1200_common *priv, + u16 addr, u16 val) +{ + val = cpu_to_le16(val); + return __cw1200_reg_write(priv, addr, &val, sizeof(val), 0); +} + +int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf, + size_t buf_len) +{ + int ret; + priv->hwbus_ops->lock(priv->hwbus_priv); + ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0); + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf, + size_t buf_len) +{ + int ret; + priv->hwbus_ops->lock(priv->hwbus_priv); + ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0); + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len) +{ + int ret, retry = 1; + int buf_id_rx = priv->buf_id_rx; + + priv->hwbus_ops->lock(priv->hwbus_priv); + + while (retry <= MAX_RETRY) { + ret = __cw1200_reg_read(priv, + ST90TDS_IN_OUT_QUEUE_REG_ID, buf, + buf_len, buf_id_rx + 1); + if (!ret) { + buf_id_rx = (buf_id_rx + 1) & 3; + priv->buf_id_rx = buf_id_rx; + break; + } else { + retry++; + mdelay(1); + pr_err("error :[%d]\n", ret); + } + } + + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_data_write(struct cw1200_common *priv, const void *buf, + size_t buf_len) +{ + int ret, retry = 1; + int buf_id_tx = priv->buf_id_tx; + + priv->hwbus_ops->lock(priv->hwbus_priv); + + while (retry <= MAX_RETRY) { + ret = __cw1200_reg_write(priv, + ST90TDS_IN_OUT_QUEUE_REG_ID, buf, + 
buf_len, buf_id_tx); + if (!ret) { + buf_id_tx = (buf_id_tx + 1) & 31; + priv->buf_id_tx = buf_id_tx; + break; + } else { + retry++; + mdelay(1); + pr_err("error :[%d]\n", ret); + } + } + + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf, + size_t buf_len, u32 prefetch, u16 port_addr) +{ + u32 val32 = 0; + int i, ret; + + if ((buf_len / 2) >= 0x1000) { + pr_err("Can't read more than 0xfff words.\n"); + return -EINVAL; + goto out; + } + + priv->hwbus_ops->lock(priv->hwbus_priv); + /* Write address */ + ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); + if (ret < 0) { + pr_err("Can't write address register.\n"); + goto out; + } + + /* Read CONFIG Register Value - We will read 32 bits */ + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + goto out; + } + + /* Set PREFETCH bit */ + ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, + val32 | prefetch); + if (ret < 0) { + pr_err("Can't write prefetch bit.\n"); + goto out; + } + + /* Check for PRE-FETCH bit to be cleared */ + for (i = 0; i < 20; i++) { + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't check prefetch bit.\n"); + goto out; + } + if (!(val32 & prefetch)) + break; + + mdelay(i); + } + + if (val32 & prefetch) { + pr_err("Prefetch bit is not cleared.\n"); + goto out; + } + + /* Read data port */ + ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0); + if (ret < 0) { + pr_err("Can't read data port.\n"); + goto out; + } + +out: + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf, + size_t buf_len) +{ + int ret; + + if ((buf_len / 2) >= 0x1000) { + pr_err("Can't write more than 0xfff words.\n"); + return -EINVAL; + } + + priv->hwbus_ops->lock(priv->hwbus_priv); + + /* Write address */ + ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); + if (ret < 0) { + pr_err("Can't write address register.\n"); + goto out; + } + + /* Write data port */ + ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID, + buf, buf_len, 0); + if (ret < 0) { + pr_err("Can't write data port.\n"); + goto out; + } + +out: + priv->hwbus_ops->unlock(priv->hwbus_priv); + return ret; +} + +int __cw1200_irq_enable(struct cw1200_common *priv, int enable) +{ + u32 val32; + u16 val16; + int ret; + + if (HIF_8601_SILICON == priv->hw_type) { + ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); + if (ret < 0) { + pr_err("Can't read config register.\n"); + return ret; + } + + if (enable) + val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE; + else + val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE; + + ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32); + if (ret < 0) { + pr_err("Can't write config register.\n"); + return ret; + } + } else { + ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16); + if (ret < 0) { + pr_err("Can't read control register.\n"); + return ret; + } + + if (enable) + val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE; + else + val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE; + + ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16); + if (ret < 0) { + pr_err("Can't write control register.\n"); + return ret; + } + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/hwio.h b/drivers/net/wireless/cw1200/hwio.h new file mode 100644 index 000000000000..7ee73fe32ccf --- /dev/null +++ 
b/drivers/net/wireless/cw1200/hwio.h
@@ -0,0 +1,247 @@
+/*
+ * Low-level API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_HWIO_H_INCLUDED
+#define CW1200_HWIO_H_INCLUDED
+
+/* extern */ struct cw1200_common;
+
+#define CW1200_CUT_11_ID_STR	(0x302E3830)
+#define CW1200_CUT_22_ID_STR1	(0x302e3132)
+#define CW1200_CUT_22_ID_STR2	(0x32302e30)
+#define CW1200_CUT_22_ID_STR3	(0x3335)
+#define CW1200_CUT_ID_ADDR	(0xFFF17F90)
+#define CW1200_CUT2_ID_ADDR	(0xFFF1FF90)
+
+/* Download control area */
+/* boot loader start address in SRAM */
+#define DOWNLOAD_BOOT_LOADER_OFFSET	(0x00000000)
+/* 32K, 0x4000 to 0xDFFF */
+#define DOWNLOAD_FIFO_OFFSET		(0x00004000)
+/* 32K */
+#define DOWNLOAD_FIFO_SIZE		(0x00008000)
+/* 128 bytes, 0xFF80 to 0xFFFF */
+#define DOWNLOAD_CTRL_OFFSET		(0x0000FF80)
+#define DOWNLOAD_CTRL_DATA_DWORDS	(32-6)
+
+struct download_cntl_t {
+	/* size of whole firmware file (including Checksum), host init */
+	u32 image_size;
+	/* downloading flags */
+	u32 flags;
+	/* No. of bytes put into the download, init & updated by host */
+	u32 put;
+	/* last traced program counter, last ARM reg_pc */
+	u32 trace_pc;
+	/* No. of bytes read from the download, host init, device updates */
+	u32 get;
+	/* r0, boot loader status, host init to pending, device updates */
+	u32 status;
+	/* Extra debug info, r1 to r14 if status=r0=DOWNLOAD_EXCEPTION */
+	u32 debug_data[DOWNLOAD_CTRL_DATA_DWORDS];
+};
+
+#define DOWNLOAD_IMAGE_SIZE_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, image_size))
+#define DOWNLOAD_FLAGS_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, flags))
+#define DOWNLOAD_PUT_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, put))
+#define DOWNLOAD_TRACE_PC_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, trace_pc))
+#define DOWNLOAD_GET_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, get))
+#define DOWNLOAD_STATUS_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, status))
+#define DOWNLOAD_DEBUG_DATA_REG		\
+	(DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, debug_data))
+#define DOWNLOAD_DEBUG_DATA_LEN		(108)
+
+#define DOWNLOAD_BLOCK_SIZE		(1024)
+
+/* For boot loader detection */
+#define DOWNLOAD_ARE_YOU_HERE		(0x87654321)
+#define DOWNLOAD_I_AM_HERE		(0x12345678)
+
+/* Download error codes */
+#define DOWNLOAD_PENDING		(0xFFFFFFFF)
+#define DOWNLOAD_SUCCESS		(0)
+#define DOWNLOAD_EXCEPTION		(1)
+#define DOWNLOAD_ERR_MEM_1		(2)
+#define DOWNLOAD_ERR_MEM_2		(3)
+#define DOWNLOAD_ERR_SOFTWARE		(4)
+#define DOWNLOAD_ERR_FILE_SIZE		(5)
+#define DOWNLOAD_ERR_CHECKSUM		(6)
+#define DOWNLOAD_ERR_OVERFLOW		(7)
+#define DOWNLOAD_ERR_IMAGE		(8)
+#define DOWNLOAD_ERR_HOST		(9)
+#define DOWNLOAD_ERR_ABORT		(10)
+
+
+#define SYS_BASE_ADDR_SILICON		(0)
+#define PAC_BASE_ADDRESS_SILICON	(SYS_BASE_ADDR_SILICON + 0x09000000)
+#define PAC_SHARED_MEMORY_SILICON	(PAC_BASE_ADDRESS_SILICON)
+
+#define CW1200_APB(addr)		(PAC_SHARED_MEMORY_SILICON + (addr))
+
+/* ***************************************************************
+*Device register definitions
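+* (the ST90TDS_* IDs below are SPI register addresses; the SDIO
+*  address is the same value shifted left by two, see hwio.c)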
+*************************************************************** */ +/* WBF - SPI Register Addresses */ +#define ST90TDS_ADDR_ID_BASE (0x0000) +/* 16/32 bits */ +#define ST90TDS_CONFIG_REG_ID (0x0000) +/* 16/32 bits */ +#define ST90TDS_CONTROL_REG_ID (0x0001) +/* 16 bits, Q mode W/R */ +#define ST90TDS_IN_OUT_QUEUE_REG_ID (0x0002) +/* 32 bits, AHB bus R/W */ +#define ST90TDS_AHB_DPORT_REG_ID (0x0003) +/* 16/32 bits */ +#define ST90TDS_SRAM_BASE_ADDR_REG_ID (0x0004) +/* 32 bits, APB bus R/W */ +#define ST90TDS_SRAM_DPORT_REG_ID (0x0005) +/* 32 bits, t_settle/general */ +#define ST90TDS_TSET_GEN_R_W_REG_ID (0x0006) +/* 16 bits, Q mode read, no length */ +#define ST90TDS_FRAME_OUT_REG_ID (0x0007) +#define ST90TDS_ADDR_ID_MAX (ST90TDS_FRAME_OUT_REG_ID) + +/* WBF - Control register bit set */ +/* next o/p length, bit 11 to 0 */ +#define ST90TDS_CONT_NEXT_LEN_MASK (0x0FFF) +#define ST90TDS_CONT_WUP_BIT (BIT(12)) +#define ST90TDS_CONT_RDY_BIT (BIT(13)) +#define ST90TDS_CONT_IRQ_ENABLE (BIT(14)) +#define ST90TDS_CONT_RDY_ENABLE (BIT(15)) +#define ST90TDS_CONT_IRQ_RDY_ENABLE (BIT(14)|BIT(15)) + +/* SPI Config register bit set */ +#define ST90TDS_CONFIG_FRAME_BIT (BIT(2)) +#define ST90TDS_CONFIG_WORD_MODE_BITS (BIT(3)|BIT(4)) +#define ST90TDS_CONFIG_WORD_MODE_1 (BIT(3)) +#define ST90TDS_CONFIG_WORD_MODE_2 (BIT(4)) +#define ST90TDS_CONFIG_ERROR_0_BIT (BIT(5)) +#define ST90TDS_CONFIG_ERROR_1_BIT (BIT(6)) +#define ST90TDS_CONFIG_ERROR_2_BIT (BIT(7)) +/* TBD: Sure??? */ +#define ST90TDS_CONFIG_CSN_FRAME_BIT (BIT(7)) +#define ST90TDS_CONFIG_ERROR_3_BIT (BIT(8)) +#define ST90TDS_CONFIG_ERROR_4_BIT (BIT(9)) +/* QueueM */ +#define ST90TDS_CONFIG_ACCESS_MODE_BIT (BIT(10)) +/* AHB bus */ +#define ST90TDS_CONFIG_AHB_PRFETCH_BIT (BIT(11)) +#define ST90TDS_CONFIG_CPU_CLK_DIS_BIT (BIT(12)) +/* APB bus */ +#define ST90TDS_CONFIG_PRFETCH_BIT (BIT(13)) +/* cpu reset */ +#define ST90TDS_CONFIG_CPU_RESET_BIT (BIT(14)) +#define ST90TDS_CONFIG_CLEAR_INT_BIT (BIT(15)) + +/* For CW1200 the IRQ Enable and Ready Bits are in CONFIG register */ +#define ST90TDS_CONF_IRQ_ENABLE (BIT(16)) +#define ST90TDS_CONF_RDY_ENABLE (BIT(17)) +#define ST90TDS_CONF_IRQ_RDY_ENABLE (BIT(16)|BIT(17)) + +int cw1200_data_read(struct cw1200_common *priv, + void *buf, size_t buf_len); +int cw1200_data_write(struct cw1200_common *priv, + const void *buf, size_t buf_len); + +int cw1200_reg_read(struct cw1200_common *priv, u16 addr, + void *buf, size_t buf_len); +int cw1200_reg_write(struct cw1200_common *priv, u16 addr, + const void *buf, size_t buf_len); + +static inline int cw1200_reg_read_16(struct cw1200_common *priv, + u16 addr, u16 *val) +{ + u32 tmp; + int i; + i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp)); + tmp = le32_to_cpu(tmp); + *val = tmp & 0xffff; + return i; +} + +static inline int cw1200_reg_write_16(struct cw1200_common *priv, + u16 addr, u16 val) +{ + u32 tmp = val; + tmp = cpu_to_le32(tmp); + return cw1200_reg_write(priv, addr, &tmp, sizeof(tmp)); +} + +static inline int cw1200_reg_read_32(struct cw1200_common *priv, + u16 addr, u32 *val) +{ + int i = cw1200_reg_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +static inline int cw1200_reg_write_32(struct cw1200_common *priv, + u16 addr, u32 val) +{ + val = cpu_to_le32(val); + return cw1200_reg_write(priv, addr, &val, sizeof(val)); +} + +int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf, + size_t buf_len, u32 prefetch, u16 port_addr); +int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf, + size_t 
buf_len); + +static inline int cw1200_apb_read(struct cw1200_common *priv, u32 addr, + void *buf, size_t buf_len) +{ + return cw1200_indirect_read(priv, addr, buf, buf_len, + ST90TDS_CONFIG_PRFETCH_BIT, + ST90TDS_SRAM_DPORT_REG_ID); +} + +static inline int cw1200_ahb_read(struct cw1200_common *priv, u32 addr, + void *buf, size_t buf_len) +{ + return cw1200_indirect_read(priv, addr, buf, buf_len, + ST90TDS_CONFIG_AHB_PRFETCH_BIT, + ST90TDS_AHB_DPORT_REG_ID); +} + +static inline int cw1200_apb_read_32(struct cw1200_common *priv, + u32 addr, u32 *val) +{ + int i = cw1200_apb_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +static inline int cw1200_apb_write_32(struct cw1200_common *priv, + u32 addr, u32 val) +{ + val = cpu_to_le32(val); + return cw1200_apb_write(priv, addr, &val, sizeof(val)); +} +static inline int cw1200_ahb_read_32(struct cw1200_common *priv, + u32 addr, u32 *val) +{ + int i = cw1200_ahb_read(priv, addr, val, sizeof(*val)); + *val = le32_to_cpu(*val); + return i; +} + +#endif /* CW1200_HWIO_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/itp.c b/drivers/net/wireless/cw1200/itp.c new file mode 100644 index 000000000000..c0730bb49b75 --- /dev/null +++ b/drivers/net/wireless/cw1200/itp.c @@ -0,0 +1,730 @@ +/* + * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers + * ITP code + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/poll.h> +#include <linux/time.h> +#include <linux/random.h> +#include <linux/kallsyms.h> +#include <net/mac80211.h> +#include "cw1200.h" +#include "debug.h" +#include "itp.h" +#include "sta.h" + +static int __cw1200_itp_open(struct cw1200_common *priv); +static int __cw1200_itp_close(struct cw1200_common *priv); +static void cw1200_itp_rx_start(struct cw1200_common *priv); +static void cw1200_itp_rx_stop(struct cw1200_common *priv); +static void cw1200_itp_rx_stats(struct cw1200_common *priv); +static void cw1200_itp_rx_reset(struct cw1200_common *priv); +static void cw1200_itp_tx_stop(struct cw1200_common *priv); +static void cw1200_itp_handle(struct cw1200_common *priv, + struct sk_buff *skb); +static void cw1200_itp_err(struct cw1200_common *priv, + int err, + int arg); +static void __cw1200_itp_tx_stop(struct cw1200_common *priv); + +static ssize_t cw1200_itp_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + struct cw1200_common *priv = file->private_data; + struct cw1200_itp *itp = &priv->debug->itp; + struct sk_buff *skb; + int ret; + + if (skb_queue_empty(&itp->log_queue)) + return 0; + + skb = skb_dequeue(&itp->log_queue); + ret = copy_to_user(user_buf, skb->data, skb->len); + *ppos += skb->len; + skb->data[skb->len] = 0; + pr_debug("[ITP] >>> %s", skb->data); + consume_skb(skb); + + return skb->len - ret; +} + +static ssize_t cw1200_itp_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct cw1200_common *priv = file->private_data; + struct sk_buff *skb; + + if (!count || count > 1024) + return -EINVAL; + skb = dev_alloc_skb(count + 1); + if (!skb) + return -ENOMEM; + skb_trim(skb, 0); + skb_put(skb, count + 1); + if (copy_from_user(skb->data, user_buf, count)) { + kfree_skb(skb); + return -EFAULT; + } + 
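+	/* NUL-terminate the command so it can be parsed with sscanf() below */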
skb->data[count] = 0; + + cw1200_itp_handle(priv, skb); + consume_skb(skb); + return count; +} + +static unsigned int cw1200_itp_poll(struct file *file, poll_table *wait) +{ + struct cw1200_common *priv = file->private_data; + struct cw1200_itp *itp = &priv->debug->itp; + unsigned int mask = 0; + + poll_wait(file, &itp->read_wait, wait); + + if (!skb_queue_empty(&itp->log_queue)) + mask |= POLLIN | POLLRDNORM; + + mask |= POLLOUT | POLLWRNORM; + + return mask; +} + +static int cw1200_itp_open(struct inode *inode, struct file *file) +{ + struct cw1200_common *priv = inode->i_private; + struct cw1200_itp *itp = &priv->debug->itp; + int ret = 0; + + file->private_data = priv; + if (atomic_inc_return(&itp->open_count) == 1) { + ret = __cw1200_itp_open(priv); + if (ret && !atomic_dec_return(&itp->open_count)) + __cw1200_itp_close(priv); + } else { + atomic_dec(&itp->open_count); + ret = -EBUSY; + } + + return ret; +} + +static int cw1200_itp_close(struct inode *inode, struct file *file) +{ + struct cw1200_common *priv = file->private_data; + struct cw1200_itp *itp = &priv->debug->itp; + if (!atomic_dec_return(&itp->open_count)) { + __cw1200_itp_close(priv); + wake_up(&itp->close_wait); + } + return 0; +} + +static const struct file_operations fops_itp = { + .open = cw1200_itp_open, + .read = cw1200_itp_read, + .write = cw1200_itp_write, + .poll = cw1200_itp_poll, + .release = cw1200_itp_close, + .llseek = default_llseek, + .owner = THIS_MODULE, +}; + +static void cw1200_itp_fill_pattern(u8 *data, int size, + enum cw1200_itp_data_modes mode) +{ + if (size <= 0) + return; + + switch (mode) { + default: + case ITP_DATA_ZEROS: + memset(data, 0x0, size); + break; + case ITP_DATA_ONES: + memset(data, 0xff, size); + break; + case ITP_DATA_ZERONES: + memset(data, 0x55, size); + break; + case ITP_DATA_RANDOM: + get_random_bytes(data, size); + break; + } + return; +} + +static void cw1200_itp_tx_work(struct work_struct *work) +{ + struct cw1200_itp *itp = container_of(work, struct cw1200_itp, + tx_work.work); + struct cw1200_common *priv = itp->priv; + atomic_set(&priv->bh_tx, 1); + wake_up(&priv->bh_wq); +} + +static void cw1200_itp_tx_finish(struct work_struct *work) +{ + struct cw1200_itp *itp = container_of(work, struct cw1200_itp, + tx_finish.work); + __cw1200_itp_tx_stop(itp->priv); +} + +int cw1200_itp_init(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + + itp->priv = priv; + atomic_set(&itp->open_count, 0); + atomic_set(&itp->stop_tx, 0); + atomic_set(&itp->awaiting_confirm, 0); + skb_queue_head_init(&itp->log_queue); + spin_lock_init(&itp->tx_lock); + init_waitqueue_head(&itp->read_wait); + init_waitqueue_head(&itp->write_wait); + init_waitqueue_head(&itp->close_wait); + INIT_DELAYED_WORK(&itp->tx_work, cw1200_itp_tx_work); + INIT_DELAYED_WORK(&itp->tx_finish, cw1200_itp_tx_finish); + itp->data = NULL; + itp->hdr_len = WSM_TX_EXTRA_HEADROOM + + sizeof(struct ieee80211_hdr_3addr); + + if (!debugfs_create_file("itp", S_IRUSR | S_IWUSR, + priv->debug->debugfs_phy, priv, &fops_itp)) + return -ENOMEM; + + return 0; +} + +void cw1200_itp_release(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + + wait_event_interruptible(itp->close_wait, + !atomic_read(&itp->open_count)); + + WARN_ON(atomic_read(&itp->open_count)); + + skb_queue_purge(&itp->log_queue); + cw1200_itp_tx_stop(priv); +} + +static int __cw1200_itp_open(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + + if (!priv->vif) + return -EINVAL; + if 
(priv->join_status) + return -EINVAL; + itp->saved_channel = priv->channel; + if (!priv->channel) + priv->channel = &priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; + wsm_set_bssid_filtering(priv, false); + cw1200_itp_rx_reset(priv); + return 0; +} + +static int __cw1200_itp_close(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + if (atomic_read(&itp->test_mode) == TEST_MODE_RX_TEST) + cw1200_itp_rx_stop(priv); + cw1200_itp_tx_stop(priv); + cw1200_disable_listening(priv); + cw1200_update_filtering(priv); + priv->channel = itp->saved_channel; + return 0; +} + +bool cw1200_is_itp(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + return atomic_read(&itp->open_count) != 0; +} + +static void cw1200_itp_rx_reset(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + itp->rx_cnt = 0; + itp->rx_rssi = 0; + itp->rx_rssi_max = -1000; + itp->rx_rssi_min = 1000; +} + +static void cw1200_itp_rx_start(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + + pr_debug("[ITP] RX start, band = %d, ch = %d\n", + itp->band, itp->ch); + atomic_set(&itp->test_mode, TEST_MODE_RX_TEST); + cw1200_update_listening(priv, false); + priv->channel = &priv->hw-> + wiphy->bands[itp->band]->channels[itp->ch]; + cw1200_update_listening(priv, true); + wsm_set_bssid_filtering(priv, false); +} + +static void cw1200_itp_rx_stop(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + pr_debug("[ITP] RX stop\n"); + atomic_set(&itp->test_mode, TEST_MODE_NO_TEST); + cw1200_itp_rx_reset(priv); +} + +static void cw1200_itp_rx_stats(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + struct sk_buff *skb; + char buf[128]; + int len, ret; + struct wsm_mib_counters_table counters; + + ret = wsm_get_counters_table(priv, &counters); + + if (ret) + cw1200_itp_err(priv, -EBUSY, 20); + + if (!itp->rx_cnt) + len = snprintf(buf, sizeof(buf), "1,0,0,0,0,%d\n", + counters.rx_packet_errors); + else + len = snprintf(buf, sizeof(buf), "1,%d,%ld,%d,%d,%d\n", + itp->rx_cnt, + itp->rx_cnt ? 
itp->rx_rssi / itp->rx_cnt : 0, + itp->rx_rssi_min, itp->rx_rssi_max, + counters.rx_packet_errors); + + if (len <= 0) { + cw1200_itp_err(priv, -EBUSY, 21); + return; + } + + skb = dev_alloc_skb(len); + if (!skb) { + cw1200_itp_err(priv, -ENOMEM, 22); + return; + } + + itp->rx_cnt = 0; + itp->rx_rssi = 0; + itp->rx_rssi_max = -1000; + itp->rx_rssi_min = 1000; + + skb_trim(skb, 0); + skb_put(skb, len); + + memcpy(skb->data, buf, len); + skb_queue_tail(&itp->log_queue, skb); + wake_up(&itp->read_wait); +} + +static void cw1200_itp_tx_start(struct cw1200_common *priv) +{ + struct wsm_tx *tx; + struct ieee80211_hdr_3addr *hdr; + struct cw1200_itp *itp = &priv->debug->itp; + struct wsm_mib_association_mode assoc_mode = { + .flags = WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE, + .preamble = itp->preamble, + }; + int len; + u8 da_addr[6] = ITP_DEFAULT_DA_ADDR; + + /* Rates index 4 and 5 are not supported */ + if (itp->rate > 3) + itp->rate += 2; + + pr_debug("[ITP] TX start: band = %d, ch = %d, rate = %d, preamble = %d, number = %d, data_mode = %d, interval = %d, power = %d, data_len = %d\n", + itp->band, itp->ch, itp->rate, itp->preamble, + itp->number, itp->data_mode, itp->interval_us, + itp->power, itp->data_len); + + len = itp->hdr_len + itp->data_len; + + itp->data = kmalloc(len, GFP_KERNEL); + tx = (struct wsm_tx *)itp->data; + tx->hdr.len = itp->data_len + itp->hdr_len; + tx->hdr.id = __cpu_to_le16(0x0004 | 1 << 6); + tx->max_tx_rate = itp->rate; + tx->queue_id = 3; + tx->more = 0; + tx->flags = 0xc; + tx->packet_id = 0x55ff55; + tx->reserved = 0; + tx->expire_time = 1; + + if (itp->preamble == ITP_PREAMBLE_GREENFIELD) + tx->ht_tx_parameters = WSM_HT_TX_GREENFIELD; + else if (itp->preamble == ITP_PREAMBLE_MIXED) + tx->ht_tx_parameters = WSM_HT_TX_MIXED; + + hdr = (struct ieee80211_hdr_3addr *)&itp->data[sizeof(struct wsm_tx)]; + memset(hdr, 0, sizeof(*hdr)); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); + memcpy(hdr->addr1, da_addr, ETH_ALEN); + memcpy(hdr->addr2, priv->vif->addr, ETH_ALEN); + memcpy(hdr->addr3, da_addr, ETH_ALEN); + + cw1200_itp_fill_pattern(&itp->data[itp->hdr_len], + itp->data_len, itp->data_mode); + + cw1200_update_listening(priv, false); + priv->channel = &priv->hw->wiphy->bands[itp->band]->channels[itp->ch]; + WARN_ON(wsm_set_output_power(priv, itp->power)); + if (itp->preamble == ITP_PREAMBLE_SHORT || + itp->preamble == ITP_PREAMBLE_LONG) + WARN_ON(wsm_set_association_mode(priv, + &assoc_mode)); + wsm_set_bssid_filtering(priv, false); + cw1200_update_listening(priv, true); + + spin_lock_bh(&itp->tx_lock); + atomic_set(&itp->test_mode, TEST_MODE_TX_TEST); + atomic_set(&itp->awaiting_confirm, 0); + atomic_set(&itp->stop_tx, 0); + atomic_set(&priv->bh_tx, 1); + ktime_get_ts(&itp->last_sent); + wake_up(&priv->bh_wq); + spin_unlock_bh(&itp->tx_lock); +} + +void __cw1200_itp_tx_stop(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + spin_lock_bh(&itp->tx_lock); + kfree(itp->data); + itp->data = NULL; + atomic_set(&itp->test_mode, TEST_MODE_NO_TEST); + spin_unlock_bh(&itp->tx_lock); +} + +static void cw1200_itp_tx_stop(struct cw1200_common *priv) +{ + struct cw1200_itp *itp = &priv->debug->itp; + pr_debug("[ITP] TX stop\n"); + atomic_set(&itp->stop_tx, 1); + flush_workqueue(priv->workqueue); + + /* time for FW to confirm all tx requests */ + msleep(500); + + __cw1200_itp_tx_stop(priv); +} + +static int cw1200_print_fw_version(struct cw1200_common *priv, + u8 *buf, size_t len) +{ + return snprintf(buf, len, "%s %d.%d", 
+ cw1200_fw_types[priv->wsm_caps.fw_type], + priv->wsm_caps.fw_ver, + priv->wsm_caps.fw_build); +} + +static void cw1200_itp_get_version(struct cw1200_common *priv, + enum cw1200_itp_version_type type) +{ + struct cw1200_itp *itp = &priv->debug->itp; + struct sk_buff *skb; + char buf[ITP_BUF_SIZE]; + size_t size = 0; + int len; + pr_debug("[ITP] print %s version\n", + type == ITP_CHIP_ID ? "chip" : "firmware"); + + len = snprintf(buf, ITP_BUF_SIZE, "2,"); + if (len <= 0) { + cw1200_itp_err(priv, -EINVAL, 40); + return; + } + size += len; + + switch (type) { + case ITP_CHIP_ID: + len = cw1200_print_fw_version(priv, buf+size, + ITP_BUF_SIZE - size); + + if (len <= 0) { + cw1200_itp_err(priv, -EINVAL, 41); + return; + } + size += len; + break; + case ITP_FW_VER: + len = snprintf(buf+size, ITP_BUF_SIZE - size, + "%d.%d", priv->wsm_caps.hw_id, + priv->wsm_caps.hw_subid); + if (len <= 0) { + cw1200_itp_err(priv, -EINVAL, 42); + return; + } + size += len; + break; + default: + cw1200_itp_err(priv, -EINVAL, 43); + break; + } + + len = snprintf(buf+size, ITP_BUF_SIZE-size, "\n"); + if (len <= 0) { + cw1200_itp_err(priv, -EINVAL, 44); + return; + } + size += len; + + skb = dev_alloc_skb(size); + if (!skb) { + cw1200_itp_err(priv, -ENOMEM, 45); + return; + } + + skb_trim(skb, 0); + skb_put(skb, size); + + memcpy(skb->data, buf, size); + skb_queue_tail(&itp->log_queue, skb); + wake_up(&itp->read_wait); +} + +int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst) +{ + struct cw1200_itp *itp; + struct timespec now; + int time_left_us; + + if (!priv->debug) + return 0; + + itp = &priv->debug->itp; + + if (!itp) + return 0; + + spin_lock_bh(&itp->tx_lock); + if (atomic_read(&itp->test_mode) != TEST_MODE_TX_TEST) + goto out; + + if (atomic_read(&itp->stop_tx)) + goto out; + + if (itp->number == 0) { + atomic_set(&itp->stop_tx, 1); + queue_delayed_work(priv->workqueue, &itp->tx_finish, HZ/10); + goto out; + } + + if (!itp->data) + goto out; + + if (priv->hw_bufs_used >= 2) { + if (!atomic_read(&priv->bh_rx)) + atomic_set(&priv->bh_rx, 1); + atomic_set(&priv->bh_tx, 1); + goto out; + } + + ktime_get_ts(&now); + time_left_us = (itp->last_sent.tv_sec - now.tv_sec)*1000000 + + (itp->last_sent.tv_nsec - now.tv_nsec)/1000 + + itp->interval_us; + + if (time_left_us > ITP_TIME_THRES_US) { + queue_delayed_work(priv->workqueue, &itp->tx_work, + ITP_US_TO_MS(time_left_us)*HZ/1000); + goto out; + } + + if (time_left_us > 50) + udelay(time_left_us); + + if (itp->number > 0) + itp->number--; + + *data = itp->data; + *tx_len = itp->data_len + itp->hdr_len; + + if (itp->data_mode == ITP_DATA_RANDOM) + cw1200_itp_fill_pattern(&itp->data[itp->hdr_len], + itp->data_len, itp->data_mode); + *burst = 2; + atomic_set(&priv->bh_tx, 1); + ktime_get_ts(&itp->last_sent); + atomic_add(1, &itp->awaiting_confirm); + spin_unlock_bh(&itp->tx_lock); + return 1; + +out: + spin_unlock_bh(&itp->tx_lock); + return 0; +} + +bool cw1200_itp_rxed(struct cw1200_common *priv, struct sk_buff *skb) +{ + struct cw1200_itp *itp = &priv->debug->itp; + struct ieee80211_rx_status *rx = IEEE80211_SKB_RXCB(skb); + int signal; + + if (atomic_read(&itp->test_mode) != TEST_MODE_RX_TEST) + return cw1200_is_itp(priv); + if (rx->freq != priv->channel->center_freq) + return true; + + signal = rx->signal; + itp->rx_cnt++; + itp->rx_rssi += signal; + if (itp->rx_rssi_min > rx->signal) + itp->rx_rssi_min = rx->signal; + if (itp->rx_rssi_max < rx->signal) + itp->rx_rssi_max = rx->signal; + + return true; +} + +void 
cw1200_itp_wake_up_tx(struct cw1200_common *priv) +{ + wake_up(&priv->debug->itp.write_wait); +} + +bool cw1200_itp_tx_running(struct cw1200_common *priv) +{ + if (atomic_read(&priv->debug->itp.awaiting_confirm) || + atomic_read(&priv->debug->itp.test_mode) == + TEST_MODE_TX_TEST) { + atomic_sub(1, &priv->debug->itp.awaiting_confirm); + return true; + } + return false; +} + +static void cw1200_itp_handle(struct cw1200_common *priv, + struct sk_buff *skb) +{ + struct cw1200_itp *itp = &priv->debug->itp; + const struct wiphy *wiphy = priv->hw->wiphy; + int cmd; + int ret; + + pr_debug("[ITP] <<< %s", skb->data); + if (sscanf(skb->data, "%d", &cmd) != 1) { + cw1200_itp_err(priv, -EINVAL, 1); + return; + } + + switch (cmd) { + case 1: /* RX test */ + if (atomic_read(&itp->test_mode)) { + cw1200_itp_err(priv, -EBUSY, 0); + return; + } + ret = sscanf(skb->data, "%d,%d,%d", + &cmd, &itp->band, &itp->ch); + if (ret != 3) { + cw1200_itp_err(priv, -EINVAL, ret + 1); + return; + } + if (itp->band >= 2) { + cw1200_itp_err(priv, -EINVAL, 2); + } else if (!wiphy->bands[itp->band]) { + cw1200_itp_err(priv, -EINVAL, 2); + } else if (itp->ch >= wiphy->bands[itp->band]->n_channels) { + cw1200_itp_err(priv, -EINVAL, 3); + } else { + cw1200_itp_rx_stats(priv); + cw1200_itp_rx_start(priv); + } + break; + case 2: /* RX stat */ + cw1200_itp_rx_stats(priv); + break; + case 3: /* RX/TX stop */ + if (atomic_read(&itp->test_mode) == TEST_MODE_RX_TEST) { + cw1200_itp_rx_stats(priv); + cw1200_itp_rx_stop(priv); + } else if (atomic_read(&itp->test_mode) == TEST_MODE_TX_TEST) { + cw1200_itp_tx_stop(priv); + } else { + cw1200_itp_err(priv, -EBUSY, 0); + } + break; + case 4: /* TX start */ + if (atomic_read(&itp->test_mode) != TEST_MODE_NO_TEST) { + cw1200_itp_err(priv, -EBUSY, 0); + return; + } + ret = sscanf(skb->data, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d", + &cmd, &itp->band, &itp->ch, &itp->rate, + &itp->preamble, &itp->number, &itp->data_mode, + &itp->interval_us, &itp->power, &itp->data_len); + if (ret != 10) { + cw1200_itp_err(priv, -EINVAL, ret + 1); + return; + } + if (itp->band >= 2) { + cw1200_itp_err(priv, -EINVAL, 2); + } else if (!wiphy->bands[itp->band]) { + cw1200_itp_err(priv, -EINVAL, 2); + } else if (itp->ch >= wiphy->bands[itp->band]->n_channels) { + cw1200_itp_err(priv, -EINVAL, 3); + } else if (itp->rate >= 20) { + cw1200_itp_err(priv, -EINVAL, 4); + } else if (itp->preamble >= ITP_PREAMBLE_MAX) { + cw1200_itp_err(priv, -EINVAL, 5); + } else if (itp->data_mode >= ITP_DATA_MAX_MODE) { + cw1200_itp_err(priv, -EINVAL, 7); + } else if (itp->data_len < ITP_MIN_DATA_SIZE || + itp->data_len > (priv->wsm_caps.input_buffer_size - itp->hdr_len)) { + cw1200_itp_err(priv, -EINVAL, 8); + } else { + cw1200_itp_tx_start(priv); + } + break; + case 5: + cw1200_itp_get_version(priv, ITP_CHIP_ID); + break; + case 6: + cw1200_itp_get_version(priv, ITP_FW_VER); + break; + } +} + +static void cw1200_itp_err(struct cw1200_common *priv, + int err, int arg) +{ + struct cw1200_itp *itp = &priv->debug->itp; + struct sk_buff *skb; + static char buf[255]; + int len; + + len = snprintf(buf, sizeof(buf), "%d,%d\n", + err, arg); + if (len <= 0) + return; + + skb = dev_alloc_skb(len); + if (!skb) + return; + + skb_trim(skb, 0); + skb_put(skb, len); + + memcpy(skb->data, buf, len); + skb_queue_tail(&itp->log_queue, skb); + wake_up(&itp->read_wait); + + len = sprint_symbol(buf, + (unsigned long)__builtin_return_address(0)); + if (len <= 0) + return; + pr_debug("[ITP] error %d,%d from %s\n", + err, arg, buf); +} diff --git 
a/drivers/net/wireless/cw1200/itp.h b/drivers/net/wireless/cw1200/itp.h new file mode 100644 index 000000000000..1e9dfb7fcadc --- /dev/null +++ b/drivers/net/wireless/cw1200/itp.h @@ -0,0 +1,144 @@ +/* + * ITP code for ST-Ericsson CW1200 mac80211 driver + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_ITP_H_INCLUDED +#define CW1200_ITP_H_INCLUDED + +struct cw200_common; +struct wsm_tx_confirm; +struct dentry; + +#ifdef CONFIG_CW1200_ITP + +/*extern*/ struct ieee80211_channel; + +#define TEST_MODE_NO_TEST (0) +#define TEST_MODE_RX_TEST (1) +#define TEST_MODE_TX_TEST (2) +#define ITP_DEFAULT_DA_ADDR {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +#define ITP_MIN_DATA_SIZE 6 +#define ITP_MAX_DATA_SIZE 1600 +#define ITP_TIME_THRES_US 10000 +#define ITP_US_TO_MS(x) ((x)/1000) +#define ITP_MS_TO_US(x) ((x)*1000) +#define ITP_BUF_SIZE 255 + + +enum cw1200_itp_data_modes { + ITP_DATA_ZEROS, + ITP_DATA_ONES, + ITP_DATA_ZERONES, + ITP_DATA_RANDOM, + ITP_DATA_MAX_MODE, +}; + +enum cw1200_itp_version_type { + ITP_CHIP_ID, + ITP_FW_VER, +}; + +enum cw1200_itp_preamble_type { + ITP_PREAMBLE_LONG, + ITP_PREAMBLE_SHORT, + ITP_PREAMBLE_OFDM, + ITP_PREAMBLE_MIXED, + ITP_PREAMBLE_GREENFIELD, + ITP_PREAMBLE_MAX, +}; + + +struct cw1200_itp { + struct cw1200_common *priv; + atomic_t open_count; + atomic_t awaiting_confirm; + struct sk_buff_head log_queue; + wait_queue_head_t read_wait; + wait_queue_head_t write_wait; + wait_queue_head_t close_wait; + struct ieee80211_channel *saved_channel; + atomic_t stop_tx; + struct delayed_work tx_work; + struct delayed_work tx_finish; + spinlock_t tx_lock; + struct timespec last_sent; + atomic_t test_mode; + int rx_cnt; + long rx_rssi; + int rx_rssi_max; + int rx_rssi_min; + unsigned band; + unsigned ch; + unsigned rate; + unsigned preamble; + unsigned int number; + unsigned data_mode; + int interval_us; + int power; + u8 *data; + int hdr_len; + int data_len; +}; + +int cw1200_itp_init(struct cw1200_common *priv); +void cw1200_itp_release(struct cw1200_common *priv); + +bool cw1200_is_itp(struct cw1200_common *priv); +bool cw1200_itp_rxed(struct cw1200_common *priv, struct sk_buff *skb); +void cw1200_itp_wake_up_tx(struct cw1200_common *priv); +int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst); +bool cw1200_itp_tx_running(struct cw1200_common *priv); + +#else /* CONFIG_CW1200_ITP */ + +static inline int cw1200_itp_init(struct cw1200_common *priv) +{ + return 0; +} + +static inline void cw1200_itp_release(struct cw1200_common *priv) +{ +} + +static inline bool cw1200_is_itp(struct cw1200_common *priv) +{ + return false; +} + +static inline bool cw1200_itp_rxed(struct cw1200_common *priv, + struct sk_buff *skb) +{ + return false; +} + + +static inline void cw1200_itp_consume_txed(struct cw1200_common *priv) +{ +} + +static inline void cw1200_itp_wake_up_tx(struct cw1200_common *priv) +{ +} + +static inline int cw1200_itp_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst) +{ + return 0; +} + +static inline bool cw1200_itp_tx_running(struct cw1200_common *priv) +{ + return false; +} + +#endif /* CONFIG_CW1200_ITP */ + +#endif /* CW1200_ITP_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c new file mode 100644 
index 000000000000..c934ec52d129 --- /dev/null +++ b/drivers/net/wireless/cw1200/main.c @@ -0,0 +1,633 @@ +/* + * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on: + * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> + * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de> + * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> + * + * Based on: + * - the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. + * - stlc45xx driver + * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> +#include <linux/random.h> +#include <linux/sched.h> +#include <net/mac80211.h> + +#include "cw1200.h" +#include "txrx.h" +#include "hwbus.h" +#include "fwio.h" +#include "hwio.h" +#include "bh.h" +#include "sta.h" +#include "scan.h" +#include "debug.h" +#include "pm.h" + +MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>"); +MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("cw1200_core"); + +/* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */ +static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00}; +module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(macaddr, "Override platform_data MAC address"); + +static char *cw1200_sdd_path; +module_param(cw1200_sdd_path, charp, 0644); +MODULE_PARM_DESC(cw1200_sdd_path, "Override platform_data SDD file"); +static int cw1200_refclk; +module_param(cw1200_refclk, int, 0644); +MODULE_PARM_DESC(cw1200_refclk, "Override platform_data reference clock"); + +int cw1200_power_mode = wsm_power_mode_quiescent; +module_param(cw1200_power_mode, int, 0644); +MODULE_PARM_DESC(cw1200_power_mode, "WSM power mode. 
0 == active, 1 == doze, 2 == quiescent (default)"); + +#ifdef CONFIG_CW1200_ETF +int etf_mode; +module_param(etf_mode, int, 0644); +MODULE_PARM_DESC(etf_mode, "Enable EngineeringTestingFramework operation"); +#endif + +#define RATETAB_ENT(_rate, _rateid, _flags) \ + { \ + .bitrate = (_rate), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +static struct ieee80211_rate cw1200_rates[] = { + RATETAB_ENT(10, 0, 0), + RATETAB_ENT(20, 1, 0), + RATETAB_ENT(55, 2, 0), + RATETAB_ENT(110, 3, 0), + RATETAB_ENT(60, 6, 0), + RATETAB_ENT(90, 7, 0), + RATETAB_ENT(120, 8, 0), + RATETAB_ENT(180, 9, 0), + RATETAB_ENT(240, 10, 0), + RATETAB_ENT(360, 11, 0), + RATETAB_ENT(480, 12, 0), + RATETAB_ENT(540, 13, 0), +}; + +static struct ieee80211_rate cw1200_mcs_rates[] = { + RATETAB_ENT(65, 14, IEEE80211_TX_RC_MCS), + RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS), + RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS), + RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS), + RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS), + RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS), + RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS), + RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS), +}; + +#define cw1200_a_rates (cw1200_rates + 4) +#define cw1200_a_rates_size (ARRAY_SIZE(cw1200_rates) - 4) +#define cw1200_g_rates (cw1200_rates + 0) +#define cw1200_g_rates_size (ARRAY_SIZE(cw1200_rates)) +#define cw1200_n_rates (cw1200_mcs_rates) +#define cw1200_n_rates_size (ARRAY_SIZE(cw1200_mcs_rates)) + + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_channel cw1200_2ghz_chantable[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static struct ieee80211_channel cw1200_5ghz_chantable[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0), + CHAN5G(184, 0), CHAN5G(188, 0), + CHAN5G(192, 0), CHAN5G(196, 0), + CHAN5G(200, 0), CHAN5G(204, 0), + CHAN5G(208, 0), CHAN5G(212, 0), + CHAN5G(216, 0), +}; + +static struct ieee80211_supported_band cw1200_band_2ghz = { + .channels = cw1200_2ghz_chantable, + .n_channels = ARRAY_SIZE(cw1200_2ghz_chantable), + .bitrates = cw1200_g_rates, + .n_bitrates = cw1200_g_rates_size, + .ht_cap = { + .cap = IEEE80211_HT_CAP_GRN_FLD | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | + IEEE80211_HT_CAP_MAX_AMSDU, + .ht_supported = 1, + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, + .mcs = { + .rx_mask[0] = 0xFF, + .rx_highest = __cpu_to_le16(0x41), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + }, + }, +}; + +static struct 
ieee80211_supported_band cw1200_band_5ghz = { + .channels = cw1200_5ghz_chantable, + .n_channels = ARRAY_SIZE(cw1200_5ghz_chantable), + .bitrates = cw1200_a_rates, + .n_bitrates = cw1200_a_rates_size, + .ht_cap = { + .cap = IEEE80211_HT_CAP_GRN_FLD | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | + IEEE80211_HT_CAP_MAX_AMSDU, + .ht_supported = 1, + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, + .mcs = { + .rx_mask[0] = 0xFF, + .rx_highest = __cpu_to_le16(0x41), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + }, + }, +}; + +static const unsigned long cw1200_ttl[] = { + 1 * HZ, /* VO */ + 2 * HZ, /* VI */ + 5 * HZ, /* BE */ + 10 * HZ /* BK */ +}; + +static const struct ieee80211_ops cw1200_ops = { + .start = cw1200_start, + .stop = cw1200_stop, + .add_interface = cw1200_add_interface, + .remove_interface = cw1200_remove_interface, + .change_interface = cw1200_change_interface, + .tx = cw1200_tx, + .hw_scan = cw1200_hw_scan, + .set_tim = cw1200_set_tim, + .sta_notify = cw1200_sta_notify, + .sta_add = cw1200_sta_add, + .sta_remove = cw1200_sta_remove, + .set_key = cw1200_set_key, + .set_rts_threshold = cw1200_set_rts_threshold, + .config = cw1200_config, + .bss_info_changed = cw1200_bss_info_changed, + .prepare_multicast = cw1200_prepare_multicast, + .configure_filter = cw1200_configure_filter, + .conf_tx = cw1200_conf_tx, + .get_stats = cw1200_get_stats, + .ampdu_action = cw1200_ampdu_action, + .flush = cw1200_flush, +#ifdef CONFIG_PM + .suspend = cw1200_wow_suspend, + .resume = cw1200_wow_resume, +#endif + /* Intentionally not offloaded: */ + /*.channel_switch = cw1200_channel_switch, */ + /*.remain_on_channel = cw1200_remain_on_channel, */ + /*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */ +}; + +int cw1200_ba_rx_tids = -1; +int cw1200_ba_tx_tids = -1; +module_param(cw1200_ba_rx_tids, int, 0644); +module_param(cw1200_ba_tx_tids, int, 0644); +MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs"); +MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs"); + +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support cw1200_wowlan_support = { + /* Support only for limited wowlan functionalities */ + .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, +}; +#endif + + +static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, + const bool have_5ghz) +{ + int i, band; + struct ieee80211_hw *hw; + struct cw1200_common *priv; + + hw = ieee80211_alloc_hw(sizeof(struct cw1200_common), &cw1200_ops); + if (!hw) + return NULL; + + priv = hw->priv; + priv->hw = hw; + priv->hw_type = -1; + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + priv->rates = cw1200_rates; /* TODO: fetch from FW */ + priv->mcs_rates = cw1200_n_rates; + if (cw1200_ba_rx_tids != -1) + priv->ba_rx_tid_mask = cw1200_ba_rx_tids; + else + priv->ba_rx_tid_mask = 0xFF; /* Enable RX BLKACK for all TIDs */ + if (cw1200_ba_tx_tids != -1) + priv->ba_tx_tid_mask = cw1200_ba_tx_tids; + else + priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */ + + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_SUPPORTS_UAPSD | + IEEE80211_HW_CONNECTION_MONITOR | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | + IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC; + + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + 
BIT(NL80211_IFTYPE_P2P_GO); + +#ifdef CONFIG_PM + hw->wiphy->wowlan = &cw1200_wowlan_support; +#endif + + hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + + hw->channel_change_time = 1000; /* TODO: find actual value */ + hw->queues = 4; + + priv->rts_threshold = -1; + + hw->max_rates = 8; + hw->max_rate_tries = 15; + hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM + + 8; /* TKIP IV */ + + hw->sta_data_size = sizeof(struct cw1200_sta_priv); + + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz; + if (have_5ghz) + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz; + + /* Channel params have to be cleared before registering wiphy again */ + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; + if (!sband) + continue; + for (i = 0; i < sband->n_channels; i++) { + sband->channels[i].flags = 0; + sband->channels[i].max_antenna_gain = 0; + sband->channels[i].max_power = 30; + } + } + + hw->wiphy->max_scan_ssids = 2; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + + if (macaddr) + SET_IEEE80211_PERM_ADDR(hw, (u8 *)macaddr); + else + SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template); + + /* Fix up mac address if necessary */ + if (hw->wiphy->perm_addr[3] == 0 && + hw->wiphy->perm_addr[4] == 0 && + hw->wiphy->perm_addr[5] == 0) { + get_random_bytes(&hw->wiphy->perm_addr[3], 3); + } + + mutex_init(&priv->wsm_cmd_mux); + mutex_init(&priv->conf_mutex); + priv->workqueue = create_singlethread_workqueue("cw1200_wq"); + sema_init(&priv->scan.lock, 1); + INIT_WORK(&priv->scan.work, cw1200_scan_work); + INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work); + INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout); + INIT_DELAYED_WORK(&priv->clear_recent_scan_work, + cw1200_clear_recent_scan_work); + INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout); + INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work); + INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work); + INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work); + INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work); + spin_lock_init(&priv->event_queue_lock); + INIT_LIST_HEAD(&priv->event_queue); + INIT_WORK(&priv->event_handler, cw1200_event_handler); + INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work); + INIT_WORK(&priv->bss_params_work, cw1200_bss_params_work); + spin_lock_init(&priv->bss_loss_lock); + spin_lock_init(&priv->ps_state_lock); + INIT_WORK(&priv->set_cts_work, cw1200_set_cts_work); + INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work); + INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work); + INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work); + INIT_WORK(&priv->link_id_work, cw1200_link_id_work); + INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work); + INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset); + INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work); + INIT_WORK(&priv->set_beacon_wakeup_period_work, + cw1200_set_beacon_wakeup_period_work); + init_timer(&priv->mcast_timeout); + priv->mcast_timeout.data = (unsigned long)priv; + priv->mcast_timeout.function = cw1200_mcast_timeout; + + if (cw1200_queue_stats_init(&priv->tx_queue_stats, + CW1200_LINK_ID_MAX, + cw1200_skb_dtor, + priv)) { + ieee80211_free_hw(hw); + return NULL; + } + + for (i = 0; i < 4; ++i) { + if (cw1200_queue_init(&priv->tx_queue[i], + &priv->tx_queue_stats, i, 16, + cw1200_ttl[i])) { + for (; i > 0; i--) + cw1200_queue_deinit(&priv->tx_queue[i - 1]); + 
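+		/* Queues initialised so far are torn down above; now
+		 * release the shared queue stats and the hw itself.
+		 */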
cw1200_queue_stats_deinit(&priv->tx_queue_stats); + ieee80211_free_hw(hw); + return NULL; + } + } + + init_waitqueue_head(&priv->channel_switch_done); + init_waitqueue_head(&priv->wsm_cmd_wq); + init_waitqueue_head(&priv->wsm_startup_done); + init_waitqueue_head(&priv->ps_mode_switch_done); + wsm_buf_init(&priv->wsm_cmd_buf); + spin_lock_init(&priv->wsm_cmd.lock); + priv->wsm_cmd.done = 1; + tx_policy_init(priv); + + return hw; +} + +static int cw1200_register_common(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + int err; + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) + goto done; +#endif + +#ifdef CONFIG_PM + err = cw1200_pm_init(&priv->pm_state, priv); + if (err) { + pr_err("Cannot init PM. (%d).\n", + err); + return err; + } +#endif + + err = ieee80211_register_hw(dev); + if (err) { + pr_err("Cannot register device (%d).\n", + err); +#ifdef CONFIG_PM + cw1200_pm_deinit(&priv->pm_state); +#endif + return err; + } + +#ifdef CONFIG_CW1200_ETF +done: +#endif + cw1200_debug_init(priv); + + pr_info("Registered as '%s'\n", wiphy_name(dev->wiphy)); + return 0; +} + +static void cw1200_free_common(struct ieee80211_hw *dev) +{ + ieee80211_free_hw(dev); +} + +static void cw1200_unregister_common(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + int i; + +#ifdef CONFIG_CW1200_ETF + if (!etf_mode) { +#endif + ieee80211_unregister_hw(dev); +#ifdef CONFIG_CW1200_ETF + } +#endif + + del_timer_sync(&priv->mcast_timeout); + cw1200_unregister_bh(priv); + + cw1200_debug_release(priv); + + mutex_destroy(&priv->conf_mutex); + + wsm_buf_deinit(&priv->wsm_cmd_buf); + + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + + if (priv->sdd) { + release_firmware(priv->sdd); + priv->sdd = NULL; + } + + for (i = 0; i < 4; ++i) + cw1200_queue_deinit(&priv->tx_queue[i]); + + cw1200_queue_stats_deinit(&priv->tx_queue_stats); +#ifdef CONFIG_PM + cw1200_pm_deinit(&priv->pm_state); +#endif +} + +/* Clock is in KHz */ +u32 cw1200_dpll_from_clk(u16 clk_khz) +{ + switch (clk_khz) { + case 0x32C8: /* 13000 KHz */ + return 0x1D89D241; + case 0x3E80: /* 16000 KHz */ + return 0x000001E1; + case 0x41A0: /* 16800 KHz */ + return 0x124931C1; + case 0x4B00: /* 19200 KHz */ + return 0x00000191; + case 0x5DC0: /* 24000 KHz */ + return 0x00000141; + case 0x6590: /* 26000 KHz */ + return 0x0EC4F121; + case 0x8340: /* 33600 KHz */ + return 0x092490E1; + case 0x9600: /* 38400 KHz */ + return 0x100010C1; + case 0x9C40: /* 40000 KHz */ + return 0x000000C1; + case 0xBB80: /* 48000 KHz */ + return 0x000000A1; + case 0xCB20: /* 52000 KHz */ + return 0x07627091; + default: + pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n", + clk_khz); + return 0x0EC4F121; + } +} + +int cw1200_core_probe(const struct hwbus_ops *hwbus_ops, + struct hwbus_priv *hwbus, + struct device *pdev, + struct cw1200_common **core, + int ref_clk, const u8 *macaddr, + const char *sdd_path, bool have_5ghz) +{ + int err = -EINVAL; + struct ieee80211_hw *dev; + struct cw1200_common *priv; + struct wsm_operational_mode mode = { + .power_mode = cw1200_power_mode, + .disable_more_flag_usage = true, + }; + + dev = cw1200_init_common(macaddr, have_5ghz); + if (!dev) + goto err; + + priv = dev->priv; + priv->hw_refclk = ref_clk; + if (cw1200_refclk) + priv->hw_refclk = cw1200_refclk; + + priv->sdd_path = (char *)sdd_path; + if (cw1200_sdd_path) + priv->sdd_path = cw1200_sdd_path; + + priv->hwbus_ops = hwbus_ops; + priv->hwbus_priv = hwbus; + priv->pdev = pdev; + SET_IEEE80211_DEV(priv->hw, pdev); + + /* Pass struct 
cw1200_common back up */ + *core = priv; + + err = cw1200_register_bh(priv); + if (err) + goto err1; + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) + goto skip_fw; +#endif + + err = cw1200_load_firmware(priv); + if (err) + goto err2; + + if (wait_event_interruptible_timeout(priv->wsm_startup_done, + priv->firmware_ready, + 3*HZ) <= 0) { + /* TODO: Need to find how to reset device + in QUEUE mode properly. + */ + pr_err("Timeout waiting on device startup\n"); + err = -ETIMEDOUT; + goto err2; + } + + /* Set low-power mode. */ + wsm_set_operational_mode(priv, &mode); + + /* Enable multi-TX confirmation */ + wsm_use_multi_tx_conf(priv, true); + +#ifdef CONFIG_CW1200_ETF +skip_fw: +#endif + err = cw1200_register_common(dev); + if (err) + goto err2; + + return err; + +err2: + cw1200_unregister_bh(priv); +err1: + cw1200_free_common(dev); +err: + *core = NULL; + return err; +} +EXPORT_SYMBOL_GPL(cw1200_core_probe); + +void cw1200_core_release(struct cw1200_common *self) +{ + /* Disable device interrupts */ + self->hwbus_ops->lock(self->hwbus_priv); + __cw1200_irq_enable(self, 0); + self->hwbus_ops->unlock(self->hwbus_priv); + + /* And then clean up */ + cw1200_unregister_common(self->hw); + cw1200_free_common(self->hw); + return; +} +EXPORT_SYMBOL_GPL(cw1200_core_release); diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c new file mode 100644 index 000000000000..b37abb9f0453 --- /dev/null +++ b/drivers/net/wireless/cw1200/pm.c @@ -0,0 +1,367 @@ +/* + * Mac80211 power management API for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/if_ether.h> +#include "cw1200.h" +#include "pm.h" +#include "sta.h" +#include "bh.h" +#include "hwbus.h" + +#define CW1200_BEACON_SKIPPING_MULTIPLIER 3 + +struct cw1200_udp_port_filter { + struct wsm_udp_port_filter_hdr hdr; + /* Up to 4 filters are allowed. */ + struct wsm_udp_port_filter filters[WSM_MAX_FILTER_ELEMENTS]; +} __packed; + +struct cw1200_ether_type_filter { + struct wsm_ether_type_filter_hdr hdr; + /* Up to 4 filters are allowed. 
*/ + struct wsm_ether_type_filter filters[WSM_MAX_FILTER_ELEMENTS]; +} __packed; + +static struct cw1200_udp_port_filter cw1200_udp_port_filter_on = { + .hdr.num = 2, + .filters = { + [0] = { + .action = WSM_FILTER_ACTION_FILTER_OUT, + .type = WSM_FILTER_PORT_TYPE_DST, + .port = __cpu_to_le16(67), /* DHCP Bootps */ + }, + [1] = { + .action = WSM_FILTER_ACTION_FILTER_OUT, + .type = WSM_FILTER_PORT_TYPE_DST, + .port = __cpu_to_le16(68), /* DHCP Bootpc */ + }, + } +}; + +static struct wsm_udp_port_filter_hdr cw1200_udp_port_filter_off = { + .num = 0, +}; + +#ifndef ETH_P_WAPI +#define ETH_P_WAPI 0x88B4 +#endif + +static struct cw1200_ether_type_filter cw1200_ether_type_filter_on = { + .hdr.num = 4, + .filters = { + [0] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_IP), + }, + [1] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_PAE), + }, + [2] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_WAPI), + }, + [3] = { + .action = WSM_FILTER_ACTION_FILTER_IN, + .type = __cpu_to_le16(ETH_P_ARP), + }, + }, +}; + +static struct wsm_ether_type_filter_hdr cw1200_ether_type_filter_off = { + .num = 0, +}; + +/* private */ +struct cw1200_suspend_state { + unsigned long bss_loss_tmo; + unsigned long join_tmo; + unsigned long direct_probe; + unsigned long link_id_gc; + bool beacon_skipping; + u8 prev_ps_mode; +}; + +static void cw1200_pm_stay_awake_tmo(unsigned long arg) +{ + /* XXX what's the point of this ? */ +} + +int cw1200_pm_init(struct cw1200_pm_state *pm, + struct cw1200_common *priv) +{ + spin_lock_init(&pm->lock); + + init_timer(&pm->stay_awake); + pm->stay_awake.data = (unsigned long)pm; + pm->stay_awake.function = cw1200_pm_stay_awake_tmo; + + return 0; +} + +void cw1200_pm_deinit(struct cw1200_pm_state *pm) +{ + del_timer_sync(&pm->stay_awake); +} + +void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, + unsigned long tmo) +{ + long cur_tmo; + spin_lock_bh(&pm->lock); + cur_tmo = pm->stay_awake.expires - jiffies; + if (!timer_pending(&pm->stay_awake) || cur_tmo < (long)tmo) + mod_timer(&pm->stay_awake, jiffies + tmo); + spin_unlock_bh(&pm->lock); +} + +static long cw1200_suspend_work(struct delayed_work *work) +{ + int ret = cancel_delayed_work(work); + long tmo; + if (ret > 0) { + /* Timer is pending */ + tmo = work->timer.expires - jiffies; + if (tmo < 0) + tmo = 0; + } else { + tmo = -1; + } + return tmo; +} + +static int cw1200_resume_work(struct cw1200_common *priv, + struct delayed_work *work, + unsigned long tmo) +{ + if ((long)tmo < 0) + return 1; + + return queue_delayed_work(priv->workqueue, work, tmo); +} + +int cw1200_can_suspend(struct cw1200_common *priv) +{ + if (atomic_read(&priv->bh_rx)) { + wiphy_dbg(priv->hw->wiphy, "Suspend interrupted.\n"); + return 0; + } + return 1; +} +EXPORT_SYMBOL_GPL(cw1200_can_suspend); + +int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_pm_state *pm_state = &priv->pm_state; + struct cw1200_suspend_state *state; + int ret; + + spin_lock_bh(&pm_state->lock); + ret = timer_pending(&pm_state->stay_awake); + spin_unlock_bh(&pm_state->lock); + if (ret) + return -EAGAIN; + + /* Do not suspend when datapath is not idle */ + if (priv->tx_queue_stats.num_queued) + return -EBUSY; + + /* Make sure there is no configuration requests in progress. */ + if (!mutex_trylock(&priv->conf_mutex)) + return -EBUSY; + + /* Ensure pending operations are done. 
+ * Note also that wow_suspend must return in ~2.5sec, before + * watchdog is triggered. + */ + if (priv->channel_switch_in_progress) + goto revert1; + + /* Do not suspend when join is pending */ + if (priv->join_pending) + goto revert1; + + /* Do not suspend when scanning */ + if (down_trylock(&priv->scan.lock)) + goto revert1; + + /* Lock TX. */ + wsm_lock_tx_async(priv); + + /* Wait to avoid possible race with bh code. + * But do not wait too long... + */ + if (wait_event_timeout(priv->bh_evt_wq, + !priv->hw_bufs_used, HZ / 10) <= 0) + goto revert2; + + /* Set UDP filter */ + wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_on.hdr); + + /* Set ethernet frame type filter */ + wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_on.hdr); + + /* Allocate state */ + state = kzalloc(sizeof(struct cw1200_suspend_state), GFP_KERNEL); + if (!state) + goto revert3; + + /* Change to legacy PS while going to suspend */ + if (!priv->vif->p2p && + priv->join_status == CW1200_JOIN_STATUS_STA && + priv->powersave_mode.mode != WSM_PSM_PS) { + state->prev_ps_mode = priv->powersave_mode.mode; + priv->powersave_mode.mode = WSM_PSM_PS; + cw1200_set_pm(priv, &priv->powersave_mode); + if (wait_event_interruptible_timeout(priv->ps_mode_switch_done, + !priv->ps_mode_switch_in_progress, 1*HZ) <= 0) { + goto revert3; + } + } + + /* Store delayed work states. */ + state->bss_loss_tmo = + cw1200_suspend_work(&priv->bss_loss_work); + state->join_tmo = + cw1200_suspend_work(&priv->join_timeout); + state->direct_probe = + cw1200_suspend_work(&priv->scan.probe_work); + state->link_id_gc = + cw1200_suspend_work(&priv->link_id_gc_work); + + cancel_delayed_work_sync(&priv->clear_recent_scan_work); + atomic_set(&priv->recent_scan, 0); + + /* Enable beacon skipping */ + if (priv->join_status == CW1200_JOIN_STATUS_STA && + priv->join_dtim_period && + !priv->has_multicast_subscription) { + state->beacon_skipping = true; + wsm_set_beacon_wakeup_period(priv, + priv->join_dtim_period, + CW1200_BEACON_SKIPPING_MULTIPLIER * priv->join_dtim_period); + } + + /* Stop serving thread */ + if (cw1200_bh_suspend(priv)) + goto revert4; + + ret = timer_pending(&priv->mcast_timeout); + if (ret) + goto revert5; + + /* Store suspend state */ + pm_state->suspend_state = state; + + /* Enable IRQ wake */ + ret = priv->hwbus_ops->power_mgmt(priv->hwbus_priv, true); + if (ret) { + wiphy_err(priv->hw->wiphy, + "PM request failed: %d. WoW is disabled.\n", ret); + cw1200_wow_resume(hw); + return -EBUSY; + } + + /* Force resume if event is coming from the device. 
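+	 * cw1200_wow_resume() undoes all of the suspend setup performed
+	 * above before we bail out with -EAGAIN.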
*/ + if (atomic_read(&priv->bh_rx)) { + cw1200_wow_resume(hw); + return -EAGAIN; + } + + return 0; + +revert5: + WARN_ON(cw1200_bh_resume(priv)); +revert4: + cw1200_resume_work(priv, &priv->bss_loss_work, + state->bss_loss_tmo); + cw1200_resume_work(priv, &priv->join_timeout, + state->join_tmo); + cw1200_resume_work(priv, &priv->scan.probe_work, + state->direct_probe); + cw1200_resume_work(priv, &priv->link_id_gc_work, + state->link_id_gc); + kfree(state); +revert3: + wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off); + wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off); +revert2: + wsm_unlock_tx(priv); + up(&priv->scan.lock); +revert1: + mutex_unlock(&priv->conf_mutex); + return -EBUSY; +} + +int cw1200_wow_resume(struct ieee80211_hw *hw) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_pm_state *pm_state = &priv->pm_state; + struct cw1200_suspend_state *state; + + state = pm_state->suspend_state; + pm_state->suspend_state = NULL; + + /* Disable IRQ wake */ + priv->hwbus_ops->power_mgmt(priv->hwbus_priv, false); + + /* Scan.lock must be released before BH is resumed other way + * in case when BSS_LOST command arrived the processing of the + * command will be delayed. + */ + up(&priv->scan.lock); + + /* Resume BH thread */ + WARN_ON(cw1200_bh_resume(priv)); + + /* Restores previous PS mode */ + if (!priv->vif->p2p && priv->join_status == CW1200_JOIN_STATUS_STA) { + priv->powersave_mode.mode = state->prev_ps_mode; + cw1200_set_pm(priv, &priv->powersave_mode); + } + + if (state->beacon_skipping) { + wsm_set_beacon_wakeup_period(priv, priv->beacon_int * + priv->join_dtim_period > + MAX_BEACON_SKIP_TIME_MS ? 1 : + priv->join_dtim_period, 0); + state->beacon_skipping = false; + } + + /* Resume delayed work */ + cw1200_resume_work(priv, &priv->bss_loss_work, + state->bss_loss_tmo); + cw1200_resume_work(priv, &priv->join_timeout, + state->join_tmo); + cw1200_resume_work(priv, &priv->scan.probe_work, + state->direct_probe); + cw1200_resume_work(priv, &priv->link_id_gc_work, + state->link_id_gc); + + /* Remove UDP port filter */ + wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off); + + /* Remove ethernet frame type filter */ + wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off); + + /* Unlock datapath */ + wsm_unlock_tx(priv); + + /* Unlock configuration mutex */ + mutex_unlock(&priv->conf_mutex); + + /* Free memory */ + kfree(state); + + return 0; +} diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/cw1200/pm.h new file mode 100644 index 000000000000..3ed90ff22bb8 --- /dev/null +++ b/drivers/net/wireless/cw1200/pm.h @@ -0,0 +1,43 @@ +/* + * Mac80211 power management interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2011, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef PM_H_INCLUDED +#define PM_H_INCLUDED + +/* ******************************************************************** */ +/* mac80211 API */ + +/* extern */ struct cw1200_common; +/* private */ struct cw1200_suspend_state; + +struct cw1200_pm_state { + struct cw1200_suspend_state *suspend_state; + struct timer_list stay_awake; + struct platform_device *pm_dev; + spinlock_t lock; /* Protect access */ +}; + +#ifdef CONFIG_PM +int cw1200_pm_init(struct cw1200_pm_state *pm, + struct cw1200_common *priv); +void cw1200_pm_deinit(struct cw1200_pm_state *pm); +int cw1200_wow_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); +int cw1200_wow_resume(struct ieee80211_hw *hw); +int cw1200_can_suspend(struct cw1200_common *priv); +void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, + unsigned long tmo); +#else +static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, + unsigned long tmo) { +} +#endif +#endif diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/cw1200/queue.c new file mode 100644 index 000000000000..8510454d5db1 --- /dev/null +++ b/drivers/net/wireless/cw1200/queue.c @@ -0,0 +1,583 @@ +/* + * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <net/mac80211.h> +#include <linux/sched.h> +#include "queue.h" +#include "cw1200.h" +#include "debug.h" + +/* private */ struct cw1200_queue_item +{ + struct list_head head; + struct sk_buff *skb; + u32 packet_id; + unsigned long queue_timestamp; + unsigned long xmit_timestamp; + struct cw1200_txpriv txpriv; + u8 generation; +}; + +static inline void __cw1200_queue_lock(struct cw1200_queue *queue) +{ + struct cw1200_queue_stats *stats = queue->stats; + if (queue->tx_locked_cnt++ == 0) { + pr_debug("[TX] Queue %d is locked.\n", + queue->queue_id); + ieee80211_stop_queue(stats->priv->hw, queue->queue_id); + } +} + +static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) +{ + struct cw1200_queue_stats *stats = queue->stats; + BUG_ON(!queue->tx_locked_cnt); + if (--queue->tx_locked_cnt == 0) { + pr_debug("[TX] Queue %d is unlocked.\n", + queue->queue_id); + ieee80211_wake_queue(stats->priv->hw, queue->queue_id); + } +} + +static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation, + u8 *queue_id, u8 *item_generation, + u8 *item_id) +{ + *item_id = (packet_id >> 0) & 0xFF; + *item_generation = (packet_id >> 8) & 0xFF; + *queue_id = (packet_id >> 16) & 0xFF; + *queue_generation = (packet_id >> 24) & 0xFF; +} + +static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id, + u8 item_generation, u8 item_id) +{ + return ((u32)item_id << 0) | + ((u32)item_generation << 8) | + ((u32)queue_id << 16) | + ((u32)queue_generation << 24); +} + +static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats, + struct list_head *gc_list) +{ + struct cw1200_queue_item *item, *tmp; + + list_for_each_entry_safe(item, tmp, gc_list, head) { + list_del(&item->head); + stats->skb_dtor(stats->priv, item->skb, &item->txpriv); + kfree(item); + } +} + +static void cw1200_queue_register_post_gc(struct list_head *gc_list, + struct cw1200_queue_item *item) +{ + struct cw1200_queue_item *gc_item; + gc_item = kmalloc(sizeof(struct cw1200_queue_item), + 
GFP_ATOMIC); + BUG_ON(!gc_item); + memcpy(gc_item, item, sizeof(struct cw1200_queue_item)); + list_add_tail(&gc_item->head, gc_list); +} + +static void __cw1200_queue_gc(struct cw1200_queue *queue, + struct list_head *head, + bool unlock) +{ + struct cw1200_queue_stats *stats = queue->stats; + struct cw1200_queue_item *item = NULL, *tmp; + bool wakeup_stats = false; + + list_for_each_entry_safe(item, tmp, &queue->queue, head) { + if (jiffies - item->queue_timestamp < queue->ttl) + break; + --queue->num_queued; + --queue->link_map_cache[item->txpriv.link_id]; + spin_lock_bh(&stats->lock); + --stats->num_queued; + if (!--stats->link_map_cache[item->txpriv.link_id]) + wakeup_stats = true; + spin_unlock_bh(&stats->lock); + cw1200_debug_tx_ttl(stats->priv); + cw1200_queue_register_post_gc(head, item); + item->skb = NULL; + list_move_tail(&item->head, &queue->free_pool); + } + + if (wakeup_stats) + wake_up(&stats->wait_link_id_empty); + + if (queue->overfull) { + if (queue->num_queued <= (queue->capacity >> 1)) { + queue->overfull = false; + if (unlock) + __cw1200_queue_unlock(queue); + } else if (item) { + unsigned long tmo = item->queue_timestamp + queue->ttl; + mod_timer(&queue->gc, tmo); + cw1200_pm_stay_awake(&stats->priv->pm_state, + tmo - jiffies); + } + } +} + +static void cw1200_queue_gc(unsigned long arg) +{ + LIST_HEAD(list); + struct cw1200_queue *queue = + (struct cw1200_queue *)arg; + + spin_lock_bh(&queue->lock); + __cw1200_queue_gc(queue, &list, true); + spin_unlock_bh(&queue->lock); + cw1200_queue_post_gc(queue->stats, &list); +} + +int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, + size_t map_capacity, + cw1200_queue_skb_dtor_t skb_dtor, + struct cw1200_common *priv) +{ + memset(stats, 0, sizeof(*stats)); + stats->map_capacity = map_capacity; + stats->skb_dtor = skb_dtor; + stats->priv = priv; + spin_lock_init(&stats->lock); + init_waitqueue_head(&stats->wait_link_id_empty); + + stats->link_map_cache = kzalloc(sizeof(int) * map_capacity, + GFP_KERNEL); + if (!stats->link_map_cache) + return -ENOMEM; + + return 0; +} + +int cw1200_queue_init(struct cw1200_queue *queue, + struct cw1200_queue_stats *stats, + u8 queue_id, + size_t capacity, + unsigned long ttl) +{ + size_t i; + + memset(queue, 0, sizeof(*queue)); + queue->stats = stats; + queue->capacity = capacity; + queue->queue_id = queue_id; + queue->ttl = ttl; + INIT_LIST_HEAD(&queue->queue); + INIT_LIST_HEAD(&queue->pending); + INIT_LIST_HEAD(&queue->free_pool); + spin_lock_init(&queue->lock); + init_timer(&queue->gc); + queue->gc.data = (unsigned long)queue; + queue->gc.function = cw1200_queue_gc; + + queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, + GFP_KERNEL); + if (!queue->pool) + return -ENOMEM; + + queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity, + GFP_KERNEL); + if (!queue->link_map_cache) { + kfree(queue->pool); + queue->pool = NULL; + return -ENOMEM; + } + + for (i = 0; i < capacity; ++i) + list_add_tail(&queue->pool[i].head, &queue->free_pool); + + return 0; +} + +int cw1200_queue_clear(struct cw1200_queue *queue) +{ + int i; + LIST_HEAD(gc_list); + struct cw1200_queue_stats *stats = queue->stats; + struct cw1200_queue_item *item, *tmp; + + spin_lock_bh(&queue->lock); + queue->generation++; + list_splice_tail_init(&queue->queue, &queue->pending); + list_for_each_entry_safe(item, tmp, &queue->pending, head) { + WARN_ON(!item->skb); + cw1200_queue_register_post_gc(&gc_list, item); + item->skb = NULL; + list_move_tail(&item->head, &queue->free_pool); + } + 
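+	/* Every item is now back in the free pool; reset the per-queue
+	 * counters and rebalance the shared stats under stats->lock.
+	 */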
queue->num_queued = 0; + queue->num_pending = 0; + + spin_lock_bh(&stats->lock); + for (i = 0; i < stats->map_capacity; ++i) { + stats->num_queued -= queue->link_map_cache[i]; + stats->link_map_cache[i] -= queue->link_map_cache[i]; + queue->link_map_cache[i] = 0; + } + spin_unlock_bh(&stats->lock); + if (queue->overfull) { + queue->overfull = false; + __cw1200_queue_unlock(queue); + } + spin_unlock_bh(&queue->lock); + wake_up(&stats->wait_link_id_empty); + cw1200_queue_post_gc(stats, &gc_list); + return 0; +} + +void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats) +{ + kfree(stats->link_map_cache); + stats->link_map_cache = NULL; +} + +void cw1200_queue_deinit(struct cw1200_queue *queue) +{ + cw1200_queue_clear(queue); + del_timer_sync(&queue->gc); + INIT_LIST_HEAD(&queue->free_pool); + kfree(queue->pool); + kfree(queue->link_map_cache); + queue->pool = NULL; + queue->link_map_cache = NULL; + queue->capacity = 0; +} + +size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, + u32 link_id_map) +{ + size_t ret; + int i, bit; + size_t map_capacity = queue->stats->map_capacity; + + if (!link_id_map) + return 0; + + spin_lock_bh(&queue->lock); + if (link_id_map == (u32)-1) { + ret = queue->num_queued - queue->num_pending; + } else { + ret = 0; + for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) { + if (link_id_map & bit) + ret += queue->link_map_cache[i]; + } + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_put(struct cw1200_queue *queue, + struct sk_buff *skb, + struct cw1200_txpriv *txpriv) +{ + int ret = 0; + LIST_HEAD(gc_list); + struct cw1200_queue_stats *stats = queue->stats; + + if (txpriv->link_id >= queue->stats->map_capacity) + return -EINVAL; + + spin_lock_bh(&queue->lock); + if (!WARN_ON(list_empty(&queue->free_pool))) { + struct cw1200_queue_item *item = list_first_entry( + &queue->free_pool, struct cw1200_queue_item, head); + BUG_ON(item->skb); + + list_move_tail(&item->head, &queue->queue); + item->skb = skb; + item->txpriv = *txpriv; + item->generation = 0; + item->packet_id = cw1200_queue_mk_packet_id(queue->generation, + queue->queue_id, + item->generation, + item - queue->pool); + item->queue_timestamp = jiffies; + + ++queue->num_queued; + ++queue->link_map_cache[txpriv->link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[txpriv->link_id]; + spin_unlock_bh(&stats->lock); + + /* TX may happen in parallel sometimes. + * Leave extra queue slots so we don't overflow. 
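+		 * One slot is kept in reserve for each additional present
+		 * CPU; e.g. with the default capacity of 16 and four CPUs
+		 * the queue is declared overfull at 13 queued frames.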
+ */ + if (queue->overfull == false && + queue->num_queued >= + (queue->capacity - (num_present_cpus() - 1))) { + queue->overfull = true; + __cw1200_queue_lock(queue); + mod_timer(&queue->gc, jiffies); + } + } else { + ret = -ENOENT; + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_get(struct cw1200_queue *queue, + u32 link_id_map, + struct wsm_tx **tx, + struct ieee80211_tx_info **tx_info, + const struct cw1200_txpriv **txpriv) +{ + int ret = -ENOENT; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + bool wakeup_stats = false; + + spin_lock_bh(&queue->lock); + list_for_each_entry(item, &queue->queue, head) { + if (link_id_map & BIT(item->txpriv.link_id)) { + ret = 0; + break; + } + } + + if (!WARN_ON(ret)) { + *tx = (struct wsm_tx *)item->skb->data; + *tx_info = IEEE80211_SKB_CB(item->skb); + *txpriv = &item->txpriv; + (*tx)->packet_id = __cpu_to_le32(item->packet_id); + list_move_tail(&item->head, &queue->pending); + ++queue->num_pending; + --queue->link_map_cache[item->txpriv.link_id]; + item->xmit_timestamp = jiffies; + + spin_lock_bh(&stats->lock); + --stats->num_queued; + if (!--stats->link_map_cache[item->txpriv.link_id]) + wakeup_stats = true; + spin_unlock_bh(&stats->lock); + } + spin_unlock_bh(&queue->lock); + if (wakeup_stats) + wake_up(&stats->wait_link_id_empty); + return ret; +} + +int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + + cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + --queue->num_pending; + ++queue->link_map_cache[item->txpriv.link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[item->txpriv.link_id]; + spin_unlock_bh(&stats->lock); + + item->generation = ++item_generation; + item->packet_id = cw1200_queue_mk_packet_id(queue_generation, + queue_id, + item_generation, + item_id); + list_move(&item->head, &queue->queue); + } + spin_unlock_bh(&queue->lock); + return ret; +} + +int cw1200_queue_requeue_all(struct cw1200_queue *queue) +{ + struct cw1200_queue_item *item, *tmp; + struct cw1200_queue_stats *stats = queue->stats; + spin_lock_bh(&queue->lock); + + list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) { + --queue->num_pending; + ++queue->link_map_cache[item->txpriv.link_id]; + + spin_lock_bh(&stats->lock); + ++stats->num_queued; + ++stats->link_map_cache[item->txpriv.link_id]; + spin_unlock_bh(&stats->lock); + + ++item->generation; + item->packet_id = cw1200_queue_mk_packet_id(queue->generation, + queue->queue_id, + item->generation, + item - queue->pool); + list_move(&item->head, &queue->queue); + } + spin_unlock_bh(&queue->lock); + + return 0; +} + +int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + struct cw1200_queue_stats *stats = queue->stats; + struct sk_buff *gc_skb = NULL; + struct cw1200_txpriv gc_txpriv; + + cw1200_queue_parse_id(packet_id, 
&queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + gc_txpriv = item->txpriv; + gc_skb = item->skb; + item->skb = NULL; + --queue->num_pending; + --queue->num_queued; + ++queue->num_sent; + ++item->generation; + /* Do not use list_move_tail here, but list_move: + * try to utilize cache row. + */ + list_move(&item->head, &queue->free_pool); + + if (queue->overfull && + (queue->num_queued <= (queue->capacity >> 1))) { + queue->overfull = false; + __cw1200_queue_unlock(queue); + } + } + spin_unlock_bh(&queue->lock); + + if (gc_skb) + stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv); + + return ret; +} + +int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, + struct sk_buff **skb, + const struct cw1200_txpriv **txpriv) +{ + int ret = 0; + u8 queue_generation, queue_id, item_generation, item_id; + struct cw1200_queue_item *item; + cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, + &item_generation, &item_id); + + item = &queue->pool[item_id]; + + spin_lock_bh(&queue->lock); + BUG_ON(queue_id != queue->queue_id); + if (queue_generation != queue->generation) { + ret = -ENOENT; + } else if (item_id >= (unsigned) queue->capacity) { + WARN_ON(1); + ret = -EINVAL; + } else if (item->generation != item_generation) { + WARN_ON(1); + ret = -ENOENT; + } else { + *skb = item->skb; + *txpriv = &item->txpriv; + } + spin_unlock_bh(&queue->lock); + return ret; +} + +void cw1200_queue_lock(struct cw1200_queue *queue) +{ + spin_lock_bh(&queue->lock); + __cw1200_queue_lock(queue); + spin_unlock_bh(&queue->lock); +} + +void cw1200_queue_unlock(struct cw1200_queue *queue) +{ + spin_lock_bh(&queue->lock); + __cw1200_queue_unlock(queue); + spin_unlock_bh(&queue->lock); +} + +bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, + unsigned long *timestamp, + u32 pending_frame_id) +{ + struct cw1200_queue_item *item; + bool ret; + + spin_lock_bh(&queue->lock); + ret = !list_empty(&queue->pending); + if (ret) { + list_for_each_entry(item, &queue->pending, head) { + if (item->packet_id != pending_frame_id) + if (time_before(item->xmit_timestamp, + *timestamp)) + *timestamp = item->xmit_timestamp; + } + } + spin_unlock_bh(&queue->lock); + return ret; +} + +bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats, + u32 link_id_map) +{ + bool empty = true; + + spin_lock_bh(&stats->lock); + if (link_id_map == (u32)-1) { + empty = stats->num_queued == 0; + } else { + int i; + for (i = 0; i < stats->map_capacity; ++i) { + if (link_id_map & BIT(i)) { + if (stats->link_map_cache[i]) { + empty = false; + break; + } + } + } + } + spin_unlock_bh(&stats->lock); + + return empty; +} diff --git a/drivers/net/wireless/cw1200/queue.h b/drivers/net/wireless/cw1200/queue.h new file mode 100644 index 000000000000..119f9c79c14e --- /dev/null +++ b/drivers/net/wireless/cw1200/queue.h @@ -0,0 +1,116 @@ +/* + * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the 
Free Software Foundation. + */ + +#ifndef CW1200_QUEUE_H_INCLUDED +#define CW1200_QUEUE_H_INCLUDED + +/* private */ struct cw1200_queue_item; + +/* extern */ struct sk_buff; +/* extern */ struct wsm_tx; +/* extern */ struct cw1200_common; +/* extern */ struct ieee80211_tx_queue_stats; +/* extern */ struct cw1200_txpriv; + +/* forward */ struct cw1200_queue_stats; + +typedef void (*cw1200_queue_skb_dtor_t)(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv); + +struct cw1200_queue { + struct cw1200_queue_stats *stats; + size_t capacity; + size_t num_queued; + size_t num_pending; + size_t num_sent; + struct cw1200_queue_item *pool; + struct list_head queue; + struct list_head free_pool; + struct list_head pending; + int tx_locked_cnt; + int *link_map_cache; + bool overfull; + spinlock_t lock; /* Protect queue entry */ + u8 queue_id; + u8 generation; + struct timer_list gc; + unsigned long ttl; +}; + +struct cw1200_queue_stats { + spinlock_t lock; /* Protect stats entry */ + int *link_map_cache; + int num_queued; + size_t map_capacity; + wait_queue_head_t wait_link_id_empty; + cw1200_queue_skb_dtor_t skb_dtor; + struct cw1200_common *priv; +}; + +struct cw1200_txpriv { + u8 link_id; + u8 raw_link_id; + u8 tid; + u8 rate_id; + u8 offset; +}; + +int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, + size_t map_capacity, + cw1200_queue_skb_dtor_t skb_dtor, + struct cw1200_common *priv); +int cw1200_queue_init(struct cw1200_queue *queue, + struct cw1200_queue_stats *stats, + u8 queue_id, + size_t capacity, + unsigned long ttl); +int cw1200_queue_clear(struct cw1200_queue *queue); +void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats); +void cw1200_queue_deinit(struct cw1200_queue *queue); + +size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, + u32 link_id_map); +int cw1200_queue_put(struct cw1200_queue *queue, + struct sk_buff *skb, + struct cw1200_txpriv *txpriv); +int cw1200_queue_get(struct cw1200_queue *queue, + u32 link_id_map, + struct wsm_tx **tx, + struct ieee80211_tx_info **tx_info, + const struct cw1200_txpriv **txpriv); +int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id); +int cw1200_queue_requeue_all(struct cw1200_queue *queue); +int cw1200_queue_remove(struct cw1200_queue *queue, + u32 packet_id); +int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, + struct sk_buff **skb, + const struct cw1200_txpriv **txpriv); +void cw1200_queue_lock(struct cw1200_queue *queue); +void cw1200_queue_unlock(struct cw1200_queue *queue); +bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, + unsigned long *timestamp, + u32 pending_frame_id); + +bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats, + u32 link_id_map); + +static inline u8 cw1200_queue_get_queue_id(u32 packet_id) +{ + return (packet_id >> 16) & 0xFF; +} + +static inline u8 cw1200_queue_get_generation(u32 packet_id) +{ + return (packet_id >> 8) & 0xFF; +} + +#endif /* CW1200_QUEUE_H_INCLUDED */ diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c new file mode 100644 index 000000000000..ee3c19037aac --- /dev/null +++ b/drivers/net/wireless/cw1200/scan.c @@ -0,0 +1,461 @@ +/* + * Scan implementation for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * 
published by the Free Software Foundation. + */ + +#include <linux/sched.h> +#include "cw1200.h" +#include "scan.h" +#include "sta.h" +#include "pm.h" + +static void cw1200_scan_restart_delayed(struct cw1200_common *priv); + +static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan) +{ + int ret, i; + int tmo = 2000; + + switch (priv->join_status) { + case CW1200_JOIN_STATUS_PRE_STA: + case CW1200_JOIN_STATUS_JOINING: + return -EBUSY; + default: + break; + } + + wiphy_dbg(priv->hw->wiphy, "[SCAN] hw req, type %d, %d channels, flags: 0x%x.\n", + scan->type, scan->num_channels, scan->flags); + + for (i = 0; i < scan->num_channels; ++i) + tmo += scan->ch[i].max_chan_time + 10; + + cancel_delayed_work_sync(&priv->clear_recent_scan_work); + atomic_set(&priv->scan.in_progress, 1); + atomic_set(&priv->recent_scan, 1); + cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000); + queue_delayed_work(priv->workqueue, &priv->scan.timeout, + tmo * HZ / 1000); + ret = wsm_scan(priv, scan); + if (ret) { + atomic_set(&priv->scan.in_progress, 0); + cancel_delayed_work_sync(&priv->scan.timeout); + cw1200_scan_restart_delayed(priv); + } + return ret; +} + +int cw1200_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct cw1200_common *priv = hw->priv; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST, + }; + int i, ret; + + if (!priv->vif) + return -EINVAL; + + /* Scan when P2P_GO corrupt firmware MiniAP mode */ + if (priv->join_status == CW1200_JOIN_STATUS_AP) + return -EOPNOTSUPP; + + if (req->n_ssids == 1 && !req->ssids[0].ssid_len) + req->n_ssids = 0; + + wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n", + req->n_ssids); + + if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS) + return -EINVAL; + + frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0, + req->ie_len); + if (!frame.skb) + return -ENOMEM; + + if (req->ie_len) + memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len); + + /* will be unlocked in cw1200_scan_work() */ + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + + ret = wsm_set_template_frame(priv, &frame); + if (!ret) { + /* Host want to be the probe responder. 
*/ + ret = wsm_set_probe_responder(priv, true); + } + if (ret) { + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); + dev_kfree_skb(frame.skb); + return ret; + } + + wsm_lock_tx(priv); + + BUG_ON(priv->scan.req); + priv->scan.req = req; + priv->scan.n_ssids = 0; + priv->scan.status = 0; + priv->scan.begin = &req->channels[0]; + priv->scan.curr = priv->scan.begin; + priv->scan.end = &req->channels[req->n_channels]; + priv->scan.output_power = priv->output_power; + + for (i = 0; i < req->n_ssids; ++i) { + struct wsm_ssid *dst = &priv->scan.ssids[priv->scan.n_ssids]; + memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid)); + dst->length = req->ssids[i].ssid_len; + ++priv->scan.n_ssids; + } + + mutex_unlock(&priv->conf_mutex); + + if (frame.skb) + dev_kfree_skb(frame.skb); + queue_work(priv->workqueue, &priv->scan.work); + return 0; +} + +void cw1200_scan_work(struct work_struct *work) +{ + struct cw1200_common *priv = container_of(work, struct cw1200_common, + scan.work); + struct ieee80211_channel **it; + struct wsm_scan scan = { + .type = WSM_SCAN_TYPE_FOREGROUND, + .flags = WSM_SCAN_FLAG_SPLIT_METHOD, + }; + bool first_run = (priv->scan.begin == priv->scan.curr && + priv->scan.begin != priv->scan.end); + int i; + + if (first_run) { + /* Firmware gets crazy if scan request is sent + * when STA is joined but not yet associated. + * Force unjoin in this case. + */ + if (cancel_delayed_work_sync(&priv->join_timeout) > 0) + cw1200_join_timeout(&priv->join_timeout.work); + } + + mutex_lock(&priv->conf_mutex); + + if (first_run) { + if (priv->join_status == CW1200_JOIN_STATUS_STA && + !(priv->powersave_mode.mode & WSM_PSM_PS)) { + struct wsm_set_pm pm = priv->powersave_mode; + pm.mode = WSM_PSM_PS; + cw1200_set_pm(priv, &pm); + } else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + /* FW bug: driver has to restart p2p-dev mode + * after scan + */ + cw1200_disable_listening(priv); + } + } + + if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) { + if (priv->scan.output_power != priv->output_power) + wsm_set_output_power(priv, priv->output_power * 10); + if (priv->join_status == CW1200_JOIN_STATUS_STA && + !(priv->powersave_mode.mode & WSM_PSM_PS)) + cw1200_set_pm(priv, &priv->powersave_mode); + + if (priv->scan.status < 0) + wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n", + priv->scan.status); + else if (priv->scan.req) + wiphy_dbg(priv->hw->wiphy, + "[SCAN] Scan completed.\n"); + else + wiphy_dbg(priv->hw->wiphy, + "[SCAN] Scan canceled.\n"); + + priv->scan.req = NULL; + cw1200_scan_restart_delayed(priv); + wsm_unlock_tx(priv); + mutex_unlock(&priv->conf_mutex); + ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0); + up(&priv->scan.lock); + return; + } else { + struct ieee80211_channel *first = *priv->scan.curr; + for (it = priv->scan.curr + 1, i = 1; + it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS; + ++it, ++i) { + if ((*it)->band != first->band) + break; + if (((*it)->flags ^ first->flags) & + IEEE80211_CHAN_PASSIVE_SCAN) + break; + if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) && + (*it)->max_power != first->max_power) + break; + } + scan.band = first->band; + + if (priv->scan.req->no_cck) + scan.max_tx_rate = WSM_TRANSMIT_RATE_6; + else + scan.max_tx_rate = WSM_TRANSMIT_RATE_1; + scan.num_probes = + (first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2; + scan.num_ssids = priv->scan.n_ssids; + scan.ssids = &priv->scan.ssids[0]; + scan.num_channels = it - priv->scan.curr; + /* TODO: Is it optimal? 
*/ + scan.probe_delay = 100; + /* It is not stated in WSM specification, however + * FW team says that driver may not use FG scan + * when joined. + */ + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + scan.type = WSM_SCAN_TYPE_BACKGROUND; + scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; + } + scan.ch = kzalloc( + sizeof(struct wsm_scan_ch) * (it - priv->scan.curr), + GFP_KERNEL); + if (!scan.ch) { + priv->scan.status = -ENOMEM; + goto fail; + } + for (i = 0; i < scan.num_channels; ++i) { + scan.ch[i].number = priv->scan.curr[i]->hw_value; + if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) { + scan.ch[i].min_chan_time = 50; + scan.ch[i].max_chan_time = 100; + } else { + scan.ch[i].min_chan_time = 10; + scan.ch[i].max_chan_time = 25; + } + } + if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) && + priv->scan.output_power != first->max_power) { + priv->scan.output_power = first->max_power; + wsm_set_output_power(priv, + priv->scan.output_power * 10); + } + priv->scan.status = cw1200_scan_start(priv, &scan); + kfree(scan.ch); + if (priv->scan.status) + goto fail; + priv->scan.curr = it; + } + mutex_unlock(&priv->conf_mutex); + return; + +fail: + priv->scan.curr = priv->scan.end; + mutex_unlock(&priv->conf_mutex); + queue_work(priv->workqueue, &priv->scan.work); + return; +} + +static void cw1200_scan_restart_delayed(struct cw1200_common *priv) +{ + /* FW bug: driver has to restart p2p-dev mode after scan. */ + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + cw1200_enable_listening(priv); + cw1200_update_filtering(priv); + } + + if (priv->delayed_unjoin) { + priv->delayed_unjoin = false; + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } else if (priv->delayed_link_loss) { + wiphy_dbg(priv->hw->wiphy, "[CQM] Requeue BSS loss.\n"); + priv->delayed_link_loss = 0; + cw1200_cqm_bssloss_sm(priv, 1, 0, 0); + } +} + +static void cw1200_scan_complete(struct cw1200_common *priv) +{ + queue_delayed_work(priv->workqueue, &priv->clear_recent_scan_work, HZ); + if (priv->scan.direct_probe) { + wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe complete.\n"); + cw1200_scan_restart_delayed(priv); + priv->scan.direct_probe = 0; + up(&priv->scan.lock); + wsm_unlock_tx(priv); + } else { + cw1200_scan_work(&priv->scan.work); + } +} + +void cw1200_scan_failed_cb(struct cw1200_common *priv) +{ + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + /* STA is stopped. */ + return; + + if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) { + priv->scan.status = -EIO; + queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0); + } +} + + +void cw1200_scan_complete_cb(struct cw1200_common *priv, + struct wsm_scan_complete *arg) +{ + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + /* STA is stopped. 
*/ + return; + + if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) { + priv->scan.status = 1; + queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0); + } +} + +void cw1200_clear_recent_scan_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, + clear_recent_scan_work.work); + atomic_xchg(&priv->recent_scan, 0); +} + +void cw1200_scan_timeout(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, scan.timeout.work); + if (atomic_xchg(&priv->scan.in_progress, 0)) { + if (priv->scan.status > 0) { + priv->scan.status = 0; + } else if (!priv->scan.status) { + wiphy_warn(priv->hw->wiphy, + "Timeout waiting for scan complete notification.\n"); + priv->scan.status = -ETIMEDOUT; + priv->scan.curr = priv->scan.end; + wsm_stop_scan(priv); + } + cw1200_scan_complete(priv); + } +} + +void cw1200_probe_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, scan.probe_work.work); + u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id); + struct cw1200_queue *queue = &priv->tx_queue[queue_id]; + const struct cw1200_txpriv *txpriv; + struct wsm_tx *wsm; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST, + }; + struct wsm_ssid ssids[1] = {{ + .length = 0, + } }; + struct wsm_scan_ch ch[1] = {{ + .min_chan_time = 0, + .max_chan_time = 10, + } }; + struct wsm_scan scan = { + .type = WSM_SCAN_TYPE_FOREGROUND, + .num_probes = 1, + .probe_delay = 0, + .num_channels = 1, + .ssids = ssids, + .ch = ch, + }; + u8 *ies; + size_t ies_len; + int ret; + + wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n"); + + mutex_lock(&priv->conf_mutex); + if (down_trylock(&priv->scan.lock)) { + /* Scan is already in progress. Requeue self. */ + schedule(); + queue_delayed_work(priv->workqueue, + &priv->scan.probe_work, HZ / 10); + mutex_unlock(&priv->conf_mutex); + return; + } + + /* Make sure we still have a pending probe req */ + if (cw1200_queue_get_skb(queue, priv->pending_frame_id, + &frame.skb, &txpriv)) { + up(&priv->scan.lock); + mutex_unlock(&priv->conf_mutex); + wsm_unlock_tx(priv); + return; + } + wsm = (struct wsm_tx *)frame.skb->data; + scan.max_tx_rate = wsm->max_tx_rate; + scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? + WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; + if (priv->join_status == CW1200_JOIN_STATUS_STA || + priv->join_status == CW1200_JOIN_STATUS_IBSS) { + scan.type = WSM_SCAN_TYPE_BACKGROUND; + scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; + } + ch[0].number = priv->channel->hw_value; + + skb_pull(frame.skb, txpriv->offset); + + ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)]; + ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr); + + if (ies_len) { + u8 *ssidie = + (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len); + if (ssidie && ssidie[1] && ssidie[1] <= sizeof(ssids[0].ssid)) { + u8 *nextie = &ssidie[2 + ssidie[1]]; + /* Remove SSID from the IE list. 
It has to be provided + * as a separate argument in cw1200_scan_start call + */ + + /* Store SSID localy */ + ssids[0].length = ssidie[1]; + memcpy(ssids[0].ssid, &ssidie[2], ssids[0].length); + scan.num_ssids = 1; + + /* Remove SSID from IE list */ + ssidie[1] = 0; + memmove(&ssidie[2], nextie, &ies[ies_len] - nextie); + skb_trim(frame.skb, frame.skb->len - ssids[0].length); + } + } + + /* FW bug: driver has to restart p2p-dev mode after scan */ + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) + cw1200_disable_listening(priv); + ret = wsm_set_template_frame(priv, &frame); + priv->scan.direct_probe = 1; + if (!ret) { + wsm_flush_tx(priv); + ret = cw1200_scan_start(priv, &scan); + } + mutex_unlock(&priv->conf_mutex); + + skb_push(frame.skb, txpriv->offset); + if (!ret) + IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK; + BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id)); + + if (ret) { + priv->scan.direct_probe = 0; + up(&priv->scan.lock); + wsm_unlock_tx(priv); + } + + return; +} diff --git a/drivers/net/wireless/cw1200/scan.h b/drivers/net/wireless/cw1200/scan.h new file mode 100644 index 000000000000..5a8296ccfa82 --- /dev/null +++ b/drivers/net/wireless/cw1200/scan.h @@ -0,0 +1,56 @@ +/* + * Scan interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef SCAN_H_INCLUDED +#define SCAN_H_INCLUDED + +#include <linux/semaphore.h> +#include "wsm.h" + +/* external */ struct sk_buff; +/* external */ struct cfg80211_scan_request; +/* external */ struct ieee80211_channel; +/* external */ struct ieee80211_hw; +/* external */ struct work_struct; + +struct cw1200_scan { + struct semaphore lock; + struct work_struct work; + struct delayed_work timeout; + struct cfg80211_scan_request *req; + struct ieee80211_channel **begin; + struct ieee80211_channel **curr; + struct ieee80211_channel **end; + struct wsm_ssid ssids[WSM_SCAN_MAX_NUM_OF_SSIDS]; + int output_power; + int n_ssids; + int status; + atomic_t in_progress; + /* Direct probe requests workaround */ + struct delayed_work probe_work; + int direct_probe; +}; + +int cw1200_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); +void cw1200_scan_work(struct work_struct *work); +void cw1200_scan_timeout(struct work_struct *work); +void cw1200_clear_recent_scan_work(struct work_struct *work); +void cw1200_scan_complete_cb(struct cw1200_common *priv, + struct wsm_scan_complete *arg); +void cw1200_scan_failed_cb(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* Raw probe requests TX workaround */ +void cw1200_probe_work(struct work_struct *work); + +#endif diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c new file mode 100644 index 000000000000..679c55f15c67 --- /dev/null +++ b/drivers/net/wireless/cw1200/sta.c @@ -0,0 +1,2406 @@ +/* + * Mac80211 STA API for ST-Ericsson CW1200 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/vmalloc.h> +#include <linux/sched.h> +#include <linux/firmware.h> +#include <linux/module.h> + +#include "cw1200.h" +#include "sta.h" +#include "fwio.h" +#include "bh.h" +#include "debug.h" + +#ifndef ERP_INFO_BYTE_OFFSET +#define ERP_INFO_BYTE_OFFSET 2 +#endif + +static void cw1200_do_join(struct cw1200_common *priv); +static void cw1200_do_unjoin(struct cw1200_common *priv); + +static int cw1200_upload_beacon(struct cw1200_common *priv); +static int cw1200_upload_pspoll(struct cw1200_common *priv); +static int cw1200_upload_null(struct cw1200_common *priv); +static int cw1200_upload_qosnull(struct cw1200_common *priv); +static int cw1200_start_ap(struct cw1200_common *priv); +static int cw1200_update_beaconing(struct cw1200_common *priv); +static int cw1200_enable_beaconing(struct cw1200_common *priv, + bool enable); +static void __cw1200_sta_notify(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + int link_id); +static int __cw1200_flush(struct cw1200_common *priv, bool drop); + +static inline void __cw1200_free_event_queue(struct list_head *list) +{ + struct cw1200_wsm_event *event, *tmp; + list_for_each_entry_safe(event, tmp, list, link) { + list_del(&event->link); + kfree(event); + } +} + +/* ******************************************************************** */ +/* STA API */ + +int cw1200_start(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + int ret = 0; + + cw1200_pm_stay_awake(&priv->pm_state, HZ); + + mutex_lock(&priv->conf_mutex); + + /* default EDCA */ + WSM_EDCA_SET(&priv->edca, 0, 0x0002, 0x0003, 0x0007, 47, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 1, 0x0002, 0x0007, 0x000f, 94, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 2, 0x0003, 0x000f, 0x03ff, 0, 0xc8, false); + WSM_EDCA_SET(&priv->edca, 3, 0x0007, 0x000f, 0x03ff, 0, 0xc8, false); + ret = wsm_set_edca_params(priv, &priv->edca); + if (ret) + goto out; + + ret = cw1200_set_uapsd_param(priv, &priv->edca); + if (ret) + goto out; + + priv->setbssparams_done = false; + + memcpy(priv->mac_addr, dev->wiphy->perm_addr, ETH_ALEN); + priv->mode = NL80211_IFTYPE_MONITOR; + priv->wep_default_key_id = -1; + + priv->cqm_beacon_loss_count = 10; + + ret = cw1200_setup_mac(priv); + if (ret) + goto out; + +out: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +void cw1200_stop(struct ieee80211_hw *dev) +{ + struct cw1200_common *priv = dev->priv; + LIST_HEAD(list); + int i; + + wsm_lock_tx(priv); + + while (down_trylock(&priv->scan.lock)) { + /* Scan is in progress. Force it to stop. 
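+		 * Clearing scan.req makes cw1200_scan_work() finish up,
+		 * report the scan as cancelled and release scan.lock so
+		 * this loop can grab it.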
*/ + priv->scan.req = NULL; + schedule(); + } + up(&priv->scan.lock); + + cancel_delayed_work_sync(&priv->scan.probe_work); + cancel_delayed_work_sync(&priv->scan.timeout); + cancel_delayed_work_sync(&priv->clear_recent_scan_work); + cancel_delayed_work_sync(&priv->join_timeout); + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + cancel_work_sync(&priv->unjoin_work); + cancel_delayed_work_sync(&priv->link_id_gc_work); + flush_workqueue(priv->workqueue); + del_timer_sync(&priv->mcast_timeout); + mutex_lock(&priv->conf_mutex); + priv->mode = NL80211_IFTYPE_UNSPECIFIED; + priv->listening = false; + + spin_lock(&priv->event_queue_lock); + list_splice_init(&priv->event_queue, &list); + spin_unlock(&priv->event_queue_lock); + __cw1200_free_event_queue(&list); + + + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + priv->join_pending = false; + + for (i = 0; i < 4; i++) + cw1200_queue_clear(&priv->tx_queue[i]); + mutex_unlock(&priv->conf_mutex); + tx_policy_clean(priv); + + /* HACK! */ + if (atomic_xchg(&priv->tx_lock, 1) != 1) + pr_debug("[STA] TX is force-unlocked due to stop request.\n"); + + wsm_unlock_tx(priv); + atomic_xchg(&priv->tx_lock, 0); /* for recovery to work */ +} + +static int cw1200_bssloss_mitigation = 1; +module_param(cw1200_bssloss_mitigation, int, 0644); +MODULE_PARM_DESC(cw1200_bssloss_mitigation, "BSS Loss mitigation. 0 == disabled, 1 == enabled (default)"); + + +void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, + int init, int good, int bad) +{ + int tx = 0; + + priv->delayed_link_loss = 0; + cancel_work_sync(&priv->bss_params_work); + + pr_debug("[STA] CQM BSSLOSS_SM: state: %d init %d good %d bad: %d txlock: %d uj: %d\n", + priv->bss_loss_state, + init, good, bad, + atomic_read(&priv->tx_lock), + priv->delayed_unjoin); + + /* If we have a pending unjoin */ + if (priv->delayed_unjoin) + return; + + if (init) { + queue_delayed_work(priv->workqueue, + &priv->bss_loss_work, + HZ); + priv->bss_loss_state = 0; + + /* Skip the confimration procedure in P2P case */ + if (!priv->vif->p2p && !atomic_read(&priv->tx_lock)) + tx = 1; + } else if (good) { + cancel_delayed_work_sync(&priv->bss_loss_work); + priv->bss_loss_state = 0; + queue_work(priv->workqueue, &priv->bss_params_work); + } else if (bad) { + /* XXX Should we just keep going until we time out? 
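+		 * As written, we stop after three unconfirmed NULL frames
+		 * rather than retrying indefinitely.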
*/ + if (priv->bss_loss_state < 3) + tx = 1; + } else { + cancel_delayed_work_sync(&priv->bss_loss_work); + priv->bss_loss_state = 0; + } + + /* Bypass mitigation if it's disabled */ + if (!cw1200_bssloss_mitigation) + tx = 0; + + /* Spit out a NULL packet to our AP if necessary */ + if (tx) { + struct sk_buff *skb; + + priv->bss_loss_state++; + + skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + WARN_ON(!skb); + if (skb) + cw1200_tx(priv->hw, NULL, skb); + } +} + +int cw1200_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + int ret; + struct cw1200_common *priv = dev->priv; + /* __le32 auto_calibration_mode = __cpu_to_le32(1); */ + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + + mutex_lock(&priv->conf_mutex); + + if (priv->mode != NL80211_IFTYPE_MONITOR) { + mutex_unlock(&priv->conf_mutex); + return -EOPNOTSUPP; + } + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_AP: + priv->mode = vif->type; + break; + default: + mutex_unlock(&priv->conf_mutex); + return -EOPNOTSUPP; + } + + priv->vif = vif; + memcpy(priv->mac_addr, vif->addr, ETH_ALEN); + ret = cw1200_setup_mac(priv); + /* Enable auto-calibration */ + /* Exception in subsequent channel switch; disabled. + * wsm_write_mib(priv, WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE, + * &auto_calibration_mode, sizeof(auto_calibration_mode)); + */ + + mutex_unlock(&priv->conf_mutex); + return ret; +} + +void cw1200_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif) +{ + struct cw1200_common *priv = dev->priv; + struct wsm_reset reset = { + .reset_statistics = true, + }; + int i; + + mutex_lock(&priv->conf_mutex); + switch (priv->join_status) { + case CW1200_JOIN_STATUS_JOINING: + case CW1200_JOIN_STATUS_PRE_STA: + case CW1200_JOIN_STATUS_STA: + case CW1200_JOIN_STATUS_IBSS: + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + break; + case CW1200_JOIN_STATUS_AP: + for (i = 0; priv->link_id_map; ++i) { + if (priv->link_id_map & BIT(i)) { + reset.link_id = i; + wsm_reset(priv, &reset); + priv->link_id_map &= ~BIT(i); + } + } + memset(priv->link_id_db, 0, sizeof(priv->link_id_db)); + priv->sta_asleep_mask = 0; + priv->enable_beacon = false; + priv->tx_multicast = false; + priv->aid0_bit_set = false; + priv->buffered_multicasts = false; + priv->pspoll_mask = 0; + reset.link_id = 0; + wsm_reset(priv, &reset); + break; + case CW1200_JOIN_STATUS_MONITOR: + cw1200_update_listening(priv, false); + break; + default: + break; + } + priv->vif = NULL; + priv->mode = NL80211_IFTYPE_MONITOR; + memset(priv->mac_addr, 0, ETH_ALEN); + memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo)); + cw1200_free_keys(priv); + cw1200_setup_mac(priv); + priv->listening = false; + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + if (!__cw1200_flush(priv, true)) + wsm_unlock_tx(priv); + + mutex_unlock(&priv->conf_mutex); +} + +int cw1200_change_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, + bool p2p) +{ + int ret = 0; + pr_debug("change_interface new: %d (%d), old: %d (%d)\n", new_type, + p2p, vif->type, vif->p2p); + + if (new_type != vif->type || vif->p2p != p2p) { + cw1200_remove_interface(dev, vif); + vif->type = new_type; + vif->p2p = p2p; + ret = cw1200_add_interface(dev, vif); + } + + return ret; +} + +int cw1200_config(struct ieee80211_hw *dev, u32 changed) +{ + int ret = 0; + struct 
cw1200_common *priv = dev->priv; + struct ieee80211_conf *conf = &dev->conf; + + pr_debug("CONFIG CHANGED: %08x\n", changed); + + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + /* TODO: IEEE80211_CONF_CHANGE_QOS */ + /* TODO: IEEE80211_CONF_CHANGE_LISTEN_INTERVAL */ + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + priv->output_power = conf->power_level; + pr_debug("[STA] TX power: %d\n", priv->output_power); + wsm_set_output_power(priv, priv->output_power * 10); + } + + if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) && + (priv->channel != conf->chandef.chan)) { + struct ieee80211_channel *ch = conf->chandef.chan; + struct wsm_switch_channel channel = { + .channel_number = ch->hw_value, + }; + pr_debug("[STA] Freq %d (wsm ch: %d).\n", + ch->center_freq, ch->hw_value); + + /* __cw1200_flush() implicitly locks tx, if successful */ + if (!__cw1200_flush(priv, false)) { + if (!wsm_switch_channel(priv, &channel)) { + ret = wait_event_timeout(priv->channel_switch_done, + !priv->channel_switch_in_progress, + 3 * HZ); + if (ret) { + /* Already unlocks if successful */ + priv->channel = ch; + ret = 0; + } else { + ret = -ETIMEDOUT; + } + } else { + /* Unlock if switch channel fails */ + wsm_unlock_tx(priv); + } + } + } + + if (changed & IEEE80211_CONF_CHANGE_PS) { + if (!(conf->flags & IEEE80211_CONF_PS)) + priv->powersave_mode.mode = WSM_PSM_ACTIVE; + else if (conf->dynamic_ps_timeout <= 0) + priv->powersave_mode.mode = WSM_PSM_PS; + else + priv->powersave_mode.mode = WSM_PSM_FAST_PS; + + /* Firmware requires that value for this 1-byte field must + * be specified in units of 500us. Values above the 128ms + * threshold are not supported. + */ + if (conf->dynamic_ps_timeout >= 0x80) + priv->powersave_mode.fast_psm_idle_period = 0xFF; + else + priv->powersave_mode.fast_psm_idle_period = + conf->dynamic_ps_timeout << 1; + + if (priv->join_status == CW1200_JOIN_STATUS_STA && + priv->bss_params.aid) + cw1200_set_pm(priv, &priv->powersave_mode); + } + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + /* TBD: It looks like it's transparent + * there's a monitor interface present -- use this + * to determine for example whether to calculate + * timestamps for packets or not, do not use instead + * of filter flags! + */ + } + + if (changed & IEEE80211_CONF_CHANGE_IDLE) { + struct wsm_operational_mode mode = { + .power_mode = cw1200_power_mode, + .disable_more_flag_usage = true, + }; + + wsm_lock_tx(priv); + /* Disable p2p-dev mode forced by TX request */ + if ((priv->join_status == CW1200_JOIN_STATUS_MONITOR) && + (conf->flags & IEEE80211_CONF_IDLE) && + !priv->listening) { + cw1200_disable_listening(priv); + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + } + wsm_set_operational_mode(priv, &mode); + wsm_unlock_tx(priv); + } + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { + pr_debug("[STA] Retry limits: %d (long), %d (short).\n", + conf->long_frame_max_tx_count, + conf->short_frame_max_tx_count); + spin_lock_bh(&priv->tx_policy_cache.lock); + priv->long_frame_max_tx_count = conf->long_frame_max_tx_count; + priv->short_frame_max_tx_count = + (conf->short_frame_max_tx_count < 0x0F) ? 
+ conf->short_frame_max_tx_count : 0x0F; + priv->hw->max_rate_tries = priv->short_frame_max_tx_count; + spin_unlock_bh(&priv->tx_policy_cache.lock); + } + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); + return ret; +} + +void cw1200_update_filtering(struct cw1200_common *priv) +{ + int ret; + bool bssid_filtering = !priv->rx_filter.bssid; + bool is_p2p = priv->vif && priv->vif->p2p; + bool is_sta = priv->vif && NL80211_IFTYPE_STATION == priv->vif->type; + + static struct wsm_beacon_filter_control bf_ctrl; + static struct wsm_mib_beacon_filter_table bf_tbl = { + .entry[0].ie_id = WLAN_EID_VENDOR_SPECIFIC, + .entry[0].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | + WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | + WSM_BEACON_FILTER_IE_HAS_APPEARED, + .entry[0].oui[0] = 0x50, + .entry[0].oui[1] = 0x6F, + .entry[0].oui[2] = 0x9A, + .entry[1].ie_id = WLAN_EID_HT_OPERATION, + .entry[1].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | + WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | + WSM_BEACON_FILTER_IE_HAS_APPEARED, + .entry[2].ie_id = WLAN_EID_ERP_INFO, + .entry[2].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | + WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | + WSM_BEACON_FILTER_IE_HAS_APPEARED, + }; + + if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) + return; + else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) + bssid_filtering = false; + + if (priv->disable_beacon_filter) { + bf_ctrl.enabled = 0; + bf_ctrl.bcn_count = 1; + bf_tbl.num = __cpu_to_le32(0); + } else if (is_p2p || !is_sta) { + bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE | + WSM_BEACON_FILTER_AUTO_ERP; + bf_ctrl.bcn_count = 0; + bf_tbl.num = __cpu_to_le32(2); + } else { + bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE; + bf_ctrl.bcn_count = 0; + bf_tbl.num = __cpu_to_le32(3); + } + + /* + * When acting as p2p client being connected to p2p GO, in order to + * receive frames from a different p2p device, turn off bssid filter. + * + * WARNING: FW dependency! + * This can only be used with FW WSM371 and its successors. + * In that FW version even with bssid filter turned off, + * device will block most of the unwanted frames. + */ + if (is_p2p) + bssid_filtering = false; + + ret = wsm_set_rx_filter(priv, &priv->rx_filter); + if (!ret) + ret = wsm_set_beacon_filter_table(priv, &bf_tbl); + if (!ret) + ret = wsm_beacon_filter_control(priv, &bf_ctrl); + if (!ret) + ret = wsm_set_bssid_filtering(priv, bssid_filtering); + if (!ret) + ret = wsm_set_multicast_filter(priv, &priv->multicast_filter); + if (ret) + wiphy_err(priv->hw->wiphy, + "Update filtering failed: %d.\n", ret); + return; +} + +void cw1200_update_filtering_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, + update_filtering_work); + + cw1200_update_filtering(priv); +} + +void cw1200_set_beacon_wakeup_period_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, + set_beacon_wakeup_period_work); + + wsm_set_beacon_wakeup_period(priv, + priv->beacon_int * priv->join_dtim_period > + MAX_BEACON_SKIP_TIME_MS ? 
1 : + priv->join_dtim_period, 0); +} + +u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) +{ + static u8 broadcast_ipv6[ETH_ALEN] = { + 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 + }; + static u8 broadcast_ipv4[ETH_ALEN] = { + 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 + }; + struct cw1200_common *priv = hw->priv; + struct netdev_hw_addr *ha; + int count = 0; + + /* Disable multicast filtering */ + priv->has_multicast_subscription = false; + memset(&priv->multicast_filter, 0x00, sizeof(priv->multicast_filter)); + + if (netdev_hw_addr_list_count(mc_list) > WSM_MAX_GRP_ADDRTABLE_ENTRIES) + return 0; + + /* Enable if requested */ + netdev_hw_addr_list_for_each(ha, mc_list) { + pr_debug("[STA] multicast: %pM\n", ha->addr); + memcpy(&priv->multicast_filter.macaddrs[count], + ha->addr, ETH_ALEN); + if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) && + memcmp(ha->addr, broadcast_ipv6, ETH_ALEN)) + priv->has_multicast_subscription = true; + count++; + } + + if (count) { + priv->multicast_filter.enable = __cpu_to_le32(1); + priv->multicast_filter.num_addrs = __cpu_to_le32(count); + } + + return netdev_hw_addr_list_count(mc_list); +} + +void cw1200_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct cw1200_common *priv = dev->priv; + bool listening = !!(*total_flags & + (FIF_PROMISC_IN_BSS | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC | + FIF_PROBE_REQ)); + + *total_flags &= FIF_PROMISC_IN_BSS | + FIF_OTHER_BSS | + FIF_FCSFAIL | + FIF_BCN_PRBRESP_PROMISC | + FIF_PROBE_REQ; + + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + + priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS) + ? 1 : 0; + priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS | + FIF_PROBE_REQ)) ? 1 : 0; + priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 
1 : 0; + priv->disable_beacon_filter = !(*total_flags & + (FIF_BCN_PRBRESP_PROMISC | + FIF_PROMISC_IN_BSS | + FIF_PROBE_REQ)); + if (priv->listening != listening) { + priv->listening = listening; + wsm_lock_tx(priv); + cw1200_update_listening(priv, listening); + wsm_unlock_tx(priv); + } + cw1200_update_filtering(priv); + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); +} + +int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params) +{ + struct cw1200_common *priv = dev->priv; + int ret = 0; + /* To prevent re-applying PM request OID again and again*/ + bool old_uapsd_flags; + + mutex_lock(&priv->conf_mutex); + + if (queue < dev->queues) { + old_uapsd_flags = priv->uapsd_info.uapsd_flags; + + WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0); + ret = wsm_set_tx_queue_params(priv, + &priv->tx_queue_params.params[queue], queue); + if (ret) { + ret = -EINVAL; + goto out; + } + + WSM_EDCA_SET(&priv->edca, queue, params->aifs, + params->cw_min, params->cw_max, + params->txop, 0xc8, + params->uapsd); + ret = wsm_set_edca_params(priv, &priv->edca); + if (ret) { + ret = -EINVAL; + goto out; + } + + if (priv->mode == NL80211_IFTYPE_STATION) { + ret = cw1200_set_uapsd_param(priv, &priv->edca); + if (!ret && priv->setbssparams_done && + (priv->join_status == CW1200_JOIN_STATUS_STA) && + (old_uapsd_flags != priv->uapsd_info.uapsd_flags)) + ret = cw1200_set_pm(priv, &priv->powersave_mode); + } + } else { + ret = -EINVAL; + } + +out: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +int cw1200_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats) +{ + struct cw1200_common *priv = dev->priv; + + memcpy(stats, &priv->stats, sizeof(*stats)); + return 0; +} + +int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg) +{ + struct wsm_set_pm pm = *arg; + + if (priv->uapsd_info.uapsd_flags != 0) + pm.mode &= ~WSM_PSM_FAST_PS_FLAG; + + if (memcmp(&pm, &priv->firmware_ps_mode, + sizeof(struct wsm_set_pm))) { + priv->firmware_ps_mode = pm; + return wsm_set_pm(priv, &pm); + } else { + return 0; + } +} + +int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + int ret = -EOPNOTSUPP; + struct cw1200_common *priv = dev->priv; + struct ieee80211_key_seq seq; + + mutex_lock(&priv->conf_mutex); + + if (cmd == SET_KEY) { + u8 *peer_addr = NULL; + int pairwise = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) ? 
+ 1 : 0; + int idx = cw1200_alloc_key(priv); + struct wsm_add_key *wsm_key = &priv->keys[idx]; + + if (idx < 0) { + ret = -EINVAL; + goto finally; + } + + if (sta) + peer_addr = sta->addr; + + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + if (key->keylen > 16) { + cw1200_free_key(priv, idx); + ret = -EINVAL; + goto finally; + } + + if (pairwise) { + wsm_key->type = WSM_KEY_TYPE_WEP_PAIRWISE; + memcpy(wsm_key->wep_pairwise.peer, + peer_addr, ETH_ALEN); + memcpy(wsm_key->wep_pairwise.keydata, + &key->key[0], key->keylen); + wsm_key->wep_pairwise.keylen = key->keylen; + } else { + wsm_key->type = WSM_KEY_TYPE_WEP_DEFAULT; + memcpy(wsm_key->wep_group.keydata, + &key->key[0], key->keylen); + wsm_key->wep_group.keylen = key->keylen; + wsm_key->wep_group.keyid = key->keyidx; + } + break; + case WLAN_CIPHER_SUITE_TKIP: + ieee80211_get_key_rx_seq(key, 0, &seq); + if (pairwise) { + wsm_key->type = WSM_KEY_TYPE_TKIP_PAIRWISE; + memcpy(wsm_key->tkip_pairwise.peer, + peer_addr, ETH_ALEN); + memcpy(wsm_key->tkip_pairwise.keydata, + &key->key[0], 16); + memcpy(wsm_key->tkip_pairwise.tx_mic_key, + &key->key[16], 8); + memcpy(wsm_key->tkip_pairwise.rx_mic_key, + &key->key[24], 8); + } else { + size_t mic_offset = + (priv->mode == NL80211_IFTYPE_AP) ? + 16 : 24; + wsm_key->type = WSM_KEY_TYPE_TKIP_GROUP; + memcpy(wsm_key->tkip_group.keydata, + &key->key[0], 16); + memcpy(wsm_key->tkip_group.rx_mic_key, + &key->key[mic_offset], 8); + + wsm_key->tkip_group.rx_seqnum[0] = seq.tkip.iv16 & 0xff; + wsm_key->tkip_group.rx_seqnum[1] = (seq.tkip.iv16 >> 8) & 0xff; + wsm_key->tkip_group.rx_seqnum[2] = seq.tkip.iv32 & 0xff; + wsm_key->tkip_group.rx_seqnum[3] = (seq.tkip.iv32 >> 8) & 0xff; + wsm_key->tkip_group.rx_seqnum[4] = (seq.tkip.iv32 >> 16) & 0xff; + wsm_key->tkip_group.rx_seqnum[5] = (seq.tkip.iv32 >> 24) & 0xff; + wsm_key->tkip_group.rx_seqnum[6] = 0; + wsm_key->tkip_group.rx_seqnum[7] = 0; + + wsm_key->tkip_group.keyid = key->keyidx; + } + break; + case WLAN_CIPHER_SUITE_CCMP: + ieee80211_get_key_rx_seq(key, 0, &seq); + if (pairwise) { + wsm_key->type = WSM_KEY_TYPE_AES_PAIRWISE; + memcpy(wsm_key->aes_pairwise.peer, + peer_addr, ETH_ALEN); + memcpy(wsm_key->aes_pairwise.keydata, + &key->key[0], 16); + } else { + wsm_key->type = WSM_KEY_TYPE_AES_GROUP; + memcpy(wsm_key->aes_group.keydata, + &key->key[0], 16); + + wsm_key->aes_group.rx_seqnum[0] = seq.ccmp.pn[5]; + wsm_key->aes_group.rx_seqnum[1] = seq.ccmp.pn[4]; + wsm_key->aes_group.rx_seqnum[2] = seq.ccmp.pn[3]; + wsm_key->aes_group.rx_seqnum[3] = seq.ccmp.pn[2]; + wsm_key->aes_group.rx_seqnum[4] = seq.ccmp.pn[1]; + wsm_key->aes_group.rx_seqnum[5] = seq.ccmp.pn[0]; + wsm_key->aes_group.rx_seqnum[6] = 0; + wsm_key->aes_group.rx_seqnum[7] = 0; + wsm_key->aes_group.keyid = key->keyidx; + } + break; + case WLAN_CIPHER_SUITE_SMS4: + if (pairwise) { + wsm_key->type = WSM_KEY_TYPE_WAPI_PAIRWISE; + memcpy(wsm_key->wapi_pairwise.peer, + peer_addr, ETH_ALEN); + memcpy(wsm_key->wapi_pairwise.keydata, + &key->key[0], 16); + memcpy(wsm_key->wapi_pairwise.mic_key, + &key->key[16], 16); + wsm_key->wapi_pairwise.keyid = key->keyidx; + } else { + wsm_key->type = WSM_KEY_TYPE_WAPI_GROUP; + memcpy(wsm_key->wapi_group.keydata, + &key->key[0], 16); + memcpy(wsm_key->wapi_group.mic_key, + &key->key[16], 16); + wsm_key->wapi_group.keyid = key->keyidx; + } + break; + default: + pr_warn("Unhandled key type %d\n", key->cipher); + cw1200_free_key(priv, idx); + ret = -EOPNOTSUPP; + goto 
finally; + } + ret = wsm_add_key(priv, wsm_key); + if (!ret) + key->hw_key_idx = idx; + else + cw1200_free_key(priv, idx); + } else if (cmd == DISABLE_KEY) { + struct wsm_remove_key wsm_key = { + .index = key->hw_key_idx, + }; + + if (wsm_key.index > WSM_KEY_MAX_INDEX) { + ret = -EINVAL; + goto finally; + } + + cw1200_free_key(priv, wsm_key.index); + ret = wsm_remove_key(priv, &wsm_key); + } else { + pr_warn("Unhandled key command %d\n", cmd); + } + +finally: + mutex_unlock(&priv->conf_mutex); + return ret; +} + +void cw1200_wep_key_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, wep_key_work); + u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id); + struct cw1200_queue *queue = &priv->tx_queue[queue_id]; + __le32 wep_default_key_id = __cpu_to_le32( + priv->wep_default_key_id); + + pr_debug("[STA] Setting default WEP key: %d\n", + priv->wep_default_key_id); + wsm_flush_tx(priv); + wsm_write_mib(priv, WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID, + &wep_default_key_id, sizeof(wep_default_key_id)); + cw1200_queue_requeue(queue, priv->pending_frame_id); + wsm_unlock_tx(priv); +} + +int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + int ret = 0; + __le32 val32; + struct cw1200_common *priv = hw->priv; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) + return 0; + + if (value != (u32) -1) + val32 = __cpu_to_le32(value); + else + val32 = 0; /* disabled */ + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* device is down, can _not_ set threshold */ + ret = -ENODEV; + goto out; + } + + if (priv->rts_threshold == value) + goto out; + + pr_debug("[STA] Setting RTS threshold: %d\n", + priv->rts_threshold); + + /* mutex_lock(&priv->conf_mutex); */ + ret = wsm_write_mib(priv, WSM_MIB_ID_DOT11_RTS_THRESHOLD, + &val32, sizeof(val32)); + if (!ret) + priv->rts_threshold = value; + /* mutex_unlock(&priv->conf_mutex); */ + +out: + return ret; +} + +/* If successful, LOCKS the TX queue! */ +static int __cw1200_flush(struct cw1200_common *priv, bool drop) +{ + int i, ret; + + for (;;) { + /* TODO: correct flush handling is required when dev_stop. + * Temporary workaround: 2s + */ + if (drop) { + for (i = 0; i < 4; ++i) + cw1200_queue_clear(&priv->tx_queue[i]); + } else { + ret = wait_event_timeout( + priv->tx_queue_stats.wait_link_id_empty, + cw1200_queue_stats_is_empty( + &priv->tx_queue_stats, -1), + 2 * HZ); + } + + if (!drop && ret <= 0) { + ret = -ETIMEDOUT; + break; + } else { + ret = 0; + } + + wsm_lock_tx(priv); + if (!cw1200_queue_stats_is_empty(&priv->tx_queue_stats, -1)) { + /* Highly unlikely: WSM requeued frames. 
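+ * Drop the TX lock and go through the wait loop again until the queues are really empty.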
*/ + wsm_unlock_tx(priv); + continue; + } + break; + } + return ret; +} + +void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +{ + struct cw1200_common *priv = hw->priv; + + switch (priv->mode) { + case NL80211_IFTYPE_MONITOR: + drop = true; + break; + case NL80211_IFTYPE_AP: + if (!priv->enable_beacon) + drop = true; + break; + } + + if (!__cw1200_flush(priv, drop)) + wsm_unlock_tx(priv); + + return; +} + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_free_event_queue(struct cw1200_common *priv) +{ + LIST_HEAD(list); + + spin_lock(&priv->event_queue_lock); + list_splice_init(&priv->event_queue, &list); + spin_unlock(&priv->event_queue_lock); + + __cw1200_free_event_queue(&list); +} + +void cw1200_event_handler(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, event_handler); + struct cw1200_wsm_event *event; + LIST_HEAD(list); + + spin_lock(&priv->event_queue_lock); + list_splice_init(&priv->event_queue, &list); + spin_unlock(&priv->event_queue_lock); + + list_for_each_entry(event, &list, link) { + switch (event->evt.id) { + case WSM_EVENT_ERROR: + pr_err("Unhandled WSM Error from LMAC\n"); + break; + case WSM_EVENT_BSS_LOST: + pr_debug("[CQM] BSS lost.\n"); + cancel_work_sync(&priv->unjoin_work); + if (!down_trylock(&priv->scan.lock)) { + cw1200_cqm_bssloss_sm(priv, 1, 0, 0); + up(&priv->scan.lock); + } else { + /* Scan is in progress. Delay reporting. + * Scan complete will trigger bss_loss_work + */ + priv->delayed_link_loss = 1; + /* Also start a watchdog. */ + queue_delayed_work(priv->workqueue, + &priv->bss_loss_work, 5*HZ); + } + break; + case WSM_EVENT_BSS_REGAINED: + pr_debug("[CQM] BSS regained.\n"); + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + cancel_work_sync(&priv->unjoin_work); + break; + case WSM_EVENT_RADAR_DETECTED: + wiphy_info(priv->hw->wiphy, "radar pulse detected\n"); + break; + case WSM_EVENT_RCPI_RSSI: + { + /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 + * RSSI = RCPI / 2 - 110 + */ + int rcpiRssi = (int)(event->evt.data & 0xFF); + int cqm_evt; + if (priv->cqm_use_rssi) + rcpiRssi = (s8)rcpiRssi; + else + rcpiRssi = rcpiRssi / 2 - 110; + + cqm_evt = (rcpiRssi <= priv->cqm_rssi_thold) ? 
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW : + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; + pr_debug("[CQM] RSSI event: %d.\n", rcpiRssi); + ieee80211_cqm_rssi_notify(priv->vif, cqm_evt, + GFP_KERNEL); + break; + } + case WSM_EVENT_BT_INACTIVE: + pr_warn("Unhandled BT INACTIVE from LMAC\n"); + break; + case WSM_EVENT_BT_ACTIVE: + pr_warn("Unhandled BT ACTIVE from LMAC\n"); + break; + } + } + __cw1200_free_event_queue(&list); +} + +void cw1200_bss_loss_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, bss_loss_work.work); + + pr_debug("[CQM] Reporting connection loss.\n"); + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); +} + +void cw1200_bss_params_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, bss_params_work); + mutex_lock(&priv->conf_mutex); + + priv->bss_params.reset_beacon_loss = 1; + wsm_set_bss_params(priv, &priv->bss_params); + priv->bss_params.reset_beacon_loss = 0; + + mutex_unlock(&priv->conf_mutex); +} + +/* ******************************************************************** */ +/* Internal API */ + +/* + * This function is called to Parse the SDD file + * to extract listen_interval and PTA related information + * sdd is a TLV: u8 id, u8 len, u8 data[] + */ +static int cw1200_parse_sdd_file(struct cw1200_common *priv) +{ + const u8 *p = priv->sdd->data; + int ret = 0; + + while (p + 2 <= priv->sdd->data + priv->sdd->size) { + if (p + p[1] + 2 > priv->sdd->data + priv->sdd->size) { + pr_warn("Malformed sdd structure\n"); + return -1; + } + switch (p[0]) { + case SDD_PTA_CFG_ELT_ID: { + u16 v; + if (p[1] < 4) { + pr_warn("SDD_PTA_CFG_ELT_ID malformed\n"); + ret = -1; + break; + } + v = le16_to_cpu(*((u16 *)(p + 2))); + if (!v) /* non-zero means this is enabled */ + break; + + v = le16_to_cpu(*((u16 *)(p + 4))); + priv->conf_listen_interval = (v >> 7) & 0x1F; + pr_debug("PTA found; Listen Interval %d\n", + priv->conf_listen_interval); + break; + } + case SDD_REFERENCE_FREQUENCY_ELT_ID: { + u16 clk = le16_to_cpu(*((u16 *)(p + 2))); + if (clk != priv->hw_refclk) + pr_warn("SDD file doesn't match configured refclk (%d vs %d)\n", + clk, priv->hw_refclk); + break; + } + default: + break; + } + p += p[1] + 2; + } + + if (!priv->bt_present) { + pr_debug("PTA element NOT found.\n"); + priv->conf_listen_interval = 0; + } + return ret; +} + +int cw1200_setup_mac(struct cw1200_common *priv) +{ + int ret = 0; + + /* NOTE: There is a bug in FW: it reports signal + * as RSSI if RSSI subscription is enabled. + * It's not enough to set WSM_RCPI_RSSI_USE_RSSI. 
+ * + * NOTE2: RSSI based reports have been switched to RCPI, since + * FW has a bug and RSSI reported values are not stable, + * which can lead to signal level oscillations in user-space applications + */ + struct wsm_rcpi_rssi_threshold threshold = { + .rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE | + WSM_RCPI_RSSI_DONT_USE_UPPER | + WSM_RCPI_RSSI_DONT_USE_LOWER, + .rollingAverageCount = 16, + }; + + struct wsm_configuration cfg = { + .dot11StationId = &priv->mac_addr[0], + }; + + /* Remember the decision here to make sure we handle + * the RCPI/RSSI value correctly on WSM_EVENT_RCPI_RSSI + */ + if (threshold.rssiRcpiMode & WSM_RCPI_RSSI_USE_RSSI) + priv->cqm_use_rssi = true; + + if (!priv->sdd) { + ret = request_firmware(&priv->sdd, priv->sdd_path, priv->pdev); + if (ret) { + pr_err("Can't load sdd file %s.\n", priv->sdd_path); + return ret; + } + cw1200_parse_sdd_file(priv); + } + + cfg.dpdData = priv->sdd->data; + cfg.dpdData_size = priv->sdd->size; + ret = wsm_configuration(priv, &cfg); + if (ret) + return ret; + + /* Configure RCPI/RSSI reporting. */ + wsm_set_rcpi_rssi_threshold(priv, &threshold); + + return 0; +} + +static void cw1200_join_complete(struct cw1200_common *priv) +{ + pr_debug("[STA] Join complete (%d)\n", priv->join_complete_status); + + priv->join_pending = false; + if (priv->join_complete_status) { + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + cw1200_update_listening(priv, priv->listening); + cw1200_do_unjoin(priv); + ieee80211_connection_loss(priv->vif); + } else { + if (priv->mode == NL80211_IFTYPE_ADHOC) + priv->join_status = CW1200_JOIN_STATUS_IBSS; + else + priv->join_status = CW1200_JOIN_STATUS_PRE_STA; + } + wsm_unlock_tx(priv); /* Clearing the lock held before do_join() */ +} + +void cw1200_join_complete_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, join_complete_work); + mutex_lock(&priv->conf_mutex); + cw1200_join_complete(priv); + mutex_unlock(&priv->conf_mutex); +} + +void cw1200_join_complete_cb(struct cw1200_common *priv, + struct wsm_join_complete *arg) +{ + pr_debug("[STA] cw1200_join_complete_cb called, status=%d.\n", + arg->status); + + if (cancel_delayed_work(&priv->join_timeout)) { + priv->join_complete_status = arg->status; + queue_work(priv->workqueue, &priv->join_complete_work); + } +} + +/* MUST be called with tx_lock held! It will be unlocked for us. */ +static void cw1200_do_join(struct cw1200_common *priv) +{ + const u8 *bssid; + struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; + struct cfg80211_bss *bss = NULL; + struct wsm_protected_mgmt_policy mgmt_policy; + struct wsm_join join = { + .mode = conf->ibss_joined ? + WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS, + .preamble_type = WSM_JOIN_PREAMBLE_LONG, + .probe_for_join = 1, + .atim_window = 0, + .basic_rate_set = cw1200_rate_mask_to_wsm(priv, + conf->basic_rates), + }; + if (delayed_work_pending(&priv->join_timeout)) { + pr_warn("[STA] - Join request already pending, skipping..\n"); + wsm_unlock_tx(priv); + return; + } + + if (priv->join_status) + cw1200_do_unjoin(priv); + + bssid = priv->vif->bss_conf.bssid; + + bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel, + bssid, NULL, 0, 0, 0); + + if (!bss && !conf->ibss_joined) { + wsm_unlock_tx(priv); + return; + } + + mutex_lock(&priv->conf_mutex); + + /* Under the conf lock: check scan status and + * bail out if it is in progress.
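+ * A join cannot proceed while a scan owns the device, so release the TX lock and give up for now.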
+ */ + if (atomic_read(&priv->scan.in_progress)) { + wsm_unlock_tx(priv); + goto done_put; + } + + priv->join_pending = true; + + /* Sanity check basic rates */ + if (!join.basic_rate_set) + join.basic_rate_set = 7; + + /* Sanity check beacon interval */ + if (!priv->beacon_int) + priv->beacon_int = 1; + + join.beacon_interval = priv->beacon_int; + + /* BT Coex related changes */ + if (priv->bt_present) { + if (((priv->conf_listen_interval * 100) % + priv->beacon_int) == 0) + priv->listen_interval = + ((priv->conf_listen_interval * 100) / + priv->beacon_int); + else + priv->listen_interval = + ((priv->conf_listen_interval * 100) / + priv->beacon_int + 1); + } + + if (priv->hw->conf.ps_dtim_period) + priv->join_dtim_period = priv->hw->conf.ps_dtim_period; + join.dtim_period = priv->join_dtim_period; + + join.channel_number = priv->channel->hw_value; + join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? + WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; + + memcpy(join.bssid, bssid, sizeof(join.bssid)); + + pr_debug("[STA] Join BSSID: %pM DTIM: %d, interval: %d\n", + join.bssid, + join.dtim_period, priv->beacon_int); + + if (!conf->ibss_joined) { + const u8 *ssidie; + rcu_read_lock(); + ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); + if (ssidie) { + join.ssid_len = ssidie[1]; + memcpy(join.ssid, &ssidie[2], join.ssid_len); + } + rcu_read_unlock(); + } + + if (priv->vif->p2p) { + join.flags |= WSM_JOIN_FLAGS_P2P_GO; + join.basic_rate_set = + cw1200_rate_mask_to_wsm(priv, 0xFF0); + } + + /* Enable asynchronous join calls */ + if (!conf->ibss_joined) { + join.flags |= WSM_JOIN_FLAGS_FORCE; + join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND; + } + + wsm_flush_tx(priv); + + /* Stay Awake for Join and Auth Timeouts and a bit more */ + cw1200_pm_stay_awake(&priv->pm_state, + CW1200_JOIN_TIMEOUT + CW1200_AUTH_TIMEOUT); + + cw1200_update_listening(priv, false); + + /* Turn on Block ACKs */ + wsm_set_block_ack_policy(priv, priv->ba_tx_tid_mask, + priv->ba_rx_tid_mask); + + /* Set up timeout */ + if (join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND) { + priv->join_status = CW1200_JOIN_STATUS_JOINING; + queue_delayed_work(priv->workqueue, + &priv->join_timeout, + CW1200_JOIN_TIMEOUT); + } + + /* 802.11w protected mgmt frames */ + mgmt_policy.protectedMgmtEnable = 0; + mgmt_policy.unprotectedMgmtFramesAllowed = 1; + mgmt_policy.encryptionForAuthFrame = 1; + wsm_set_protected_mgmt_policy(priv, &mgmt_policy); + + /* Perform actual join */ + if (wsm_join(priv, &join)) { + pr_err("[STA] cw1200_join_work: wsm_join failed!\n"); + cancel_delayed_work_sync(&priv->join_timeout); + cw1200_update_listening(priv, priv->listening); + /* Tx lock still held, unjoin will clear it. */ + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } else { + if (!(join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND)) + cw1200_join_complete(priv); /* Will clear tx_lock */ + + /* Upload keys */ + cw1200_upload_keys(priv); + + /* Due to beacon filtering it is possible that the + * AP's beacon is not known for the mac80211 stack. 
+ * Disable filtering temporary to make sure the stack + * receives at least one + */ + priv->disable_beacon_filter = true; + } + cw1200_update_filtering(priv); + +done_put: + mutex_unlock(&priv->conf_mutex); + if (bss) + cfg80211_put_bss(priv->hw->wiphy, bss); +} + +void cw1200_join_timeout(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, join_timeout.work); + pr_debug("[WSM] Join timed out.\n"); + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); +} + +static void cw1200_do_unjoin(struct cw1200_common *priv) +{ + struct wsm_reset reset = { + .reset_statistics = true, + }; + + cancel_delayed_work_sync(&priv->join_timeout); + + mutex_lock(&priv->conf_mutex); + priv->join_pending = false; + + if (atomic_read(&priv->scan.in_progress)) { + if (priv->delayed_unjoin) + wiphy_dbg(priv->hw->wiphy, "Delayed unjoin is already scheduled.\n"); + else + priv->delayed_unjoin = true; + goto done; + } + + priv->delayed_link_loss = false; + + if (!priv->join_status) + goto done; + + if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { + wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", + priv->join_status); + BUG_ON(1); + } + + cancel_work_sync(&priv->update_filtering_work); + cancel_work_sync(&priv->set_beacon_wakeup_period_work); + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + + /* Unjoin is a reset. */ + wsm_flush_tx(priv); + wsm_keep_alive_period(priv, 0); + wsm_reset(priv, &reset); + wsm_set_output_power(priv, priv->output_power * 10); + priv->join_dtim_period = 0; + cw1200_setup_mac(priv); + cw1200_free_event_queue(priv); + cancel_work_sync(&priv->event_handler); + cw1200_update_listening(priv, priv->listening); + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + + /* Disable Block ACKs */ + wsm_set_block_ack_policy(priv, 0, 0); + + priv->disable_beacon_filter = false; + cw1200_update_filtering(priv); + memset(&priv->association_mode, 0, + sizeof(priv->association_mode)); + memset(&priv->bss_params, 0, sizeof(priv->bss_params)); + priv->setbssparams_done = false; + memset(&priv->firmware_ps_mode, 0, + sizeof(priv->firmware_ps_mode)); + + pr_debug("[STA] Unjoin completed.\n"); + +done: + mutex_unlock(&priv->conf_mutex); +} + +void cw1200_unjoin_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, unjoin_work); + + cw1200_do_unjoin(priv); + + /* Tell the stack we're dead */ + ieee80211_connection_loss(priv->vif); + + wsm_unlock_tx(priv); +} + +int cw1200_enable_listening(struct cw1200_common *priv) +{ + struct wsm_start start = { + .mode = WSM_START_MODE_P2P_DEV, + .band = WSM_PHY_BAND_2_4G, + .beacon_interval = 100, + .dtim_period = 1, + .probe_delay = 0, + .basic_rate_set = 0x0F, + }; + + if (priv->channel) { + start.band = priv->channel->band == IEEE80211_BAND_5GHZ ? 
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; + start.channel_number = priv->channel->hw_value; + } else { + start.band = WSM_PHY_BAND_2_4G; + start.channel_number = 1; + } + + return wsm_start(priv, &start); +} + +int cw1200_disable_listening(struct cw1200_common *priv) +{ + int ret; + struct wsm_reset reset = { + .reset_statistics = true, + }; + ret = wsm_reset(priv, &reset); + return ret; +} + +void cw1200_update_listening(struct cw1200_common *priv, bool enabled) +{ + if (enabled) { + if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) { + if (!cw1200_enable_listening(priv)) + priv->join_status = CW1200_JOIN_STATUS_MONITOR; + wsm_set_probe_responder(priv, true); + } + } else { + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { + if (!cw1200_disable_listening(priv)) + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + wsm_set_probe_responder(priv, false); + } + } +} + +int cw1200_set_uapsd_param(struct cw1200_common *priv, + const struct wsm_edca_params *arg) +{ + int ret; + u16 uapsd_flags = 0; + + /* Here's the mapping AC [queue, bit] + * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0] + */ + + if (arg->uapsd_enable[0]) + uapsd_flags |= 1 << 3; + + if (arg->uapsd_enable[1]) + uapsd_flags |= 1 << 2; + + if (arg->uapsd_enable[2]) + uapsd_flags |= 1 << 1; + + if (arg->uapsd_enable[3]) + uapsd_flags |= 1; + + /* Currently pseudo U-APSD operation is not supported, so setting + * MinAutoTriggerInterval, MaxAutoTriggerInterval and + * AutoTriggerStep to 0 + */ + + priv->uapsd_info.uapsd_flags = cpu_to_le16(uapsd_flags); + priv->uapsd_info.min_auto_trigger_interval = 0; + priv->uapsd_info.max_auto_trigger_interval = 0; + priv->uapsd_info.auto_trigger_step = 0; + + ret = wsm_set_uapsd_info(priv, &priv->uapsd_info); + return ret; +} + +/* ******************************************************************** */ +/* AP API */ + +int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + struct cw1200_link_entry *entry; + struct sk_buff *skb; + + if (priv->mode != NL80211_IFTYPE_AP) + return 0; + + sta_priv->link_id = cw1200_find_link_id(priv, sta->addr); + if (WARN_ON(!sta_priv->link_id)) { + wiphy_info(priv->hw->wiphy, + "[AP] No more link IDs available.\n"); + return -ENOENT; + } + + entry = &priv->link_id_db[sta_priv->link_id - 1]; + spin_lock_bh(&priv->ps_state_lock); + if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) == + IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) + priv->sta_asleep_mask |= BIT(sta_priv->link_id); + entry->status = CW1200_LINK_HARD; + while ((skb = skb_dequeue(&entry->rx_queue))) + ieee80211_rx_irqsafe(priv->hw, skb); + spin_unlock_bh(&priv->ps_state_lock); + return 0; +} + +int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = hw->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + struct cw1200_link_entry *entry; + + if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id) + return 0; + + entry = &priv->link_id_db[sta_priv->link_id - 1]; + spin_lock_bh(&priv->ps_state_lock); + entry->status = CW1200_LINK_RESERVE; + entry->timestamp = jiffies; + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + spin_unlock_bh(&priv->ps_state_lock); + flush_workqueue(priv->workqueue); + return 0; +} + +static void __cw1200_sta_notify(struct 
ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + int link_id) +{ + struct cw1200_common *priv = dev->priv; + u32 bit, prev; + + /* Zero link id means "for all link IDs" */ + if (link_id) + bit = BIT(link_id); + else if (WARN_ON_ONCE(notify_cmd != STA_NOTIFY_AWAKE)) + bit = 0; + else + bit = priv->link_id_map; + prev = priv->sta_asleep_mask & bit; + + switch (notify_cmd) { + case STA_NOTIFY_SLEEP: + if (!prev) { + if (priv->buffered_multicasts && + !priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_start_work); + priv->sta_asleep_mask |= bit; + } + break; + case STA_NOTIFY_AWAKE: + if (prev) { + priv->sta_asleep_mask &= ~bit; + priv->pspoll_mask &= ~bit; + if (priv->tx_multicast && link_id && + !priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_stop_work); + cw1200_bh_wakeup(priv); + } + break; + } +} + +void cw1200_sta_notify(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta) +{ + struct cw1200_common *priv = dev->priv; + struct cw1200_sta_priv *sta_priv = + (struct cw1200_sta_priv *)&sta->drv_priv; + + spin_lock_bh(&priv->ps_state_lock); + __cw1200_sta_notify(dev, vif, notify_cmd, sta_priv->link_id); + spin_unlock_bh(&priv->ps_state_lock); +} + +static void cw1200_ps_notify(struct cw1200_common *priv, + int link_id, bool ps) +{ + if (link_id > CW1200_MAX_STA_IN_AP_MODE) + return; + + pr_debug("%s for LinkId: %d. STAs asleep: %.8X\n", + ps ? "Stop" : "Start", + link_id, priv->sta_asleep_mask); + + __cw1200_sta_notify(priv->hw, priv->vif, + ps ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, link_id); +} + +static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set) +{ + struct sk_buff *skb; + struct wsm_update_ie update_ie = { + .what = WSM_UPDATE_IE_BEACON, + .count = 1, + }; + u16 tim_offset, tim_length; + + pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis"); + + skb = ieee80211_beacon_get_tim(priv->hw, priv->vif, + &tim_offset, &tim_length); + if (!skb) { + if (!__cw1200_flush(priv, true)) + wsm_unlock_tx(priv); + return -ENOENT; + } + + if (tim_offset && tim_length >= 6) { + /* Ignore DTIM count from mac80211: + * firmware handles DTIM internally. + */ + skb->data[tim_offset + 2] = 0; + + /* Set/reset aid0 bit */ + if (aid0_bit_set) + skb->data[tim_offset + 4] |= 1; + else + skb->data[tim_offset + 4] &= ~1; + } + + update_ie.ies = &skb->data[tim_offset]; + update_ie.length = tim_length; + wsm_update_ie(priv, &update_ie); + + dev_kfree_skb(skb); + + return 0; +} + +void cw1200_set_tim_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, set_tim_work); + (void)cw1200_set_tim_impl(priv, priv->aid0_bit_set); +} + +int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, + bool set) +{ + struct cw1200_common *priv = dev->priv; + queue_work(priv->workqueue, &priv->set_tim_work); + return 0; +} + +void cw1200_set_cts_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, set_cts_work); + + u8 erp_ie[3] = {WLAN_EID_ERP_INFO, 0x1, 0}; + struct wsm_update_ie update_ie = { + .what = WSM_UPDATE_IE_BEACON, + .count = 1, + .ies = erp_ie, + .length = 3, + }; + u32 erp_info; + __le32 use_cts_prot; + mutex_lock(&priv->conf_mutex); + erp_info = priv->erp_info; + mutex_unlock(&priv->conf_mutex); + use_cts_prot = + erp_info & WLAN_ERP_USE_PROTECTION ? 
+ __cpu_to_le32(1) : 0; + + erp_ie[ERP_INFO_BYTE_OFFSET] = erp_info; + + pr_debug("[STA] ERP information 0x%x\n", erp_info); + + wsm_write_mib(priv, WSM_MIB_ID_NON_ERP_PROTECTION, + &use_cts_prot, sizeof(use_cts_prot)); + wsm_update_ie(priv, &update_ie); + + return; +} + +static int cw1200_set_btcoexinfo(struct cw1200_common *priv) +{ + struct wsm_override_internal_txrate arg; + int ret = 0; + + if (priv->mode == NL80211_IFTYPE_STATION) { + /* Plumb PSPOLL and NULL template */ + cw1200_upload_pspoll(priv); + cw1200_upload_null(priv); + cw1200_upload_qosnull(priv); + } else { + return 0; + } + + memset(&arg, 0, sizeof(struct wsm_override_internal_txrate)); + + if (!priv->vif->p2p) { + /* STATION mode */ + if (priv->bss_params.operational_rate_set & ~0xF) { + pr_debug("[STA] STA has ERP rates\n"); + /* G or BG mode */ + arg.internalTxRate = (__ffs( + priv->bss_params.operational_rate_set & ~0xF)); + } else { + pr_debug("[STA] STA has non ERP rates\n"); + /* B only mode */ + arg.internalTxRate = (__ffs(priv->association_mode.basic_rate_set)); + } + arg.nonErpInternalTxRate = (__ffs(priv->association_mode.basic_rate_set)); + } else { + /* P2P mode */ + arg.internalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF)); + arg.nonErpInternalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF)); + } + + pr_debug("[STA] BTCOEX_INFO MODE %d, internalTxRate : %x, nonErpInternalTxRate: %x\n", + priv->mode, + arg.internalTxRate, + arg.nonErpInternalTxRate); + + ret = wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE, + &arg, sizeof(arg)); + + return ret; +} + +void cw1200_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct cw1200_common *priv = dev->priv; + bool do_join = false; + + mutex_lock(&priv->conf_mutex); + + pr_debug("BSS CHANGED: %08x\n", changed); + + /* TODO: BSS_CHANGED_QOS */ + /* TODO: BSS_CHANGED_TXPOWER */ + + if (changed & BSS_CHANGED_ARP_FILTER) { + struct wsm_mib_arp_ipv4_filter filter = {0}; + int i; + + pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n", + info->arp_addr_cnt); + + /* Currently only one IP address is supported by firmware. + * In case of more IPs arp filtering will be disabled. 
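+ * (filter.enable is left at 0 when arp_addr_cnt exceeds WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES, which turns the offloaded filter off.)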
+ */ + if (info->arp_addr_cnt > 0 && + info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) { + for (i = 0; i < info->arp_addr_cnt; i++) { + filter.ipv4addrs[i] = info->arp_addr_list[i]; + pr_debug("[STA] addr[%d]: 0x%X\n", + i, filter.ipv4addrs[i]); + } + filter.enable = __cpu_to_le32(1); + } + + pr_debug("[STA] arp ip filter enable: %d\n", + __le32_to_cpu(filter.enable)); + + wsm_set_arp_ipv4_filter(priv, &filter); + } + + if (changed & + (BSS_CHANGED_BEACON | + BSS_CHANGED_AP_PROBE_RESP | + BSS_CHANGED_BSSID | + BSS_CHANGED_SSID | + BSS_CHANGED_IBSS)) { + pr_debug("BSS_CHANGED_BEACON\n"); + priv->beacon_int = info->beacon_int; + cw1200_update_beaconing(priv); + cw1200_upload_beacon(priv); + } + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + pr_debug("BSS_CHANGED_BEACON_ENABLED (%d)\n", info->enable_beacon); + + if (priv->enable_beacon != info->enable_beacon) { + cw1200_enable_beaconing(priv, info->enable_beacon); + priv->enable_beacon = info->enable_beacon; + } + } + + if (changed & BSS_CHANGED_BEACON_INT) { + pr_debug("CHANGED_BEACON_INT\n"); + if (info->ibss_joined) + do_join = true; + else if (priv->join_status == CW1200_JOIN_STATUS_AP) + cw1200_update_beaconing(priv); + } + + /* assoc/disassoc, or maybe AID changed */ + if (changed & BSS_CHANGED_ASSOC) { + wsm_lock_tx(priv); + priv->wep_default_key_id = -1; + wsm_unlock_tx(priv); + } + + if (changed & BSS_CHANGED_BSSID) { + pr_debug("BSS_CHANGED_BSSID\n"); + do_join = true; + } + + if (changed & + (BSS_CHANGED_ASSOC | + BSS_CHANGED_BSSID | + BSS_CHANGED_IBSS | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_HT)) { + pr_debug("BSS_CHANGED_ASSOC\n"); + if (info->assoc) { + if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) { + ieee80211_connection_loss(vif); + mutex_unlock(&priv->conf_mutex); + return; + } else if (priv->join_status == CW1200_JOIN_STATUS_PRE_STA) { + priv->join_status = CW1200_JOIN_STATUS_STA; + } + } else { + do_join = true; + } + + if (info->assoc || info->ibss_joined) { + struct ieee80211_sta *sta = NULL; + u32 val = 0; + + if (info->dtim_period) + priv->join_dtim_period = info->dtim_period; + priv->beacon_int = info->beacon_int; + + rcu_read_lock(); + + if (info->bssid && !info->ibss_joined) + sta = ieee80211_find_sta(vif, info->bssid); + if (sta) { + priv->ht_info.ht_cap = sta->ht_cap; + priv->bss_params.operational_rate_set = + cw1200_rate_mask_to_wsm(priv, + sta->supp_rates[priv->channel->band]); + priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef); + priv->ht_info.operation_mode = info->ht_operation_mode; + } else { + memset(&priv->ht_info, 0, + sizeof(priv->ht_info)); + priv->bss_params.operational_rate_set = -1; + } + rcu_read_unlock(); + + /* Non Greenfield stations present */ + if (priv->ht_info.operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) + val |= WSM_NON_GREENFIELD_STA_PRESENT; + + /* Set HT protection method */ + val |= (priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2; + + /* TODO: + * STBC_param.dual_cts + * STBC_param.LSIG_TXOP_FILL + */ + + val = cpu_to_le32(val); + wsm_write_mib(priv, WSM_MIB_ID_SET_HT_PROTECTION, + &val, sizeof(val)); + + priv->association_mode.greenfield = + cw1200_ht_greenfield(&priv->ht_info); + priv->association_mode.flags = + WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES | + WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE | + WSM_ASSOCIATION_MODE_USE_HT_MODE | + WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET | + WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING; + priv->association_mode.preamble = + info->use_short_preamble ? 
+ WSM_JOIN_PREAMBLE_SHORT : + WSM_JOIN_PREAMBLE_LONG; + priv->association_mode.basic_rate_set = __cpu_to_le32( + cw1200_rate_mask_to_wsm(priv, + info->basic_rates)); + priv->association_mode.mpdu_start_spacing = + cw1200_ht_ampdu_density(&priv->ht_info); + + cw1200_cqm_bssloss_sm(priv, 0, 0, 0); + cancel_work_sync(&priv->unjoin_work); + + priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count; + priv->bss_params.aid = info->aid; + + if (priv->join_dtim_period < 1) + priv->join_dtim_period = 1; + + pr_debug("[STA] DTIM %d, interval: %d\n", + priv->join_dtim_period, priv->beacon_int); + pr_debug("[STA] Preamble: %d, Greenfield: %d, Aid: %d, Rates: 0x%.8X, Basic: 0x%.8X\n", + priv->association_mode.preamble, + priv->association_mode.greenfield, + priv->bss_params.aid, + priv->bss_params.operational_rate_set, + priv->association_mode.basic_rate_set); + wsm_set_association_mode(priv, &priv->association_mode); + + if (!info->ibss_joined) { + wsm_keep_alive_period(priv, 30 /* sec */); + wsm_set_bss_params(priv, &priv->bss_params); + priv->setbssparams_done = true; + cw1200_set_beacon_wakeup_period_work(&priv->set_beacon_wakeup_period_work); + cw1200_set_pm(priv, &priv->powersave_mode); + } + if (priv->vif->p2p) { + pr_debug("[STA] Setting p2p powersave configuration.\n"); + wsm_set_p2p_ps_modeinfo(priv, + &priv->p2p_ps_modeinfo); + } + if (priv->bt_present) + cw1200_set_btcoexinfo(priv); + } else { + memset(&priv->association_mode, 0, + sizeof(priv->association_mode)); + memset(&priv->bss_params, 0, sizeof(priv->bss_params)); + } + } + + /* ERP Protection */ + if (changed & (BSS_CHANGED_ASSOC | + BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE)) { + u32 prev_erp_info = priv->erp_info; + if (info->use_cts_prot) + priv->erp_info |= WLAN_ERP_USE_PROTECTION; + else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT)) + priv->erp_info &= ~WLAN_ERP_USE_PROTECTION; + + if (info->use_short_preamble) + priv->erp_info |= WLAN_ERP_BARKER_PREAMBLE; + else + priv->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE; + + pr_debug("[STA] ERP Protection: %x\n", priv->erp_info); + + if (prev_erp_info != priv->erp_info) + queue_work(priv->workqueue, &priv->set_cts_work); + } + + /* ERP Slottime */ + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT)) { + __le32 slot_time = info->use_short_slot ? + __cpu_to_le32(9) : __cpu_to_le32(20); + pr_debug("[STA] Slot time: %d us.\n", + __le32_to_cpu(slot_time)); + wsm_write_mib(priv, WSM_MIB_ID_DOT11_SLOT_TIME, + &slot_time, sizeof(slot_time)); + } + + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) { + struct wsm_rcpi_rssi_threshold threshold = { + .rollingAverageCount = 8, + }; + pr_debug("[CQM] RSSI threshold subscribe: %d +- %d\n", + info->cqm_rssi_thold, info->cqm_rssi_hyst); + priv->cqm_rssi_thold = info->cqm_rssi_thold; + priv->cqm_rssi_hyst = info->cqm_rssi_hyst; + + if (info->cqm_rssi_thold || info->cqm_rssi_hyst) { + /* RSSI subscription enabled */ + /* TODO: This is not the correct way of setting the threshold. + * Upper and lower should be set equal here and adjusted + * in the callback. However, the current implementation is much + * more reliable and stable.
+ */ + + /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 + * RSSI = RCPI / 2 - 110 + */ + if (priv->cqm_use_rssi) { + threshold.upperThreshold = + info->cqm_rssi_thold + info->cqm_rssi_hyst; + threshold.lowerThreshold = + info->cqm_rssi_thold; + threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI; + } else { + threshold.upperThreshold = (info->cqm_rssi_thold + info->cqm_rssi_hyst + 110) * 2; + threshold.lowerThreshold = (info->cqm_rssi_thold + 110) * 2; + } + threshold.rssiRcpiMode |= WSM_RCPI_RSSI_THRESHOLD_ENABLE; + } else { + /* There is a bug in FW, see sta.c. We have to enable + * dummy subscription to get correct RSSI values. + */ + threshold.rssiRcpiMode |= + WSM_RCPI_RSSI_THRESHOLD_ENABLE | + WSM_RCPI_RSSI_DONT_USE_UPPER | + WSM_RCPI_RSSI_DONT_USE_LOWER; + if (priv->cqm_use_rssi) + threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI; + } + wsm_set_rcpi_rssi_threshold(priv, &threshold); + } + mutex_unlock(&priv->conf_mutex); + + if (do_join) { + wsm_lock_tx(priv); + cw1200_do_join(priv); /* Will unlock it for us */ + } +} + +void cw1200_multicast_start_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, multicast_start_work); + long tmo = priv->join_dtim_period * + (priv->beacon_int + 20) * HZ / 1024; + + cancel_work_sync(&priv->multicast_stop_work); + + if (!priv->aid0_bit_set) { + wsm_lock_tx(priv); + cw1200_set_tim_impl(priv, true); + priv->aid0_bit_set = true; + mod_timer(&priv->mcast_timeout, jiffies + tmo); + wsm_unlock_tx(priv); + } +} + +void cw1200_multicast_stop_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, multicast_stop_work); + + if (priv->aid0_bit_set) { + del_timer_sync(&priv->mcast_timeout); + wsm_lock_tx(priv); + priv->aid0_bit_set = false; + cw1200_set_tim_impl(priv, false); + wsm_unlock_tx(priv); + } +} + +void cw1200_mcast_timeout(unsigned long arg) +{ + struct cw1200_common *priv = + (struct cw1200_common *)arg; + + wiphy_warn(priv->hw->wiphy, + "Multicast delivery timeout.\n"); + spin_lock_bh(&priv->ps_state_lock); + priv->tx_multicast = priv->aid0_bit_set && + priv->buffered_multicasts; + if (priv->tx_multicast) + cw1200_bh_wakeup(priv); + spin_unlock_bh(&priv->ps_state_lock); +} + +int cw1200_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + /* Aggregation is implemented fully in firmware, + * including block ack negotiation. Do not allow + * mac80211 stack to do anything: it interferes with + * the firmware. + */ + + /* Note that we still need this function stubbed. */ + return -ENOTSUPP; +} + +/* ******************************************************************** */ +/* WSM callback */ +void cw1200_suspend_resume(struct cw1200_common *priv, + struct wsm_suspend_resume *arg) +{ + pr_debug("[AP] %s: %s\n", + arg->stop ? "stop" : "start", + arg->multicast ? "broadcast" : "unicast"); + + if (arg->multicast) { + bool cancel_tmo = false; + spin_lock_bh(&priv->ps_state_lock); + if (arg->stop) { + priv->tx_multicast = false; + } else { + /* Firmware sends this indication every DTIM if there + * is a STA in powersave connected. There is no reason + * to suspend, following wakeup will consume much more + * power than it could be saved. 
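+ * Instead, stay awake for roughly one DTIM interval: beacon_int is in TU, the +20 adds a little slack, and HZ / 1024 converts TU (~1 ms each) to jiffies.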
+ */ + cw1200_pm_stay_awake(&priv->pm_state, + priv->join_dtim_period * + (priv->beacon_int + 20) * HZ / 1024); + priv->tx_multicast = (priv->aid0_bit_set && + priv->buffered_multicasts); + if (priv->tx_multicast) { + cancel_tmo = true; + cw1200_bh_wakeup(priv); + } + } + spin_unlock_bh(&priv->ps_state_lock); + if (cancel_tmo) + del_timer_sync(&priv->mcast_timeout); + } else { + spin_lock_bh(&priv->ps_state_lock); + cw1200_ps_notify(priv, arg->link_id, arg->stop); + spin_unlock_bh(&priv->ps_state_lock); + if (!arg->stop) + cw1200_bh_wakeup(priv); + } + return; +} + +/* ******************************************************************** */ +/* AP privates */ + +static int cw1200_upload_beacon(struct cw1200_common *priv) +{ + int ret = 0; + struct ieee80211_mgmt *mgmt; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_BEACON, + }; + + u16 tim_offset; + u16 tim_len; + + if (priv->mode == NL80211_IFTYPE_STATION || + priv->mode == NL80211_IFTYPE_MONITOR || + priv->mode == NL80211_IFTYPE_UNSPECIFIED) + goto done; + + if (priv->vif->p2p) + frame.rate = WSM_TRANSMIT_RATE_6; + + frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif, + &tim_offset, &tim_len); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + if (ret) + goto done; + + /* TODO: Distill probe resp; remove TIM + * and any other beacon-specific IEs + */ + mgmt = (void *)frame.skb->data; + mgmt->frame_control = + __cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + + frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE; + if (priv->vif->p2p) { + ret = wsm_set_probe_responder(priv, true); + } else { + ret = wsm_set_template_frame(priv, &frame); + wsm_set_probe_responder(priv, false); + } + +done: + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_pspoll(struct cw1200_common *priv) +{ + int ret = 0; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_PS_POLL, + .rate = 0xFF, + }; + + + frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_null(struct cw1200_common *priv) +{ + int ret = 0; + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_NULL, + .rate = 0xFF, + }; + + frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + return ret; +} + +static int cw1200_upload_qosnull(struct cw1200_common *priv) +{ + int ret = 0; + /* TODO: This needs to be implemented + + struct wsm_template_frame frame = { + .frame_type = WSM_FRAME_TYPE_QOS_NULL, + .rate = 0xFF, + }; + + frame.skb = ieee80211_qosnullfunc_get(priv->hw, priv->vif); + if (!frame.skb) + return -ENOMEM; + + ret = wsm_set_template_frame(priv, &frame); + + dev_kfree_skb(frame.skb); + + */ + return ret; +} + +static int cw1200_enable_beaconing(struct cw1200_common *priv, + bool enable) +{ + struct wsm_beacon_transmit transmit = { + .enable_beaconing = enable, + }; + + return wsm_beacon_transmit(priv, &transmit); +} + +static int cw1200_start_ap(struct cw1200_common *priv) +{ + int ret; + struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; + struct wsm_start start = { + .mode = priv->vif->p2p ? + WSM_START_MODE_P2P_GO : WSM_START_MODE_AP, + .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ? 
+ WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G, + .channel_number = priv->channel->hw_value, + .beacon_interval = conf->beacon_int, + .dtim_period = conf->dtim_period, + .preamble = conf->use_short_preamble ? + WSM_JOIN_PREAMBLE_SHORT : + WSM_JOIN_PREAMBLE_LONG, + .probe_delay = 100, + .basic_rate_set = cw1200_rate_mask_to_wsm(priv, + conf->basic_rates), + }; + struct wsm_operational_mode mode = { + .power_mode = cw1200_power_mode, + .disable_more_flag_usage = true, + }; + + memset(start.ssid, 0, sizeof(start.ssid)); + if (!conf->hidden_ssid) { + start.ssid_len = conf->ssid_len; + memcpy(start.ssid, conf->ssid, start.ssid_len); + } + + priv->beacon_int = conf->beacon_int; + priv->join_dtim_period = conf->dtim_period; + + memset(&priv->link_id_db, 0, sizeof(priv->link_id_db)); + + pr_debug("[AP] ch: %d(%d), bcn: %d(%d), brt: 0x%.8X, ssid: %.*s.\n", + start.channel_number, start.band, + start.beacon_interval, start.dtim_period, + start.basic_rate_set, + start.ssid_len, start.ssid); + ret = wsm_start(priv, &start); + if (!ret) + ret = cw1200_upload_keys(priv); + if (!ret && priv->vif->p2p) { + pr_debug("[AP] Setting p2p powersave configuration.\n"); + wsm_set_p2p_ps_modeinfo(priv, &priv->p2p_ps_modeinfo); + } + if (!ret) { + wsm_set_block_ack_policy(priv, 0, 0); + priv->join_status = CW1200_JOIN_STATUS_AP; + cw1200_update_filtering(priv); + } + wsm_set_operational_mode(priv, &mode); + return ret; +} + +static int cw1200_update_beaconing(struct cw1200_common *priv) +{ + struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; + struct wsm_reset reset = { + .link_id = 0, + .reset_statistics = true, + }; + + if (priv->mode == NL80211_IFTYPE_AP) { + /* TODO: check if changed channel, band */ + if (priv->join_status != CW1200_JOIN_STATUS_AP || + priv->beacon_int != conf->beacon_int) { + pr_debug("ap restarting\n"); + wsm_lock_tx(priv); + if (priv->join_status != CW1200_JOIN_STATUS_PASSIVE) + wsm_reset(priv, &reset); + priv->join_status = CW1200_JOIN_STATUS_PASSIVE; + cw1200_start_ap(priv); + wsm_unlock_tx(priv); + } else + pr_debug("ap started join_status: %d\n", + priv->join_status); + } + return 0; +} diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h new file mode 100644 index 000000000000..35babb62cc6a --- /dev/null +++ b/drivers/net/wireless/cw1200/sta.h @@ -0,0 +1,123 @@ +/* + * Mac80211 STA interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef STA_H_INCLUDED +#define STA_H_INCLUDED + +/* ******************************************************************** */ +/* mac80211 API */ + +int cw1200_start(struct ieee80211_hw *dev); +void cw1200_stop(struct ieee80211_hw *dev); +int cw1200_add_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif); +void cw1200_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif); +int cw1200_change_interface(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, + bool p2p); +int cw1200_config(struct ieee80211_hw *dev, u32 changed); +void cw1200_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast); +int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +int cw1200_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats); +int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); + +int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value); + +void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop); + +u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list); + +int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg); + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_join_complete_cb(struct cw1200_common *priv, + struct wsm_join_complete *arg); + +/* ******************************************************************** */ +/* WSM events */ + +void cw1200_free_event_queue(struct cw1200_common *priv); +void cw1200_event_handler(struct work_struct *work); +void cw1200_bss_loss_work(struct work_struct *work); +void cw1200_bss_params_work(struct work_struct *work); +void cw1200_keep_alive_work(struct work_struct *work); +void cw1200_tx_failure_work(struct work_struct *work); + +void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, int init, int good, + int bad); +static inline void cw1200_cqm_bssloss_sm(struct cw1200_common *priv, + int init, int good, int bad) +{ + spin_lock(&priv->bss_loss_lock); + __cw1200_cqm_bssloss_sm(priv, init, good, bad); + spin_unlock(&priv->bss_loss_lock); +} + +/* ******************************************************************** */ +/* Internal API */ + +int cw1200_setup_mac(struct cw1200_common *priv); +void cw1200_join_timeout(struct work_struct *work); +void cw1200_unjoin_work(struct work_struct *work); +void cw1200_join_complete_work(struct work_struct *work); +void cw1200_wep_key_work(struct work_struct *work); +void cw1200_update_listening(struct cw1200_common *priv, bool enabled); +void cw1200_update_filtering(struct cw1200_common *priv); +void cw1200_update_filtering_work(struct work_struct *work); +void cw1200_set_beacon_wakeup_period_work(struct work_struct *work); +int cw1200_enable_listening(struct cw1200_common *priv); +int cw1200_disable_listening(struct cw1200_common *priv); +int cw1200_set_uapsd_param(struct cw1200_common *priv, + const struct wsm_edca_params *arg); +void cw1200_ba_work(struct work_struct *work); +void cw1200_ba_timer(unsigned long arg); + +/* AP stuffs */ +int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, + bool set); +int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int cw1200_sta_remove(struct 
ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + enum sta_notify_cmd notify_cmd, + struct ieee80211_sta *sta); +void cw1200_bss_info_changed(struct ieee80211_hw *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed); +int cw1200_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); + +void cw1200_suspend_resume(struct cw1200_common *priv, + struct wsm_suspend_resume *arg); +void cw1200_set_tim_work(struct work_struct *work); +void cw1200_set_cts_work(struct work_struct *work); +void cw1200_multicast_start_work(struct work_struct *work); +void cw1200_multicast_stop_work(struct work_struct *work); +void cw1200_mcast_timeout(unsigned long arg); + +#endif diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c new file mode 100644 index 000000000000..0e40890e1993 --- /dev/null +++ b/drivers/net/wireless/cw1200/txrx.c @@ -0,0 +1,1474 @@ +/* + * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> + +#include "cw1200.h" +#include "wsm.h" +#include "bh.h" +#include "sta.h" +#include "debug.h" + +#define CW1200_INVALID_RATE_ID (0xFF) + +static int cw1200_handle_action_rx(struct cw1200_common *priv, + struct sk_buff *skb); +static const struct ieee80211_rate * +cw1200_get_tx_rate(const struct cw1200_common *priv, + const struct ieee80211_tx_rate *rate); + +/* ******************************************************************** */ +/* TX queue lock / unlock */ + +static inline void cw1200_tx_queues_lock(struct cw1200_common *priv) +{ + int i; + for (i = 0; i < 4; ++i) + cw1200_queue_lock(&priv->tx_queue[i]); +} + +static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv) +{ + int i; + for (i = 0; i < 4; ++i) + cw1200_queue_unlock(&priv->tx_queue[i]); +} + +/* ******************************************************************** */ +/* TX policy cache implementation */ + +static void tx_policy_dump(struct tx_policy *policy) +{ + pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n", + policy->raw[0] & 0x0F, policy->raw[0] >> 4, + policy->raw[1] & 0x0F, policy->raw[1] >> 4, + policy->raw[2] & 0x0F, policy->raw[2] >> 4, + policy->raw[3] & 0x0F, policy->raw[3] >> 4, + policy->raw[4] & 0x0F, policy->raw[4] >> 4, + policy->raw[5] & 0x0F, policy->raw[5] >> 4, + policy->raw[6] & 0x0F, policy->raw[6] >> 4, + policy->raw[7] & 0x0F, policy->raw[7] >> 4, + policy->raw[8] & 0x0F, policy->raw[8] >> 4, + policy->raw[9] & 0x0F, policy->raw[9] >> 4, + policy->raw[10] & 0x0F, policy->raw[10] >> 4, + policy->raw[11] & 0x0F, policy->raw[11] >> 4, + policy->defined); +} + +static void tx_policy_build(const struct cw1200_common *priv, + /* [out] */ struct tx_policy *policy, + struct ieee80211_tx_rate *rates, size_t count) +{ + int i, j; + unsigned limit = priv->short_frame_max_tx_count; + unsigned total = 0; + BUG_ON(rates[0].idx < 0); + memset(policy, 0, 
sizeof(*policy));
+
+ /* minstrel is a little buggy, so distill the
+ * incoming rates first. */
+
+ /* Sort rates in descending order. */
+ for (i = 1; i < count; ++i) {
+ if (rates[i].idx < 0) {
+ count = i;
+ break;
+ }
+ if (rates[i].idx > rates[i - 1].idx) {
+ struct ieee80211_tx_rate tmp = rates[i - 1];
+ rates[i - 1] = rates[i];
+ rates[i] = tmp;
+ }
+ }
+
+ /* Eliminate duplicates. */
+ total = rates[0].count;
+ for (i = 0, j = 1; j < count; ++j) {
+ if (rates[j].idx == rates[i].idx) {
+ rates[i].count += rates[j].count;
+ } else if (rates[j].idx > rates[i].idx) {
+ break;
+ } else {
+ ++i;
+ if (i != j)
+ rates[i] = rates[j];
+ }
+ total += rates[j].count;
+ }
+ count = i + 1;
+
+ /* Re-fill the policy, trying to keep every requested rate while
+ * respecting the global max tx retransmission count. */
+ if (limit < count)
+ limit = count;
+ if (total > limit) {
+ for (i = 0; i < count; ++i) {
+ int left = count - i - 1;
+ if (rates[i].count > limit - left)
+ rates[i].count = limit - left;
+ limit -= rates[i].count;
+ }
+ }
+
+ /* HACK!!! Device has problems (at least) switching from
+ * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
+ * of time (100-200 ms), leading to a significant throughput drop.
+ * As a workaround, additional g-rates are injected into the
+ * policy.
+ */
+ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
+ rates[0].idx > 4 && rates[0].count > 2 &&
+ rates[1].idx < 2) {
+ /* ">> 1" is an equivalent of "/ 2", but faster */
+ int mid_rate = (rates[0].idx + 4) >> 1;
+
+ /* Decrease number of retries for the initial rate */
+ rates[0].count -= 2;
+
+ if (mid_rate != 4) {
+ /* Keep fallback rate at 1Mbps. */
+ rates[3] = rates[1];
+
+ /* Inject 1 transmission on lowest g-rate */
+ rates[2].idx = 4;
+ rates[2].count = 1;
+ rates[2].flags = rates[1].flags;
+
+ /* Inject 1 transmission on mid-rate */
+ rates[1].idx = mid_rate;
+ rates[1].count = 1;
+
+ /* Fallback to 1 Mbps is a really bad thing,
+ * so let's try to increase the probability of
+ * successful transmission on the lowest g rate
+ * even more */
+ if (rates[0].count >= 3) {
+ --rates[0].count;
+ ++rates[2].count;
+ }
+
+ /* Adjust the number of rates defined */
+ count += 2;
+ } else {
+ /* Keep fallback rate at 1Mbps. */
+ rates[2] = rates[1];
+
+ /* Inject 2 transmissions on lowest g-rate */
+ rates[1].idx = 4;
+ rates[1].count = 2;
+
+ /* Adjust the number of rates defined */
+ count += 1;
+ }
+ }
+
+ policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1;
+
+ for (i = 0; i < count; ++i) {
+ register unsigned rateid, off, shift, retries;
+
+ rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value;
+ off = rateid >> 3; /* eq. rateid / 8 */
+ shift = (rateid & 0x07) << 2; /* eq.
(rateid % 8) * 4 */ + + retries = rates[i].count; + if (retries > 0x0F) { + rates[i].count = 0x0f; + retries = 0x0F; + } + policy->tbl[off] |= __cpu_to_le32(retries << shift); + policy->retry_count += retries; + } + + pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n", + count, + rates[0].idx, rates[0].count, + rates[1].idx, rates[1].count, + rates[2].idx, rates[2].count, + rates[3].idx, rates[3].count, + rates[4].idx, rates[4].count); +} + +static inline bool tx_policy_is_equal(const struct tx_policy *wanted, + const struct tx_policy *cached) +{ + size_t count = wanted->defined >> 1; + if (wanted->defined > cached->defined) + return false; + if (count) { + if (memcmp(wanted->raw, cached->raw, count)) + return false; + } + if (wanted->defined & 1) { + if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F)) + return false; + } + return true; +} + +static int tx_policy_find(struct tx_policy_cache *cache, + const struct tx_policy *wanted) +{ + /* O(n) complexity. Not so good, but there's only 8 entries in + * the cache. + * Also lru helps to reduce search time. */ + struct tx_policy_cache_entry *it; + /* First search for policy in "used" list */ + list_for_each_entry(it, &cache->used, link) { + if (tx_policy_is_equal(wanted, &it->policy)) + return it - cache->cache; + } + /* Then - in "free list" */ + list_for_each_entry(it, &cache->free, link) { + if (tx_policy_is_equal(wanted, &it->policy)) + return it - cache->cache; + } + return -1; +} + +static inline void tx_policy_use(struct tx_policy_cache *cache, + struct tx_policy_cache_entry *entry) +{ + ++entry->policy.usage_count; + list_move(&entry->link, &cache->used); +} + +static inline int tx_policy_release(struct tx_policy_cache *cache, + struct tx_policy_cache_entry *entry) +{ + int ret = --entry->policy.usage_count; + if (!ret) + list_move(&entry->link, &cache->free); + return ret; +} + +void tx_policy_clean(struct cw1200_common *priv) +{ + int idx, locked; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + struct tx_policy_cache_entry *entry; + + cw1200_tx_queues_lock(priv); + spin_lock_bh(&cache->lock); + locked = list_empty(&cache->free); + + for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) { + entry = &cache->cache[idx]; + /* Policy usage count should be 0 at this time as all queues + should be empty */ + if (WARN_ON(entry->policy.usage_count)) { + entry->policy.usage_count = 0; + list_move(&entry->link, &cache->free); + } + memset(&entry->policy, 0, sizeof(entry->policy)); + } + if (locked) + cw1200_tx_queues_unlock(priv); + + cw1200_tx_queues_unlock(priv); + spin_unlock_bh(&cache->lock); +} + +/* ******************************************************************** */ +/* External TX policy cache API */ + +void tx_policy_init(struct cw1200_common *priv) +{ + struct tx_policy_cache *cache = &priv->tx_policy_cache; + int i; + + memset(cache, 0, sizeof(*cache)); + + spin_lock_init(&cache->lock); + INIT_LIST_HEAD(&cache->used); + INIT_LIST_HEAD(&cache->free); + + for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) + list_add(&cache->cache[i].link, &cache->free); +} + +static int tx_policy_get(struct cw1200_common *priv, + struct ieee80211_tx_rate *rates, + size_t count, bool *renew) +{ + int idx; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + struct tx_policy wanted; + + tx_policy_build(priv, &wanted, rates, count); + + spin_lock_bh(&cache->lock); + if (WARN_ON_ONCE(list_empty(&cache->free))) { + spin_unlock_bh(&cache->lock); + return CW1200_INVALID_RATE_ID; + } + idx = tx_policy_find(cache, 
&wanted); + if (idx >= 0) { + pr_debug("[TX policy] Used TX policy: %d\n", idx); + *renew = false; + } else { + struct tx_policy_cache_entry *entry; + *renew = true; + /* If policy is not found create a new one + * using the oldest entry in "free" list */ + entry = list_entry(cache->free.prev, + struct tx_policy_cache_entry, link); + entry->policy = wanted; + idx = entry - cache->cache; + pr_debug("[TX policy] New TX policy: %d\n", idx); + tx_policy_dump(&entry->policy); + } + tx_policy_use(cache, &cache->cache[idx]); + if (list_empty(&cache->free)) { + /* Lock TX queues. */ + cw1200_tx_queues_lock(priv); + } + spin_unlock_bh(&cache->lock); + return idx; +} + +static void tx_policy_put(struct cw1200_common *priv, int idx) +{ + int usage, locked; + struct tx_policy_cache *cache = &priv->tx_policy_cache; + + spin_lock_bh(&cache->lock); + locked = list_empty(&cache->free); + usage = tx_policy_release(cache, &cache->cache[idx]); + if (locked && !usage) { + /* Unlock TX queues. */ + cw1200_tx_queues_unlock(priv); + } + spin_unlock_bh(&cache->lock); +} + +static int tx_policy_upload(struct cw1200_common *priv) +{ + struct tx_policy_cache *cache = &priv->tx_policy_cache; + int i; + struct wsm_set_tx_rate_retry_policy arg = { + .num = 0, + }; + spin_lock_bh(&cache->lock); + + /* Upload only modified entries. */ + for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) { + struct tx_policy *src = &cache->cache[i].policy; + if (src->retry_count && !src->uploaded) { + struct wsm_tx_rate_retry_policy *dst = + &arg.tbl[arg.num]; + dst->index = i; + dst->short_retries = priv->short_frame_max_tx_count; + dst->long_retries = priv->long_frame_max_tx_count; + + dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED | + WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT; + memcpy(dst->rate_count_indices, src->tbl, + sizeof(dst->rate_count_indices)); + src->uploaded = 1; + ++arg.num; + } + } + spin_unlock_bh(&cache->lock); + cw1200_debug_tx_cache_miss(priv); + pr_debug("[TX policy] Upload %d policies\n", arg.num); + return wsm_set_tx_rate_retry_policy(priv, &arg); +} + +void tx_policy_upload_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, tx_policy_upload_work); + + pr_debug("[TX] TX policy upload.\n"); + tx_policy_upload(priv); + + wsm_unlock_tx(priv); + cw1200_tx_queues_unlock(priv); +} + +/* ******************************************************************** */ +/* cw1200 TX implementation */ + +struct cw1200_txinfo { + struct sk_buff *skb; + unsigned queue; + struct ieee80211_tx_info *tx_info; + const struct ieee80211_rate *rate; + struct ieee80211_hdr *hdr; + size_t hdrlen; + const u8 *da; + struct cw1200_sta_priv *sta_priv; + struct ieee80211_sta *sta; + struct cw1200_txpriv txpriv; +}; + +u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates) +{ + u32 ret = 0; + int i; + for (i = 0; i < 32; ++i) { + if (rates & BIT(i)) + ret |= BIT(priv->rates[i].hw_value); + } + return ret; +} + +static const struct ieee80211_rate * +cw1200_get_tx_rate(const struct cw1200_common *priv, + const struct ieee80211_tx_rate *rate) +{ + if (rate->idx < 0) + return NULL; + if (rate->flags & IEEE80211_TX_RC_MCS) + return &priv->mcs_rates[rate->idx]; + return &priv->hw->wiphy->bands[priv->channel->band]-> + bitrates[rate->idx]; +} + +static int +cw1200_tx_h_calc_link_ids(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (t->sta && t->sta_priv->link_id) + t->txpriv.raw_link_id = + t->txpriv.link_id = + t->sta_priv->link_id; + else if (priv->mode != 
NL80211_IFTYPE_AP) + t->txpriv.raw_link_id = + t->txpriv.link_id = 0; + else if (is_multicast_ether_addr(t->da)) { + if (priv->enable_beacon) { + t->txpriv.raw_link_id = 0; + t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM; + } else { + t->txpriv.raw_link_id = 0; + t->txpriv.link_id = 0; + } + } else { + t->txpriv.link_id = cw1200_find_link_id(priv, t->da); + if (!t->txpriv.link_id) + t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da); + if (!t->txpriv.link_id) { + wiphy_err(priv->hw->wiphy, + "No more link IDs available.\n"); + return -ENOENT; + } + t->txpriv.raw_link_id = t->txpriv.link_id; + } + if (t->txpriv.raw_link_id) + priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp = + jiffies; + if (t->sta && (t->sta->uapsd_queues & BIT(t->queue))) + t->txpriv.link_id = CW1200_LINK_ID_UAPSD; + return 0; +} + +static void +cw1200_tx_h_pm(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (ieee80211_is_auth(t->hdr->frame_control)) { + u32 mask = ~BIT(t->txpriv.raw_link_id); + spin_lock_bh(&priv->ps_state_lock); + priv->sta_asleep_mask &= mask; + priv->pspoll_mask &= mask; + spin_unlock_bh(&priv->ps_state_lock); + } +} + +static void +cw1200_tx_h_calc_tid(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (ieee80211_is_data_qos(t->hdr->frame_control)) { + u8 *qos = ieee80211_get_qos_ctl(t->hdr); + t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK; + } else if (ieee80211_is_data(t->hdr->frame_control)) { + t->txpriv.tid = 0; + } +} + +static int +cw1200_tx_h_crypt(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + if (!t->tx_info->control.hw_key || + !ieee80211_has_protected(t->hdr->frame_control)) + return 0; + + t->hdrlen += t->tx_info->control.hw_key->iv_len; + skb_put(t->skb, t->tx_info->control.hw_key->icv_len); + + if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) + skb_put(t->skb, 8); /* MIC space */ + + return 0; +} + +static int +cw1200_tx_h_align(struct cw1200_common *priv, + struct cw1200_txinfo *t, + u8 *flags) +{ + size_t offset = (size_t)t->skb->data & 3; + + if (!offset) + return 0; + + if (offset & 1) { + wiphy_err(priv->hw->wiphy, + "Bug: attempt to transmit a frame with wrong alignment: %zu\n", + offset); + return -EINVAL; + } + + if (skb_headroom(t->skb) < offset) { + wiphy_err(priv->hw->wiphy, + "Bug: no space allocated for DMA alignment. headroom: %d\n", + skb_headroom(t->skb)); + return -ENOMEM; + } + skb_push(t->skb, offset); + t->hdrlen += offset; + t->txpriv.offset += offset; + *flags |= WSM_TX_2BYTES_SHIFT; + cw1200_debug_tx_align(priv); + return 0; +} + +static int +cw1200_tx_h_action(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *)t->hdr; + if (ieee80211_is_action(t->hdr->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_BACK) + return 1; + else + return 0; +} + +/* Add WSM header */ +static struct wsm_tx * +cw1200_tx_h_wsm(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + struct wsm_tx *wsm; + + if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) { + wiphy_err(priv->hw->wiphy, + "Bug: no space allocated for WSM header. 
headroom: %d\n",
+ skb_headroom(t->skb));
+ return NULL;
+ }
+
+ wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx));
+ t->txpriv.offset += sizeof(struct wsm_tx);
+ memset(wsm, 0, sizeof(*wsm));
+ wsm->hdr.len = __cpu_to_le16(t->skb->len);
+ wsm->hdr.id = __cpu_to_le16(0x0004);
+ wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
+ return wsm;
+}
+
+/* BT Coex specific handling */
+static void
+cw1200_tx_h_bt(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ struct wsm_tx *wsm)
+{
+ u8 priority = 0;
+
+ if (!priv->bt_present)
+ return;
+
+ if (ieee80211_is_nullfunc(t->hdr->frame_control)) {
+ priority = WSM_EPTA_PRIORITY_MGT;
+ } else if (ieee80211_is_data(t->hdr->frame_control)) {
+ /* Skip LLC SNAP header (+6) */
+ u8 *payload = &t->skb->data[t->hdrlen];
+ u16 *ethertype = (u16 *)&payload[6];
+ if (*ethertype == __be16_to_cpu(ETH_P_PAE))
+ priority = WSM_EPTA_PRIORITY_EAPOL;
+ } else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
+ ieee80211_is_reassoc_req(t->hdr->frame_control)) {
+ struct ieee80211_mgmt *mgt_frame =
+ (struct ieee80211_mgmt *)t->hdr;
+
+ if (mgt_frame->u.assoc_req.listen_interval <
+ priv->listen_interval) {
+ pr_debug("Modified Listen Interval to %d from %d\n",
+ priv->listen_interval,
+ mgt_frame->u.assoc_req.listen_interval);
+ /* Replace the listen interval with
+ * the one read from the SDD */
+ mgt_frame->u.assoc_req.listen_interval =
+ priv->listen_interval;
+ }
+ }
+
+ if (!priority) {
+ if (ieee80211_is_action(t->hdr->frame_control))
+ priority = WSM_EPTA_PRIORITY_ACTION;
+ else if (ieee80211_is_mgmt(t->hdr->frame_control))
+ priority = WSM_EPTA_PRIORITY_MGT;
+ else if ((wsm->queue_id == WSM_QUEUE_VOICE))
+ priority = WSM_EPTA_PRIORITY_VOICE;
+ else if ((wsm->queue_id == WSM_QUEUE_VIDEO))
+ priority = WSM_EPTA_PRIORITY_VIDEO;
+ else
+ priority = WSM_EPTA_PRIORITY_DATA;
+ }
+
+ pr_debug("[TX] EPTA priority %d.\n", priority);
+
+ wsm->flags |= priority << 1;
+}
+
+static int
+cw1200_tx_h_rate_policy(struct cw1200_common *priv,
+ struct cw1200_txinfo *t,
+ struct wsm_tx *wsm)
+{
+ bool tx_policy_renew = false;
+
+ t->txpriv.rate_id = tx_policy_get(priv,
+ t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
+ &tx_policy_renew);
+ if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
+ return -EFAULT;
+
+ wsm->flags |= t->txpriv.rate_id << 4;
+
+ t->rate = cw1200_get_tx_rate(priv,
+ &t->tx_info->control.rates[0]),
+ wsm->max_tx_rate = t->rate->hw_value;
+ if (t->rate->flags & IEEE80211_TX_RC_MCS) {
+ if (cw1200_ht_greenfield(&priv->ht_info))
+ wsm->ht_tx_parameters |=
+ __cpu_to_le32(WSM_HT_TX_GREENFIELD);
+ else
+ wsm->ht_tx_parameters |=
+ __cpu_to_le32(WSM_HT_TX_MIXED);
+ }
+
+ if (tx_policy_renew) {
+ pr_debug("[TX] TX policy renew.\n");
+ /* It's not so optimal to stop TX queues every now and then.
+ * Better to reimplement task scheduling with
+ * a counter. TODO.
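+ * Until then: when a policy entry has to be renewed, TX is
+ * locked via wsm_lock_tx_async() and the TX queues are locked
+ * below; tx_policy_upload_work() then uploads the table to the
+ * device and releases both locks again.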
*/ + wsm_lock_tx_async(priv); + cw1200_tx_queues_lock(priv); + if (queue_work(priv->workqueue, + &priv->tx_policy_upload_work) <= 0) { + cw1200_tx_queues_unlock(priv); + wsm_unlock_tx(priv); + } + } + return 0; +} + +static bool +cw1200_tx_h_pm_state(struct cw1200_common *priv, + struct cw1200_txinfo *t) +{ + int was_buffered = 1; + + if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM && + !priv->buffered_multicasts) { + priv->buffered_multicasts = true; + if (priv->sta_asleep_mask) + queue_work(priv->workqueue, + &priv->multicast_start_work); + } + + if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID) + was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++; + + return !was_buffered; +} + +/* ******************************************************************** */ + +void cw1200_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct cw1200_common *priv = dev->priv; + struct cw1200_txinfo t = { + .skb = skb, + .queue = skb_get_queue_mapping(skb), + .tx_info = IEEE80211_SKB_CB(skb), + .hdr = (struct ieee80211_hdr *)skb->data, + .txpriv.tid = CW1200_MAX_TID, + .txpriv.rate_id = CW1200_INVALID_RATE_ID, + }; + struct ieee80211_sta *sta; + struct wsm_tx *wsm; + bool tid_update = 0; + u8 flags = 0; + int ret; + + if (priv->bh_error) + goto drop; + + t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control); + t.da = ieee80211_get_DA(t.hdr); + if (control) { + t.sta = control->sta; + t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv; + } + + if (WARN_ON(t.queue >= 4)) + goto drop; + + ret = cw1200_tx_h_calc_link_ids(priv, &t); + if (ret) + goto drop; + + pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n", + skb->len, t.queue, t.txpriv.link_id, + t.txpriv.raw_link_id); + + cw1200_tx_h_pm(priv, &t); + cw1200_tx_h_calc_tid(priv, &t); + ret = cw1200_tx_h_crypt(priv, &t); + if (ret) + goto drop; + ret = cw1200_tx_h_align(priv, &t, &flags); + if (ret) + goto drop; + ret = cw1200_tx_h_action(priv, &t); + if (ret) + goto drop; + wsm = cw1200_tx_h_wsm(priv, &t); + if (!wsm) { + ret = -ENOMEM; + goto drop; + } + wsm->flags |= flags; + cw1200_tx_h_bt(priv, &t, wsm); + ret = cw1200_tx_h_rate_policy(priv, &t, wsm); + if (ret) + goto drop; + + rcu_read_lock(); + sta = rcu_dereference(t.sta); + + spin_lock_bh(&priv->ps_state_lock); + { + tid_update = cw1200_tx_h_pm_state(priv, &t); + BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue], + t.skb, &t.txpriv)); + } + spin_unlock_bh(&priv->ps_state_lock); + + if (tid_update && sta) + ieee80211_sta_set_buffered(sta, t.txpriv.tid, true); + + rcu_read_unlock(); + + cw1200_bh_wakeup(priv); + + return; + +drop: + cw1200_skb_dtor(priv, skb, &t.txpriv); + return; +} + +/* ******************************************************************** */ + +static int cw1200_handle_action_rx(struct cw1200_common *priv, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + /* Filter block ACK negotiation: fully controlled by firmware */ + if (mgmt->u.action.category == WLAN_CATEGORY_BACK) + return 1; + + return 0; +} + +static int cw1200_handle_pspoll(struct cw1200_common *priv, + struct sk_buff *skb) +{ + struct ieee80211_sta *sta; + struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data; + int link_id = 0; + u32 pspoll_mask = 0; + int drop = 1; + int i; + + if (priv->join_status != CW1200_JOIN_STATUS_AP) + goto done; + if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN)) + goto done; + + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, 
pspoll->ta); + if (sta) { + struct cw1200_sta_priv *sta_priv; + sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv; + link_id = sta_priv->link_id; + pspoll_mask = BIT(sta_priv->link_id); + } + rcu_read_unlock(); + if (!link_id) + goto done; + + priv->pspoll_mask |= pspoll_mask; + drop = 0; + + /* Do not report pspols if data for given link id is + * queued already. */ + for (i = 0; i < 4; ++i) { + if (cw1200_queue_get_num_queued(&priv->tx_queue[i], + pspoll_mask)) { + cw1200_bh_wakeup(priv); + drop = 1; + break; + } + } + pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd"); +done: + return drop; +} + +/* ******************************************************************** */ + +void cw1200_tx_confirm_cb(struct cw1200_common *priv, + int link_id, + struct wsm_tx_confirm *arg) +{ + u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id); + struct cw1200_queue *queue = &priv->tx_queue[queue_id]; + struct sk_buff *skb; + const struct cw1200_txpriv *txpriv; + + pr_debug("[TX] TX confirm: %d, %d.\n", + arg->status, arg->ack_failures); + + if (cw1200_itp_tx_running(priv)) + return; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped. */ + return; + } + + if (WARN_ON(queue_id >= 4)) + return; + + if (arg->status) + pr_debug("TX failed: %d.\n", arg->status); + + if ((arg->status == WSM_REQUEUE) && + (arg->flags & WSM_TX_STATUS_REQUEUE)) { + /* "Requeue" means "implicit suspend" */ + struct wsm_suspend_resume suspend = { + .link_id = link_id, + .stop = 1, + .multicast = !link_id, + }; + cw1200_suspend_resume(priv, &suspend); + wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n", + link_id, + cw1200_queue_get_generation(arg->packet_id) + 1, + priv->sta_asleep_mask); + cw1200_queue_requeue(queue, arg->packet_id); + spin_lock_bh(&priv->ps_state_lock); + if (!link_id) { + priv->buffered_multicasts = true; + if (priv->sta_asleep_mask) { + queue_work(priv->workqueue, + &priv->multicast_start_work); + } + } + spin_unlock_bh(&priv->ps_state_lock); + } else if (!cw1200_queue_get_skb(queue, arg->packet_id, + &skb, &txpriv)) { + struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb); + int tx_count = arg->ack_failures; + u8 ht_flags = 0; + int i; + + if (cw1200_ht_greenfield(&priv->ht_info)) + ht_flags |= IEEE80211_TX_RC_GREEN_FIELD; + + spin_lock(&priv->bss_loss_lock); + if (priv->bss_loss_state && + arg->packet_id == priv->bss_loss_confirm_id) { + if (arg->status) { + /* Recovery failed */ + __cw1200_cqm_bssloss_sm(priv, 0, 0, 1); + } else { + /* Recovery succeeded */ + __cw1200_cqm_bssloss_sm(priv, 0, 1, 0); + } + } + spin_unlock(&priv->bss_loss_lock); + + if (!arg->status) { + tx->flags |= IEEE80211_TX_STAT_ACK; + ++tx_count; + cw1200_debug_txed(priv); + if (arg->flags & WSM_TX_STATUS_AGGREGATION) { + /* Do not report aggregation to mac80211: + * it confuses minstrel a lot. 
*/ + /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */ + cw1200_debug_txed_agg(priv); + } + } else { + if (tx_count) + ++tx_count; + } + + for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) { + if (tx->status.rates[i].count >= tx_count) { + tx->status.rates[i].count = tx_count; + break; + } + tx_count -= tx->status.rates[i].count; + if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS) + tx->status.rates[i].flags |= ht_flags; + } + + for (++i; i < IEEE80211_TX_MAX_RATES; ++i) { + tx->status.rates[i].count = 0; + tx->status.rates[i].idx = -1; + } + + /* Pull off any crypto trailers that we added on */ + if (tx->control.hw_key) { + skb_trim(skb, skb->len - tx->control.hw_key->icv_len); + if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) + skb_trim(skb, skb->len - 8); /* MIC space */ + } + cw1200_queue_remove(queue, arg->packet_id); + } + /* XXX TODO: Only wake if there are pending transmits.. */ + cw1200_bh_wakeup(priv); +} + +static void cw1200_notify_buffered_tx(struct cw1200_common *priv, + struct sk_buff *skb, int link_id, int tid) +{ + struct ieee80211_sta *sta; + struct ieee80211_hdr *hdr; + u8 *buffered; + u8 still_buffered = 0; + + if (link_id && tid < CW1200_MAX_TID) { + buffered = priv->link_id_db + [link_id - 1].buffered; + + spin_lock_bh(&priv->ps_state_lock); + if (!WARN_ON(!buffered[tid])) + still_buffered = --buffered[tid]; + spin_unlock_bh(&priv->ps_state_lock); + + if (!still_buffered && tid < CW1200_MAX_TID) { + hdr = (struct ieee80211_hdr *)skb->data; + rcu_read_lock(); + sta = ieee80211_find_sta(priv->vif, hdr->addr1); + if (sta) + ieee80211_sta_set_buffered(sta, tid, false); + rcu_read_unlock(); + } + } +} + +void cw1200_skb_dtor(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv) +{ + skb_pull(skb, txpriv->offset); + if (txpriv->rate_id != CW1200_INVALID_RATE_ID) { + cw1200_notify_buffered_tx(priv, skb, + txpriv->raw_link_id, txpriv->tid); + tx_policy_put(priv, txpriv->rate_id); + } + if (!cw1200_is_itp(priv)) + ieee80211_tx_status(priv->hw, skb); +} + +void cw1200_rx_cb(struct cw1200_common *priv, + struct wsm_rx *arg, + int link_id, + struct sk_buff **skb_p) +{ + struct sk_buff *skb = *skb_p; + struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct cw1200_link_entry *entry = NULL; + unsigned long grace_period; + + bool early_data = false; + bool p2p = priv->vif && priv->vif->p2p; + size_t hdrlen; + hdr->flag = 0; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped. */ + goto drop; + } + + if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) { + entry = &priv->link_id_db[link_id - 1]; + if (entry->status == CW1200_LINK_SOFT && + ieee80211_is_data(frame->frame_control)) + early_data = true; + entry->timestamp = jiffies; + } else if (p2p && + ieee80211_is_action(frame->frame_control) && + (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) { + pr_debug("[RX] Going to MAP&RESET link ID\n"); + WARN_ON(work_pending(&priv->linkid_reset_work)); + memcpy(&priv->action_frame_sa[0], + ieee80211_get_SA(frame), ETH_ALEN); + priv->action_linkid = 0; + schedule_work(&priv->linkid_reset_work); + } + + if (link_id && p2p && + ieee80211_is_action(frame->frame_control) && + (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) { + /* Link ID already exists for the ACTION frame. 
+ * Reset and Remap */
+ WARN_ON(work_pending(&priv->linkid_reset_work));
+ memcpy(&priv->action_frame_sa[0],
+ ieee80211_get_SA(frame), ETH_ALEN);
+ priv->action_linkid = link_id;
+ schedule_work(&priv->linkid_reset_work);
+ }
+ if (arg->status) {
+ if (arg->status == WSM_STATUS_MICFAILURE) {
+ pr_debug("[RX] MIC failure.\n");
+ hdr->flag |= RX_FLAG_MMIC_ERROR;
+ } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) {
+ pr_debug("[RX] No key found.\n");
+ goto drop;
+ } else {
+ pr_debug("[RX] Receive failure: %d.\n",
+ arg->status);
+ goto drop;
+ }
+ }
+
+ if (skb->len < sizeof(struct ieee80211_pspoll)) {
+ wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is lesser than IEEE header.\n");
+ goto drop;
+ }
+
+ if (ieee80211_is_pspoll(frame->frame_control))
+ if (cw1200_handle_pspoll(priv, skb))
+ goto drop;
+
+ hdr->mactime = 0; /* Not supported by WSM */
+ hdr->band = ((arg->channel_number & 0xff00) ||
+ (arg->channel_number > 14)) ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ hdr->freq = ieee80211_channel_to_frequency(
+ arg->channel_number,
+ hdr->band);
+
+ if (arg->rx_rate >= 14) {
+ hdr->flag |= RX_FLAG_HT;
+ hdr->rate_idx = arg->rx_rate - 14;
+ } else if (arg->rx_rate >= 4) {
+ hdr->rate_idx = arg->rx_rate - 2;
+ } else {
+ hdr->rate_idx = arg->rx_rate;
+ }
+
+ hdr->signal = (s8)arg->rcpi_rssi;
+ hdr->antenna = 0;
+
+ hdrlen = ieee80211_hdrlen(frame->frame_control);
+
+ if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
+ size_t iv_len = 0, icv_len = 0;
+
+ hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;
+
+ /* Oops... There is no fast way to ask mac80211 about
+ * IV/ICV lengths. Even the defines are not exposed. */
+ switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
+ case WSM_RX_STATUS_WEP:
+ iv_len = 4 /* WEP_IV_LEN */;
+ icv_len = 4 /* WEP_ICV_LEN */;
+ break;
+ case WSM_RX_STATUS_TKIP:
+ iv_len = 8 /* TKIP_IV_LEN */;
+ icv_len = 4 /* TKIP_ICV_LEN */
+ + 8 /*MICHAEL_MIC_LEN*/;
+ hdr->flag |= RX_FLAG_MMIC_STRIPPED;
+ break;
+ case WSM_RX_STATUS_AES:
+ iv_len = 8 /* CCMP_HDR_LEN */;
+ icv_len = 8 /* CCMP_MIC_LEN */;
+ break;
+ case WSM_RX_STATUS_WAPI:
+ iv_len = 18 /* WAPI_HDR_LEN */;
+ icv_len = 16 /* WAPI_MIC_LEN */;
+ break;
+ default:
+ pr_warn("Unknown encryption type %d\n",
+ WSM_RX_STATUS_ENCRYPTION(arg->flags));
+ goto drop;
+ }
+
+ /* Firmware strips ICV in case of MIC failure. */
+ if (arg->status == WSM_STATUS_MICFAILURE)
+ icv_len = 0;
+
+ if (skb->len < hdrlen + iv_len + icv_len) {
+ wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed.
Size is lesser than crypto headers.\n"); + goto drop; + } + + /* Remove IV, ICV and MIC */ + skb_trim(skb, skb->len - icv_len); + memmove(skb->data + iv_len, skb->data, hdrlen); + skb_pull(skb, iv_len); + } + + /* Remove TSF from the end of frame */ + if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) { + memcpy(&hdr->mactime, skb->data + skb->len - 8, 8); + hdr->mactime = le64_to_cpu(hdr->mactime); + if (skb->len >= 8) + skb_trim(skb, skb->len - 8); + } + + cw1200_debug_rxed(priv); + if (arg->flags & WSM_RX_STATUS_AGGREGATE) + cw1200_debug_rxed_agg(priv); + + if (ieee80211_is_action(frame->frame_control) && + (arg->flags & WSM_RX_STATUS_ADDRESS1)) { + if (cw1200_handle_action_rx(priv, skb)) + return; + } else if (ieee80211_is_beacon(frame->frame_control) && + !arg->status && + !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid, + ETH_ALEN)) { + const u8 *tim_ie; + u8 *ies = ((struct ieee80211_mgmt *) + (skb->data))->u.beacon.variable; + size_t ies_len = skb->len - (ies - (u8 *)(skb->data)); + + tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len); + if (tim_ie) { + struct ieee80211_tim_ie *tim = + (struct ieee80211_tim_ie *)&tim_ie[2]; + + if (priv->join_dtim_period != tim->dtim_period) { + priv->join_dtim_period = tim->dtim_period; + queue_work(priv->workqueue, + &priv->set_beacon_wakeup_period_work); + } + } + + /* Disable beacon filter once we're associated... */ + if (priv->disable_beacon_filter && + (priv->vif->bss_conf.assoc || + priv->vif->bss_conf.ibss_joined)) { + priv->disable_beacon_filter = false; + queue_work(priv->workqueue, + &priv->update_filtering_work); + } + } + + /* Stay awake after frame is received to give + * userspace chance to react and acquire appropriate + * wakelock. */ + if (ieee80211_is_auth(frame->frame_control)) + grace_period = 5 * HZ; + else if (ieee80211_is_deauth(frame->frame_control)) + grace_period = 5 * HZ; + else + grace_period = 1 * HZ; + cw1200_pm_stay_awake(&priv->pm_state, grace_period); + + if (cw1200_itp_rxed(priv, skb)) { + consume_skb(skb); + } else if (early_data) { + spin_lock_bh(&priv->ps_state_lock); + /* Double-check status with lock held */ + if (entry->status == CW1200_LINK_SOFT) + skb_queue_tail(&entry->rx_queue, skb); + else + ieee80211_rx_irqsafe(priv->hw, skb); + spin_unlock_bh(&priv->ps_state_lock); + } else { + ieee80211_rx_irqsafe(priv->hw, skb); + } + *skb_p = NULL; + + return; + +drop: + /* TODO: update failure counters */ + return; +} + +/* ******************************************************************** */ +/* Security */ + +int cw1200_alloc_key(struct cw1200_common *priv) +{ + int idx; + + idx = ffs(~priv->key_map) - 1; + if (idx < 0 || idx > WSM_KEY_MAX_INDEX) + return -1; + + priv->key_map |= BIT(idx); + priv->keys[idx].index = idx; + return idx; +} + +void cw1200_free_key(struct cw1200_common *priv, int idx) +{ + BUG_ON(!(priv->key_map & BIT(idx))); + memset(&priv->keys[idx], 0, sizeof(priv->keys[idx])); + priv->key_map &= ~BIT(idx); +} + +void cw1200_free_keys(struct cw1200_common *priv) +{ + memset(&priv->keys, 0, sizeof(priv->keys)); + priv->key_map = 0; +} + +int cw1200_upload_keys(struct cw1200_common *priv) +{ + int idx, ret = 0; + for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx) + if (priv->key_map & BIT(idx)) { + ret = wsm_add_key(priv, &priv->keys[idx]); + if (ret < 0) + break; + } + return ret; +} + +/* Workaround for WFD test case 6.1.10 */ +void cw1200_link_id_reset(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, linkid_reset_work); + int 
temp_linkid; + + if (!priv->action_linkid) { + /* In GO mode we can receive ACTION frames without a linkID */ + temp_linkid = cw1200_alloc_link_id(priv, + &priv->action_frame_sa[0]); + WARN_ON(!temp_linkid); + if (temp_linkid) { + /* Make sure we execute the WQ */ + flush_workqueue(priv->workqueue); + /* Release the link ID */ + spin_lock_bh(&priv->ps_state_lock); + priv->link_id_db[temp_linkid - 1].prev_status = + priv->link_id_db[temp_linkid - 1].status; + priv->link_id_db[temp_linkid - 1].status = + CW1200_LINK_RESET; + spin_unlock_bh(&priv->ps_state_lock); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + } + } else { + spin_lock_bh(&priv->ps_state_lock); + priv->link_id_db[priv->action_linkid - 1].prev_status = + priv->link_id_db[priv->action_linkid - 1].status; + priv->link_id_db[priv->action_linkid - 1].status = + CW1200_LINK_RESET_REMAP; + spin_unlock_bh(&priv->ps_state_lock); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + flush_workqueue(priv->workqueue); + } +} + +int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac) +{ + int i, ret = 0; + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) && + priv->link_id_db[i].status) { + priv->link_id_db[i].timestamp = jiffies; + ret = i + 1; + break; + } + } + spin_unlock_bh(&priv->ps_state_lock); + return ret; +} + +int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac) +{ + int i, ret = 0; + unsigned long max_inactivity = 0; + unsigned long now = jiffies; + + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + if (!priv->link_id_db[i].status) { + ret = i + 1; + break; + } else if (priv->link_id_db[i].status != CW1200_LINK_HARD && + !priv->tx_queue_stats.link_map_cache[i + 1]) { + unsigned long inactivity = + now - priv->link_id_db[i].timestamp; + if (inactivity < max_inactivity) + continue; + max_inactivity = inactivity; + ret = i + 1; + } + } + if (ret) { + struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1]; + pr_debug("[AP] STA added, link_id: %d\n", ret); + entry->status = CW1200_LINK_RESERVE; + memcpy(&entry->mac, mac, ETH_ALEN); + memset(&entry->buffered, 0, CW1200_MAX_TID); + skb_queue_head_init(&entry->rx_queue); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) + wsm_unlock_tx(priv); + } else { + wiphy_info(priv->hw->wiphy, + "[AP] Early: no more link IDs available.\n"); + } + + spin_unlock_bh(&priv->ps_state_lock); + return ret; +} + +void cw1200_link_id_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, link_id_work); + wsm_flush_tx(priv); + cw1200_link_id_gc_work(&priv->link_id_gc_work.work); + wsm_unlock_tx(priv); +} + +void cw1200_link_id_gc_work(struct work_struct *work) +{ + struct cw1200_common *priv = + container_of(work, struct cw1200_common, link_id_gc_work.work); + struct wsm_reset reset = { + .reset_statistics = false, + }; + struct wsm_map_link map_link = { + .link_id = 0, + }; + unsigned long now = jiffies; + unsigned long next_gc = -1; + long ttl; + bool need_reset; + u32 mask; + int i; + + if (priv->join_status != CW1200_JOIN_STATUS_AP) + return; + + wsm_lock_tx(priv); + spin_lock_bh(&priv->ps_state_lock); + for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { + need_reset = false; + mask = BIT(i + 1); + if 
(priv->link_id_db[i].status == CW1200_LINK_RESERVE || + (priv->link_id_db[i].status == CW1200_LINK_HARD && + !(priv->link_id_map & mask))) { + if (priv->link_id_map & mask) { + priv->sta_asleep_mask &= ~mask; + priv->pspoll_mask &= ~mask; + need_reset = true; + } + priv->link_id_map |= mask; + if (priv->link_id_db[i].status != CW1200_LINK_HARD) + priv->link_id_db[i].status = CW1200_LINK_SOFT; + memcpy(map_link.mac_addr, priv->link_id_db[i].mac, + ETH_ALEN); + spin_unlock_bh(&priv->ps_state_lock); + if (need_reset) { + reset.link_id = i + 1; + wsm_reset(priv, &reset); + } + map_link.link_id = i + 1; + wsm_map_link(priv, &map_link); + next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT); + spin_lock_bh(&priv->ps_state_lock); + } else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) { + ttl = priv->link_id_db[i].timestamp - now + + CW1200_LINK_ID_GC_TIMEOUT; + if (ttl <= 0) { + need_reset = true; + priv->link_id_db[i].status = CW1200_LINK_OFF; + priv->link_id_map &= ~mask; + priv->sta_asleep_mask &= ~mask; + priv->pspoll_mask &= ~mask; + memset(map_link.mac_addr, 0, ETH_ALEN); + spin_unlock_bh(&priv->ps_state_lock); + reset.link_id = i + 1; + wsm_reset(priv, &reset); + spin_lock_bh(&priv->ps_state_lock); + } else { + next_gc = min_t(unsigned long, next_gc, ttl); + } + } else if (priv->link_id_db[i].status == CW1200_LINK_RESET || + priv->link_id_db[i].status == + CW1200_LINK_RESET_REMAP) { + int status = priv->link_id_db[i].status; + priv->link_id_db[i].status = + priv->link_id_db[i].prev_status; + priv->link_id_db[i].timestamp = now; + reset.link_id = i + 1; + spin_unlock_bh(&priv->ps_state_lock); + wsm_reset(priv, &reset); + if (status == CW1200_LINK_RESET_REMAP) { + memcpy(map_link.mac_addr, + priv->link_id_db[i].mac, + ETH_ALEN); + map_link.link_id = i + 1; + wsm_map_link(priv, &map_link); + next_gc = min(next_gc, + CW1200_LINK_ID_GC_TIMEOUT); + } + spin_lock_bh(&priv->ps_state_lock); + } + if (need_reset) { + skb_queue_purge(&priv->link_id_db[i].rx_queue); + pr_debug("[AP] STA removed, link_id: %d\n", + reset.link_id); + } + } + spin_unlock_bh(&priv->ps_state_lock); + if (next_gc != -1) + queue_delayed_work(priv->workqueue, + &priv->link_id_gc_work, next_gc); + wsm_unlock_tx(priv); +} diff --git a/drivers/net/wireless/cw1200/txrx.h b/drivers/net/wireless/cw1200/txrx.h new file mode 100644 index 000000000000..492a4e14213b --- /dev/null +++ b/drivers/net/wireless/cw1200/txrx.h @@ -0,0 +1,106 @@ +/* + * Datapath interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef CW1200_TXRX_H +#define CW1200_TXRX_H + +#include <linux/list.h> + +/* extern */ struct ieee80211_hw; +/* extern */ struct sk_buff; +/* extern */ struct wsm_tx; +/* extern */ struct wsm_rx; +/* extern */ struct wsm_tx_confirm; +/* extern */ struct cw1200_txpriv; + +struct tx_policy { + union { + __le32 tbl[3]; + u8 raw[12]; + }; + u8 defined; + u8 usage_count; + u8 retry_count; + u8 uploaded; +}; + +struct tx_policy_cache_entry { + struct tx_policy policy; + struct list_head link; +}; + +#define TX_POLICY_CACHE_SIZE (8) +struct tx_policy_cache { + struct tx_policy_cache_entry cache[TX_POLICY_CACHE_SIZE]; + struct list_head used; + struct list_head free; + spinlock_t lock; /* Protect policy cache */ +}; + +/* ******************************************************************** */ +/* TX policy cache */ +/* Intention of TX policy cache is an overcomplicated WSM API. + * Device does not accept per-PDU tx retry sequence. + * It uses "tx retry policy id" instead, so driver code has to sync + * linux tx retry sequences with a retry policy table in the device. + */ +void tx_policy_init(struct cw1200_common *priv); +void tx_policy_upload_work(struct work_struct *work); +void tx_policy_clean(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* TX implementation */ + +u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, + u32 rates); +void cw1200_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb); +void cw1200_skb_dtor(struct cw1200_common *priv, + struct sk_buff *skb, + const struct cw1200_txpriv *txpriv); + +/* ******************************************************************** */ +/* WSM callbacks */ + +void cw1200_tx_confirm_cb(struct cw1200_common *priv, + int link_id, + struct wsm_tx_confirm *arg); +void cw1200_rx_cb(struct cw1200_common *priv, + struct wsm_rx *arg, + int link_id, + struct sk_buff **skb_p); + +/* ******************************************************************** */ +/* Timeout */ + +void cw1200_tx_timeout(struct work_struct *work); + +/* ******************************************************************** */ +/* Security */ +int cw1200_alloc_key(struct cw1200_common *priv); +void cw1200_free_key(struct cw1200_common *priv, int idx); +void cw1200_free_keys(struct cw1200_common *priv); +int cw1200_upload_keys(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* Workaround for WFD test case 6.1.10 */ +void cw1200_link_id_reset(struct work_struct *work); + +#define CW1200_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ)) + +int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac); +int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac); +void cw1200_link_id_work(struct work_struct *work); +void cw1200_link_id_gc_work(struct work_struct *work); + + +#endif /* CW1200_TXRX_H */ diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c new file mode 100644 index 000000000000..f3fd9b218724 --- /dev/null +++ b/drivers/net/wireless/cw1200/wsm.c @@ -0,0 +1,1883 @@ +/* + * WSM host interface (HI) implementation for + * ST-Ericsson CW1200 mac80211 drivers. + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/skbuff.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/random.h> + +#include "cw1200.h" +#include "wsm.h" +#include "bh.h" +#include "sta.h" +#include "debug.h" +#include "itp.h" + +#define WSM_CMD_TIMEOUT (2 * HZ) /* With respect to interrupt loss */ +#define WSM_CMD_START_TIMEOUT (7 * HZ) +#define WSM_CMD_RESET_TIMEOUT (3 * HZ) /* 2 sec. timeout was observed. */ +#define WSM_CMD_MAX_TIMEOUT (3 * HZ) + +#define WSM_SKIP(buf, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + goto underflow; \ + (buf)->data += size; \ + } while (0) + +#define WSM_GET(buf, ptr, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + goto underflow; \ + memcpy(ptr, (buf)->data, size); \ + (buf)->data += size; \ + } while (0) + +#define __WSM_GET(buf, type, cvt) \ + ({ \ + type val; \ + if ((buf)->data + sizeof(type) > (buf)->end) \ + goto underflow; \ + val = cvt(*(type *)(buf)->data); \ + (buf)->data += sizeof(type); \ + val; \ + }) + +#define WSM_GET8(buf) __WSM_GET(buf, u8, (u8)) +#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16_to_cpu) +#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32_to_cpu) + +#define WSM_PUT(buf, ptr, size) \ + do { \ + if ((buf)->data + size > (buf)->end) \ + if (wsm_buf_reserve((buf), size)) \ + goto nomem; \ + memcpy((buf)->data, ptr, size); \ + (buf)->data += size; \ + } while (0) + +#define __WSM_PUT(buf, val, type, cvt) \ + do { \ + if ((buf)->data + sizeof(type) > (buf)->end) \ + if (wsm_buf_reserve((buf), sizeof(type))) \ + goto nomem; \ + *(type *)(buf)->data = cvt(val); \ + (buf)->data += sizeof(type); \ + } while (0) + +#define WSM_PUT8(buf, val) __WSM_PUT(buf, val, u8, (u8)) +#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __cpu_to_le16) +#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __cpu_to_le32) + +static void wsm_buf_reset(struct wsm_buf *buf); +static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size); + +static int wsm_cmd_send(struct cw1200_common *priv, + struct wsm_buf *buf, + void *arg, u16 cmd, long tmo); + +#define wsm_cmd_lock(__priv) mutex_lock(&((__priv)->wsm_cmd_mux)) +#define wsm_cmd_unlock(__priv) mutex_unlock(&((__priv)->wsm_cmd_mux)) + +/* ******************************************************************** */ +/* WSM API implementation */ + +static int wsm_generic_confirm(struct cw1200_common *priv, + void *arg, + struct wsm_buf *buf) +{ + u32 status = WSM_GET32(buf); + if (status != WSM_STATUS_SUCCESS) + return -EINVAL; + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +int wsm_configuration(struct cw1200_common *priv, struct wsm_configuration *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->dot11MaxTransmitMsduLifeTime); + WSM_PUT32(buf, arg->dot11MaxReceiveLifeTime); + WSM_PUT32(buf, arg->dot11RtsThreshold); + + /* DPD block. 
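+ * The extra 12 bytes cover the DPD header serialized below:
+ * length (2) + version (2) + station ID (6) + flags (2).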
*/ + WSM_PUT16(buf, arg->dpdData_size + 12); + WSM_PUT16(buf, 1); /* DPD version */ + WSM_PUT(buf, arg->dot11StationId, ETH_ALEN); + WSM_PUT16(buf, 5); /* DPD flags */ + WSM_PUT(buf, arg->dpdData, arg->dpdData_size); + + ret = wsm_cmd_send(priv, buf, arg, + WSM_CONFIGURATION_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_configuration_confirm(struct cw1200_common *priv, + struct wsm_configuration *arg, + struct wsm_buf *buf) +{ + int i; + int status; + + status = WSM_GET32(buf); + if (WARN_ON(status != WSM_STATUS_SUCCESS)) + return -EINVAL; + + WSM_GET(buf, arg->dot11StationId, ETH_ALEN); + arg->dot11FrequencyBandsSupported = WSM_GET8(buf); + WSM_SKIP(buf, 1); + arg->supportedRateMask = WSM_GET32(buf); + for (i = 0; i < 2; ++i) { + arg->txPowerRange[i].min_power_level = WSM_GET32(buf); + arg->txPowerRange[i].max_power_level = WSM_GET32(buf); + arg->txPowerRange[i].stepping = WSM_GET32(buf); + } + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u16 cmd = WSM_RESET_REQ_ID | WSM_TX_LINK_ID(arg->link_id); + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->reset_statistics ? 0 : 1); + ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_RESET_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +struct wsm_mib { + u16 mib_id; + void *buf; + size_t buf_size; +}; + +int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *_buf, + size_t buf_size) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_mib mib_buf = { + .mib_id = mib_id, + .buf = _buf, + .buf_size = buf_size, + }; + wsm_cmd_lock(priv); + + WSM_PUT16(buf, mib_id); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, &mib_buf, + WSM_READ_MIB_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_read_mib_confirm(struct cw1200_common *priv, + struct wsm_mib *arg, + struct wsm_buf *buf) +{ + u16 size; + if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS)) + return -EINVAL; + + if (WARN_ON(WSM_GET16(buf) != arg->mib_id)) + return -EINVAL; + + size = WSM_GET16(buf); + if (size > arg->buf_size) + size = arg->buf_size; + + WSM_GET(buf, arg->buf, size); + arg->buf_size = size; + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *_buf, + size_t buf_size) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_mib mib_buf = { + .mib_id = mib_id, + .buf = _buf, + .buf_size = buf_size, + }; + + wsm_cmd_lock(priv); + + WSM_PUT16(buf, mib_id); + WSM_PUT16(buf, buf_size); + WSM_PUT(buf, _buf, buf_size); + + ret = wsm_cmd_send(priv, buf, &mib_buf, + WSM_WRITE_MIB_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +static int wsm_write_mib_confirm(struct cw1200_common *priv, + struct wsm_mib *arg, + struct wsm_buf *buf) +{ + int ret; + + ret = wsm_generic_confirm(priv, arg, buf); + if (ret) + return ret; + + if (arg->mib_id == WSM_MIB_ID_OPERATIONAL_POWER_MODE) { + /* OperationalMode: 
update PM status. */ + const char *p = arg->buf; + cw1200_enable_powersave(priv, (p[0] & 0x0F) ? true : false); + } + return 0; +} + +/* ******************************************************************** */ + +int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg) +{ + int i; + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + if (arg->num_channels > 48) + return -EINVAL; + + if (arg->num_ssids > 2) + return -EINVAL; + + if (arg->band > 1) + return -EINVAL; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->band); + WSM_PUT8(buf, arg->type); + WSM_PUT8(buf, arg->flags); + WSM_PUT8(buf, arg->max_tx_rate); + WSM_PUT32(buf, arg->auto_scan_interval); + WSM_PUT8(buf, arg->num_probes); + WSM_PUT8(buf, arg->num_channels); + WSM_PUT8(buf, arg->num_ssids); + WSM_PUT8(buf, arg->probe_delay); + + for (i = 0; i < arg->num_channels; ++i) { + WSM_PUT16(buf, arg->ch[i].number); + WSM_PUT16(buf, 0); + WSM_PUT32(buf, arg->ch[i].min_chan_time); + WSM_PUT32(buf, arg->ch[i].max_chan_time); + WSM_PUT32(buf, 0); + } + + for (i = 0; i < arg->num_ssids; ++i) { + WSM_PUT32(buf, arg->ssids[i].length); + WSM_PUT(buf, &arg->ssids[i].ssid[0], + sizeof(arg->ssids[i].ssid)); + } + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_START_SCAN_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_stop_scan(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, + WSM_STOP_SCAN_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + + +static int wsm_tx_confirm(struct cw1200_common *priv, + struct wsm_buf *buf, + int link_id) +{ + struct wsm_tx_confirm tx_confirm; + + tx_confirm.packet_id = WSM_GET32(buf); + tx_confirm.status = WSM_GET32(buf); + tx_confirm.tx_rate = WSM_GET8(buf); + tx_confirm.ack_failures = WSM_GET8(buf); + tx_confirm.flags = WSM_GET16(buf); + tx_confirm.media_delay = WSM_GET32(buf); + tx_confirm.tx_queue_delay = WSM_GET32(buf); + + cw1200_tx_confirm_cb(priv, link_id, &tx_confirm); + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_multi_tx_confirm(struct cw1200_common *priv, + struct wsm_buf *buf, int link_id) +{ + int ret; + int count; + int i; + + count = WSM_GET32(buf); + if (WARN_ON(count <= 0)) + return -EINVAL; + + if (count > 1) { + /* We already released one buffer, now for the rest */ + ret = wsm_release_tx_buffer(priv, count - 1); + if (ret < 0) + return ret; + else if (ret > 0) + cw1200_bh_wakeup(priv); + } + + cw1200_debug_txed_multi(priv, count); + for (i = 0; i < count; ++i) { + ret = wsm_tx_confirm(priv, buf, link_id); + if (ret) + return ret; + } + return ret; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +/* ******************************************************************** */ + +static int wsm_join_confirm(struct cw1200_common *priv, + struct wsm_join_cnf *arg, + struct wsm_buf *buf) +{ + arg->status = WSM_GET32(buf); + if (WARN_ON(arg->status) != WSM_STATUS_SUCCESS) + return -EINVAL; + + arg->min_power_level = WSM_GET32(buf); + arg->max_power_level = WSM_GET32(buf); + + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +int wsm_join(struct cw1200_common *priv, struct wsm_join *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + struct wsm_join_cnf resp; + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->band); + WSM_PUT16(buf, 
arg->channel_number); + WSM_PUT(buf, &arg->bssid[0], sizeof(arg->bssid)); + WSM_PUT16(buf, arg->atim_window); + WSM_PUT8(buf, arg->preamble_type); + WSM_PUT8(buf, arg->probe_for_join); + WSM_PUT8(buf, arg->dtim_period); + WSM_PUT8(buf, arg->flags); + WSM_PUT32(buf, arg->ssid_len); + WSM_PUT(buf, &arg->ssid[0], sizeof(arg->ssid)); + WSM_PUT32(buf, arg->beacon_interval); + WSM_PUT32(buf, arg->basic_rate_set); + + priv->tx_burst_idx = -1; + ret = wsm_cmd_send(priv, buf, &resp, + WSM_JOIN_REQ_ID, WSM_CMD_TIMEOUT); + /* TODO: Update state based on resp.min|max_power_level */ + + priv->join_complete_status = resp.status; + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_bss_params(struct cw1200_common *priv, + const struct wsm_set_bss_params *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, (arg->reset_beacon_loss ? 0x1 : 0)); + WSM_PUT8(buf, arg->beacon_lost_count); + WSM_PUT16(buf, arg->aid); + WSM_PUT32(buf, arg->operational_rate_set); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SET_BSS_PARAMS_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT(buf, arg, sizeof(*arg)); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_ADD_KEY_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_remove_key(struct cw1200_common *priv, const struct wsm_remove_key *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->index); + WSM_PUT8(buf, 0); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_REMOVE_KEY_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_tx_queue_params(struct cw1200_common *priv, + const struct wsm_set_tx_queue_params *arg, u8 id) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1}; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, queue_id_to_wmm_aci[id]); + WSM_PUT8(buf, 0); + WSM_PUT8(buf, arg->ackPolicy); + WSM_PUT8(buf, 0); + WSM_PUT32(buf, arg->maxTransmitLifetime); + WSM_PUT16(buf, arg->allowedMediumTime); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, 0x0012, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_edca_params(struct cw1200_common *priv, + const struct wsm_edca_params *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + /* Implemented according to specification. 
*/ + + WSM_PUT16(buf, arg->params[3].cwmin); + WSM_PUT16(buf, arg->params[2].cwmin); + WSM_PUT16(buf, arg->params[1].cwmin); + WSM_PUT16(buf, arg->params[0].cwmin); + + WSM_PUT16(buf, arg->params[3].cwmax); + WSM_PUT16(buf, arg->params[2].cwmax); + WSM_PUT16(buf, arg->params[1].cwmax); + WSM_PUT16(buf, arg->params[0].cwmax); + + WSM_PUT8(buf, arg->params[3].aifns); + WSM_PUT8(buf, arg->params[2].aifns); + WSM_PUT8(buf, arg->params[1].aifns); + WSM_PUT8(buf, arg->params[0].aifns); + + WSM_PUT16(buf, arg->params[3].txop_limit); + WSM_PUT16(buf, arg->params[2].txop_limit); + WSM_PUT16(buf, arg->params[1].txop_limit); + WSM_PUT16(buf, arg->params[0].txop_limit); + + WSM_PUT32(buf, arg->params[3].max_rx_lifetime); + WSM_PUT32(buf, arg->params[2].max_rx_lifetime); + WSM_PUT32(buf, arg->params[1].max_rx_lifetime); + WSM_PUT32(buf, arg->params[0].max_rx_lifetime); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_EDCA_PARAMS_REQ_ID, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_switch_channel(struct cw1200_common *priv, + const struct wsm_switch_channel *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->switch_count); + WSM_PUT16(buf, arg->channel_number); + + priv->channel_switch_in_progress = 1; + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SWITCH_CHANNEL_REQ_ID, WSM_CMD_TIMEOUT); + if (ret) + priv->channel_switch_in_progress = 0; + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + priv->ps_mode_switch_in_progress = 1; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->fast_psm_idle_period); + WSM_PUT8(buf, arg->ap_psm_change_period); + WSM_PUT8(buf, arg->min_auto_pspoll_period); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_SET_PM_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT8(buf, arg->mode); + WSM_PUT8(buf, arg->band); + WSM_PUT16(buf, arg->channel_number); + WSM_PUT32(buf, arg->ct_window); + WSM_PUT32(buf, arg->beacon_interval); + WSM_PUT8(buf, arg->dtim_period); + WSM_PUT8(buf, arg->preamble); + WSM_PUT8(buf, arg->probe_delay); + WSM_PUT8(buf, arg->ssid_len); + WSM_PUT(buf, arg->ssid, sizeof(arg->ssid)); + WSM_PUT32(buf, arg->basic_rate_set); + + priv->tx_burst_idx = -1; + ret = wsm_cmd_send(priv, buf, NULL, + WSM_START_REQ_ID, WSM_CMD_START_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_beacon_transmit(struct cw1200_common *priv, + const struct wsm_beacon_transmit *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT32(buf, arg->enable_beaconing ? 
1 : 0); + + ret = wsm_cmd_send(priv, buf, NULL, + WSM_BEACON_TRANSMIT_REQ_ID, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_start_find(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, 0x0019, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + +/* ******************************************************************** */ + +int wsm_stop_find(struct cw1200_common *priv) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + ret = wsm_cmd_send(priv, buf, NULL, 0x001A, WSM_CMD_TIMEOUT); + wsm_cmd_unlock(priv); + return ret; +} + +/* ******************************************************************** */ + +int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + u16 cmd = 0x001C | WSM_TX_LINK_ID(arg->link_id); + + wsm_cmd_lock(priv); + + WSM_PUT(buf, &arg->mac_addr[0], sizeof(arg->mac_addr)); + WSM_PUT16(buf, 0); + + ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ + +int wsm_update_ie(struct cw1200_common *priv, + const struct wsm_update_ie *arg) +{ + int ret; + struct wsm_buf *buf = &priv->wsm_cmd_buf; + + wsm_cmd_lock(priv); + + WSM_PUT16(buf, arg->what); + WSM_PUT16(buf, arg->count); + WSM_PUT(buf, arg->ies, arg->length); + + ret = wsm_cmd_send(priv, buf, NULL, 0x001B, WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} + +/* ******************************************************************** */ +int wsm_set_probe_responder(struct cw1200_common *priv, bool enable) +{ + priv->rx_filter.probeResponder = enable; + return wsm_set_rx_filter(priv, &priv->rx_filter); +} + +/* ******************************************************************** */ +/* WSM indication events implementation */ +const char * const cw1200_fw_types[] = { + "ETF", + "WFM", + "WSM", + "HI test", + "Platform test" +}; + +static int wsm_startup_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + priv->wsm_caps.input_buffers = WSM_GET16(buf); + priv->wsm_caps.input_buffer_size = WSM_GET16(buf); + priv->wsm_caps.hw_id = WSM_GET16(buf); + priv->wsm_caps.hw_subid = WSM_GET16(buf); + priv->wsm_caps.status = WSM_GET16(buf); + priv->wsm_caps.fw_cap = WSM_GET16(buf); + priv->wsm_caps.fw_type = WSM_GET16(buf); + priv->wsm_caps.fw_api = WSM_GET16(buf); + priv->wsm_caps.fw_build = WSM_GET16(buf); + priv->wsm_caps.fw_ver = WSM_GET16(buf); + WSM_GET(buf, priv->wsm_caps.fw_label, sizeof(priv->wsm_caps.fw_label)); + priv->wsm_caps.fw_label[sizeof(priv->wsm_caps.fw_label) - 1] = 0; /* Do not trust FW too much... 
*/ + + if (WARN_ON(priv->wsm_caps.status)) + return -EINVAL; + + if (WARN_ON(priv->wsm_caps.fw_type > 4)) + return -EINVAL; + + pr_info("CW1200 WSM init done.\n" + " Input buffers: %d x %d bytes\n" + " Hardware: %d.%d\n" + " %s firmware [%s], ver: %d, build: %d," + " api: %d, cap: 0x%.4X\n", + priv->wsm_caps.input_buffers, + priv->wsm_caps.input_buffer_size, + priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid, + cw1200_fw_types[priv->wsm_caps.fw_type], + priv->wsm_caps.fw_label, priv->wsm_caps.fw_ver, + priv->wsm_caps.fw_build, + priv->wsm_caps.fw_api, priv->wsm_caps.fw_cap); + + /* Disable unsupported frequency bands */ + if (!(priv->wsm_caps.fw_cap & 0x1)) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; + if (!(priv->wsm_caps.fw_cap & 0x2)) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + + priv->firmware_ready = 1; + wake_up(&priv->wsm_startup_done); + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_receive_indication(struct cw1200_common *priv, + int link_id, + struct wsm_buf *buf, + struct sk_buff **skb_p) +{ + struct wsm_rx rx; + struct ieee80211_hdr *hdr; + size_t hdr_len; + __le16 fctl; + + rx.status = WSM_GET32(buf); + rx.channel_number = WSM_GET16(buf); + rx.rx_rate = WSM_GET8(buf); + rx.rcpi_rssi = WSM_GET8(buf); + rx.flags = WSM_GET32(buf); + + /* FW Workaround: Drop probe resp or + beacon when RSSI is 0 + */ + hdr = (struct ieee80211_hdr *)(*skb_p)->data; + + if (!rx.rcpi_rssi && + (ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control))) + return 0; + + /* If no RSSI subscription has been made, + * convert RCPI to RSSI here + */ + if (!priv->cqm_use_rssi) + rx.rcpi_rssi = rx.rcpi_rssi / 2 - 110; + + fctl = *(__le16 *)buf->data; + hdr_len = buf->data - buf->begin; + skb_pull(*skb_p, hdr_len); + if (!rx.status && ieee80211_is_deauth(fctl)) { + if (priv->join_status == CW1200_JOIN_STATUS_STA) { + /* Shedule unjoin work */ + pr_debug("[WSM] Issue unjoin command (RX).\n"); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } + } + cw1200_rx_cb(priv, &rx, link_id, skb_p); + if (*skb_p) + skb_push(*skb_p, hdr_len); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf) +{ + int first; + struct cw1200_wsm_event *event; + + if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { + /* STA is stopped. 
*/ + return 0; + } + + event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL); + + event->evt.id = __le32_to_cpu(WSM_GET32(buf)); + event->evt.data = __le32_to_cpu(WSM_GET32(buf)); + + pr_debug("[WSM] Event: %d(%d)\n", + event->evt.id, event->evt.data); + + spin_lock(&priv->event_queue_lock); + first = list_empty(&priv->event_queue); + list_add_tail(&event->link, &priv->event_queue); + spin_unlock(&priv->event_queue_lock); + + if (first) + queue_work(priv->workqueue, &priv->event_handler); + + return 0; + +underflow: + kfree(event); + return -EINVAL; +} + +static int wsm_channel_switch_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + WARN_ON(WSM_GET32(buf)); + + priv->channel_switch_in_progress = 0; + wake_up(&priv->channel_switch_done); + + wsm_unlock_tx(priv); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_set_pm_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + /* TODO: Check buf (struct wsm_set_pm_complete) for validity */ + if (priv->ps_mode_switch_in_progress) { + priv->ps_mode_switch_in_progress = 0; + wake_up(&priv->ps_mode_switch_done); + } + return 0; +} + +static int wsm_scan_started(struct cw1200_common *priv, void *arg, + struct wsm_buf *buf) +{ + u32 status = WSM_GET32(buf); + if (status != WSM_STATUS_SUCCESS) { + cw1200_scan_failed_cb(priv); + return -EINVAL; + } + return 0; + +underflow: + WARN_ON(1); + return -EINVAL; +} + +static int wsm_scan_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + struct wsm_scan_complete arg; + arg.status = WSM_GET32(buf); + arg.psm = WSM_GET8(buf); + arg.num_channels = WSM_GET8(buf); + cw1200_scan_complete_cb(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_join_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + struct wsm_join_complete arg; + arg.status = WSM_GET32(buf); + pr_debug("[WSM] Join complete indication, status: %d\n", arg.status); + cw1200_join_complete_cb(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_find_complete_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + pr_warn("Implement find_complete_indication\n"); + return 0; +} + +static int wsm_ba_timeout_indication(struct cw1200_common *priv, + struct wsm_buf *buf) +{ + u32 dummy; + u8 tid; + u8 dummy2; + u8 addr[ETH_ALEN]; + + dummy = WSM_GET32(buf); + tid = WSM_GET8(buf); + dummy2 = WSM_GET8(buf); + WSM_GET(buf, addr, ETH_ALEN); + + pr_info("BlockACK timeout, tid %d, addr %pM\n", + tid, addr); + + return 0; + +underflow: + return -EINVAL; +} + +static int wsm_suspend_resume_indication(struct cw1200_common *priv, + int link_id, struct wsm_buf *buf) +{ + u32 flags; + struct wsm_suspend_resume arg; + + flags = WSM_GET32(buf); + arg.link_id = link_id; + arg.stop = !(flags & 1); + arg.multicast = !!(flags & 8); + arg.queue = (flags >> 1) & 3; + + cw1200_suspend_resume(priv, &arg); + + return 0; + +underflow: + return -EINVAL; +} + + +/* ******************************************************************** */ +/* WSM TX */ + +static int wsm_cmd_send(struct cw1200_common *priv, + struct wsm_buf *buf, + void *arg, u16 cmd, long tmo) +{ + size_t buf_len = buf->data - buf->begin; + int ret; + + /* Don't bother if we're dead. */ + if (priv->bh_error) { + ret = 0; + goto done; + } + + /* Block until the cmd buffer is completed. Tortuous. 
*/ + spin_lock(&priv->wsm_cmd.lock); + while (!priv->wsm_cmd.done) { + spin_unlock(&priv->wsm_cmd.lock); + spin_lock(&priv->wsm_cmd.lock); + } + priv->wsm_cmd.done = 0; + spin_unlock(&priv->wsm_cmd.lock); + + if (cmd == WSM_WRITE_MIB_REQ_ID || + cmd == WSM_READ_MIB_REQ_ID) + pr_debug("[WSM] >>> 0x%.4X [MIB: 0x%.4X] (%zu)\n", + cmd, __le16_to_cpu(((__le16 *)buf->begin)[2]), + buf_len); + else + pr_debug("[WSM] >>> 0x%.4X (%zu)\n", cmd, buf_len); + + /* + * Due to buggy SPI on CW1200, we need to + * pad the message by a few bytes to ensure + * that it's completely received. + */ +#ifdef CONFIG_CW1200_ETF + if (!etf_mode) +#endif + buf_len += 4; + + /* Fill HI message header */ + /* BH will add sequence number */ + ((__le16 *)buf->begin)[0] = __cpu_to_le16(buf_len); + ((__le16 *)buf->begin)[1] = __cpu_to_le16(cmd); + + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(priv->wsm_cmd.ptr); + priv->wsm_cmd.ptr = buf->begin; + priv->wsm_cmd.len = buf_len; + priv->wsm_cmd.arg = arg; + priv->wsm_cmd.cmd = cmd; + spin_unlock(&priv->wsm_cmd.lock); + + cw1200_bh_wakeup(priv); + + /* Wait for command completion */ + ret = wait_event_timeout(priv->wsm_cmd_wq, + priv->wsm_cmd.done, tmo); + + if (!ret && !priv->wsm_cmd.done) { + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.done = 1; + priv->wsm_cmd.ptr = NULL; + spin_unlock(&priv->wsm_cmd.lock); + if (priv->bh_error) { + /* Return ok to help system cleanup */ + ret = 0; + } else { + pr_err("CMD req (0x%04x) stuck in firmware, killing BH\n", priv->wsm_cmd.cmd); + print_hex_dump_bytes("REQDUMP: ", DUMP_PREFIX_NONE, + buf->begin, buf_len); + pr_err("Outstanding outgoing frames: %d\n", priv->hw_bufs_used); + + /* Kill BH thread to report the error to the top layer. */ + atomic_add(1, &priv->bh_term); + wake_up(&priv->bh_wq); + ret = -ETIMEDOUT; + } + } else { + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(!priv->wsm_cmd.done); + ret = priv->wsm_cmd.ret; + spin_unlock(&priv->wsm_cmd.lock); + } +done: + wsm_buf_reset(buf); + return ret; +} + +#ifdef CONFIG_CW1200_ETF +int wsm_raw_cmd(struct cw1200_common *priv, u8 *data, size_t len) +{ + struct wsm_buf *buf = &priv->wsm_cmd_buf; + int ret; + + u16 *cmd = (u16 *)(data + 2); + + wsm_cmd_lock(priv); + + WSM_PUT(buf, data + 4, len - 4); /* Skip over header (u16+u16) */ + + ret = wsm_cmd_send(priv, buf, NULL, __le16_to_cpu(*cmd), WSM_CMD_TIMEOUT); + + wsm_cmd_unlock(priv); + return ret; + +nomem: + wsm_cmd_unlock(priv); + return -ENOMEM; +} +#endif /* CONFIG_CW1200_ETF */ + +/* ******************************************************************** */ +/* WSM TX port control */ + +void wsm_lock_tx(struct cw1200_common *priv) +{ + wsm_cmd_lock(priv); + if (atomic_add_return(1, &priv->tx_lock) == 1) { + if (wsm_flush_tx(priv)) + pr_debug("[WSM] TX is locked.\n"); + } + wsm_cmd_unlock(priv); +} + +void wsm_lock_tx_async(struct cw1200_common *priv) +{ + if (atomic_add_return(1, &priv->tx_lock) == 1) + pr_debug("[WSM] TX is locked (async).\n"); +} + +bool wsm_flush_tx(struct cw1200_common *priv) +{ + unsigned long timestamp = jiffies; + bool pending = false; + long timeout; + int i; + + /* Flush must be called with TX lock held. */ + BUG_ON(!atomic_read(&priv->tx_lock)); + + /* First check if we really need to do something. + * It is safe to use unprotected access, as hw_bufs_used + * can only decrements. + */ + if (!priv->hw_bufs_used) + return true; + + if (priv->bh_error) { + /* In case of failure do not wait for magic. 
*/ + pr_err("[WSM] Fatal error occurred, will not flush TX.\n"); + return false; + } else { + /* Get a timestamp of "oldest" frame */ + for (i = 0; i < 4; ++i) + pending |= cw1200_queue_get_xmit_timestamp( + &priv->tx_queue[i], + &timestamp, 0xffffffff); + /* If there's nothing pending, we're good */ + if (!pending) + return true; + + timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies; + if (timeout < 0 || wait_event_timeout(priv->bh_evt_wq, + !priv->hw_bufs_used, + timeout) <= 0) { + /* Hmmm... Not good. Frame had stuck in firmware. */ + priv->bh_error = 1; + wiphy_err(priv->hw->wiphy, "[WSM] TX Frames (%d) stuck in firmware, killing BH\n", priv->hw_bufs_used); + wake_up(&priv->bh_wq); + return false; + } + + /* Ok, everything is flushed. */ + return true; + } +} + +void wsm_unlock_tx(struct cw1200_common *priv) +{ + int tx_lock; + tx_lock = atomic_sub_return(1, &priv->tx_lock); + BUG_ON(tx_lock < 0); + + if (tx_lock == 0) { + if (!priv->bh_error) + cw1200_bh_wakeup(priv); + pr_debug("[WSM] TX is unlocked.\n"); + } +} + +/* ******************************************************************** */ +/* WSM RX */ + +int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len) +{ + struct wsm_buf buf; + u32 reason; + u32 reg[18]; + char fname[48]; + unsigned int i; + + static const char * const reason_str[] = { + "undefined instruction", + "prefetch abort", + "data abort", + "unknown error", + }; + + buf.begin = buf.data = data; + buf.end = &buf.begin[len]; + + reason = WSM_GET32(&buf); + for (i = 0; i < ARRAY_SIZE(reg); ++i) + reg[i] = WSM_GET32(&buf); + WSM_GET(&buf, fname, sizeof(fname)); + + if (reason < 4) + wiphy_err(priv->hw->wiphy, + "Firmware exception: %s.\n", + reason_str[reason]); + else + wiphy_err(priv->hw->wiphy, + "Firmware assert at %.*s, line %d\n", + (int) sizeof(fname), fname, reg[1]); + + for (i = 0; i < 12; i += 4) + wiphy_err(priv->hw->wiphy, + "R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X,\n", + i + 0, reg[i + 0], i + 1, reg[i + 1], + i + 2, reg[i + 2], i + 3, reg[i + 3]); + wiphy_err(priv->hw->wiphy, + "R12: 0x%.8X, SP: 0x%.8X, LR: 0x%.8X, PC: 0x%.8X,\n", + reg[i + 0], reg[i + 1], reg[i + 2], reg[i + 3]); + i += 4; + wiphy_err(priv->hw->wiphy, + "CPSR: 0x%.8X, SPSR: 0x%.8X\n", + reg[i + 0], reg[i + 1]); + + print_hex_dump_bytes("R1: ", DUMP_PREFIX_NONE, + fname, sizeof(fname)); + return 0; + +underflow: + wiphy_err(priv->hw->wiphy, "Firmware exception.\n"); + print_hex_dump_bytes("Exception: ", DUMP_PREFIX_NONE, + data, len); + return -EINVAL; +} + +int wsm_handle_rx(struct cw1200_common *priv, u16 id, + struct wsm_hdr *wsm, struct sk_buff **skb_p) +{ + int ret = 0; + struct wsm_buf wsm_buf; + int link_id = (id >> 6) & 0x0F; + + /* Strip link id. 
*/ + id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX); + + wsm_buf.begin = (u8 *)&wsm[0]; + wsm_buf.data = (u8 *)&wsm[1]; + wsm_buf.end = &wsm_buf.begin[__le32_to_cpu(wsm->len)]; + + pr_debug("[WSM] <<< 0x%.4X (%td)\n", id, + wsm_buf.end - wsm_buf.begin); + +#ifdef CONFIG_CW1200_ETF + if (etf_mode) { + struct sk_buff *skb = alloc_skb(wsm_buf.end - wsm_buf.begin, GFP_KERNEL); + + /* Strip out Sequence num before passing up */ + wsm->id = __le16_to_cpu(wsm->id); + wsm->id &= 0x0FFF; + wsm->id = __cpu_to_le16(wsm->id); + + memcpy(skb_put(skb, wsm_buf.end - wsm_buf.begin), + wsm_buf.begin, + wsm_buf.end - wsm_buf.begin); + skb_queue_tail(&priv->etf_q, skb); + + /* Special case for startup */ + if (id == WSM_STARTUP_IND_ID) { + wsm_startup_indication(priv, &wsm_buf); + } else if (id & 0x0400) { + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.done = 1; + spin_unlock(&priv->wsm_cmd.lock); + wake_up(&priv->wsm_cmd_wq); + } + + goto out; + } +#endif + + if (id == WSM_TX_CONFIRM_IND_ID) { + ret = wsm_tx_confirm(priv, &wsm_buf, link_id); + } else if (id == WSM_MULTI_TX_CONFIRM_ID) { + ret = wsm_multi_tx_confirm(priv, &wsm_buf, link_id); + } else if (id & 0x0400) { + void *wsm_arg; + u16 wsm_cmd; + + /* Do not trust FW too much. Protection against repeated + * response and race condition removal (see above). + */ + spin_lock(&priv->wsm_cmd.lock); + wsm_arg = priv->wsm_cmd.arg; + wsm_cmd = priv->wsm_cmd.cmd & + ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX); + priv->wsm_cmd.cmd = 0xFFFF; + spin_unlock(&priv->wsm_cmd.lock); + + if (WARN_ON((id & ~0x0400) != wsm_cmd)) { + /* Note that any non-zero is a fatal retcode. */ + ret = -EINVAL; + goto out; + } + + /* Note that wsm_arg can be NULL in case of timeout in + * wsm_cmd_send(). + */ + + switch (id) { + case WSM_READ_MIB_RESP_ID: + if (wsm_arg) + ret = wsm_read_mib_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_WRITE_MIB_RESP_ID: + if (wsm_arg) + ret = wsm_write_mib_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_START_SCAN_RESP_ID: + if (wsm_arg) + ret = wsm_scan_started(priv, wsm_arg, &wsm_buf); + break; + case WSM_CONFIGURATION_RESP_ID: + if (wsm_arg) + ret = wsm_configuration_confirm(priv, wsm_arg, + &wsm_buf); + break; + case WSM_JOIN_RESP_ID: + if (wsm_arg) + ret = wsm_join_confirm(priv, wsm_arg, &wsm_buf); + break; + case WSM_STOP_SCAN_RESP_ID: + case WSM_RESET_RESP_ID: + case WSM_ADD_KEY_RESP_ID: + case WSM_REMOVE_KEY_RESP_ID: + case WSM_SET_PM_RESP_ID: + case WSM_SET_BSS_PARAMS_RESP_ID: + case 0x0412: /* set_tx_queue_params */ + case WSM_EDCA_PARAMS_RESP_ID: + case WSM_SWITCH_CHANNEL_RESP_ID: + case WSM_START_RESP_ID: + case WSM_BEACON_TRANSMIT_RESP_ID: + case 0x0419: /* start_find */ + case 0x041A: /* stop_find */ + case 0x041B: /* update_ie */ + case 0x041C: /* map_link */ + WARN_ON(wsm_arg != NULL); + ret = wsm_generic_confirm(priv, wsm_arg, &wsm_buf); + if (ret) { + wiphy_warn(priv->hw->wiphy, + "wsm_generic_confirm failed for request 0x%04x.\n", + id & ~0x0400); + + /* often 0x407 and 0x410 occur, this means we're dead.. */ + if (priv->join_status >= CW1200_JOIN_STATUS_JOINING) { + wsm_lock_tx(priv); + if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } + } + break; + default: + wiphy_warn(priv->hw->wiphy, + "Unrecognized confirmation 0x%04x\n", + id & ~0x0400); + } + + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.ret = ret; + priv->wsm_cmd.done = 1; + spin_unlock(&priv->wsm_cmd.lock); + + ret = 0; /* Error response from device should ne stop BH. 
*/ + + wake_up(&priv->wsm_cmd_wq); + } else if (id & 0x0800) { + switch (id) { + case WSM_STARTUP_IND_ID: + ret = wsm_startup_indication(priv, &wsm_buf); + break; + case WSM_RECEIVE_IND_ID: + ret = wsm_receive_indication(priv, link_id, + &wsm_buf, skb_p); + break; + case 0x0805: + ret = wsm_event_indication(priv, &wsm_buf); + break; + case WSM_SCAN_COMPLETE_IND_ID: + ret = wsm_scan_complete_indication(priv, &wsm_buf); + break; + case 0x0808: + ret = wsm_ba_timeout_indication(priv, &wsm_buf); + break; + case 0x0809: + ret = wsm_set_pm_indication(priv, &wsm_buf); + break; + case 0x080A: + ret = wsm_channel_switch_indication(priv, &wsm_buf); + break; + case 0x080B: + ret = wsm_find_complete_indication(priv, &wsm_buf); + break; + case 0x080C: + ret = wsm_suspend_resume_indication(priv, + link_id, &wsm_buf); + break; + case 0x080F: + ret = wsm_join_complete_indication(priv, &wsm_buf); + break; + default: + pr_warn("Unrecognised WSM ID %04x\n", id); + } + } else { + WARN_ON(1); + ret = -EINVAL; + } +out: + return ret; +} + +static bool wsm_handle_tx_data(struct cw1200_common *priv, + struct wsm_tx *wsm, + const struct ieee80211_tx_info *tx_info, + const struct cw1200_txpriv *txpriv, + struct cw1200_queue *queue) +{ + bool handled = false; + const struct ieee80211_hdr *frame = + (struct ieee80211_hdr *)&((u8 *)wsm)[txpriv->offset]; + __le16 fctl = frame->frame_control; + enum { + do_probe, + do_drop, + do_wep, + do_tx, + } action = do_tx; + + switch (priv->mode) { + case NL80211_IFTYPE_STATION: + if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) + action = do_tx; + else if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) + action = do_drop; + break; + case NL80211_IFTYPE_AP: + if (!priv->join_status) { + action = do_drop; + } else if (!(BIT(txpriv->raw_link_id) & + (BIT(0) | priv->link_id_map))) { + wiphy_warn(priv->hw->wiphy, + "A frame with expired link id is dropped.\n"); + action = do_drop; + } + if (cw1200_queue_get_generation(wsm->packet_id) > + CW1200_MAX_REQUEUE_ATTEMPTS) { + /* HACK!!! WSM324 firmware has tendency to requeue + * multicast frames in a loop, causing performance + * drop and high power consumption of the driver. + * In this situation it is better just to drop + * the problematic frame. + */ + wiphy_warn(priv->hw->wiphy, + "Too many attempts to requeue a frame; dropped.\n"); + action = do_drop; + } + break; + case NL80211_IFTYPE_ADHOC: + if (priv->join_status != CW1200_JOIN_STATUS_IBSS) + action = do_drop; + break; + case NL80211_IFTYPE_MESH_POINT: + action = do_tx; /* TODO: Test me! 
*/ + break; + case NL80211_IFTYPE_MONITOR: + default: + action = do_drop; + break; + } + + if (action == do_tx) { + if (ieee80211_is_nullfunc(fctl)) { + spin_lock(&priv->bss_loss_lock); + if (priv->bss_loss_state) { + priv->bss_loss_confirm_id = wsm->packet_id; + wsm->queue_id = WSM_QUEUE_VOICE; + } + spin_unlock(&priv->bss_loss_lock); + } else if (ieee80211_is_probe_req(fctl)) { + action = do_probe; + } else if (ieee80211_is_deauth(fctl) && + priv->mode != NL80211_IFTYPE_AP) { + pr_debug("[WSM] Issue unjoin command due to tx deauth.\n"); + wsm_lock_tx_async(priv); + if (queue_work(priv->workqueue, + &priv->unjoin_work) <= 0) + wsm_unlock_tx(priv); + } else if (ieee80211_has_protected(fctl) && + tx_info->control.hw_key && + tx_info->control.hw_key->keyidx != priv->wep_default_key_id && + (tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 || + tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) { + action = do_wep; + } + } + + switch (action) { + case do_probe: + /* An interesting FW "feature". Device filters probe responses. + * The easiest way to get it back is to convert + * probe request into WSM start_scan command. + */ + pr_debug("[WSM] Convert probe request to scan.\n"); + wsm_lock_tx_async(priv); + priv->pending_frame_id = __le32_to_cpu(wsm->packet_id); + if (queue_delayed_work(priv->workqueue, + &priv->scan.probe_work, 0) <= 0) + wsm_unlock_tx(priv); + handled = true; + break; + case do_drop: + pr_debug("[WSM] Drop frame (0x%.4X).\n", fctl); + BUG_ON(cw1200_queue_remove(queue, + __le32_to_cpu(wsm->packet_id))); + handled = true; + break; + case do_wep: + pr_debug("[WSM] Issue set_default_wep_key.\n"); + wsm_lock_tx_async(priv); + priv->wep_default_key_id = tx_info->control.hw_key->keyidx; + priv->pending_frame_id = __le32_to_cpu(wsm->packet_id); + if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0) + wsm_unlock_tx(priv); + handled = true; + break; + case do_tx: + pr_debug("[WSM] Transmit frame.\n"); + break; + default: + /* Do nothing */ + break; + } + return handled; +} + +static int cw1200_get_prio_queue(struct cw1200_common *priv, + u32 link_id_map, int *total) +{ + static const int urgent = BIT(CW1200_LINK_ID_AFTER_DTIM) | + BIT(CW1200_LINK_ID_UAPSD); + struct wsm_edca_queue_params *edca; + unsigned score, best = -1; + int winner = -1; + int queued; + int i; + + /* search for a winner using edca params */ + for (i = 0; i < 4; ++i) { + queued = cw1200_queue_get_num_queued(&priv->tx_queue[i], + link_id_map); + if (!queued) + continue; + *total += queued; + edca = &priv->edca.params[i]; + score = ((edca->aifns + edca->cwmin) << 16) + + ((edca->cwmax - edca->cwmin) * + (get_random_int() & 0xFFFF)); + if (score < best && (winner < 0 || i != 3)) { + best = score; + winner = i; + } + } + + /* override winner if bursting */ + if (winner >= 0 && priv->tx_burst_idx >= 0 && + winner != priv->tx_burst_idx && + !cw1200_queue_get_num_queued( + &priv->tx_queue[winner], + link_id_map & urgent) && + cw1200_queue_get_num_queued( + &priv->tx_queue[priv->tx_burst_idx], + link_id_map)) + winner = priv->tx_burst_idx; + + return winner; +} + +static int wsm_get_tx_queue_and_mask(struct cw1200_common *priv, + struct cw1200_queue **queue_p, + u32 *tx_allowed_mask_p, + bool *more) +{ + int idx; + u32 tx_allowed_mask; + int total = 0; + + /* Search for a queue with multicast frames buffered */ + if (priv->tx_multicast) { + tx_allowed_mask = BIT(CW1200_LINK_ID_AFTER_DTIM); + idx = cw1200_get_prio_queue(priv, + tx_allowed_mask, &total); + if (idx >= 0) { + *more = total > 1; + 
goto found; + } + } + + /* Search for unicast traffic */ + tx_allowed_mask = ~priv->sta_asleep_mask; + tx_allowed_mask |= BIT(CW1200_LINK_ID_UAPSD); + if (priv->sta_asleep_mask) { + tx_allowed_mask |= priv->pspoll_mask; + tx_allowed_mask &= ~BIT(CW1200_LINK_ID_AFTER_DTIM); + } else { + tx_allowed_mask |= BIT(CW1200_LINK_ID_AFTER_DTIM); + } + idx = cw1200_get_prio_queue(priv, + tx_allowed_mask, &total); + if (idx < 0) + return -ENOENT; + +found: + *queue_p = &priv->tx_queue[idx]; + *tx_allowed_mask_p = tx_allowed_mask; + return 0; +} + +int wsm_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst) +{ + struct wsm_tx *wsm = NULL; + struct ieee80211_tx_info *tx_info; + struct cw1200_queue *queue = NULL; + int queue_num; + u32 tx_allowed_mask = 0; + const struct cw1200_txpriv *txpriv = NULL; + int count = 0; + + /* More is used only for broadcasts. */ + bool more = false; + +#ifdef CONFIG_CW1200_ITP + count = cw1200_itp_get_tx(priv, data, tx_len, burst); + if (count) + return count; +#endif + + if (priv->wsm_cmd.ptr) { /* CMD request */ + ++count; + spin_lock(&priv->wsm_cmd.lock); + BUG_ON(!priv->wsm_cmd.ptr); + *data = priv->wsm_cmd.ptr; + *tx_len = priv->wsm_cmd.len; + *burst = 1; + spin_unlock(&priv->wsm_cmd.lock); + } else { + for (;;) { + int ret; + + if (atomic_add_return(0, &priv->tx_lock)) + break; + + spin_lock_bh(&priv->ps_state_lock); + + ret = wsm_get_tx_queue_and_mask(priv, &queue, + &tx_allowed_mask, &more); + queue_num = queue - priv->tx_queue; + + if (priv->buffered_multicasts && + (ret || !more) && + (priv->tx_multicast || !priv->sta_asleep_mask)) { + priv->buffered_multicasts = false; + if (priv->tx_multicast) { + priv->tx_multicast = false; + queue_work(priv->workqueue, + &priv->multicast_stop_work); + } + } + + spin_unlock_bh(&priv->ps_state_lock); + + if (ret) + break; + + if (cw1200_queue_get(queue, + tx_allowed_mask, + &wsm, &tx_info, &txpriv)) + continue; + + if (wsm_handle_tx_data(priv, wsm, + tx_info, txpriv, queue)) + continue; /* Handled by WSM */ + + wsm->hdr.id &= __cpu_to_le16( + ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX)); + wsm->hdr.id |= cpu_to_le16( + WSM_TX_LINK_ID(txpriv->raw_link_id)); + priv->pspoll_mask &= ~BIT(txpriv->raw_link_id); + + *data = (u8 *)wsm; + *tx_len = __le16_to_cpu(wsm->hdr.len); + + /* allow bursting if txop is set */ + if (priv->edca.params[queue_num].txop_limit) + *burst = min(*burst, + (int)cw1200_queue_get_num_queued(queue, tx_allowed_mask) + 1); + else + *burst = 1; + + /* store index of bursting queue */ + if (*burst > 1) + priv->tx_burst_idx = queue_num; + else + priv->tx_burst_idx = -1; + + if (more) { + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) + &((u8 *)wsm)[txpriv->offset]; + /* more buffered multicast/broadcast frames + * ==> set MoreData flag in IEEE 802.11 header + * to inform PS STAs + */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + + pr_debug("[WSM] >>> 0x%.4X (%zu) %p %c\n", + 0x0004, *tx_len, *data, + wsm->more ? 'M' : ' '); + ++count; + break; + } + } + + return count; +} + +void wsm_txed(struct cw1200_common *priv, u8 *data) +{ + if (data == priv->wsm_cmd.ptr) { + spin_lock(&priv->wsm_cmd.lock); + priv->wsm_cmd.ptr = NULL; + spin_unlock(&priv->wsm_cmd.lock); + } +} + +/* ******************************************************************** */ +/* WSM buffer */ + +void wsm_buf_init(struct wsm_buf *buf) +{ + BUG_ON(buf->begin); + buf->begin = kmalloc(FWLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); + buf->end = buf->begin ? 
&buf->begin[FWLOAD_BLOCK_SIZE] : buf->begin; + wsm_buf_reset(buf); +} + +void wsm_buf_deinit(struct wsm_buf *buf) +{ + kfree(buf->begin); + buf->begin = buf->data = buf->end = NULL; +} + +static void wsm_buf_reset(struct wsm_buf *buf) +{ + if (buf->begin) { + buf->data = &buf->begin[4]; + *(u32 *)buf->begin = 0; + } else { + buf->data = buf->begin; + } +} + +static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size) +{ + size_t pos = buf->data - buf->begin; + size_t size = pos + extra_size; + + size = round_up(size, FWLOAD_BLOCK_SIZE); + + buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA); + if (buf->begin) { + buf->data = &buf->begin[pos]; + buf->end = &buf->begin[size]; + return 0; + } else { + buf->end = buf->data = buf->begin; + return -ENOMEM; + } +} diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h new file mode 100644 index 000000000000..8d902d6a7dc4 --- /dev/null +++ b/drivers/net/wireless/cw1200/wsm.h @@ -0,0 +1,1879 @@ +/* + * WSM host interface (HI) interface for ST-Ericsson CW1200 mac80211 drivers + * + * Copyright (c) 2010, ST-Ericsson + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * + * Based on CW1200 UMAC WSM API, which is + * Copyright (C) ST-Ericsson SA 2010 + * Author: Stewart Mathers <stewart.mathers@stericsson.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef CW1200_WSM_H_INCLUDED +#define CW1200_WSM_H_INCLUDED + +#include <linux/spinlock.h> + +struct cw1200_common; + +/* Bands */ +/* Radio band 2.412 -2.484 GHz. */ +#define WSM_PHY_BAND_2_4G (0) + +/* Radio band 4.9375-5.8250 GHz. */ +#define WSM_PHY_BAND_5G (1) + +/* Transmit rates */ +/* 1 Mbps ERP-DSSS */ +#define WSM_TRANSMIT_RATE_1 (0) + +/* 2 Mbps ERP-DSSS */ +#define WSM_TRANSMIT_RATE_2 (1) + +/* 5.5 Mbps ERP-CCK */ +#define WSM_TRANSMIT_RATE_5 (2) + +/* 11 Mbps ERP-CCK */ +#define WSM_TRANSMIT_RATE_11 (3) + +/* 22 Mbps ERP-PBCC (Not supported) */ +/* #define WSM_TRANSMIT_RATE_22 (4) */ + +/* 33 Mbps ERP-PBCC (Not supported) */ +/* #define WSM_TRANSMIT_RATE_33 (5) */ + +/* 6 Mbps (3 Mbps) ERP-OFDM, BPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_6 (6) + +/* 9 Mbps (4.5 Mbps) ERP-OFDM, BPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_9 (7) + +/* 12 Mbps (6 Mbps) ERP-OFDM, QPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_12 (8) + +/* 18 Mbps (9 Mbps) ERP-OFDM, QPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_18 (9) + +/* 24 Mbps (12 Mbps) ERP-OFDM, 16QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_24 (10) + +/* 36 Mbps (18 Mbps) ERP-OFDM, 16QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_36 (11) + +/* 48 Mbps (24 Mbps) ERP-OFDM, 64QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_48 (12) + +/* 54 Mbps (27 Mbps) ERP-OFDM, 64QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_54 (13) + +/* 6.5 Mbps HT-OFDM, BPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_6 (14) + +/* 13 Mbps HT-OFDM, QPSK coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_13 (15) + +/* 19.5 Mbps HT-OFDM, QPSK coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_19 (16) + +/* 26 Mbps HT-OFDM, 16QAM coding rate 1/2 */ +#define WSM_TRANSMIT_RATE_HT_26 (17) + +/* 39 Mbps HT-OFDM, 16QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_39 (18) + +/* 52 Mbps HT-OFDM, 64QAM coding rate 2/3 */ +#define WSM_TRANSMIT_RATE_HT_52 (19) + +/* 58.5 Mbps HT-OFDM, 64QAM coding rate 3/4 */ +#define WSM_TRANSMIT_RATE_HT_58 (20) 
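The WSM_TRANSMIT_RATE_* values in this table are rate indices, not rates in Mbps. The rate-set fields elsewhere in this header (supportedRateMask in struct wsm_configuration, basic_rate_set in struct wsm_join and struct wsm_start) are 32-bit masks, and the sketch below assumes the mask bit positions correspond to these indices; the helper name is illustrative and not part of the patch. It relies only on the BIT() macro already used throughout this header.

	/* Hypothetical helper: build an 802.11b basic-rate mask
	 * (1, 2, 5.5 and 11 Mbps) from the WSM rate indices,
	 * assuming one mask bit per rate index. */
	static inline u32 wsm_rate_mask_11b(void)
	{
		return BIT(WSM_TRANSMIT_RATE_1) |
		       BIT(WSM_TRANSMIT_RATE_2) |
		       BIT(WSM_TRANSMIT_RATE_5) |
		       BIT(WSM_TRANSMIT_RATE_11);
	}

Under that assumption, such a mask is what ends up serialized by calls like WSM_PUT32(buf, arg->basic_rate_set) in wsm_join() and wsm_start() above.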
+ +/* 65 Mbps HT-OFDM, 64QAM coding rate 5/6 */ +#define WSM_TRANSMIT_RATE_HT_65 (21) + +/* Scan types */ +/* Foreground scan */ +#define WSM_SCAN_TYPE_FOREGROUND (0) + +/* Background scan */ +#define WSM_SCAN_TYPE_BACKGROUND (1) + +/* Auto scan */ +#define WSM_SCAN_TYPE_AUTO (2) + +/* Scan flags */ +/* Forced background scan means if the station cannot */ +/* enter the power-save mode, it shall force to perform a */ +/* background scan. Only valid when ScanType is */ +/* background scan. */ +#define WSM_SCAN_FLAG_FORCE_BACKGROUND (BIT(0)) + +/* The WLAN device scans one channel at a time so */ +/* that disturbance to the data traffic is minimized. */ +#define WSM_SCAN_FLAG_SPLIT_METHOD (BIT(1)) + +/* Preamble Type. Long if not set. */ +#define WSM_SCAN_FLAG_SHORT_PREAMBLE (BIT(2)) + +/* 11n Tx Mode. Mixed if not set. */ +#define WSM_SCAN_FLAG_11N_GREENFIELD (BIT(3)) + +/* Scan constraints */ +/* Maximum number of channels to be scanned. */ +#define WSM_SCAN_MAX_NUM_OF_CHANNELS (48) + +/* The maximum number of SSIDs that the device can scan for. */ +#define WSM_SCAN_MAX_NUM_OF_SSIDS (2) + +/* Power management modes */ +/* 802.11 Active mode */ +#define WSM_PSM_ACTIVE (0) + +/* 802.11 PS mode */ +#define WSM_PSM_PS BIT(0) + +/* Fast Power Save bit */ +#define WSM_PSM_FAST_PS_FLAG BIT(7) + +/* Dynamic aka Fast power save */ +#define WSM_PSM_FAST_PS (BIT(0) | BIT(7)) + +/* Undetermined */ +/* Note : Undetermined status is reported when the */ +/* NULL data frame used to advertise the PM mode to */ +/* the AP at Pre or Post Background Scan is not Acknowledged */ +#define WSM_PSM_UNKNOWN BIT(1) + +/* Queue IDs */ +/* best effort/legacy */ +#define WSM_QUEUE_BEST_EFFORT (0) + +/* background */ +#define WSM_QUEUE_BACKGROUND (1) + +/* video */ +#define WSM_QUEUE_VIDEO (2) + +/* voice */ +#define WSM_QUEUE_VOICE (3) + +/* HT TX parameters */ +/* Non-HT */ +#define WSM_HT_TX_NON_HT (0) + +/* Mixed format */ +#define WSM_HT_TX_MIXED (1) + +/* Greenfield format */ +#define WSM_HT_TX_GREENFIELD (2) + +/* STBC allowed */ +#define WSM_HT_TX_STBC (BIT(7)) + +/* EPTA prioirty flags for BT Coex */ +/* default epta priority */ +#define WSM_EPTA_PRIORITY_DEFAULT 4 +/* use for normal data */ +#define WSM_EPTA_PRIORITY_DATA 4 +/* use for connect/disconnect/roaming*/ +#define WSM_EPTA_PRIORITY_MGT 5 +/* use for action frames */ +#define WSM_EPTA_PRIORITY_ACTION 5 +/* use for AC_VI data */ +#define WSM_EPTA_PRIORITY_VIDEO 5 +/* use for AC_VO data */ +#define WSM_EPTA_PRIORITY_VOICE 6 +/* use for EAPOL exchange */ +#define WSM_EPTA_PRIORITY_EAPOL 7 + +/* TX status */ +/* Frame was sent aggregated */ +/* Only valid for WSM_SUCCESS status. */ +#define WSM_TX_STATUS_AGGREGATION (BIT(0)) + +/* Host should requeue this frame later. */ +/* Valid only when status is WSM_REQUEUE. */ +#define WSM_TX_STATUS_REQUEUE (BIT(1)) + +/* Normal Ack */ +#define WSM_TX_STATUS_NORMAL_ACK (0<<2) + +/* No Ack */ +#define WSM_TX_STATUS_NO_ACK (1<<2) + +/* No explicit acknowledgement */ +#define WSM_TX_STATUS_NO_EXPLICIT_ACK (2<<2) + +/* Block Ack */ +/* Only valid for WSM_SUCCESS status. */ +#define WSM_TX_STATUS_BLOCK_ACK (3<<2) + +/* RX status */ +/* Unencrypted */ +#define WSM_RX_STATUS_UNENCRYPTED (0<<0) + +/* WEP */ +#define WSM_RX_STATUS_WEP (1<<0) + +/* TKIP */ +#define WSM_RX_STATUS_TKIP (2<<0) + +/* AES */ +#define WSM_RX_STATUS_AES (3<<0) + +/* WAPI */ +#define WSM_RX_STATUS_WAPI (4<<0) + +/* Macro to fetch encryption subfield. 
*/ +#define WSM_RX_STATUS_ENCRYPTION(status) ((status) & 0x07) + +/* Frame was part of an aggregation */ +#define WSM_RX_STATUS_AGGREGATE (BIT(3)) + +/* Frame was first in the aggregation */ +#define WSM_RX_STATUS_AGGREGATE_FIRST (BIT(4)) + +/* Frame was last in the aggregation */ +#define WSM_RX_STATUS_AGGREGATE_LAST (BIT(5)) + +/* Indicates a defragmented frame */ +#define WSM_RX_STATUS_DEFRAGMENTED (BIT(6)) + +/* Indicates a Beacon frame */ +#define WSM_RX_STATUS_BEACON (BIT(7)) + +/* Indicates STA bit beacon TIM field */ +#define WSM_RX_STATUS_TIM (BIT(8)) + +/* Indicates Beacon frame's virtual bitmap contains multicast bit */ +#define WSM_RX_STATUS_MULTICAST (BIT(9)) + +/* Indicates frame contains a matching SSID */ +#define WSM_RX_STATUS_MATCHING_SSID (BIT(10)) + +/* Indicates frame contains a matching BSSI */ +#define WSM_RX_STATUS_MATCHING_BSSI (BIT(11)) + +/* Indicates More bit set in Framectl field */ +#define WSM_RX_STATUS_MORE_DATA (BIT(12)) + +/* Indicates frame received during a measurement process */ +#define WSM_RX_STATUS_MEASUREMENT (BIT(13)) + +/* Indicates frame received as an HT packet */ +#define WSM_RX_STATUS_HT (BIT(14)) + +/* Indicates frame received with STBC */ +#define WSM_RX_STATUS_STBC (BIT(15)) + +/* Indicates Address 1 field matches dot11StationId */ +#define WSM_RX_STATUS_ADDRESS1 (BIT(16)) + +/* Indicates Group address present in the Address 1 field */ +#define WSM_RX_STATUS_GROUP (BIT(17)) + +/* Indicates Broadcast address present in the Address 1 field */ +#define WSM_RX_STATUS_BROADCAST (BIT(18)) + +/* Indicates group key used with encrypted frames */ +#define WSM_RX_STATUS_GROUP_KEY (BIT(19)) + +/* Macro to fetch encryption key index. */ +#define WSM_RX_STATUS_KEY_IDX(status) (((status >> 20)) & 0x0F) + +/* Indicates TSF inclusion after 802.11 frame body */ +#define WSM_RX_STATUS_TSF_INCLUDED (BIT(24)) + +/* Frame Control field starts at Frame offset + 2 */ +#define WSM_TX_2BYTES_SHIFT (BIT(7)) + +/* Join mode */ +/* IBSS */ +#define WSM_JOIN_MODE_IBSS (0) + +/* BSS */ +#define WSM_JOIN_MODE_BSS (1) + +/* PLCP preamble type */ +/* For long preamble */ +#define WSM_JOIN_PREAMBLE_LONG (0) + +/* For short preamble (Long for 1Mbps) */ +#define WSM_JOIN_PREAMBLE_SHORT (1) + +/* For short preamble (Long for 1 and 2Mbps) */ +#define WSM_JOIN_PREAMBLE_SHORT_2 (2) + +/* Join flags */ +/* Unsynchronized */ +#define WSM_JOIN_FLAGS_UNSYNCRONIZED BIT(0) +/* The BSS owner is a P2P GO */ +#define WSM_JOIN_FLAGS_P2P_GO BIT(1) +/* Force to join BSS with the BSSID and the + * SSID specified without waiting for beacons. The + * ProbeForJoin parameter is ignored. 
*/ +#define WSM_JOIN_FLAGS_FORCE BIT(2) +/* Give probe request/response higher + * priority over the BT traffic */ +#define WSM_JOIN_FLAGS_PRIO BIT(3) +/* Issue immediate join confirmation and use + * join complete to notify about completion */ +#define WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND BIT(5) + +/* Key types */ +#define WSM_KEY_TYPE_WEP_DEFAULT (0) +#define WSM_KEY_TYPE_WEP_PAIRWISE (1) +#define WSM_KEY_TYPE_TKIP_GROUP (2) +#define WSM_KEY_TYPE_TKIP_PAIRWISE (3) +#define WSM_KEY_TYPE_AES_GROUP (4) +#define WSM_KEY_TYPE_AES_PAIRWISE (5) +#define WSM_KEY_TYPE_WAPI_GROUP (6) +#define WSM_KEY_TYPE_WAPI_PAIRWISE (7) + +/* Key indexes */ +#define WSM_KEY_MAX_INDEX (10) + +/* ACK policy */ +#define WSM_ACK_POLICY_NORMAL (0) +#define WSM_ACK_POLICY_NO_ACK (1) + +/* Start modes */ +#define WSM_START_MODE_AP (0) /* Mini AP */ +#define WSM_START_MODE_P2P_GO (1) /* P2P GO */ +#define WSM_START_MODE_P2P_DEV (2) /* P2P device */ + +/* SetAssociationMode MIB flags */ +#define WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE (BIT(0)) +#define WSM_ASSOCIATION_MODE_USE_HT_MODE (BIT(1)) +#define WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET (BIT(2)) +#define WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING (BIT(3)) +#define WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES (BIT(4)) + +/* RcpiRssiThreshold MIB flags */ +#define WSM_RCPI_RSSI_THRESHOLD_ENABLE (BIT(0)) +#define WSM_RCPI_RSSI_USE_RSSI (BIT(1)) +#define WSM_RCPI_RSSI_DONT_USE_UPPER (BIT(2)) +#define WSM_RCPI_RSSI_DONT_USE_LOWER (BIT(3)) + +/* Update-ie constants */ +#define WSM_UPDATE_IE_BEACON (BIT(0)) +#define WSM_UPDATE_IE_PROBE_RESP (BIT(1)) +#define WSM_UPDATE_IE_PROBE_REQ (BIT(2)) + +/* WSM events */ +/* Error */ +#define WSM_EVENT_ERROR (0) + +/* BSS lost */ +#define WSM_EVENT_BSS_LOST (1) + +/* BSS regained */ +#define WSM_EVENT_BSS_REGAINED (2) + +/* Radar detected */ +#define WSM_EVENT_RADAR_DETECTED (3) + +/* RCPI or RSSI threshold triggered */ +#define WSM_EVENT_RCPI_RSSI (4) + +/* BT inactive */ +#define WSM_EVENT_BT_INACTIVE (5) + +/* BT active */ +#define WSM_EVENT_BT_ACTIVE (6) + +/* MIB IDs */ +/* 4.1 dot11StationId */ +#define WSM_MIB_ID_DOT11_STATION_ID 0x0000 + +/* 4.2 dot11MaxtransmitMsduLifeTime */ +#define WSM_MIB_ID_DOT11_MAX_TRANSMIT_LIFTIME 0x0001 + +/* 4.3 dot11MaxReceiveLifeTime */ +#define WSM_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME 0x0002 + +/* 4.4 dot11SlotTime */ +#define WSM_MIB_ID_DOT11_SLOT_TIME 0x0003 + +/* 4.5 dot11GroupAddressesTable */ +#define WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE 0x0004 +#define WSM_MAX_GRP_ADDRTABLE_ENTRIES 8 + +/* 4.6 dot11WepDefaultKeyId */ +#define WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID 0x0005 + +/* 4.7 dot11CurrentTxPowerLevel */ +#define WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL 0x0006 + +/* 4.8 dot11RTSThreshold */ +#define WSM_MIB_ID_DOT11_RTS_THRESHOLD 0x0007 + +/* 4.9 NonErpProtection */ +#define WSM_MIB_ID_NON_ERP_PROTECTION 0x1000 + +/* 4.10 ArpIpAddressesTable */ +#define WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE 0x1001 +#define WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES 1 + +/* 4.11 TemplateFrame */ +#define WSM_MIB_ID_TEMPLATE_FRAME 0x1002 + +/* 4.12 RxFilter */ +#define WSM_MIB_ID_RX_FILTER 0x1003 + +/* 4.13 BeaconFilterTable */ +#define WSM_MIB_ID_BEACON_FILTER_TABLE 0x1004 + +/* 4.14 BeaconFilterEnable */ +#define WSM_MIB_ID_BEACON_FILTER_ENABLE 0x1005 + +/* 4.15 OperationalPowerMode */ +#define WSM_MIB_ID_OPERATIONAL_POWER_MODE 0x1006 + +/* 4.16 BeaconWakeUpPeriod */ +#define WSM_MIB_ID_BEACON_WAKEUP_PERIOD 0x1007 + +/* 4.17 RcpiRssiThreshold */ +#define WSM_MIB_ID_RCPI_RSSI_THRESHOLD 0x1009 + +/* 4.18 StatisticsTable */ 
+#define WSM_MIB_ID_STATISTICS_TABLE 0x100A + +/* 4.19 IbssPsConfig */ +#define WSM_MIB_ID_IBSS_PS_CONFIG 0x100B + +/* 4.20 CountersTable */ +#define WSM_MIB_ID_COUNTERS_TABLE 0x100C + +/* 4.21 BlockAckPolicy */ +#define WSM_MIB_ID_BLOCK_ACK_POLICY 0x100E + +/* 4.22 OverrideInternalTxRate */ +#define WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE 0x100F + +/* 4.23 SetAssociationMode */ +#define WSM_MIB_ID_SET_ASSOCIATION_MODE 0x1010 + +/* 4.24 UpdateEptaConfigData */ +#define WSM_MIB_ID_UPDATE_EPTA_CONFIG_DATA 0x1011 + +/* 4.25 SelectCcaMethod */ +#define WSM_MIB_ID_SELECT_CCA_METHOD 0x1012 + +/* 4.26 SetUpasdInformation */ +#define WSM_MIB_ID_SET_UAPSD_INFORMATION 0x1013 + +/* 4.27 SetAutoCalibrationMode WBF00004073 */ +#define WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE 0x1015 + +/* 4.28 SetTxRateRetryPolicy */ +#define WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY 0x1016 + +/* 4.29 SetHostMessageTypeFilter */ +#define WSM_MIB_ID_SET_HOST_MSG_TYPE_FILTER 0x1017 + +/* 4.30 P2PFindInfo */ +#define WSM_MIB_ID_P2P_FIND_INFO 0x1018 + +/* 4.31 P2PPsModeInfo */ +#define WSM_MIB_ID_P2P_PS_MODE_INFO 0x1019 + +/* 4.32 SetEtherTypeDataFrameFilter */ +#define WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER 0x101A + +/* 4.33 SetUDPPortDataFrameFilter */ +#define WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER 0x101B + +/* 4.34 SetMagicDataFrameFilter */ +#define WSM_MIB_ID_SET_MAGIC_DATAFRAME_FILTER 0x101C + +/* 4.35 P2PDeviceInfo */ +#define WSM_MIB_ID_P2P_DEVICE_INFO 0x101D + +/* 4.36 SetWCDMABand */ +#define WSM_MIB_ID_SET_WCDMA_BAND 0x101E + +/* 4.37 GroupTxSequenceCounter */ +#define WSM_MIB_ID_GRP_SEQ_COUNTER 0x101F + +/* 4.38 ProtectedMgmtPolicy */ +#define WSM_MIB_ID_PROTECTED_MGMT_POLICY 0x1020 + +/* 4.39 SetHtProtection */ +#define WSM_MIB_ID_SET_HT_PROTECTION 0x1021 + +/* 4.40 GPIO Command */ +#define WSM_MIB_ID_GPIO_COMMAND 0x1022 + +/* 4.41 TSF Counter Value */ +#define WSM_MIB_ID_TSF_COUNTER 0x1023 + +/* Test Purposes Only */ +#define WSM_MIB_ID_BLOCK_ACK_INFO 0x100D + +/* 4.42 UseMultiTxConfMessage */ +#define WSM_MIB_USE_MULTI_TX_CONF 0x1024 + +/* 4.43 Keep-alive period */ +#define WSM_MIB_ID_KEEP_ALIVE_PERIOD 0x1025 + +/* 4.44 Disable BSSID filter */ +#define WSM_MIB_ID_DISABLE_BSSID_FILTER 0x1026 + +/* Frame template types */ +#define WSM_FRAME_TYPE_PROBE_REQUEST (0) +#define WSM_FRAME_TYPE_BEACON (1) +#define WSM_FRAME_TYPE_NULL (2) +#define WSM_FRAME_TYPE_QOS_NULL (3) +#define WSM_FRAME_TYPE_PS_POLL (4) +#define WSM_FRAME_TYPE_PROBE_RESPONSE (5) + +#define WSM_FRAME_GREENFIELD (0x80) /* See 4.11 */ + +/* Status */ +/* The WSM firmware has completed a request */ +/* successfully. */ +#define WSM_STATUS_SUCCESS (0) + +/* This is a generic failure code if other error codes do */ +/* not apply. */ +#define WSM_STATUS_FAILURE (1) + +/* A request contains one or more invalid parameters. */ +#define WSM_INVALID_PARAMETER (2) + +/* The request cannot perform because the device is in */ +/* an inappropriate mode. */ +#define WSM_ACCESS_DENIED (3) + +/* The frame received includes a decryption error. */ +#define WSM_STATUS_DECRYPTFAILURE (4) + +/* A MIC failure is detected in the received packets. */ +#define WSM_STATUS_MICFAILURE (5) + +/* The transmit request failed due to retry limit being */ +/* exceeded. */ +#define WSM_STATUS_RETRY_EXCEEDED (6) + +/* The transmit request failed due to MSDU life time */ +/* being exceeded. */ +#define WSM_STATUS_TX_LIFETIME_EXCEEDED (7) + +/* The link to the AP is lost. 
*/ +#define WSM_STATUS_LINK_LOST (8) + +/* No key was found for the encrypted frame */ +#define WSM_STATUS_NO_KEY_FOUND (9) + +/* Jammer was detected when transmitting this frame */ +#define WSM_STATUS_JAMMER_DETECTED (10) + +/* The message should be requeued later. */ +/* This is applicable only to Transmit */ +#define WSM_REQUEUE (11) + +/* Advanced filtering options */ +#define WSM_MAX_FILTER_ELEMENTS (4) + +#define WSM_FILTER_ACTION_IGNORE (0) +#define WSM_FILTER_ACTION_FILTER_IN (1) +#define WSM_FILTER_ACTION_FILTER_OUT (2) + +#define WSM_FILTER_PORT_TYPE_DST (0) +#define WSM_FILTER_PORT_TYPE_SRC (1) + +/* Actual header of WSM messages */ +struct wsm_hdr { + __le16 len; + __le16 id; +}; + +#define WSM_TX_SEQ_MAX (7) +#define WSM_TX_SEQ(seq) \ + ((seq & WSM_TX_SEQ_MAX) << 13) +#define WSM_TX_LINK_ID_MAX (0x0F) +#define WSM_TX_LINK_ID(link_id) \ + ((link_id & WSM_TX_LINK_ID_MAX) << 6) + +#define MAX_BEACON_SKIP_TIME_MS 1000 + +#define WSM_CMD_LAST_CHANCE_TIMEOUT (HZ * 3 / 2) + +/* ******************************************************************** */ +/* WSM capability */ + +#define WSM_STARTUP_IND_ID 0x0801 + +struct wsm_startup_ind { + u16 input_buffers; + u16 input_buffer_size; + u16 status; + u16 hw_id; + u16 hw_subid; + u16 fw_cap; + u16 fw_type; + u16 fw_api; + u16 fw_build; + u16 fw_ver; + char fw_label[128]; + u32 config[4]; +}; + +/* ******************************************************************** */ +/* WSM commands */ + +/* 3.1 */ +#define WSM_CONFIGURATION_REQ_ID 0x0009 +#define WSM_CONFIGURATION_RESP_ID 0x0409 + +struct wsm_tx_power_range { + int min_power_level; + int max_power_level; + u32 stepping; +}; + +struct wsm_configuration { + /* [in] */ u32 dot11MaxTransmitMsduLifeTime; + /* [in] */ u32 dot11MaxReceiveLifeTime; + /* [in] */ u32 dot11RtsThreshold; + /* [in, out] */ u8 *dot11StationId; + /* [in] */ const void *dpdData; + /* [in] */ size_t dpdData_size; + /* [out] */ u8 dot11FrequencyBandsSupported; + /* [out] */ u32 supportedRateMask; + /* [out] */ struct wsm_tx_power_range txPowerRange[2]; +}; + +int wsm_configuration(struct cw1200_common *priv, + struct wsm_configuration *arg); + +/* 3.3 */ +#define WSM_RESET_REQ_ID 0x000A +#define WSM_RESET_RESP_ID 0x040A +struct wsm_reset { + /* [in] */ int link_id; + /* [in] */ bool reset_statistics; +}; + +int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg); + +/* 3.5 */ +#define WSM_READ_MIB_REQ_ID 0x0005 +#define WSM_READ_MIB_RESP_ID 0x0405 +int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *buf, + size_t buf_size); + +/* 3.7 */ +#define WSM_WRITE_MIB_REQ_ID 0x0006 +#define WSM_WRITE_MIB_RESP_ID 0x0406 +int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *buf, + size_t buf_size); + +/* 3.9 */ +#define WSM_START_SCAN_REQ_ID 0x0007 +#define WSM_START_SCAN_RESP_ID 0x0407 + +struct wsm_ssid { + u8 ssid[32]; + u32 length; +}; + +struct wsm_scan_ch { + u16 number; + u32 min_chan_time; + u32 max_chan_time; + u32 tx_power_level; +}; + +struct wsm_scan { + /* WSM_PHY_BAND_... */ + u8 band; + + /* WSM_SCAN_TYPE_... */ + u8 type; + + /* WSM_SCAN_FLAG_... */ + u8 flags; + + /* WSM_TRANSMIT_RATE_... */ + u8 max_tx_rate; + + /* Interval period in TUs that the device shall the re- */ + /* execute the requested scan. Max value supported by the device */ + /* is 256s. */ + u32 auto_scan_interval; + + /* Number of probe requests (per SSID) sent to one (1) */ + /* channel. Zero (0) means that none is send, which */ + /* means that a passive scan is to be done. 
Value */ + /* greater than zero (0) means that an active scan is to */ + /* be done. */ + u32 num_probes; + + /* Number of channels to be scanned. */ + /* Maximum value is WSM_SCAN_MAX_NUM_OF_CHANNELS. */ + u8 num_channels; + + /* Number of SSID provided in the scan command (this */ + /* is zero (0) in broadcast scan) */ + /* The maximum number of SSIDs is WSM_SCAN_MAX_NUM_OF_SSIDS. */ + u8 num_ssids; + + /* The delay time (in microseconds) period */ + /* before sending a probe-request. */ + u8 probe_delay; + + /* SSIDs to be scanned [numOfSSIDs]; */ + struct wsm_ssid *ssids; + + /* Channels to be scanned [numOfChannels]; */ + struct wsm_scan_ch *ch; +}; + +int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg); + +/* 3.11 */ +#define WSM_STOP_SCAN_REQ_ID 0x0008 +#define WSM_STOP_SCAN_RESP_ID 0x0408 +int wsm_stop_scan(struct cw1200_common *priv); + +/* 3.13 */ +#define WSM_SCAN_COMPLETE_IND_ID 0x0806 +struct wsm_scan_complete { + /* WSM_STATUS_... */ + u32 status; + + /* WSM_PSM_... */ + u8 psm; + + /* Number of channels that the scan operation completed. */ + u8 num_channels; +}; + +/* 3.14 */ +#define WSM_TX_CONFIRM_IND_ID 0x0404 +#define WSM_MULTI_TX_CONFIRM_ID 0x041E + +struct wsm_tx_confirm { + /* Packet identifier used in wsm_tx. */ + u32 packet_id; + + /* WSM_STATUS_... */ + u32 status; + + /* WSM_TRANSMIT_RATE_... */ + u8 tx_rate; + + /* The number of times the frame was transmitted */ + /* without receiving an acknowledgement. */ + u8 ack_failures; + + /* WSM_TX_STATUS_... */ + u16 flags; + + /* The total time in microseconds that the frame spent in */ + /* the WLAN device before transmission as completed. */ + u32 media_delay; + + /* The total time in microseconds that the frame spent in */ + /* the WLAN device before transmission was started. */ + u32 tx_queue_delay; +}; + +/* 3.15 */ +typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv, + struct wsm_tx_confirm *arg); + +/* Note that ideology of wsm_tx struct is different against the rest of + * WSM API. wsm_hdr is /not/ a caller-adapted struct to be used as an input + * argument for WSM call, but a prepared bytestream to be sent to firmware. + * It is filled partly in cw1200_tx, partly in low-level WSM code. + * Please pay attention once again: ideology is different. + * + * Legend: + * - [in]: cw1200_tx must fill this field. + * - [wsm]: the field is filled by low-level WSM. + */ +struct wsm_tx { + /* common WSM header */ + struct wsm_hdr hdr; + + /* Packet identifier that meant to be used in completion. */ + __le32 packet_id; + + /* WSM_TRANSMIT_RATE_... */ + u8 max_tx_rate; + + /* WSM_QUEUE_... */ + u8 queue_id; + + /* True: another packet is pending on the host for transmission. */ + u8 more; + + /* Bit 0 = 0 - Start expiry time from first Tx attempt (default) */ + /* Bit 0 = 1 - Start expiry time from receipt of Tx Request */ + /* Bits 3:1 - PTA Priority */ + /* Bits 6:4 - Tx Rate Retry Policy */ + /* Bit 7 - Reserved */ + u8 flags; + + /* Should be 0. */ + __le32 reserved; + + /* The elapsed time in TUs, after the initial transmission */ + /* of an MSDU, after which further attempts to transmit */ + /* the MSDU shall be terminated. Overrides the global */ + /* dot11MaxTransmitMsduLifeTime setting [optional] */ + /* Device will set the default value if this is 0. */ + __le32 expire_time; + + /* WSM_HT_TX_... 
*/ + __le32 ht_tx_parameters; +}; + +/* = sizeof(generic hi hdr) + sizeof(wsm hdr) + sizeof(alignment) */ +#define WSM_TX_EXTRA_HEADROOM (28) + +/* 3.16 */ +#define WSM_RECEIVE_IND_ID 0x0804 + +struct wsm_rx { + /* WSM_STATUS_... */ + __le32 status; + + /* Specifies the channel of the received packet. */ + __le16 channel_number; + + /* WSM_TRANSMIT_RATE_... */ + u8 rx_rate; + + /* This value is expressed in signed Q8.0 format for */ + /* RSSI and unsigned Q7.1 format for RCPI. */ + u8 rcpi_rssi; + + /* WSM_RX_STATUS_... */ + __le32 flags; + + /* Payload */ + u8 data[0]; +} __packed; + +/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */ +#define WSM_RX_EXTRA_HEADROOM (16) + +typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg, + struct sk_buff **skb_p); + +/* 3.17 */ +struct wsm_event { + /* WSM_STATUS_... */ + /* [out] */ u32 id; + + /* Indication parameters. */ + /* For error indication, this shall be a 32-bit WSM status. */ + /* For RCPI or RSSI indication, this should be an 8-bit */ + /* RCPI or RSSI value. */ + /* [out] */ u32 data; +}; + +struct cw1200_wsm_event { + struct list_head link; + struct wsm_event evt; +}; + +/* 3.18 - 3.22 */ +/* Measurement. Skipped for now. Irrelevant. */ + +typedef void (*wsm_event_cb) (struct cw1200_common *priv, + struct wsm_event *arg); + +/* 3.23 */ +#define WSM_JOIN_REQ_ID 0x000B +#define WSM_JOIN_RESP_ID 0x040B + +struct wsm_join { + /* WSM_JOIN_MODE_... */ + u8 mode; + + /* WSM_PHY_BAND_... */ + u8 band; + + /* Specifies the channel number to join. The channel */ + /* number will be mapped to an actual frequency */ + /* according to the band */ + u16 channel_number; + + /* Specifies the BSSID of the BSS or IBSS to be joined */ + /* or the IBSS to be started. */ + u8 bssid[6]; + + /* ATIM window of IBSS */ + /* When ATIM window is zero the initiated IBSS does */ + /* not support power saving. */ + u16 atim_window; + + /* WSM_JOIN_PREAMBLE_... */ + u8 preamble_type; + + /* Specifies if a probe request should be sent with the */ + /* specified SSID when joining the network. */ + u8 probe_for_join; + + /* DTIM Period (In multiples of beacon interval) */ + u8 dtim_period; + + /* WSM_JOIN_FLAGS_... */ + u8 flags; + + /* Length of the SSID */ + u32 ssid_len; + + /* Specifies the SSID of the IBSS to join or start */ + u8 ssid[32]; + + /* Specifies the time between TBTTs in TUs */ + u32 beacon_interval; + + /* A bit mask that defines the BSS basic rate set. */ + u32 basic_rate_set; +}; + +struct wsm_join_cnf { + u32 status; + + /* Minimum transmission power level in units of 0.1dBm */ + u32 min_power_level; + + /* Maximum transmission power level in units of 0.1dBm */ + u32 max_power_level; +}; + +int wsm_join(struct cw1200_common *priv, struct wsm_join *arg); + +/* 3.24 */ +struct wsm_join_complete { + /* WSM_STATUS_... */ + u32 status; +}; + +/* 3.25 */ +#define WSM_SET_PM_REQ_ID 0x0010 +#define WSM_SET_PM_RESP_ID 0x0410 +struct wsm_set_pm { + /* WSM_PSM_... */ + u8 mode; + + /* in unit of 500us; 0 to use default */ + u8 fast_psm_idle_period; + + /* in unit of 500us; 0 to use default */ + u8 ap_psm_change_period; + + /* in unit of 500us; 0 to disable auto-pspoll */ + u8 min_auto_pspoll_period; +}; + +int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg); + +/* 3.27 */ +struct wsm_set_pm_complete { + u8 psm; /* WSM_PSM_... 
*/ +}; + +/* 3.28 */ +#define WSM_SET_BSS_PARAMS_REQ_ID 0x0011 +#define WSM_SET_BSS_PARAMS_RESP_ID 0x0411 +struct wsm_set_bss_params { + /* This resets the beacon loss counters only */ + u8 reset_beacon_loss; + + /* The number of lost consecutive beacons after which */ + /* the WLAN device should indicate the BSS-Lost event */ + /* to the WLAN host driver. */ + u8 beacon_lost_count; + + /* The AID received during the association process. */ + u16 aid; + + /* The operational rate set mask */ + u32 operational_rate_set; +}; + +int wsm_set_bss_params(struct cw1200_common *priv, + const struct wsm_set_bss_params *arg); + +/* 3.30 */ +#define WSM_ADD_KEY_REQ_ID 0x000C +#define WSM_ADD_KEY_RESP_ID 0x040C +struct wsm_add_key { + u8 type; /* WSM_KEY_TYPE_... */ + u8 index; /* Key entry index: 0 -- WSM_KEY_MAX_INDEX */ + u16 reserved; + union { + struct { + u8 peer[6]; /* MAC address of the + * peer station */ + u8 reserved; + u8 keylen; /* Key length in bytes */ + u8 keydata[16]; /* Key data */ + } __packed wep_pairwise; + struct { + u8 keyid; /* Unique per key identifier + * (0..3) */ + u8 keylen; /* Key length in bytes */ + u16 reserved; + u8 keydata[16]; /* Key data */ + } __packed wep_group; + struct { + u8 peer[6]; /* MAC address of the + * peer station */ + u16 reserved; + u8 keydata[16]; /* TKIP key data */ + u8 rx_mic_key[8]; /* Rx MIC key */ + u8 tx_mic_key[8]; /* Tx MIC key */ + } __packed tkip_pairwise; + struct { + u8 keydata[16]; /* TKIP key data */ + u8 rx_mic_key[8]; /* Rx MIC key */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + u8 rx_seqnum[8]; /* Receive Sequence Counter */ + } __packed tkip_group; + struct { + u8 peer[6]; /* MAC address of the + * peer station */ + u16 reserved; + u8 keydata[16]; /* AES key data */ + } __packed aes_pairwise; + struct { + u8 keydata[16]; /* AES key data */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + u8 rx_seqnum[8]; /* Receive Sequence Counter */ + } __packed aes_group; + struct { + u8 peer[6]; /* MAC address of the + * peer station */ + u8 keyid; /* Key ID */ + u8 reserved; + u8 keydata[16]; /* WAPI key data */ + u8 mic_key[16]; /* MIC key data */ + } __packed wapi_pairwise; + struct { + u8 keydata[16]; /* WAPI key data */ + u8 mic_key[16]; /* MIC key data */ + u8 keyid; /* Key ID */ + u8 reserved[3]; + } __packed wapi_group; + } __packed; +} __packed; + +int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg); + +/* 3.32 */ +#define WSM_REMOVE_KEY_REQ_ID 0x000D +#define WSM_REMOVE_KEY_RESP_ID 0x040D +struct wsm_remove_key { + u8 index; /* Key entry index : 0-10 */ +}; + +int wsm_remove_key(struct cw1200_common *priv, + const struct wsm_remove_key *arg); + +/* 3.34 */ +struct wsm_set_tx_queue_params { + /* WSM_ACK_POLICY_... */ + u8 ackPolicy; + + /* Medium Time of TSPEC (in 32us units) allowed per */ + /* One Second Averaging Period for this queue. */ + u16 allowedMediumTime; + + /* dot11MaxTransmitMsduLifetime to be used for the */ + /* specified queue. */ + u32 maxTransmitLifetime; +}; + +struct wsm_tx_queue_params { + /* NOTE: index is a linux queue id. 
*/ + struct wsm_set_tx_queue_params params[4]; +}; + + +#define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\ + max_life_time) \ +do { \ + struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \ + p->ackPolicy = (ack_policy); \ + p->allowedMediumTime = (allowed_time); \ + p->maxTransmitLifetime = (max_life_time); \ +} while (0) + +int wsm_set_tx_queue_params(struct cw1200_common *priv, + const struct wsm_set_tx_queue_params *arg, u8 id); + +/* 3.36 */ +#define WSM_EDCA_PARAMS_REQ_ID 0x0013 +#define WSM_EDCA_PARAMS_RESP_ID 0x0413 +struct wsm_edca_queue_params { + /* CWmin (in slots) for the access class. */ + __le16 cwmin; + + /* CWmax (in slots) for the access class. */ + __le16 cwmax; + + /* AIFS (in slots) for the access class. */ + __le16 aifns; + + /* TX OP Limit (in microseconds) for the access class. */ + __le16 txop_limit; + + /* dot11MaxReceiveLifetime to be used for the specified */ + /* access class. Overrides the global */ + /* dot11MaxReceiveLifetime value */ + __le32 max_rx_lifetime; +} __packed; + +struct wsm_edca_params { + /* NOTE: index is a linux queue id. */ + struct wsm_edca_queue_params params[4]; + bool uapsd_enable[4]; +}; + +#define TXOP_UNIT 32 +#define WSM_EDCA_SET(__edca, __queue, __aifs, __cw_min, __cw_max, __txop, __lifetime,\ + __uapsd) \ + do { \ + struct wsm_edca_queue_params *p = &(__edca)->params[__queue]; \ + p->cwmin = (__cw_min); \ + p->cwmax = (__cw_max); \ + p->aifns = (__aifs); \ + p->txop_limit = ((__txop) * TXOP_UNIT); \ + p->max_rx_lifetime = (__lifetime); \ + (__edca)->uapsd_enable[__queue] = (__uapsd); \ + } while (0) + +int wsm_set_edca_params(struct cw1200_common *priv, + const struct wsm_edca_params *arg); + +int wsm_set_uapsd_param(struct cw1200_common *priv, + const struct wsm_edca_params *arg); + +/* 3.38 */ +/* Set-System info. Skipped for now. Irrelevant. */ + +/* 3.40 */ +#define WSM_SWITCH_CHANNEL_REQ_ID 0x0016 +#define WSM_SWITCH_CHANNEL_RESP_ID 0x0416 + +struct wsm_switch_channel { + /* 1 - means the STA shall not transmit any further */ + /* frames until the channel switch has completed */ + u8 mode; + + /* Number of TBTTs until channel switch occurs. */ + /* 0 - indicates switch shall occur at any time */ + /* 1 - occurs immediately before the next TBTT */ + u8 switch_count; + + /* The new channel number to switch to. */ + /* Note this is defined as per section 2.7. */ + u16 channel_number; +}; + +int wsm_switch_channel(struct cw1200_common *priv, + const struct wsm_switch_channel *arg); + +typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv); + +#define WSM_START_REQ_ID 0x0017 +#define WSM_START_RESP_ID 0x0417 + +struct wsm_start { + /* WSM_START_MODE_... */ + /* [in] */ u8 mode; + + /* WSM_PHY_BAND_... */ + /* [in] */ u8 band; + + /* Channel number */ + /* [in] */ u16 channel_number; + + /* Client Traffic window in units of TU */ + /* Valid only when mode == ..._P2P */ + /* [in] */ u32 ct_window; + + /* Interval between two consecutive */ + /* beacon transmissions in TU. */ + /* [in] */ u32 beacon_interval; + + /* DTIM period in terms of beacon intervals */ + /* [in] */ u8 dtim_period; + + /* WSM_JOIN_PREAMBLE_... */ + /* [in] */ u8 preamble; + + /* The delay time (in microseconds) period */ + /* before sending a probe-request. */ + /* [in] */ u8 probe_delay; + + /* Length of the SSID */ + /* [in] */ u8 ssid_len; + + /* SSID of the BSS or P2P_GO to be started now. */ + /* [in] */ u8 ssid[32]; + + /* The basic supported rates for the MiniAP. 
*/ + /* [in] */ u32 basic_rate_set; +}; + +int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg); + +#define WSM_BEACON_TRANSMIT_REQ_ID 0x0018 +#define WSM_BEACON_TRANSMIT_RESP_ID 0x0418 + +struct wsm_beacon_transmit { + /* 1: enable; 0: disable */ + /* [in] */ u8 enable_beaconing; +}; + +int wsm_beacon_transmit(struct cw1200_common *priv, + const struct wsm_beacon_transmit *arg); + +int wsm_start_find(struct cw1200_common *priv); + +int wsm_stop_find(struct cw1200_common *priv); + +typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status); + +struct wsm_suspend_resume { + /* See 3.52 */ + /* Link ID */ + /* [out] */ int link_id; + /* Stop sending further Tx requests down to device for this link */ + /* [out] */ bool stop; + /* Transmit multicast Frames */ + /* [out] */ bool multicast; + /* The AC on which Tx is to be suspended/resumed. */ + /* This is applicable only for U-APSD */ + /* WSM_QUEUE_... */ + /* [out] */ int queue; +}; + +typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv, + struct wsm_suspend_resume *arg); + +/* 3.54 Update-IE request. */ +struct wsm_update_ie { + /* WSM_UPDATE_IE_... */ + /* [in] */ u16 what; + /* [in] */ u16 count; + /* [in] */ u8 *ies; + /* [in] */ size_t length; +}; + +int wsm_update_ie(struct cw1200_common *priv, + const struct wsm_update_ie *arg); + +/* 3.56 */ +struct wsm_map_link { + /* MAC address of the remote device */ + /* [in] */ u8 mac_addr[6]; + /* [in] */ u8 link_id; +}; + +int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg); + +/* ******************************************************************** */ +/* MIB shortcuts */ + +static inline int wsm_set_output_power(struct cw1200_common *priv, + int power_level) +{ + __le32 val = __cpu_to_le32(power_level); + return wsm_write_mib(priv, WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL, + &val, sizeof(val)); +} + +static inline int wsm_set_beacon_wakeup_period(struct cw1200_common *priv, + unsigned dtim_interval, + unsigned listen_interval) +{ + struct { + u8 numBeaconPeriods; + u8 reserved; + __le16 listenInterval; + } val = { + dtim_interval, 0, __cpu_to_le16(listen_interval) + }; + + if (dtim_interval > 0xFF || listen_interval > 0xFFFF) + return -EINVAL; + else + return wsm_write_mib(priv, WSM_MIB_ID_BEACON_WAKEUP_PERIOD, + &val, sizeof(val)); +} + +struct wsm_rcpi_rssi_threshold { + u8 rssiRcpiMode; /* WSM_RCPI_RSSI_... 
*/ + u8 lowerThreshold; + u8 upperThreshold; + u8 rollingAverageCount; +}; + +static inline int wsm_set_rcpi_rssi_threshold(struct cw1200_common *priv, + struct wsm_rcpi_rssi_threshold *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_RCPI_RSSI_THRESHOLD, arg, + sizeof(*arg)); +} + +struct wsm_mib_counters_table { + __le32 plcp_errors; + __le32 fcs_errors; + __le32 tx_packets; + __le32 rx_packets; + __le32 rx_packet_errors; + __le32 rx_decryption_failures; + __le32 rx_mic_failures; + __le32 rx_no_key_failures; + __le32 tx_multicast_frames; + __le32 tx_frames_success; + __le32 tx_frame_failures; + __le32 tx_frames_retried; + __le32 tx_frames_multi_retried; + __le32 rx_frame_duplicates; + __le32 rts_success; + __le32 rts_failures; + __le32 ack_failures; + __le32 rx_multicast_frames; + __le32 rx_frames_success; + __le32 rx_cmac_icv_errors; + __le32 rx_cmac_replays; + __le32 rx_mgmt_ccmp_replays; +} __packed; + +static inline int wsm_get_counters_table(struct cw1200_common *priv, + struct wsm_mib_counters_table *arg) +{ + return wsm_read_mib(priv, WSM_MIB_ID_COUNTERS_TABLE, + arg, sizeof(*arg)); +} + +static inline int wsm_get_station_id(struct cw1200_common *priv, u8 *mac) +{ + return wsm_read_mib(priv, WSM_MIB_ID_DOT11_STATION_ID, mac, ETH_ALEN); +} + +struct wsm_rx_filter { + bool promiscuous; + bool bssid; + bool fcs; + bool probeResponder; +}; + +static inline int wsm_set_rx_filter(struct cw1200_common *priv, + const struct wsm_rx_filter *arg) +{ + __le32 val = 0; + if (arg->promiscuous) + val |= __cpu_to_le32(BIT(0)); + if (arg->bssid) + val |= __cpu_to_le32(BIT(1)); + if (arg->fcs) + val |= __cpu_to_le32(BIT(2)); + if (arg->probeResponder) + val |= __cpu_to_le32(BIT(3)); + return wsm_write_mib(priv, WSM_MIB_ID_RX_FILTER, &val, sizeof(val)); +} + +int wsm_set_probe_responder(struct cw1200_common *priv, bool enable); + +#define WSM_BEACON_FILTER_IE_HAS_CHANGED BIT(0) +#define WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT BIT(1) +#define WSM_BEACON_FILTER_IE_HAS_APPEARED BIT(2) + +struct wsm_beacon_filter_table_entry { + u8 ie_id; + u8 flags; + u8 oui[3]; + u8 match_data[3]; +} __packed; + +struct wsm_mib_beacon_filter_table { + __le32 num; + struct wsm_beacon_filter_table_entry entry[10]; +} __packed; + +static inline int wsm_set_beacon_filter_table(struct cw1200_common *priv, + struct wsm_mib_beacon_filter_table *ft) +{ + size_t size = __le32_to_cpu(ft->num) * + sizeof(struct wsm_beacon_filter_table_entry) + + sizeof(__le32); + + return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_TABLE, ft, size); +} + +#define WSM_BEACON_FILTER_ENABLE BIT(0) /* Enable/disable beacon filtering */ +#define WSM_BEACON_FILTER_AUTO_ERP BIT(1) /* If 1 FW will handle ERP IE changes internally */ + +struct wsm_beacon_filter_control { + int enabled; + int bcn_count; +}; + +static inline int wsm_beacon_filter_control(struct cw1200_common *priv, + struct wsm_beacon_filter_control *arg) +{ + struct { + __le32 enabled; + __le32 bcn_count; + } val; + val.enabled = __cpu_to_le32(arg->enabled); + val.bcn_count = __cpu_to_le32(arg->bcn_count); + return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_ENABLE, &val, + sizeof(val)); +} + +enum wsm_power_mode { + wsm_power_mode_active = 0, + wsm_power_mode_doze = 1, + wsm_power_mode_quiescent = 2, +}; + +struct wsm_operational_mode { + enum wsm_power_mode power_mode; + int disable_more_flag_usage; + int perform_ant_diversity; +}; + +static inline int wsm_set_operational_mode(struct cw1200_common *priv, + const struct wsm_operational_mode *arg) +{ + u8 val = arg->power_mode; + if 
(arg->disable_more_flag_usage) + val |= BIT(4); + if (arg->perform_ant_diversity) + val |= BIT(5); + return wsm_write_mib(priv, WSM_MIB_ID_OPERATIONAL_POWER_MODE, &val, + sizeof(val)); +} + +struct wsm_template_frame { + u8 frame_type; + u8 rate; + struct sk_buff *skb; +}; + +static inline int wsm_set_template_frame(struct cw1200_common *priv, + struct wsm_template_frame *arg) +{ + int ret; + u8 *p = skb_push(arg->skb, 4); + p[0] = arg->frame_type; + p[1] = arg->rate; + ((u16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4); + ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len); + skb_pull(arg->skb, 4); + return ret; +} + + +struct wsm_protected_mgmt_policy { + bool protectedMgmtEnable; + bool unprotectedMgmtFramesAllowed; + bool encryptionForAuthFrame; +}; + +static inline int wsm_set_protected_mgmt_policy(struct cw1200_common *priv, + struct wsm_protected_mgmt_policy *arg) +{ + __le32 val = 0; + int ret; + if (arg->protectedMgmtEnable) + val |= __cpu_to_le32(BIT(0)); + if (arg->unprotectedMgmtFramesAllowed) + val |= __cpu_to_le32(BIT(1)); + if (arg->encryptionForAuthFrame) + val |= __cpu_to_le32(BIT(2)); + ret = wsm_write_mib(priv, WSM_MIB_ID_PROTECTED_MGMT_POLICY, + &val, sizeof(val)); + return ret; +} + +struct wsm_mib_block_ack_policy { + u8 tx_tid; + u8 reserved1; + u8 rx_tid; + u8 reserved2; +} __packed; + +static inline int wsm_set_block_ack_policy(struct cw1200_common *priv, + u8 tx_tid_policy, + u8 rx_tid_policy) +{ + struct wsm_mib_block_ack_policy val = { + .tx_tid = tx_tid_policy, + .rx_tid = rx_tid_policy, + }; + return wsm_write_mib(priv, WSM_MIB_ID_BLOCK_ACK_POLICY, &val, + sizeof(val)); +} + +struct wsm_mib_association_mode { + u8 flags; /* WSM_ASSOCIATION_MODE_... */ + u8 preamble; /* WSM_JOIN_PREAMBLE_... */ + u8 greenfield; /* 1 for greenfield */ + u8 mpdu_start_spacing; + __le32 basic_rate_set; +} __packed; + +static inline int wsm_set_association_mode(struct cw1200_common *priv, + struct wsm_mib_association_mode *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_SET_ASSOCIATION_MODE, arg, + sizeof(*arg)); +} + +#define WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED BIT(2) +#define WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT BIT(3) +struct wsm_tx_rate_retry_policy { + u8 index; + u8 short_retries; + u8 long_retries; + /* BIT(2) - Terminate retries when Tx rate retry policy + * finishes. 
+ * BIT(3) - Count initial frame transmission as part of + * rate retry counting but not as a retry + * attempt */ + u8 flags; + u8 rate_recoveries; + u8 reserved[3]; + __le32 rate_count_indices[3]; +} __packed; + +struct wsm_set_tx_rate_retry_policy { + u8 num; + u8 reserved[3]; + struct wsm_tx_rate_retry_policy tbl[8]; +} __packed; + +static inline int wsm_set_tx_rate_retry_policy(struct cw1200_common *priv, + struct wsm_set_tx_rate_retry_policy *arg) +{ + size_t size = 4 + arg->num * sizeof(struct wsm_tx_rate_retry_policy); + return wsm_write_mib(priv, WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, + size); +} + +/* 4.32 SetEtherTypeDataFrameFilter */ +struct wsm_ether_type_filter_hdr { + u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */ + u8 reserved[3]; +} __packed; + +struct wsm_ether_type_filter { + u8 action; /* WSM_FILTER_ACTION_XXX */ + u8 reserved; + __le16 type; /* Type of ethernet frame */ +} __packed; + +static inline int wsm_set_ether_type_filter(struct cw1200_common *priv, + struct wsm_ether_type_filter_hdr *arg) +{ + size_t size = sizeof(struct wsm_ether_type_filter_hdr) + + arg->num * sizeof(struct wsm_ether_type_filter); + return wsm_write_mib(priv, WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER, + arg, size); +} + +/* 4.33 SetUDPPortDataFrameFilter */ +struct wsm_udp_port_filter_hdr { + u8 num; /* Up to WSM_MAX_FILTER_ELEMENTS */ + u8 reserved[3]; +} __packed; + +struct wsm_udp_port_filter { + u8 action; /* WSM_FILTER_ACTION_XXX */ + u8 type; /* WSM_FILTER_PORT_TYPE_XXX */ + __le16 port; /* Port number */ +} __packed; + +static inline int wsm_set_udp_port_filter(struct cw1200_common *priv, + struct wsm_udp_port_filter_hdr *arg) +{ + size_t size = sizeof(struct wsm_udp_port_filter_hdr) + + arg->num * sizeof(struct wsm_udp_port_filter); + return wsm_write_mib(priv, WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER, + arg, size); +} + +/* Undocumented MIBs: */ +/* 4.35 P2PDeviceInfo */ +#define D11_MAX_SSID_LEN (32) + +struct wsm_p2p_device_type { + __le16 categoryId; + u8 oui[4]; + __le16 subCategoryId; +} __packed; + +struct wsm_p2p_device_info { + struct wsm_p2p_device_type primaryDevice; + u8 reserved1[3]; + u8 devNameSize; + u8 localDevName[D11_MAX_SSID_LEN]; + u8 reserved2[3]; + u8 numSecDevSupported; + struct wsm_p2p_device_type secondaryDevices[0]; +} __packed; + +/* 4.36 SetWCDMABand - WO */ +struct wsm_cdma_band { + u8 WCDMA_Band; + u8 reserved[3]; +} __packed; + +/* 4.37 GroupTxSequenceCounter - RO */ +struct wsm_group_tx_seq { + __le32 bits_47_16; + __le16 bits_15_00; + __le16 reserved; +} __packed; + +/* 4.39 SetHtProtection - WO */ +#define WSM_DUAL_CTS_PROT_ENB (1 << 0) +#define WSM_NON_GREENFIELD_STA_PRESENT (1 << 1) +#define WSM_HT_PROT_MODE__NO_PROT (0 << 2) +#define WSM_HT_PROT_MODE__NON_MEMBER (1 << 2) +#define WSM_HT_PROT_MODE__20_MHZ (2 << 2) +#define WSM_HT_PROT_MODE__NON_HT_MIXED (3 << 2) +#define WSM_LSIG_TXOP_PROT_FULL (1 << 4) +#define WSM_LARGE_L_LENGTH_PROT (1 << 5) + +struct wsm_ht_protection { + __le32 flags; +} __packed; + +/* 4.40 GPIO Command - R/W */ +#define WSM_GPIO_COMMAND_SETUP 0 +#define WSM_GPIO_COMMAND_READ 1 +#define WSM_GPIO_COMMAND_WRITE 2 +#define WSM_GPIO_COMMAND_RESET 3 +#define WSM_GPIO_ALL_PINS 0xFF + +struct wsm_gpio_command { + u8 GPIO_Command; + u8 pin; + __le16 config; +} __packed; + +/* 4.41 TSFCounter - RO */ +struct wsm_tsf_counter { + __le64 TSF_Counter; +} __packed; + +/* 4.43 Keep alive period */ +struct wsm_keep_alive_period { + __le16 keepAlivePeriod; + u8 reserved[2]; +} __packed; + +static inline int wsm_keep_alive_period(struct 
cw1200_common *priv, + int period) +{ + struct wsm_keep_alive_period arg = { + .keepAlivePeriod = __cpu_to_le16(period), + }; + return wsm_write_mib(priv, WSM_MIB_ID_KEEP_ALIVE_PERIOD, + &arg, sizeof(arg)); +}; + +/* BSSID filtering */ +struct wsm_set_bssid_filtering { + u8 filter; + u8 reserved[3]; +} __packed; + +static inline int wsm_set_bssid_filtering(struct cw1200_common *priv, + bool enabled) +{ + struct wsm_set_bssid_filtering arg = { + .filter = !enabled, + }; + return wsm_write_mib(priv, WSM_MIB_ID_DISABLE_BSSID_FILTER, + &arg, sizeof(arg)); +} + +/* Multicast filtering - 4.5 */ +struct wsm_mib_multicast_filter { + __le32 enable; + __le32 num_addrs; + u8 macaddrs[WSM_MAX_GRP_ADDRTABLE_ENTRIES][ETH_ALEN]; +} __packed; + +static inline int wsm_set_multicast_filter(struct cw1200_common *priv, + struct wsm_mib_multicast_filter *fp) +{ + return wsm_write_mib(priv, WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE, + fp, sizeof(*fp)); +} + +/* ARP IPv4 filtering - 4.10 */ +struct wsm_mib_arp_ipv4_filter { + __le32 enable; + __be32 ipv4addrs[WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES]; +} __packed; + +static inline int wsm_set_arp_ipv4_filter(struct cw1200_common *priv, + struct wsm_mib_arp_ipv4_filter *fp) +{ + return wsm_write_mib(priv, WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE, + fp, sizeof(*fp)); +} + +/* P2P Power Save Mode Info - 4.31 */ +struct wsm_p2p_ps_modeinfo { + u8 oppPsCTWindow; + u8 count; + u8 reserved; + u8 dtimCount; + __le32 duration; + __le32 interval; + __le32 startTime; +} __packed; + +static inline int wsm_set_p2p_ps_modeinfo(struct cw1200_common *priv, + struct wsm_p2p_ps_modeinfo *mi) +{ + return wsm_write_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO, + mi, sizeof(*mi)); +} + +static inline int wsm_get_p2p_ps_modeinfo(struct cw1200_common *priv, + struct wsm_p2p_ps_modeinfo *mi) +{ + return wsm_read_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO, + mi, sizeof(*mi)); +} + +/* UseMultiTxConfMessage */ + +static inline int wsm_use_multi_tx_conf(struct cw1200_common *priv, + bool enabled) +{ + __le32 arg = enabled ? 
__cpu_to_le32(1) : 0; + + return wsm_write_mib(priv, WSM_MIB_USE_MULTI_TX_CONF, + &arg, sizeof(arg)); +} + + +/* 4.26 SetUpasdInformation */ +struct wsm_uapsd_info { + __le16 uapsd_flags; + __le16 min_auto_trigger_interval; + __le16 max_auto_trigger_interval; + __le16 auto_trigger_step; +}; + +static inline int wsm_set_uapsd_info(struct cw1200_common *priv, + struct wsm_uapsd_info *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_SET_UAPSD_INFORMATION, + arg, sizeof(*arg)); +} + +/* 4.22 OverrideInternalTxRate */ +struct wsm_override_internal_txrate { + u8 internalTxRate; + u8 nonErpInternalTxRate; + u8 reserved[2]; +} __packed; + +static inline int wsm_set_override_internal_txrate(struct cw1200_common *priv, + struct wsm_override_internal_txrate *arg) +{ + return wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE, + arg, sizeof(*arg)); +} + +/* ******************************************************************** */ +/* WSM TX port control */ + +void wsm_lock_tx(struct cw1200_common *priv); +void wsm_lock_tx_async(struct cw1200_common *priv); +bool wsm_flush_tx(struct cw1200_common *priv); +void wsm_unlock_tx(struct cw1200_common *priv); + +/* ******************************************************************** */ +/* WSM / BH API */ + +int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len); +int wsm_handle_rx(struct cw1200_common *priv, u16 id, struct wsm_hdr *wsm, + struct sk_buff **skb_p); + +/* ******************************************************************** */ +/* wsm_buf API */ + +struct wsm_buf { + u8 *begin; + u8 *data; + u8 *end; +}; + +void wsm_buf_init(struct wsm_buf *buf); +void wsm_buf_deinit(struct wsm_buf *buf); + +/* ******************************************************************** */ +/* wsm_cmd API */ + +struct wsm_cmd { + spinlock_t lock; /* Protect structure from multiple access */ + int done; + u8 *ptr; + size_t len; + void *arg; + int ret; + u16 cmd; +}; + +/* ******************************************************************** */ +/* WSM TX buffer access */ + +int wsm_get_tx(struct cw1200_common *priv, u8 **data, + size_t *tx_len, int *burst); +void wsm_txed(struct cw1200_common *priv, u8 *data); + +/* ******************************************************************** */ +/* Queue mapping: WSM <---> linux */ +/* Linux: VO VI BE BK */ +/* WSM: BE BK VI VO */ + +static inline u8 wsm_queue_id_to_linux(u8 queue_id) +{ + static const u8 queue_mapping[] = { + 2, 3, 1, 0 + }; + return queue_mapping[queue_id]; +} + +static inline u8 wsm_queue_id_to_wsm(u8 queue_id) +{ + static const u8 queue_mapping[] = { + 3, 2, 0, 1 + }; + return queue_mapping[queue_id]; +} + + +#ifdef CONFIG_CW1200_ETF +int wsm_raw_cmd(struct cw1200_common *priv, u8 *data, size_t len); +#endif + +#endif /* CW1200_HWIO_H_INCLUDED */ diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 15920aaa5dd6..f8ab193009cd 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -6242,8 +6242,6 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, if ((val & 0x0000ff00) != 0) pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff); - pci_set_power_state(pci_dev, PCI_D0); - if (!ipw2100_hw_is_adapter_in_system(dev)) { printk(KERN_WARNING DRV_NAME "Device not found via register read.\n"); diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h index 3b6c99400892..048421511988 100644 --- a/drivers/net/wireless/iwlegacy/commands.h +++ 
b/drivers/net/wireless/iwlegacy/commands.h @@ -1348,14 +1348,6 @@ struct il_rx_mpdu_res_start { #define TX_CMD_SEC_KEY128 0x08 /* - * security overhead sizes - */ -#define WEP_IV_LEN 4 -#define WEP_ICV_LEN 4 -#define CCMP_MIC_LEN 8 -#define TKIP_ICV_LEN 4 - -/* * C_TX = 0x1c (command) */ diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h index 48545ab00311..de2c9514bef6 100644 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -76,13 +76,16 @@ #define IWL_INVALID_STATION 255 /* device operations */ -extern struct iwl_lib_ops iwl1000_lib; -extern struct iwl_lib_ops iwl2000_lib; -extern struct iwl_lib_ops iwl2030_lib; -extern struct iwl_lib_ops iwl5000_lib; -extern struct iwl_lib_ops iwl5150_lib; -extern struct iwl_lib_ops iwl6000_lib; -extern struct iwl_lib_ops iwl6030_lib; +extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_105_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg; +extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg; #define TIME_UNIT 1024 @@ -291,8 +294,8 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena); static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) { - return priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist; + return priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist; } #ifdef CONFIG_IWLWIFI_DEBUG diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c index d6c4cf2ad7c5..1b0f0d502568 100644 --- a/drivers/net/wireless/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/iwlwifi/dvm/calib.c @@ -521,7 +521,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); - if (priv->cfg->base_params->hd_v2) { + if (priv->lib->hd_v2) { cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = @@ -895,7 +895,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv, continue; } - delta_g = (priv->cfg->base_params->chain_noise_scale * + delta_g = (priv->lib->chain_noise_scale * ((s32)average_noise[default_chain] - (s32)average_noise[i])) / 1500; @@ -1051,8 +1051,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) return; /* Analyze signal for disconnected antenna */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { /* Disable disconnected antenna algorithm for advanced bt coex, assuming valid antennas are connected */ data->active_chains = priv->nvm_data->valid_rx_ant; diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h index 95ca026ecc9d..ebdac909f0cd 100644 --- a/drivers/net/wireless/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/iwlwifi/dvm/commands.h @@ -838,10 +838,6 @@ struct iwl_qosparam_cmd { #define STA_MODIFY_DELBA_TID_MSK 0x10 #define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 -/* Receiver address (actually, Rx station's index into station table), - * combined with Traffic ID (QOS priority), in format used by Tx 
Scheduler */ -#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) - /* agn */ struct iwl_keyinfo { __le16 key_flags; @@ -1225,14 +1221,6 @@ struct iwl_rx_mpdu_res_start { #define TX_CMD_SEC_KEY128 0x08 /* - * security overhead sizes - */ -#define WEP_IV_LEN 4 -#define WEP_ICV_LEN 4 -#define CCMP_MIC_LEN 8 -#define TKIP_ICV_LEN 4 - -/* * REPLY_TX = 0x1c (command) */ diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index 71ea77576d22..5cd87f949266 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -568,16 +568,61 @@ struct iwl_hw_params { const struct iwl_sensitivity_ranges *sens; }; -struct iwl_lib_ops { - /* set hw dependent parameters */ +/** + * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters + * @advanced_bt_coexist: support advanced bt coexist + * @bt_init_traffic_load: specify initial bt traffic load + * @bt_prio_boost: default bt priority boost value + * @agg_time_limit: maximum number of uSec in aggregation + * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode + */ +struct iwl_dvm_bt_params { + bool advanced_bt_coexist; + u8 bt_init_traffic_load; + u32 bt_prio_boost; + u16 agg_time_limit; + bool bt_sco_disable; + bool bt_session_2; +}; + +/** + * struct iwl_dvm_cfg - DVM firmware specific device configuration + * @set_hw_params: set hardware parameters + * @set_channel_switch: send channel switch command + * @nic_config: apply device specific configuration + * @temperature: read temperature + * @adv_thermal_throttle: support advance thermal throttle + * @support_ct_kill_exit: support ct kill exit condition + * @plcp_delta_threshold: plcp error rate threshold used to trigger + * radio tuning when there is a high receiving plcp error rate + * @chain_noise_scale: default chain noise scale used for gain computation + * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up + * @no_idle_support: do not support idle mode + * @bt_params: pointer to BT parameters + * @need_temp_offset_calib: need to perform temperature offset calibration + * @no_xtal_calib: some devices do not need crystal calibration data, + * don't send it to those + * @temp_offset_v2: support v2 of temperature offset calibration + * @adv_pm: advanced power management + */ +struct iwl_dvm_cfg { void (*set_hw_params)(struct iwl_priv *priv); int (*set_channel_switch)(struct iwl_priv *priv, struct ieee80211_channel_switch *ch_switch); - /* device specific configuration */ void (*nic_config)(struct iwl_priv *priv); - - /* temperature */ void (*temperature)(struct iwl_priv *priv); + + const struct iwl_dvm_bt_params *bt_params; + s32 chain_noise_scale; + u8 plcp_delta_threshold; + bool adv_thermal_throttle; + bool support_ct_kill_exit; + bool hd_v2; + bool no_idle_support; + bool need_temp_offset_calib; + bool no_xtal_calib; + bool temp_offset_v2; + bool adv_pm; }; struct iwl_wipan_noa_data { @@ -610,7 +655,7 @@ struct iwl_priv { struct device *dev; /* for debug prints only */ const struct iwl_cfg *cfg; const struct iwl_fw *fw; - const struct iwl_lib_ops *lib; + const struct iwl_dvm_cfg *lib; unsigned long status; spinlock_t sta_lock; @@ -870,6 +915,9 @@ struct iwl_priv { __le64 replay_ctr; __le16 last_seq_ctl; bool have_rekey_data; +#ifdef CONFIG_PM_SLEEP + struct wiphy_wowlan_support wowlan_support; +#endif /* device_pointers: pointers to ucode event tables */ struct { diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c index 
c48907c8ab43..352c6cb7b4f1 100644 --- a/drivers/net/wireless/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/iwlwifi/dvm/devices.c @@ -174,10 +174,13 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv) priv->hw_params.sens = &iwl1000_sensitivity; } -struct iwl_lib_ops iwl1000_lib = { +const struct iwl_dvm_cfg iwl_dvm_1000_cfg = { .set_hw_params = iwl1000_hw_set_hw_params, .nic_config = iwl1000_nic_config, .temperature = iwlagn_temperature, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, }; @@ -232,16 +235,56 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv) priv->hw_params.sens = &iwl2000_sensitivity; } -struct iwl_lib_ops iwl2000_lib = { +const struct iwl_dvm_cfg iwl_dvm_2000_cfg = { .set_hw_params = iwl2000_hw_set_hw_params, .nic_config = iwl2000_nic_config, .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, }; -struct iwl_lib_ops iwl2030_lib = { +const struct iwl_dvm_cfg iwl_dvm_105_cfg = { .set_hw_params = iwl2000_hw_set_hw_params, .nic_config = iwl2000_nic_config, .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, + .adv_pm = true, +}; + +static const struct iwl_dvm_bt_params iwl2030_bt_params = { + /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ + .advanced_bt_coexist = true, + .agg_time_limit = BT_AGG_THRESHOLD_DEF, + .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, + .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32, + .bt_sco_disable = true, + .bt_session_2 = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_2030_cfg = { + .set_hw_params = iwl2000_hw_set_hw_params, + .nic_config = iwl2000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .hd_v2 = true, + .bt_params = &iwl2030_bt_params, + .need_temp_offset_calib = true, + .temp_offset_v2 = true, + .adv_pm = true, }; /* @@ -420,16 +463,23 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, return iwl_dvm_send_cmd(priv, &hcmd); } -struct iwl_lib_ops iwl5000_lib = { +const struct iwl_dvm_cfg iwl_dvm_5000_cfg = { .set_hw_params = iwl5000_hw_set_hw_params, .set_channel_switch = iwl5000_hw_channel_switch, .temperature = iwlagn_temperature, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .no_idle_support = true, }; -struct iwl_lib_ops iwl5150_lib = { +const struct iwl_dvm_cfg iwl_dvm_5150_cfg = { .set_hw_params = iwl5150_hw_set_hw_params, .set_channel_switch = iwl5000_hw_channel_switch, .temperature = iwl5150_temperature, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .no_idle_support = true, + .no_xtal_calib = true, }; @@ -584,16 +634,59 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, return err; } -struct iwl_lib_ops iwl6000_lib = { +const struct iwl_dvm_cfg iwl_dvm_6000_cfg = { .set_hw_params = iwl6000_hw_set_hw_params, .set_channel_switch = iwl6000_hw_channel_switch, .nic_config = iwl6000_nic_config, .temperature = 
iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, +}; + +const struct iwl_dvm_cfg iwl_dvm_6005_cfg = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .need_temp_offset_calib = true, +}; + +const struct iwl_dvm_cfg iwl_dvm_6050_cfg = { + .set_hw_params = iwl6000_hw_set_hw_params, + .set_channel_switch = iwl6000_hw_channel_switch, + .nic_config = iwl6000_nic_config, + .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1500, +}; + +static const struct iwl_dvm_bt_params iwl6000_bt_params = { + /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ + .advanced_bt_coexist = true, + .agg_time_limit = BT_AGG_THRESHOLD_DEF, + .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, + .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, + .bt_sco_disable = true, }; -struct iwl_lib_ops iwl6030_lib = { +const struct iwl_dvm_cfg iwl_dvm_6030_cfg = { .set_hw_params = iwl6000_hw_set_hw_params, .set_channel_switch = iwl6000_hw_channel_switch, .nic_config = iwl6000_nic_config, .temperature = iwlagn_temperature, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .bt_params = &iwl6000_bt_params, + .need_temp_offset_calib = true, + .adv_pm = true, }; diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 54f553380aa8..9879550a0fea 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -254,23 +254,23 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != sizeof(basic.bt3_lookup_table)); - if (priv->cfg->bt_params) { + if (priv->lib->bt_params) { /* * newer generation of devices (2000 series and newer) * use the version 2 of the bt command * we need to make sure sending the host command * with correct data structure to avoid uCode assert */ - if (priv->cfg->bt_params->bt_session_2) { + if (priv->lib->bt_params->bt_session_2) { bt_cmd_v2.prio_boost = cpu_to_le32( - priv->cfg->bt_params->bt_prio_boost); + priv->lib->bt_params->bt_prio_boost); bt_cmd_v2.tx_prio_boost = 0; bt_cmd_v2.rx_prio_boost = 0; } else { /* older version only has 8 bits */ - WARN_ON(priv->cfg->bt_params->bt_prio_boost & ~0xFF); + WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF); bt_cmd_v1.prio_boost = - priv->cfg->bt_params->bt_prio_boost; + priv->lib->bt_params->bt_prio_boost; bt_cmd_v1.tx_prio_boost = 0; bt_cmd_v1.rx_prio_boost = 0; } @@ -330,7 +330,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) priv->bt_full_concurrent ? 
"full concurrency" : "3-wire"); - if (priv->cfg->bt_params->bt_session_2) { + if (priv->lib->bt_params->bt_session_2) { memcpy(&bt_cmd_v2.basic, &basic, sizeof(basic)); ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, @@ -758,8 +758,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv) */ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) { - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* @@ -830,8 +830,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) else active_chains = priv->nvm_data->valid_rx_ant; - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && (priv->bt_full_concurrent || priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { /* diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index cab23af0be9e..f10c755d952c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -208,20 +208,21 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, priv->trans->ops->d3_suspend && priv->trans->ops->d3_resume && device_can_wakeup(priv->trans->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; + priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; if (!iwlwifi_mod_params.sw_crypto) - hw->wiphy->wowlan.flags |= + priv->wowlan_support.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE; - hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; - hw->wiphy->wowlan.pattern_min_len = + priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; + priv->wowlan_support.pattern_min_len = IWLAGN_WOWLAN_MIN_PATTERN_LEN; - hw->wiphy->wowlan.pattern_max_len = + priv->wowlan_support.pattern_max_len = IWLAGN_WOWLAN_MAX_PATTERN_LEN; + hw->wiphy->wowlan = &priv->wowlan_support; } #endif @@ -426,6 +427,10 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw, if (ret) goto error; + /* let the ucode operate on its own */ + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + iwl_trans_d3_suspend(priv->trans); goto out; @@ -509,6 +514,10 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) goto out_unlock; } + /* uCode is no longer operating by itself */ + iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + base = priv->device_pointers.error_event_table; if (!iwlagn_hw_valid_rtc_data_addr(base)) { IWL_WARN(priv, "Invalid error table during resume!\n"); @@ -1276,8 +1285,8 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(priv, "enter\n"); mutex_lock(&priv->mutex); - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { if (rssi_event == RSSI_EVENT_LOW) priv->bt_enable_pspoll = true; else if (rssi_event == RSSI_EVENT_HIGH) @@ -1387,7 +1396,7 @@ static int iwl_setup_interface(struct iwl_priv *priv, return err; } - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && 
priv->lib->bt_params->advanced_bt_coexist && vif->type == NL80211_IFTYPE_ADHOC) { /* * pretend to have high BT traffic as long as we diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 74d7572e7091..68f754659570 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -615,7 +615,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv) priv->thermal_throttle.ct_kill_toggle = false; - if (priv->cfg->base_params->support_ct_kill_exit) { + if (priv->lib->support_ct_kill_exit) { adv_cmd.critical_temperature_enter = cpu_to_le32(priv->hw_params.ct_kill_threshold); adv_cmd.critical_temperature_exit = @@ -732,10 +732,10 @@ int iwl_alive_start(struct iwl_priv *priv) } /* download priority table before any calibration request */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { /* Configure Bluetooth device coexistence support */ - if (priv->cfg->bt_params->bt_sco_disable) + if (priv->lib->bt_params->bt_sco_disable) priv->bt_enable_pspoll = false; else priv->bt_enable_pspoll = true; @@ -873,9 +873,9 @@ void iwl_down(struct iwl_priv *priv) priv->bt_status = 0; priv->cur_rssi_ctx = NULL; priv->bt_is_sco = 0; - if (priv->cfg->bt_params) + if (priv->lib->bt_params) priv->bt_traffic_load = - priv->cfg->bt_params->bt_init_traffic_load; + priv->lib->bt_params->bt_init_traffic_load; else priv->bt_traffic_load = 0; priv->bt_full_concurrent = false; @@ -1058,7 +1058,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) iwl_setup_scan_deferred_work(priv); - if (priv->cfg->bt_params) + if (priv->lib->bt_params) iwlagn_bt_setup_deferred_work(priv); init_timer(&priv->statistics_periodic); @@ -1072,7 +1072,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) void iwl_cancel_deferred_work(struct iwl_priv *priv) { - if (priv->cfg->bt_params) + if (priv->lib->bt_params) iwlagn_bt_cancel_deferred_work(priv); cancel_work_sync(&priv->run_time_calib_work); @@ -1098,8 +1098,7 @@ static int iwl_init_drv(struct iwl_priv *priv) priv->band = IEEE80211_BAND_2GHZ; - priv->plcp_delta_threshold = - priv->cfg->base_params->plcp_delta_threshold; + priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold; priv->iw_mode = NL80211_IFTYPE_STATION; priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; @@ -1116,8 +1115,8 @@ static int iwl_init_drv(struct iwl_priv *priv) iwl_init_scan_params(priv); /* init bt coex */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; @@ -1264,31 +1263,37 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, switch (priv->cfg->device_family) { case IWL_DEVICE_FAMILY_1000: case IWL_DEVICE_FAMILY_100: - priv->lib = &iwl1000_lib; + priv->lib = &iwl_dvm_1000_cfg; break; case IWL_DEVICE_FAMILY_2000: + priv->lib = &iwl_dvm_2000_cfg; + break; case IWL_DEVICE_FAMILY_105: - priv->lib = &iwl2000_lib; + priv->lib = &iwl_dvm_105_cfg; break; case IWL_DEVICE_FAMILY_2030: case IWL_DEVICE_FAMILY_135: - priv->lib = &iwl2030_lib; + priv->lib = &iwl_dvm_2030_cfg; break; case IWL_DEVICE_FAMILY_5000: - priv->lib = &iwl5000_lib; + priv->lib = &iwl_dvm_5000_cfg; break; case IWL_DEVICE_FAMILY_5150: - priv->lib = &iwl5150_lib; + 
priv->lib = &iwl_dvm_5150_cfg; break; case IWL_DEVICE_FAMILY_6000: - case IWL_DEVICE_FAMILY_6005: case IWL_DEVICE_FAMILY_6000i: + priv->lib = &iwl_dvm_6000_cfg; + break; + case IWL_DEVICE_FAMILY_6005: + priv->lib = &iwl_dvm_6005_cfg; + break; case IWL_DEVICE_FAMILY_6050: case IWL_DEVICE_FAMILY_6150: - priv->lib = &iwl6000_lib; + priv->lib = &iwl_dvm_6050_cfg; break; case IWL_DEVICE_FAMILY_6030: - priv->lib = &iwl6030_lib; + priv->lib = &iwl_dvm_6030_cfg; break; default: break; diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c index bd69018d07a9..77cb59712235 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/iwlwifi/dvm/power.c @@ -163,7 +163,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, u8 skip; u32 slp_itrvl; - if (priv->cfg->adv_pm) { + if (priv->lib->adv_pm) { table = apm_range_2; if (period <= IWL_DTIM_RANGE_1_MAX) table = apm_range_1; @@ -217,7 +217,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; if (iwl_advanced_bt_coexist(priv)) { - if (!priv->cfg->bt_params->bt_sco_disable) + if (!priv->lib->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else cmd->flags &= ~IWL_POWER_BT_SCO_ENA; @@ -293,7 +293,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, if (priv->wowlan) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); - else if (!priv->cfg->base_params->no_idle_support && + else if (!priv->lib->no_idle_support && priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (iwl_tt_is_low_power_state(priv)) { diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 907bd6e50aad..94314a8e1029 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -1088,7 +1088,7 @@ done: (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) rs_program_fix_rate(priv, lq_sta); #endif - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) + if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) rs_bt_update_lq(priv, ctx, lq_sta); } @@ -3064,11 +3064,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, * overwrite if needed, pass aggregation time limit * to uCode in uSec */ - if (priv && priv->cfg->bt_params && - priv->cfg->bt_params->agg_time_limit && + if (priv && priv->lib->bt_params && + priv->lib->bt_params->agg_time_limit && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(priv->cfg->bt_params->agg_time_limit); + cpu_to_le16(priv->lib->bt_params->agg_time_limit); } static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c index a4eed2055fdb..2f3fd160ab44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/iwlwifi/dvm/rx.c @@ -1102,7 +1102,7 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) iwl_notification_wait_init(&priv->notif_wait); /* Set up BT Rx handlers */ - if (priv->cfg->bt_params) + if (priv->lib->bt_params) iwlagn_bt_rx_handler_setup(priv); } diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c index d69b55866714..8c686a5b90ac 100644 --- a/drivers/net/wireless/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/iwlwifi/dvm/scan.c @@ -801,8 +801,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) * 
Internal scans are passive, so we can indiscriminately set * the BT ignore flag on 2.4 GHz since it applies to TX only. */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; break; case IEEE80211_BAND_5GHZ: @@ -844,8 +844,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) band = priv->scan_band; if (band == IEEE80211_BAND_2GHZ && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { /* transmit 2.4 GHz probes only on first antenna */ scan_tx_antennas = first_antenna(scan_tx_antennas); } @@ -873,8 +873,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) rx_ant = first_antenna(active_chains); } - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ rx_ant = first_antenna(rx_ant); diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index db183b44e038..c3c13ce96eb0 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -735,7 +735,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) memcpy(&lq, priv->stations[i].lq, sizeof(struct iwl_link_quality_cmd)); - if (!memcmp(&lq, &zero_lq, sizeof(lq))) + if (memcmp(&lq, &zero_lq, sizeof(lq))) send_lq = true; } spin_unlock_bh(&priv->sta_lock); diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c index 03f9bc01c0cc..fbeee081ee2f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/iwlwifi/dvm/tt.c @@ -627,7 +627,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); - if (priv->cfg->base_params->adv_thermal_throttle) { + if (priv->lib->adv_thermal_throttle) { IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); tt->restriction = kcalloc(IWL_TI_STATE_MAX, sizeof(struct iwl_tt_restriction), diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index a900aaf47790..353a053b4eb1 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -83,8 +83,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, else if (ieee80211_is_back_req(fc)) tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; else if (info->band == IEEE80211_BAND_2GHZ && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc) || skb->protocol == cpu_to_be16(ETH_P_PAE))) @@ -202,8 +202,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, rate_flags |= RATE_MCS_CCK_MSK; /* Set up antennas */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist && priv->bt_full_concurrent) { /* operated as 1x1 in full concurrency mode */ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, @@ -986,8 +986,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, * notification again. 
*/ if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); } diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 0a1cdc5e856b..86270b69cd02 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -132,8 +132,8 @@ int iwl_init_alive_start(struct iwl_priv *priv) { int ret; - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (priv->lib->bt_params && + priv->lib->bt_params->advanced_bt_coexist) { /* * Tell uCode we are ready to perform calibration * need to perform this before any calibration @@ -155,8 +155,8 @@ int iwl_init_alive_start(struct iwl_priv *priv) * temperature offset calibration is only needed for runtime ucode, * so prepare the value now. */ - if (priv->cfg->need_temp_offset_calib) { - if (priv->cfg->temp_offset_v2) + if (priv->lib->need_temp_offset_calib) { + if (priv->lib->temp_offset_v2) return iwl_set_temperature_offset_calib_v2(priv); else return iwl_set_temperature_offset_calib(priv); @@ -277,7 +277,7 @@ static int iwl_alive_notify(struct iwl_priv *priv) if (ret) return ret; - if (!priv->cfg->no_xtal_calib) { + if (!priv->lib->no_xtal_calib) { ret = iwl_set_Xtal_calib(priv); if (ret) return ret; diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index c080ae3070b2..0d2afe098afc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -60,9 +60,6 @@ static const struct iwl_base_params iwl1000_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_1000, .shadow_ram_support = false, .led_compensation = 51, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_WATCHDOG_DISABLED, .max_event_log_size = 128, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index a6ddd2f9fba0..c727ec7c90a6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -72,14 +72,9 @@ static const struct iwl_base_params iwl2000_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 51, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ - .hd_v2 = true, }; @@ -90,14 +85,9 @@ static const struct iwl_base_params iwl2030_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 57, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ - .hd_v2 = true, }; static const struct iwl_ht_params iwl2000_ht_params = { @@ -106,16 +96,6 @@ static const struct iwl_ht_params iwl2000_ht_params = { .ht40_bands = BIT(IEEE80211_BAND_2GHZ), }; -static const struct iwl_bt_params iwl2030_bt_params = { - /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ - .advanced_bt_coexist = 
true, - .agg_time_limit = BT_AGG_THRESHOLD_DEF, - .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, - .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32, - .bt_sco_disable = true, - .bt_session_2 = true, -}; - static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -137,12 +117,10 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .device_family = IWL_DEVICE_FAMILY_2000, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ - .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ - .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ + .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .need_temp_offset_calib = true, \ - .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl2000_2bgn_cfg = { @@ -168,12 +146,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2030_base_params, \ - .bt_params = &iwl2030_bt_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .need_temp_offset_calib = true, \ - .temp_offset_v2 = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -193,10 +167,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .need_temp_offset_calib = true, \ - .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true, \ .rx_with_siso_diversity = true const struct iwl_cfg iwl105_bgn_cfg = { @@ -222,12 +193,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .nvm_ver = EEPROM_2000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2030_base_params, \ - .bt_params = &iwl2030_bt_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .need_temp_offset_calib = true, \ - .temp_offset_v2 = true, \ .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true, \ .rx_with_siso_diversity = true const struct iwl_cfg iwl135_bgn_cfg = { diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 403f3f224bf6..ecc01e1a61a1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -59,11 +59,8 @@ static const struct iwl_base_params iwl5000_base_params = { .num_of_queues = IWLAGN_NUM_QUEUES, .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, .led_compensation = 51, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_WATCHDOG_DISABLED, .max_event_log_size = 512, - .no_idle_support = true, }; static const struct iwl_ht_params iwl5000_ht_params = { @@ -159,7 +156,6 @@ const struct iwl_cfg iwl5350_agn_cfg = { .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ - .no_xtal_calib = true, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index b5ab8d1bcac0..30d45e2fc193 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -82,10 +82,6 @@ static const struct iwl_base_params iwl6000_base_params = { .max_ll_items = 
OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 51, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ @@ -98,10 +94,6 @@ static const struct iwl_base_params iwl6050_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_6x50, .shadow_ram_support = true, .led_compensation = 51, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1500, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 1024, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ @@ -114,10 +106,6 @@ static const struct iwl_base_params iwl6000_g2_base_params = { .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 57, - .adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ @@ -129,15 +117,6 @@ static const struct iwl_ht_params iwl6000_ht_params = { .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; -static const struct iwl_bt_params iwl6000_bt_params = { - /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ - .advanced_bt_coexist = true, - .agg_time_limit = BT_AGG_THRESHOLD_DEF, - .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, - .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, - .bt_sco_disable = true, -}; - static const struct iwl_eeprom_params iwl6000_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -163,7 +142,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl6005_2agn_cfg = { @@ -217,11 +195,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ - .bt_params = &iwl6000_bt_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true \ + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl6030_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", @@ -256,11 +231,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .nvm_ver = EEPROM_6030_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ - .bt_params = &iwl6000_bt_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl6035_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 50263e87fe15..d4f3b4864ab1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -96,13 +96,9 @@ static const struct iwl_base_params iwl7000_base_params = { .pll_cfg_val = 0, .shadow_ram_support = true, .led_compensation = 57, - 
.adv_thermal_throttle = true, - .support_ct_kill_exit = true, - .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, - .chain_noise_scale = 1000, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, - .shadow_reg_enable = false, /* TODO: fix bugs using this feature */ + .shadow_reg_enable = true, }; static const struct iwl_ht_params iwl7000_ht_params = { @@ -118,14 +114,11 @@ static const struct iwl_ht_params iwl7000_ht_params = { .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl7000_base_params, \ - /* TODO: .bt_params? */ \ - .need_temp_offset_calib = true, \ - .led_mode = IWL_LED_RF_STATE, \ - .adv_pm = true \ + .led_mode = IWL_LED_RF_STATE const struct iwl_cfg iwl7260_2ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC7260", + .name = "Intel(R) Dual Band Wireless AC 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, @@ -133,8 +126,44 @@ const struct iwl_cfg iwl7260_2ac_cfg = { .nvm_calib_ver = IWL7260_TX_POWER_VERSION, }; -const struct iwl_cfg iwl3160_ac_cfg = { - .name = "Intel(R) Dual Band Wireless AC3160", +const struct iwl_cfg iwl7260_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl7260_n_cfg = { + .name = "Intel(R) Wireless N 7260", + .fw_name_pre = IWL7260_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL7260_NVM_VERSION, + .nvm_calib_ver = IWL7260_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl3160_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 3160", + .fw_name_pre = IWL3160_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3160_NVM_VERSION, + .nvm_calib_ver = IWL3160_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl3160_2n_cfg = { + .name = "Intel(R) Dual Band Wireless N 3160", + .fw_name_pre = IWL3160_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3160_NVM_VERSION, + .nvm_calib_ver = IWL3160_TX_POWER_VERSION, +}; + +const struct iwl_cfg iwl3160_n_cfg = { + .name = "Intel(R) Wireless N 3160", .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index c38aa8f77554..a193832fc790 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -136,17 +136,9 @@ enum iwl_led_mode { * @led_compensation: compensate on the led on/off time per HW according * to the deviation to achieve the desired led frequency. 
* The detail algorithm is described in iwl-led.c - * @chain_noise_num_beacons: number of beacons used to compute chain noise - * @adv_thermal_throttle: support advance thermal throttle - * @support_ct_kill_exit: support ct kill exit condition - * @plcp_delta_threshold: plcp error rate threshold used to trigger - * radio tuning when there is a high receiving plcp error rate - * @chain_noise_scale: default chain noise scale used for gain computation * @wd_timeout: TX queues watchdog timeout * @max_event_log_size: size of event log buffer size for ucode event logging * @shadow_reg_enable: HW shadow register support - * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up - * @no_idle_support: do not support idle mode */ struct iwl_base_params { int eeprom_size; @@ -157,31 +149,9 @@ struct iwl_base_params { const u16 max_ll_items; const bool shadow_ram_support; u16 led_compensation; - bool adv_thermal_throttle; - bool support_ct_kill_exit; - u8 plcp_delta_threshold; - s32 chain_noise_scale; unsigned int wd_timeout; u32 max_event_log_size; const bool shadow_reg_enable; - const bool hd_v2; - const bool no_idle_support; -}; - -/* - * @advanced_bt_coexist: support advanced bt coexist - * @bt_init_traffic_load: specify initial bt traffic load - * @bt_prio_boost: default bt priority boost value - * @agg_time_limit: maximum number of uSec in aggregation - * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode - */ -struct iwl_bt_params { - bool advanced_bt_coexist; - u8 bt_init_traffic_load; - u32 bt_prio_boost; - u16 agg_time_limit; - bool bt_sco_disable; - bool bt_session_2; }; /* @@ -231,16 +201,10 @@ struct iwl_eeprom_params { * @nvm_calib_ver: NVM calibration version * @lib: pointer to the lib ops * @base_params: pointer to basic parameters - * @ht_params: point to ht patameters - * @bt_params: pointer to bt parameters - * @need_temp_offset_calib: need to perform temperature offset calibration - * @no_xtal_calib: some devices do not need crystal calibration data, - * don't send it to those + * @ht_params: point to ht parameters * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) - * @adv_pm: advance power management * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device - * @temp_offset_v2: support v2 of temperature offset calibration * * We enable the driver to be backward compatible wrt. hardware features. 
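Across the dvm hunks, every flag that used to be looked up through priv->cfg (bt_params, adv_pm, no_idle_support, adv_thermal_throttle, need_temp_offset_calib, temp_offset_v2, no_xtal_calib) is now read through priv->lib, which the device-family switch at the top of this section points at constants such as iwl_dvm_6000_cfg and iwl_dvm_6030_cfg, while iwl-config.h drops the corresponding fields so struct iwl_cfg keeps only PCI-ID-level data. The real per-family descriptor lives in dvm-private headers that are not part of these hunks; the sketch below only mirrors the field names visible at the call sites, and both struct names are placeholders:

/* Placeholder names; fields mirror the priv->lib-> accesses above. */
struct dvm_bt_params_sketch {
        bool advanced_bt_coexist;
        bool bt_sco_disable;
        unsigned short agg_time_limit;
};

struct dvm_family_params_sketch {
        const struct dvm_bt_params_sketch *bt_params;  /* NULL => no BT coex */
        bool adv_pm;                    /* advanced power management */
        bool adv_thermal_throttle;      /* advanced thermal throttling */
        bool no_idle_support;           /* device cannot enter idle */
        bool need_temp_offset_calib;    /* temperature offset calibration */
        bool temp_offset_v2;            /* ...using the v2 calibration command */
        bool no_xtal_calib;             /* skip crystal calibration */
};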
* API differences in uCode shouldn't be handled here but through TLVs @@ -264,15 +228,10 @@ struct iwl_cfg { const struct iwl_base_params *base_params; /* params likely to change within a device family */ const struct iwl_ht_params *ht_params; - const struct iwl_bt_params *bt_params; const struct iwl_eeprom_params *eeprom_params; - const bool need_temp_offset_calib; /* if used set to true */ - const bool no_xtal_calib; enum iwl_led_mode led_mode; - const bool adv_pm; const bool rx_with_siso_diversity; const bool internal_wimax_coex; - const bool temp_offset_v2; }; /* @@ -320,6 +279,10 @@ extern const struct iwl_cfg iwl105_bgn_cfg; extern const struct iwl_cfg iwl105_bgn_d_cfg; extern const struct iwl_cfg iwl135_bgn_cfg; extern const struct iwl_cfg iwl7260_2ac_cfg; -extern const struct iwl_cfg iwl3160_ac_cfg; +extern const struct iwl_cfg iwl7260_2n_cfg; +extern const struct iwl_cfg iwl7260_n_cfg; +extern const struct iwl_cfg iwl3160_2ac_cfg; +extern const struct iwl_cfg iwl3160_2n_cfg; +extern const struct iwl_cfg iwl3160_n_cfg; #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index 20e845d4da04..a276af476e2d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -472,4 +472,23 @@ #define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) #define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) +/***************************************************************************** + * 7000/3000 series SHR DTS addresses * + *****************************************************************************/ + +/* Diode Results Register Structure: */ +enum dtd_diode_reg { + DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */ + DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */ + DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */ + DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */ + DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */ + DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */ +/* Those are the masks INSIDE the flags bit-field: */ + DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0, + DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */ + DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7, + DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ +}; + #endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 39aad9893e0b..4f886133639a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1234,6 +1234,9 @@ MODULE_PARM_DESC(wd_disable, "Disable stuck queue watchdog timer 0=system default, " "1=disable, 2=enable (default: 0)"); +module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); +MODULE_PARM_DESC(nvm_file, "NVM file name"); + /* * set bt_coex_active to true, uCode will do kill/defer * every time the priority line is asserted (BT is sending signals on the diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 600c9fdd7f71..4c887f365908 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -732,17 +732,16 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data, void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band) + enum ieee80211_band band, + u8 tx_chains, u8 rx_chains) { int max_bit_rate = 0; - u8 rx_chains; - u8 tx_chains; - tx_chains = 
hweight8(data->valid_tx_ant); + tx_chains = hweight8(tx_chains); if (cfg->rx_with_siso_diversity) rx_chains = 1; else - rx_chains = hweight8(data->valid_rx_ant); + rx_chains = hweight8(rx_chains); if (!(data->sku_cap_11n_enable) || !cfg->ht_params) { ht_info->ht_supported = false; @@ -806,7 +805,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + data->valid_tx_ant, data->valid_rx_ant); sband = &data->bands[IEEE80211_BAND_5GHZ]; sband->band = IEEE80211_BAND_5GHZ; @@ -814,7 +814,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + data->valid_tx_ant, data->valid_rx_ant); if (n_channels != n_used) IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n", diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h index 37f115390b19..d73304a23ec2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h @@ -133,6 +133,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data, void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, - enum ieee80211_band band); + enum ieee80211_band band, + u8 tx_chains, u8 rx_chains); #endif /* __iwl_eeprom_parse_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index c4c446d41eb0..f844d5c748c0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -106,11 +106,14 @@ enum iwl_ucode_type { /* * enumeration of ucode section. - * This enumeration is used for legacy tlv style (before 16.0 uCode). + * This enumeration is used directly for older firmware (before 16.0). + * For new firmware, there can be up to 4 sections (see below) but the + * first one packaged into the firmware file is the DATA section and + * some debugging code accesses that. 
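The iwl_init_ht_hw_capab() change makes callers pass the antenna bitmasks in (data->valid_tx_ant/valid_rx_ant here, NVM-derived masks in iwl-nvm-parse.c) and reduces them to chain counts inside the function with hweight8(), a population count over the low 8 bits. A tiny standalone illustration of that mask-to-count step, using __builtin_popcount in place of the kernel helper; the ANT_* values assume the usual one-bit-per-antenna layout and are given only for the example:

#include <stdio.h>

#define ANT_A 0x1   /* assumption for the example: one bit per antenna */
#define ANT_B 0x2
#define ANT_C 0x4

static unsigned int chains_from_mask(unsigned char ant_mask)
{
        /* same result as the kernel's hweight8(): number of set bits */
        return (unsigned int)__builtin_popcount(ant_mask);
}

int main(void)
{
        unsigned char valid_tx_ant = ANT_A | ANT_B;         /* two TX chains */
        unsigned char valid_rx_ant = ANT_A | ANT_B | ANT_C; /* three RX chains */

        printf("tx chains: %u, rx chains: %u\n",
               chains_from_mask(valid_tx_ant),
               chains_from_mask(valid_rx_ant));
        return 0;
}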
*/ enum iwl_ucode_sec { - IWL_UCODE_SECTION_INST, IWL_UCODE_SECTION_DATA, + IWL_UCODE_SECTION_INST, }; /* * For 16.0 uCode and above, there is no differentiation between sections, diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h index d6f6c37c09fd..36dfe0919f6b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h @@ -119,6 +119,7 @@ struct iwl_mod_params { int ant_coupling; bool bt_ch_announce; bool auto_agg; + char *nvm_file; }; #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 6199a0a597a6..acd2665afb8c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -89,6 +89,7 @@ enum nvm_sku_bits { NVM_SKU_CAP_BAND_24GHZ = BIT(0), NVM_SKU_CAP_BAND_52GHZ = BIT(1), NVM_SKU_CAP_11N_ENABLE = BIT(2), + NVM_SKU_CAP_11AC_ENABLE = BIT(3), }; /* radio config bits (actual values from NVM definition) */ @@ -258,8 +259,6 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, struct ieee80211_sta_vht_cap *vht_cap) { - /* For now, assume new devices with NVM are VHT capable */ - vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | @@ -292,7 +291,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, } static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_sw) + struct iwl_nvm_data *data, const __le16 *nvm_sw, + bool enable_vht, u8 tx_chains, u8 rx_chains) { int n_channels = iwl_init_channel_map(dev, cfg, data, &nvm_sw[NVM_CHANNELS]); @@ -305,7 +305,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, IEEE80211_BAND_2GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ, + tx_chains, rx_chains); sband = &data->bands[IEEE80211_BAND_5GHZ]; sband->band = IEEE80211_BAND_5GHZ; @@ -313,8 +314,10 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, IEEE80211_BAND_5GHZ); - iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); - iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap); + iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, + tx_chains, rx_chains); + if (enable_vht) + iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap); if (n_channels != n_used) IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", @@ -324,7 +327,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data * iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, - const __le16 *nvm_calib) + const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains) { struct iwl_nvm_data *data; u8 hw_addr[ETH_ALEN]; @@ -380,7 +383,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, data->hw_addr[4] = hw_addr[5]; data->hw_addr[5] = hw_addr[4]; - iwl_init_sbands(dev, cfg, data, nvm_sw); + iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE, + tx_chains, rx_chains); data->calib_version = 255; /* TODO: this value will prevent some checks from diff --git 
a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h index e57fb989661e..3325059c52d4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h @@ -75,6 +75,6 @@ struct iwl_nvm_data * iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, - const __le16 *nvm_calib); + const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains); #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 386f2a7c87cb..ff8cc75c189d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -100,6 +100,18 @@ /* Device system time */ #define DEVICE_SYSTEM_TIME_REG 0xA0206C +/***************************************************************************** + * 7000/3000 series SHR DTS addresses * + *****************************************************************************/ + +#define SHR_MISC_WFM_DTS_EN (0x00a10024) +#define DTSC_CFG_MODE (0x00a10604) +#define DTSC_VREF_AVG (0x00a10648) +#define DTSC_VREF5_AVG (0x00a1064c) +#define DTSC_CFG_MODE_PERIODIC (0x2) +#define DTSC_PTAT_AVG (0x00a10650) + + /** * Tx Scheduler * diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 7a13790b5bfe..84f1c8dc9741 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -189,7 +189,8 @@ enum CMD_MODE { CMD_SYNC = 0, CMD_ASYNC = BIT(0), CMD_WANT_SKB = BIT(1), - CMD_ON_DEMAND = BIT(2), + CMD_SEND_IN_RFKILL = BIT(2), + CMD_ON_DEMAND = BIT(3), }; #define DEF_CMD_PAYLOAD_SIZE 320 @@ -455,7 +456,7 @@ struct iwl_trans_ops { int (*read_mem)(struct iwl_trans *trans, u32 addr, void *buf, int dwords); int (*write_mem)(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); + const void *buf, int dwords); void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); void (*set_pmi)(struct iwl_trans *trans, bool state); @@ -761,7 +762,7 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) } static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) + const void *buf, int dwords) { return trans->ops->write_mem(trans, addr, buf, dwords); } diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile index 2acc44b40986..ff856e543ae8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/iwlwifi/mvm/Makefile @@ -3,7 +3,7 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o iwlmvm-y += scan.o time-event.o rs.o iwlmvm-y += power.o bt-coex.o -iwlmvm-y += led.o +iwlmvm-y += led.o tt.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o iwlmvm-$(CONFIG_PM_SLEEP) += d3.o diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 810bfa5f6de0..f03655f303aa 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -351,6 +351,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, enum ieee80211_band band; int ave_rssi; + lockdep_assert_held(&mvm->mutex); if (vif->type != NL80211_IFTYPE_STATION) return; @@ -365,7 +366,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, smps_mode = IEEE80211_SMPS_AUTOMATIC; if (band != IEEE80211_BAND_2GHZ) { - ieee80211_request_smps(vif, 
smps_mode); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, + smps_mode); return; } @@ -380,7 +382,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, mvmvif->id, data->notif->bt_status, data->notif->bt_traffic_load, smps_mode); - ieee80211_request_smps(vif, smps_mode); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode); /* don't reduce the Tx power if in loose scheme */ if (is_loose_coex()) diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 16bbdcc8627a..4d3c978b5c76 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1007,6 +1007,10 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) if (ret) goto out; + ret = iwl_mvm_power_update_mode(mvm, vif); + if (ret) + goto out; + /* must be last -- this switches firmware state */ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC, sizeof(d3_cfg_cmd), &d3_cfg_cmd); @@ -1214,6 +1218,26 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, iwl_free_resp(&cmd); } +static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm) +{ +#ifdef CONFIG_IWLWIFI_DEBUGFS + const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN]; + u32 len = img->sec[IWL_UCODE_SECTION_DATA].len; + u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset; + + if (!mvm->store_d3_resume_sram) + return; + + if (!mvm->d3_resume_sram) { + mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL); + if (!mvm->d3_resume_sram) + return; + } + + iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len); +#endif +} + int iwl_mvm_resume(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -1245,6 +1269,9 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) goto out_unlock; } + /* query SRAM first in case we want event logging */ + iwl_mvm_read_d3_sram(mvm); + iwl_mvm_query_wakeup_reasons(mvm, vif); out_unlock: diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 2053dccefcd6..69e0806075a2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -145,15 +145,18 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, char *buf; u8 *ptr; + if (!mvm->ucode_loaded) + return -EINVAL; + /* default is to dump the entire data segment */ if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) { - mvm->dbgfs_sram_offset = 0x800000; - if (!mvm->ucode_loaded) - return -EINVAL; img = &mvm->fw->img[mvm->cur_ucode]; - mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; + ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; + len = img->sec[IWL_UCODE_SECTION_DATA].len; + } else { + ofs = mvm->dbgfs_sram_offset; + len = mvm->dbgfs_sram_len; } - len = mvm->dbgfs_sram_len; bufsz = len * 4 + 256; buf = kzalloc(bufsz, GFP_KERNEL); @@ -167,12 +170,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, } pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len); - pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", - mvm->dbgfs_sram_offset); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", ofs); - iwl_trans_read_mem_bytes(mvm->trans, - mvm->dbgfs_sram_offset, - ptr, len); + iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); for (ofs = 0; ofs < len; ofs += 16) { pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs); hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos, @@ -300,6 +300,146 @@ static ssize_t 
iwl_dbgfs_power_down_d3_allow_write(struct file *file, return count; } +static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_dbgfs_pm_mask param, int val) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm; + + dbgfs_pm->mask |= param; + + switch (param) { + case MVM_DEBUGFS_PM_KEEP_ALIVE: { + struct ieee80211_hw *hw = mvm->hw; + int dtimper = hw->conf.ps_dtim_period ?: 1; + int dtimper_msec = dtimper * vif->bss_conf.beacon_int; + + IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); + if (val * MSEC_PER_SEC < 3 * dtimper_msec) { + IWL_WARN(mvm, + "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n", + val * MSEC_PER_SEC, 3 * dtimper_msec); + } + dbgfs_pm->keep_alive_seconds = val; + break; + } + case MVM_DEBUGFS_PM_SKIP_OVER_DTIM: + IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", + val ? "enabled" : "disabled"); + dbgfs_pm->skip_over_dtim = val; + break; + case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS: + IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val); + dbgfs_pm->skip_dtim_periods = val; + break; + case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT: + IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val); + dbgfs_pm->rx_data_timeout = val; + break; + case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT: + IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val); + dbgfs_pm->tx_data_timeout = val; + break; + case MVM_DEBUGFS_PM_DISABLE_POWER_OFF: + IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val); + dbgfs_pm->disable_power_off = val; + break; + } +} + +static ssize_t iwl_dbgfs_pm_params_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->dbgfs_data; + enum iwl_dbgfs_pm_mask param; + char buf[32] = {}; + int val; + int ret; + + if (copy_from_user(buf, user_buf, sizeof(buf))) + return -EFAULT; + + if (!strncmp("keep_alive=", buf, 11)) { + if (sscanf(buf + 11, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_KEEP_ALIVE; + } else if (!strncmp("skip_over_dtim=", buf, 15)) { + if (sscanf(buf + 15, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM; + } else if (!strncmp("skip_dtim_periods=", buf, 18)) { + if (sscanf(buf + 18, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS; + } else if (!strncmp("rx_data_timeout=", buf, 16)) { + if (sscanf(buf + 16, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT; + } else if (!strncmp("tx_data_timeout=", buf, 16)) { + if (sscanf(buf + 16, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; + } else if (!strncmp("disable_power_off=", buf, 18)) { + if (sscanf(buf + 18, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + iwl_dbgfs_update_pm(mvm, vif, param, val); + ret = iwl_mvm_power_update_mode(mvm, vif); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_pm_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->dbgfs_data; + struct iwl_powertable_cmd cmd = {}; + char buf[256]; + int bufsz = sizeof(buf); + int pos = 0; + + iwl_mvm_power_build_cmd(mvm, vif, &cmd); + 
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ? + 0 : 1); + pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", + le32_to_cpu(cmd.skip_dtim_periods)); + pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", + iwlmvm_mod_params.power_scheme); + pos += scnprintf(buf+pos, bufsz-pos, "flags = %d\n", + le16_to_cpu(cmd.flags)); + pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", + cmd.keep_alive_seconds); + + if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) { + pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", + (cmd.flags & + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? + 1 : 0); + pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", + le32_to_cpu(cmd.rx_data_timeout)); + pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", + le32_to_cpu(cmd.tx_data_timeout)); + } + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + static ssize_t iwl_dbgfs_mac_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -481,6 +621,255 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, return count; } +static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, + enum iwl_dbgfs_bf_mask param, int value) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; + + dbgfs_bf->mask |= param; + + switch (param) { + case MVM_DEBUGFS_BF_ENERGY_DELTA: + dbgfs_bf->bf_energy_delta = value; + break; + case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA: + dbgfs_bf->bf_roaming_energy_delta = value; + break; + case MVM_DEBUGFS_BF_ROAMING_STATE: + dbgfs_bf->bf_roaming_state = value; + break; + case MVM_DEBUGFS_BF_TEMPERATURE_DELTA: + dbgfs_bf->bf_temperature_delta = value; + break; + case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: + dbgfs_bf->bf_enable_beacon_filter = value; + break; + case MVM_DEBUGFS_BF_DEBUG_FLAG: + dbgfs_bf->bf_debug_flag = value; + break; + case MVM_DEBUGFS_BF_ESCAPE_TIMER: + dbgfs_bf->bf_escape_timer = value; + break; + case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT: + dbgfs_bf->ba_enable_beacon_abort = value; + break; + case MVM_DEBUGFS_BA_ESCAPE_TIMER: + dbgfs_bf->ba_escape_timer = value; + break; + } +} + +static ssize_t iwl_dbgfs_bf_params_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->dbgfs_data; + enum iwl_dbgfs_bf_mask param; + char buf[256]; + int buf_size; + int value; + int ret = 0; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (!strncmp("bf_energy_delta=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ENERGY_DELTA_MIN || + value > IWL_BF_ENERGY_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ENERGY_DELTA; + } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) { + if (sscanf(buf+24, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || + value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA; + } else if (!strncmp("bf_roaming_state=", buf, 17)) { + if (sscanf(buf+17, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ROAMING_STATE_MIN || + value > IWL_BF_ROAMING_STATE_MAX) + return -EINVAL; + param = 
MVM_DEBUGFS_BF_ROAMING_STATE; + } else if (!strncmp("bf_temperature_delta=", buf, 21)) { + if (sscanf(buf+21, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMPERATURE_DELTA_MIN || + value > IWL_BF_TEMPERATURE_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA; + } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { + if (sscanf(buf+24, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER; + } else if (!strncmp("bf_debug_flag=", buf, 14)) { + if (sscanf(buf+14, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BF_DEBUG_FLAG; + } else if (!strncmp("bf_escape_timer=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ESCAPE_TIMER_MIN || + value > IWL_BF_ESCAPE_TIMER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ESCAPE_TIMER; + } else if (!strncmp("ba_escape_timer=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BA_ESCAPE_TIMER_MIN || + value > IWL_BA_ESCAPE_TIMER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BA_ESCAPE_TIMER; + } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) { + if (sscanf(buf+23, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + iwl_dbgfs_update_bf(vif, param, value); + if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) { + ret = iwl_mvm_disable_beacon_filter(mvm, vif); + } else { + if (mvmvif->bf_enabled) + ret = iwl_mvm_enable_beacon_filter(mvm, vif); + else + ret = iwl_mvm_disable_beacon_filter(mvm, vif); + } + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_bf_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_beacon_filter_cmd cmd = { + .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, + .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, + .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, + .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, + .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT, + .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, + .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), + .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT), + .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT, + }; + + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); + if (mvmvif->bf_enabled) + cmd.bf_enable_beacon_filter = 1; + else + cmd.bf_enable_beacon_filter = 0; + + pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", + cmd.bf_energy_delta); + pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", + cmd.bf_roaming_energy_delta); + pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", + cmd.bf_roaming_state); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n", + cmd.bf_temperature_delta); + pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", + cmd.bf_enable_beacon_filter); + pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", + cmd.bf_debug_flag); + pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", + cmd.bf_escape_timer); + pos += 
scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", + cmd.ba_escape_timer); + pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", + cmd.ba_enable_beacon_abort); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +#ifdef CONFIG_PM_SLEEP +static ssize_t iwl_dbgfs_d3_sram_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[8] = {}; + int store; + + if (copy_from_user(buf, user_buf, sizeof(buf))) + return -EFAULT; + + if (sscanf(buf, "%d", &store) != 1) + return -EINVAL; + + mvm->store_d3_resume_sram = store; + + return count; +} + +static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + const struct fw_img *img; + int ofs, len, pos = 0; + size_t bufsz, ret; + char *buf; + u8 *ptr = mvm->d3_resume_sram; + + img = &mvm->fw->img[IWL_UCODE_WOWLAN]; + len = img->sec[IWL_UCODE_SECTION_DATA].len; + + bufsz = len * 4 + 256; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n", + mvm->store_d3_resume_sram ? "en" : "dis"); + + if (ptr) { + for (ofs = 0; ofs < len; ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos, + bufsz - pos, false); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "(no data captured)\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + + kfree(buf); + + return ret; +} +#endif + #define MVM_DEBUGFS_READ_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ @@ -524,9 +913,14 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart); +#ifdef CONFIG_PM_SLEEP +MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram); +#endif /* Interface specific debugfs entries */ MVM_DEBUGFS_READ_FILE_OPS(mac_params); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params); int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) { @@ -542,6 +936,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR); +#ifdef CONFIG_PM_SLEEP + MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR); +#endif /* * Create a symlink with mac80211. It will be removed when mac80211 @@ -577,9 +974,19 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return; } + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p) + MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR | + S_IRUSR); + MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); + if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + mvmvif == mvm->bf_allowed_vif) + MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + /* * Create symlink for convenience pointing to interface specific * debugfs entries for the driver. 
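The new pm_params and bf_params write handlers above all follow the same input pattern: match a "name=" prefix with strncmp(), pull the integer with sscanf() from just past the prefix, range-check it, and only then apply it under mvm->mutex. A minimal userspace sketch of that pattern for a single key; parse_keep_alive is a hypothetical helper and the locking and firmware update steps of the real handlers are omitted:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the "key=value" parsing in the debugfs
 * write handlers: strncmp() on the prefix, sscanf() on the remainder,
 * reject anything that does not parse as a single integer. */
static int parse_keep_alive(const char *buf, int *val)
{
        if (strncmp("keep_alive=", buf, 11) != 0)
                return -1;
        if (sscanf(buf + 11, "%d", val) != 1)
                return -1;
        return 0;
}

int main(void)
{
        int v;

        if (parse_keep_alive("keep_alive=4", &v) == 0)
                printf("keep_alive = %d seconds\n", v);
        return 0;
}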
For example, under diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h index 51e015d1dfb2..6f8b2c16ae17 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -75,13 +75,15 @@ enum iwl_d3_wakeup_flags { * struct iwl_d3_manager_config - D3 manager configuration command * @min_sleep_time: minimum sleep time (in usec) * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags + * @wakeup_host_timer: force wakeup after this many seconds * * The structure is used for the D3_CONFIG_CMD command. */ struct iwl_d3_manager_config { __le32 min_sleep_time; __le32 wakeup_flags; -} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */ + __le32 wakeup_host_timer; +} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ /* TODO: OFFLOADS_QUERY_API_S_VER_1 */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 81fe45f46be7..d8e19290b0f3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -101,20 +101,107 @@ enum iwl_power_flags { * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to * PSM transition - legacy PM * @sleep_interval: not in use - * @keep_alive_beacons: not in use + * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag + * is set. For example, if it is required to skip over + * one DTIM, this value need to be set to 2 (DTIM periods). * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80dbm */ struct iwl_powertable_cmd { - /* PM_POWER_TABLE_CMD_API_S_VER_5 */ + /* PM_POWER_TABLE_CMD_API_S_VER_6 */ __le16 flags; u8 keep_alive_seconds; u8 debug_flags; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; - __le32 keep_alive_beacons; + __le32 skip_dtim_periods; __le32 lprx_rssi_threshold; } __packed; +/** + * struct iwl_beacon_filter_cmd + * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) + * @id_and_color: MAC contex identifier + * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon + * to driver if delta in Energy values calculated for this and last + * passed beacon is greater than this threshold. Zero value means that + * the Energy change is ignored for beacon filtering, and beacon will + * not be forced to be sent to driver regardless of this delta. Typical + * energy delta 5dB. + * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state. + * Send beacon to driver if delta in Energy values calculated for this + * and last passed beacon is greater than this threshold. Zero value + * means that the Energy change is ignored for beacon filtering while in + * Roaming state, typical energy delta 1dB. + * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values + * calculated for current beacon is less than the threshold, use + * Roaming Energy Delta Threshold, otherwise use normal Energy Delta + * Threshold. Typical energy threshold is -72dBm. + * @bf_temperature_delta: Send Beacon to driver if delta in temperature values + * calculated for this and the last passed beacon is greater than this + * threshold. Zero value means that the temperature changeis ignored for + * beacon filtering; beacons will not be forced to be sent to driver + * regardless of whether its temerature has been changed. + * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. 
+ * @bf_filter_escape_timer: Send beacons to to driver if no beacons were passed + * for a specific period of time. Units: Beacons. + * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed + * for a longer period of time then this escape-timeout. Units: Beacons. + * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. + */ +struct iwl_beacon_filter_cmd { + u8 bf_energy_delta; + u8 bf_roaming_energy_delta; + u8 bf_roaming_state; + u8 bf_temperature_delta; + u8 bf_enable_beacon_filter; + u8 bf_debug_flag; + __le16 reserved1; + __le32 bf_escape_timer; + __le32 ba_escape_timer; + u8 ba_enable_beacon_abort; + u8 reserved2[3]; +} __packed; + +/* Beacon filtering and beacon abort */ +#define IWL_BF_ENERGY_DELTA_DEFAULT 5 +#define IWL_BF_ENERGY_DELTA_MAX 255 +#define IWL_BF_ENERGY_DELTA_MIN 0 + +#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 +#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 +#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 + +#define IWL_BF_ROAMING_STATE_DEFAULT 72 +#define IWL_BF_ROAMING_STATE_MAX 255 +#define IWL_BF_ROAMING_STATE_MIN 0 + +#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5 +#define IWL_BF_TEMPERATURE_DELTA_MAX 255 +#define IWL_BF_TEMPERATURE_DELTA_MIN 0 + +#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 + +#define IWL_BF_DEBUG_FLAG_DEFAULT 0 + +#define IWL_BF_ESCAPE_TIMER_DEFAULT 50 +#define IWL_BF_ESCAPE_TIMER_MAX 1024 +#define IWL_BF_ESCAPE_TIMER_MIN 0 + +#define IWL_BA_ESCAPE_TIMER_DEFAULT 3 +#define IWL_BA_ESCAPE_TIMER_MAX 1024 +#define IWL_BA_ESCAPE_TIMER_MIN 0 + +#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 + +#define IWL_BF_CMD_CONFIG_DEFAULTS \ + .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \ + .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \ + .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \ + .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \ + .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \ + .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \ + .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT) + #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h index 007a93b25bd7..6994232f5726 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h @@ -134,6 +134,7 @@ enum iwl_tx_flags { #define TX_CMD_SEC_WEP 0x01 #define TX_CMD_SEC_CCM 0x02 #define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_MSK 0x07 #define TX_CMD_SEC_WEP_KEY_IDX_POS 6 #define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 #define TX_CMD_SEC_KEY128 0x08 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 191dcae8ba47..cbfb3beae783 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -139,6 +139,9 @@ enum { /* Power */ POWER_TABLE_CMD = 0x77, + /* Thermal Throttling*/ + REPLY_THERMAL_MNG_BACKOFF = 0x7e, + /* Scanning */ SCAN_REQUEST_CMD = 0x80, SCAN_ABORT_CMD = 0x81, @@ -161,6 +164,8 @@ enum { CARD_STATE_CMD = 0xa0, CARD_STATE_NOTIFICATION = 0xa1, + MISSED_BEACONS_NOTIFICATION = 0xa2, + REPLY_RX_PHY_CMD = 0xc0, REPLY_RX_MPDU_CMD = 0xc1, BA_NOTIF = 0xc5, @@ -170,9 +175,13 @@ enum { BT_COEX_PROT_ENV = 0xcd, BT_PROFILE_NOTIFICATION = 0xce, + REPLY_BEACON_FILTERING_CMD = 0xd2, + REPLY_DEBUG_CMD = 0xf0, DEBUG_LOG_MSG = 0xf7, + MCAST_FILTER_CMD = 0xd0, + /* D3 commands/notifications */ D3_CONFIG_CMD = 0xd3, PROT_OFFLOAD_CONFIG_CMD = 0xd4, @@ -936,6 +945,24 @@ struct iwl_card_state_notif { } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ /** + * 
struct iwl_missed_beacons_notif - information on missed beacons + * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) + * @mac_id: interface ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. + * @consec_missed_beacons: number of consecutive missed beacons + * @num_expected_beacons: + * @num_recvd_beacons: + */ +struct iwl_missed_beacons_notif { + __le32 mac_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ + +/** * struct iwl_set_calib_default_cmd - set default value for calibration. * ( SET_CALIB_DEFAULT_CMD = 0x8e ) * @calib_index: the calibration to set value for @@ -948,4 +975,237 @@ struct iwl_set_calib_default_cmd { u8 data[0]; } __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ +#define MAX_PORT_ID_NUM 2 + +/** + * struct iwl_mcast_filter_cmd - configure multicast filter. + * @filter_own: Set 1 to filter out multicast packets sent by station itself + * @port_id: Multicast MAC addresses array specifier. This is a strange way + * to identify network interface adopted in host-device IF. + * It is used by FW as index in array of addresses. This array has + * MAX_PORT_ID_NUM members. + * @count: Number of MAC addresses in the array + * @pass_all: Set 1 to pass all multicast packets. + * @bssid: current association BSSID. + * @addr_list: Place holder for array of MAC addresses. + * IMPORTANT: add padding if necessary to ensure DWORD alignment. + */ +struct iwl_mcast_filter_cmd { + u8 filter_own; + u8 port_id; + u8 count; + u8 pass_all; + u8 bssid[6]; + u8 reserved[2]; + u8 addr_list[0]; +} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ + +struct mvm_statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + __le32 reserved[3]; +} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */ + +struct mvm_statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 rssi_ant; + __le32 reserved2; +} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */ + +struct mvm_statistics_general_common { + __le32 temperature; /* radio temperature */ + __le32 temperature_m; /* radio voltage */ + struct mvm_statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct mvm_statistics_div div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; +} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */ + +struct mvm_statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + 
__le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. */ + __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; + __le32 num_bt_kills; + __le32 mac_id; + __le32 directed_data_mpdu; +} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */ + +struct mvm_statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved; +} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */ + +struct mvm_statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */ + +#define MAX_CHAINS 3 + +struct mvm_statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; + __s8 txpower[MAX_CHAINS]; + __s8 reserved; + __le32 reserved2; +} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */ + +struct mvm_statistics_tx_channel_width { + __le32 ext_cca_narrow_ch20[1]; + __le32 ext_cca_narrow_ch40[2]; + __le32 ext_cca_narrow_ch80[3]; + __le32 ext_cca_narrow_ch160[4]; + __le32 last_tx_ch_width_indx; + __le32 rx_detected_per_ch_width[4]; + __le32 success_per_ch_width[4]; + __le32 fail_per_ch_width[4]; +}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */ + +struct mvm_statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct mvm_statistics_tx_non_phy_agg agg; + struct mvm_statistics_tx_channel_width channel_width; +} __packed; /* STATISTICS_TX_API_S_VER_4 */ + + +struct mvm_statistics_bt_activity { + __le32 hi_priority_tx_req_cnt; + __le32 hi_priority_tx_denied_cnt; + __le32 lo_priority_tx_req_cnt; + __le32 lo_priority_tx_denied_cnt; + __le32 hi_priority_rx_req_cnt; + __le32 hi_priority_rx_denied_cnt; + __le32 lo_priority_rx_req_cnt; + __le32 lo_priority_rx_denied_cnt; +} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ + +struct mvm_statistics_general { + struct mvm_statistics_general_common common; + __le32 beacon_filtered; + __le32 missed_beacons; + __s8 beacon_filter_everage_energy; + __s8 beacon_filter_reason; + __s8 beacon_filter_current_energy; + __s8 beacon_filter_reserved; + __le32 beacon_filter_delta_time; + struct 
mvm_statistics_bt_activity bt_activity; +} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */ + +struct mvm_statistics_rx { + struct mvm_statistics_rx_phy ofdm; + struct mvm_statistics_rx_phy cck; + struct mvm_statistics_rx_non_phy general; + struct mvm_statistics_rx_ht_phy ofdm_ht; +} __packed; /* STATISTICS_RX_API_S_VER_3 */ + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * REPLY_STATISTICS_CMD 0x9c, above. + * + * Statistics counters continue to increment beacon after beacon, but are + * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD + * 0x9c with CLEAR_STATS bit set (see above). + * + * uCode also issues this notification during scans. uCode clears statistics + * appropriately so that each notification contains statistics for only the + * one channel that has just been scanned. + */ + +struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */ + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general general; +} __packed; + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index e18c92dd60ec..cd7c0032cc58 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -326,6 +326,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); WARN_ON(ret); + /* + * abort after reading the nvm in case RF Kill is on, we will complete + * the init seq later when RF kill will switch to off + */ + if (iwl_mvm_is_radio_killed(mvm)) { + IWL_DEBUG_RF_KILL(mvm, + "jump over all phy activities due to RF kill\n"); + iwl_remove_notification(&mvm->notif_wait, &calib_wait); + return 1; + } + /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw)); if (ret) @@ -388,6 +399,8 @@ out: int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; + struct ieee80211_channel *chan; + struct cfg80211_chan_def chandef; lockdep_assert_held(&mvm->mutex); @@ -400,8 +413,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_run_init_mvm_ucode(mvm, false); if (ret && !iwlmvm_mod_params.init_dbg) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); + /* this can't happen */ + if (WARN_ON(ret > 0)) + ret = -ERFKILL; goto error; } + /* should stop & start HW since that INIT image just loaded */ + iwl_trans_stop_hw(mvm->trans, false); + ret = iwl_trans_start_hw(mvm->trans); + if (ret) + return ret; } if (iwlmvm_mod_params.init_dbg) @@ -443,8 +464,22 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; - IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); + /* Add all the PHY contexts */ + chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + for (i = 0; i < NUM_PHY_CTX; i++) { + /* + * The channel used here isn't relevant as it's + * going to be overwritten in the other flows. + * For now use the first channel we have. 
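/*
 * Editor's sketch (not part of the patch): the nested statistics layout above
 * splits the RX PHY counters per modulation (OFDM vs. CCK) inside
 * struct mvm_statistics_rx.  A consumer that only needs an aggregate error
 * figure can fold the two mvm_statistics_rx_phy blocks together as below.
 * This assumes the struct definitions above and the kernel's le32_to_cpu()
 * helper; "iwl_mvm_stats_rx_errors" is an illustrative name, not a driver
 * function.
 */
static u32 iwl_mvm_stats_rx_errors(const struct mvm_statistics_rx *rx)
{
	/* PLCP and CRC32 errors, summed over both legacy PHY blocks */
	return le32_to_cpu(rx->ofdm.plcp_err) +
	       le32_to_cpu(rx->ofdm.crc32_err) +
	       le32_to_cpu(rx->cck.plcp_err) +
	       le32_to_cpu(rx->cck.crc32_err);
}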
+ */ + ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], + &chandef, 1, 1); + if (ret) + goto error; + } + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: iwl_trans_stop_device(mvm->trans); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index e6eca4d66f6c..b4e8e597d2b7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -227,7 +227,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, .found_vif = false, }; u32 ac; - int ret; + int ret, i; /* * Allocate a MAC ID and a TSF for this MAC, along with the queues @@ -335,6 +335,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT; mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) + mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; + return 0; exit_fail: @@ -586,10 +589,12 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, */ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mac_data_sta *ctxt_sta) + struct iwl_mac_data_sta *ctxt_sta, + bool force_assoc_off) { /* We need the dtim_period to set the MAC as associated */ - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) { + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && + !force_assoc_off) { u32 dtim_offs; /* @@ -659,7 +664,8 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm, cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON); /* Fill the data specific for station mode */ - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta); + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta, + action == FW_CTXT_ACTION_ADD); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } @@ -677,7 +683,8 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); /* Fill the data specific for station mode */ - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta); + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta, + action == FW_CTXT_ACTION_ADD); cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); @@ -1043,3 +1050,28 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, rate); return 0; } + +static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + u16 *id = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (mvmvif->id == *id) + ieee80211_beacon_loss(vif); +} + +int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data; + u16 id = (u16)le32_to_cpu(missed_beacons->mac_id); + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_beacon_loss_iterator, + &id); + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index dd158ec571fb..c942eb0bbbeb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -127,6 +127,17 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = { }; #endif +static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) +{ + int i; + + memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); + for (i = 0; i < NUM_PHY_CTX; i++) { + mvm->phy_ctxts[i].id = 
i; + mvm->phy_ctxts[i].ref = 0; + } +} + int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; @@ -141,7 +152,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS | IEEE80211_HW_AMPDU_AGGREGATION | - IEEE80211_HW_TIMING_BEACON_ONLY; + IEEE80211_HW_TIMING_BEACON_ONLY | + IEEE80211_HW_CONNECTION_MONITOR; hw->queues = IWL_MVM_FIRST_AGG_QUEUE; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; @@ -158,7 +170,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); - hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt); + hw->chanctx_data_size = sizeof(u16); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@ -193,6 +205,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->n_addresses++; } + iwl_mvm_reset_phy_ctxts(mvm); + /* we create the 802.11 header and a max-length SSID element */ hw->wiphy->max_scan_ie_len = mvm->fw->ucode_capa.max_probe_length - 24 - 34; @@ -222,20 +236,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | - WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_EAP_IDENTITY_REQ | - WIPHY_WOWLAN_RFKILL_RELEASE; + mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; if (!iwlwifi_mod_params.sw_crypto) - hw->wiphy->wowlan.flags |= - WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | - WIPHY_WOWLAN_GTK_REKEY_FAILURE | - WIPHY_WOWLAN_4WAY_HANDSHAKE; - - hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; - hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; - hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; - hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; + mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_4WAY_HANDSHAKE; + + mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; + mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; + mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; + mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support; + hw->wiphy->wowlan = &mvm->wowlan; } #endif @@ -252,8 +266,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) { - IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n"); + if (iwl_mvm_is_radio_killed(mvm)) { + IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } @@ -345,8 +359,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvmvif->phy_ctxt = NULL; + mvmvif->phy_ctxt = NULL; } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) @@ -363,6 +376,9 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_cleanup_iterator, mvm); + mvm->p2p_device_vif = NULL; + + iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); @@ -456,6 +472,20 @@ static void iwl_mvm_power_update_iterator(void *data, u8 *mac, iwl_mvm_power_update_mode(mvm, vif); } +static struct iwl_mvm_phy_ctxt 
*iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) +{ + u16 i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < NUM_PHY_CTX; i++) + if (!mvm->phy_ctxts[i].ref) + return &mvm->phy_ctxts[i]; + + IWL_ERR(mvm, "No available PHY context\n"); + return NULL; +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -530,32 +560,34 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ iwl_mvm_power_update_mode(mvm, vif); + /* beacon filtering */ + if (!mvm->bf_allowed_vif && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p){ + mvm->bf_allowed_vif = mvmvif; + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + } + + ret = iwl_mvm_disable_beacon_filter(mvm, vif); + if (ret) + goto out_release; + /* * P2P_DEVICE interface does not have a channel context assigned to it, * so a dedicated PHY context is allocated to it and the corresponding * MAC context is bound to it at this stage. */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - struct ieee80211_channel *chan; - struct cfg80211_chan_def chandef; - - mvmvif->phy_ctxt = &mvm->phy_ctxt_roc; - /* - * The channel used here isn't relevant as it's - * going to be overwritten as part of the ROC flow. - * For now use the first channel we have. - */ - chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0]; - cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); - ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, - &chandef, 1, 1); - if (ret) + mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->phy_ctxt) { + ret = -ENOSPC; goto out_remove_mac; + } + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) - goto out_remove_phy; + goto out_unref_phy; ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta); if (ret) @@ -571,8 +603,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); - out_remove_phy: - iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt); + out_unref_phy: + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); out_remove_mac: mvmvif->phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); @@ -646,6 +678,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + } + iwl_mvm_vif_dbgfs_clean(mvm, vif); /* @@ -661,7 +698,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->p2p_device_vif = NULL; iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta); iwl_mvm_binding_remove_vif(mvm, vif); - iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; } @@ -701,6 +738,20 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, *total_flags = 0; } +static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mcast_filter_cmd mcast_filter_cmd = { + .pass_all = 1, + }; + + memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN); + + return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, + sizeof(mcast_filter_cmd), + &mcast_filter_cmd); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -722,6 +773,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, return; } iwl_mvm_bt_coex_vif_assoc(mvm, vif); + iwl_mvm_configure_mcast_filter(mvm, vif); } else if (mvmvif->ap_sta_id != 
IWL_MVM_STATION_COUNT) { /* remove AP station now that the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); @@ -733,7 +785,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update quotas\n"); } - } else if (changes & BSS_CHANGED_DTIM_PERIOD) { + } else if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon _after_ association so * remove the session protection. @@ -931,7 +983,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, switch (cmd) { case STA_NOTIFY_SLEEP: - if (atomic_read(&mvmsta->pending_frames) > 0) + if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) ieee80211_sta_block_awake(hw, sta, true); /* * The fw updates the STA to be asleep. Tx packets on the Tx @@ -984,9 +1036,13 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, mvmvif->phy_ctxt->channel->band); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { + /* enable beacon filtering */ + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif)); ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { + /* disable beacon filtering */ + WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif)); ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { @@ -1152,29 +1208,107 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct cfg80211_chan_def chandef; - int ret; + struct iwl_mvm_phy_ctxt *phy_ctxt; + int ret, i; + + IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, + duration, type); if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type); return -EINVAL; } - IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, - duration, type); - mutex_lock(&mvm->mutex); + for (i = 0; i < NUM_PHY_CTX; i++) { + phy_ctxt = &mvm->phy_ctxts[i]; + if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) + continue; + + if (phy_ctxt->ref && channel == phy_ctxt->channel) { + /* + * Unbind the P2P_DEVICE from the current PHY context, + * and if the PHY context is not used remove it. + */ + ret = iwl_mvm_binding_remove_vif(mvm, vif); + if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + + /* Bind the P2P_DEVICE to the current PHY Context */ + mvmvif->phy_ctxt = phy_ctxt; + + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (WARN(ret, "Failed binding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + goto schedule_time_event; + } + } + + /* Need to update the PHY context only if the ROC channel changed */ + if (channel == mvmvif->phy_ctxt->channel) + goto schedule_time_event; + cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); - ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc, - &chandef, 1, 1); + /* + * Change the PHY context configuration as it is currently referenced + * only by the P2P Device MAC + */ + if (mvmvif->phy_ctxt->ref == 1) { + ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, + &chandef, 1, 1); + if (ret) + goto out_unlock; + } else { + /* + * The PHY context is shared with other MACs. 
Need to remove the + * P2P Device from the binding, allocate an new PHY context and + * create a new binding + */ + phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!phy_ctxt) { + ret = -ENOSPC; + goto out_unlock; + } + + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, + 1, 1); + if (ret) { + IWL_ERR(mvm, "Failed to change PHY context\n"); + goto out_unlock; + } + + /* Unbind the P2P_DEVICE from the current PHY context */ + ret = iwl_mvm_binding_remove_vif(mvm, vif); + if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + + /* Bind the P2P_DEVICE to the new allocated PHY context */ + mvmvif->phy_ctxt = phy_ctxt; + + ret = iwl_mvm_binding_add_vif(mvm, vif); + if (WARN(ret, "Failed binding P2P_DEVICE\n")) + goto out_unlock; + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + } + +schedule_time_event: /* Schedule the time events */ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); +out_unlock: mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); - return ret; } @@ -1196,15 +1330,30 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt; int ret; + IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); + mutex_lock(&mvm->mutex); + phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!phy_ctxt) { + ret = -ENOSPC; + goto out; + } + + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def, + ctx->rx_chains_static, + ctx->rx_chains_dynamic); + if (ret) { + IWL_ERR(mvm, "Failed to add PHY context\n"); + goto out; + } - IWL_DEBUG_MAC80211(mvm, "Add PHY context\n"); - ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def, - ctx->rx_chains_static, - ctx->rx_chains_dynamic); + iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); + *phy_ctxt_id = phy_ctxt->id; +out: mutex_unlock(&mvm->mutex); return ret; } @@ -1213,10 +1362,11 @@ static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; mutex_lock(&mvm->mutex); - iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt); + iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); mutex_unlock(&mvm->mutex); } @@ -1225,7 +1375,16 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + + if (WARN_ONCE((phy_ctxt->ref > 1) && + (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | + IEEE80211_CHANCTX_CHANGE_RX_CHAINS | + IEEE80211_CHANCTX_CHANGE_RADAR)), + "Cannot change PHY. 
Ref=%d, changed=0x%X\n", + phy_ctxt->ref, changed)) + return; mutex_lock(&mvm->mutex); iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def, @@ -1239,13 +1398,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv; + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mutex_lock(&mvm->mutex); - mvmvif->phy_ctxt = phyctx; + mvmvif->phy_ctxt = phy_ctxt; switch (vif->type) { case NL80211_IFTYPE_AP: diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 8269bc562951..109200bdf5c5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -109,6 +109,7 @@ extern struct iwl_mvm_mod_params iwlmvm_mod_params; struct iwl_mvm_phy_ctxt { u16 id; u16 color; + u32 ref; /* * TODO: This should probably be removed. Currently here only for rate @@ -149,6 +150,60 @@ enum iwl_power_scheme { #define IWL_CONN_MAX_LISTEN_INTERVAL 70 +#ifdef CONFIG_IWLWIFI_DEBUGFS +enum iwl_dbgfs_pm_mask { + MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), + MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1), + MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), + MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), + MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), + MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5), +}; + +struct iwl_dbgfs_pm { + u8 keep_alive_seconds; + u32 rx_data_timeout; + u32 tx_data_timeout; + bool skip_over_dtim; + u8 skip_dtim_periods; + bool disable_power_off; + int mask; +}; + +/* beacon filtering */ + +enum iwl_dbgfs_bf_mask { + MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), + MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), + MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), + MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3), + MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4), + MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5), + MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6), + MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7), + MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8), +}; + +struct iwl_dbgfs_bf { + u8 bf_energy_delta; + u8 bf_roaming_energy_delta; + u8 bf_roaming_state; + u8 bf_temperature_delta; + u8 bf_enable_beacon_filter; + u8 bf_debug_flag; + u32 bf_escape_timer; + u32 ba_escape_timer; + u8 ba_enable_beacon_abort; + int mask; +}; +#endif + +enum iwl_mvm_smps_type_request { + IWL_MVM_SMPS_REQ_BT_COEX, + IWL_MVM_SMPS_REQ_TT, + NUM_IWL_MVM_SMPS_REQ, +}; + /** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 @@ -163,6 +218,8 @@ enum iwl_power_scheme { * @bcast_sta: station used for broadcast packets. Used by the following * vifs: P2P_DEVICE, GO and AP. * @beacon_skb: the skb used to hold the AP/GO beacon template + * @smps_requests: the requests of of differents parts of the driver, regard + the desired smps mode. 
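/*
 * Editor's sketch (not part of the patch): the per-vif smps_requests[] array
 * documented above lets independent driver components (BT coex, thermal
 * throttling) each request an SMPS mode.  One plausible way to fold those
 * requests into a single mode for mac80211 is "most restrictive wins": any
 * STATIC request forces STATIC, otherwise any DYNAMIC request forces DYNAMIC,
 * otherwise the automatic default is kept.  The real iwl_mvm_update_smps()
 * body is not shown in this excerpt, so this is an assumption about its
 * policy, not a copy of it.
 */
static enum ieee80211_smps_mode
iwl_mvm_fold_smps_requests(const enum ieee80211_smps_mode req[NUM_IWL_MVM_SMPS_REQ])
{
	enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC;
	int i;

	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (req[i] == IEEE80211_SMPS_STATIC)
			return IEEE80211_SMPS_STATIC;
		if (req[i] == IEEE80211_SMPS_DYNAMIC)
			mode = IEEE80211_SMPS_DYNAMIC;
	}
	return mode;
}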
*/ struct iwl_mvm_vif { u16 id; @@ -172,6 +229,8 @@ struct iwl_mvm_vif { bool uploaded; bool ap_active; bool monitor_active; + /* indicate whether beacon filtering is enabled */ + bool bf_enabled; u32 ap_beacon_time; @@ -214,7 +273,11 @@ struct iwl_mvm_vif { struct dentry *dbgfs_dir; struct dentry *dbgfs_slink; void *dbgfs_data; + struct iwl_dbgfs_pm dbgfs_pm; + struct iwl_dbgfs_bf dbgfs_bf; #endif + + enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; }; static inline struct iwl_mvm_vif * @@ -223,12 +286,6 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) return (void *)vif->drv_priv; } -enum iwl_mvm_status { - IWL_MVM_STATUS_HW_RFKILL, - IWL_MVM_STATUS_ROC_RUNNING, - IWL_MVM_STATUS_IN_HW_RESTART, -}; - enum iwl_scan_status { IWL_MVM_SCAN_NONE, IWL_MVM_SCAN_OS, @@ -246,6 +303,63 @@ struct iwl_nvm_section { const u8 *data; }; +/* + * Tx-backoff threshold + * @temperature: The threshold in Celsius + * @backoff: The tx-backoff in uSec + */ +struct iwl_tt_tx_backoff { + s32 temperature; + u32 backoff; +}; + +#define TT_TX_BACKOFF_SIZE 6 + +/** + * struct iwl_tt_params - thermal throttling parameters + * @ct_kill_entry: CT Kill entry threshold + * @ct_kill_exit: CT Kill exit threshold + * @ct_kill_duration: The time intervals (in uSec) in which the driver needs + * to checks whether to exit CT Kill. + * @dynamic_smps_entry: Dynamic SMPS entry threshold + * @dynamic_smps_exit: Dynamic SMPS exit threshold + * @tx_protection_entry: TX protection entry threshold + * @tx_protection_exit: TX protection exit threshold + * @tx_backoff: Array of thresholds for tx-backoff , in ascending order. + * @support_ct_kill: Support CT Kill? + * @support_dynamic_smps: Support dynamic SMPS? + * @support_tx_protection: Support tx protection? + * @support_tx_backoff: Support tx-backoff? + */ +struct iwl_tt_params { + s32 ct_kill_entry; + s32 ct_kill_exit; + u32 ct_kill_duration; + s32 dynamic_smps_entry; + s32 dynamic_smps_exit; + s32 tx_protection_entry; + s32 tx_protection_exit; + struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; + bool support_ct_kill; + bool support_dynamic_smps; + bool support_tx_protection; + bool support_tx_backoff; +}; + +/** + * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure + * @ct_kill_exit: worker to exit thermal kill + * @dynamic_smps: Is thermal throttling enabled dynamic_smps? + * @tx_backoff: The current thremal throttling tx backoff in uSec. + * @params: Parameters to configure the thermal throttling algorithm. 
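/*
 * Editor's sketch (not part of the patch): tx_backoff[] above is an ascending
 * table of {temperature, backoff} pairs.  A natural lookup returns the backoff
 * of the highest threshold the current temperature has reached, and no backoff
 * below the first threshold.  The actual selection logic lives in the new
 * tt.c, whose body is not shown in this excerpt, so treat this as an
 * illustration of the data structure rather than the driver's algorithm.
 */
static u32 iwl_mvm_tt_lookup_backoff(const struct iwl_tt_params *params, s32 temp)
{
	u32 backoff = 0;
	int i;

	for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
		if (temp < params->tx_backoff[i].temperature)
			break;
		backoff = params->tx_backoff[i].backoff;
	}
	return backoff;	/* in uSec, 0 means no tx-backoff */
}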
+ */ +struct iwl_mvm_tt_mgmt { + struct delayed_work ct_kill_exit; + bool dynamic_smps; + u32 tx_backoff; + const struct iwl_tt_params *params; +}; + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -266,6 +380,12 @@ struct iwl_mvm { unsigned long status; + /* + * for beacon filtering - + * currently only one interface can be supported + */ + struct iwl_mvm_vif *bf_allowed_vif; + enum iwl_ucode_type cur_ucode; bool ucode_loaded; bool init_ucode_run; @@ -292,6 +412,7 @@ struct iwl_mvm { struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; struct work_struct sta_drained_wk; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; + atomic_t pending_frames[IWL_MVM_STATION_COUNT]; /* configured by mac80211 */ u32 rts_threshold; @@ -312,7 +433,7 @@ struct iwl_mvm { bool prevent_power_down_d3; #endif - struct iwl_mvm_phy_ctxt phy_ctxt_roc; + struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; struct list_head time_event_list; spinlock_t time_event_lock; @@ -336,12 +457,21 @@ struct iwl_mvm { struct ieee80211_vif *p2p_device_vif; #ifdef CONFIG_PM_SLEEP + struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; +#ifdef CONFIG_IWLWIFI_DEBUGFS + bool store_d3_resume_sram; + void *d3_resume_sram; +#endif #endif /* BT-Coex */ u8 bt_kill_msk; struct iwl_bt_coex_profile_notif last_bt_notif; + + /* Thermal Throttling and CTkill */ + struct iwl_mvm_tt_mgmt thermal_throttle; + s32 temperature; /* Celsius */ }; /* Extract MVM priv from op_mode and _hw */ @@ -351,6 +481,19 @@ struct iwl_mvm { #define IWL_MAC80211_GET_MVM(_hw) \ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) +enum iwl_mvm_status { + IWL_MVM_STATUS_HW_RFKILL, + IWL_MVM_STATUS_HW_CTKILL, + IWL_MVM_STATUS_ROC_RUNNING, + IWL_MVM_STATUS_IN_HW_RESTART, +}; + +static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) +{ + return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || + test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; struct iwl_rate_info { @@ -442,8 +585,10 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); -void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, - struct iwl_mvm_phy_ctxt *ctxt); +void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt); +void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -458,6 +603,9 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); +int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -533,4 +681,36 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event rssi_event); void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +/* beacon filtering */ +#ifdef CONFIG_IWLWIFI_DEBUGFS +void +iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd); +int iwl_mvm_dbgfs_set_fw_dbg_log(struct iwl_mvm *mvm); +#else +static inline void 
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd) +{} +static inline int iwl_mvm_dbgfs_set_fw_dbg_log(struct iwl_mvm *mvm) +{ + return 0; +} +#endif +int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); + +/* SMPS */ +void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request); + +/* Thermal management and CT-kill */ +void iwl_mvm_tt_handler(struct iwl_mvm *mvm); +void iwl_mvm_tt_initialize(struct iwl_mvm *mvm); +void iwl_mvm_tt_exit(struct iwl_mvm *mvm); +void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index b8ec02f89acc..edb94ea31654 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -60,6 +60,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ +#include <linux/firmware.h> #include "iwl-trans.h" #include "mvm.h" #include "iwl-eeprom-parse.h" @@ -75,31 +76,56 @@ static const int nvm_to_read[] = { }; /* Default NVM size to read */ -#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024); +#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) +#define IWL_MAX_NVM_SECTION_SIZE 6000 -static inline void iwl_nvm_fill_read(struct iwl_nvm_access_cmd *cmd, - u16 offset, u16 length, u16 section) +#define NVM_WRITE_OPCODE 1 +#define NVM_READ_OPCODE 0 + +/* + * prepare the NVM host command w/ the pointers to the nvm buffer + * and send it to fw + */ +static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section, + u16 offset, u16 length, const u8 *data) { - cmd->offset = cpu_to_le16(offset); - cmd->length = cpu_to_le16(length); - cmd->type = cpu_to_le16(section); + struct iwl_nvm_access_cmd nvm_access_cmd = { + .offset = cpu_to_le16(offset), + .length = cpu_to_le16(length), + .type = cpu_to_le16(section), + .op_code = NVM_WRITE_OPCODE, + }; + struct iwl_host_cmd cmd = { + .id = NVM_ACCESS_CMD, + .len = { sizeof(struct iwl_nvm_access_cmd), length }, + .flags = CMD_SYNC | CMD_SEND_IN_RFKILL, + .data = { &nvm_access_cmd, data }, + /* data may come from vmalloc, so use _DUP */ + .dataflags = { 0, IWL_HCMD_DFL_DUP }, + }; + + return iwl_mvm_send_cmd(mvm, &cmd); } static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, u16 offset, u16 length, u8 *data) { - struct iwl_nvm_access_cmd nvm_access_cmd = {}; + struct iwl_nvm_access_cmd nvm_access_cmd = { + .offset = cpu_to_le16(offset), + .length = cpu_to_le16(length), + .type = cpu_to_le16(section), + .op_code = NVM_READ_OPCODE, + }; struct iwl_nvm_access_resp *nvm_resp; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = NVM_ACCESS_CMD, - .flags = CMD_SYNC | CMD_WANT_SKB, + .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &nvm_access_cmd, }, }; int ret, bytes_read, offset_read; u8 *resp_data; - iwl_nvm_fill_read(&nvm_access_cmd, offset, length, section); cmd.len[0] = sizeof(struct iwl_nvm_access_cmd); ret = iwl_mvm_send_cmd(mvm, &cmd); @@ -144,6 +170,30 @@ exit: return ret; } +static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, + const u8 *data, u16 length) +{ + int offset = 0; + + /* copy data in chunks of 2k (and remainder if any) */ + + while (offset < length) { + int 
chunk_size, ret; + + chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, + length - offset); + + ret = iwl_nvm_write_chunk(mvm, section, offset, + chunk_size, data + offset); + if (ret < 0) + return ret; + + offset += chunk_size; + } + + return 0; +} + /* * Reads an NVM section completely. * NICs prior to 7000 family doesn't have a real NVM, but just read @@ -177,7 +227,8 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, offset += ret; } - IWL_INFO(mvm, "NVM section %d read completed\n", section); + IWL_DEBUG_EEPROM(mvm->trans->dev, + "NVM section %d read completed\n", section); return offset; } @@ -200,7 +251,130 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; - return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib); + return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, + iwl_fw_valid_tx_ant(mvm->fw), + iwl_fw_valid_rx_ant(mvm->fw)); +} + +#define MAX_NVM_FILE_LEN 16384 + +/* + * HOW TO CREATE THE NVM FILE FORMAT: + * ------------------------------ + * 1. create hex file, format: + * 3800 -> header + * 0000 -> header + * 5a40 -> data + * + * rev - 6 bit (word1) + * len - 10 bit (word1) + * id - 4 bit (word2) + * rsv - 12 bit (word2) + * + * 2. flip 8bits with 8 bits per line to get the right NVM file format + * + * 3. create binary file from the hex file + * + * 4. save as "iNVM_xxx.bin" under /lib/firmware + */ +static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm) +{ + int ret, section_id, section_size; + const struct firmware *fw_entry; + const struct { + __le16 word1; + __le16 word2; + u8 data[]; + } *file_sec; + const u8 *eof; + +#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) +#define NVM_WORD2_ID(x) (x >> 12) + + /* + * Obtain NVM image via request_firmware. Since we already used + * request_firmware_nowait() for the firmware binary load and only + * get here after that we assume the NVM request can be satisfied + * synchronously. 
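/*
 * Editor's sketch (not part of the patch): a minimal, standalone illustration
 * of the external NVM file layout described above -- each section carries a
 * two-word little-endian header (length bits in word1, section id in the top
 * four bits of word2) followed by the section payload, and a pair of zero
 * words marks end-of-file.  It mirrors the NVM_WORD1_LEN()/NVM_WORD2_ID()
 * decoding used by iwl_mvm_load_external_nvm(); the function name and the
 * plain stdio/stdint environment are illustrative assumptions, not driver
 * code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static void dump_nvm_sections(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + 4 <= len) {
		uint16_t word1 = buf[off] | (uint16_t)(buf[off + 1] << 8);
		uint16_t word2 = buf[off + 2] | (uint16_t)(buf[off + 3] << 8);
		size_t section_size;

		if (!word1 && !word2)	/* EOF marker: two zero words */
			break;

		/* same as 2 * NVM_WORD1_LEN(word1) in the loader */
		section_size = 2 * 8 * (size_t)(word1 & 0x03FF);
		if (off + 4 + section_size > len) {
			fprintf(stderr, "file too short for section\n");
			break;
		}

		/* same as NVM_WORD2_ID(word2) in the loader */
		printf("section id %u, %zu bytes\n", word2 >> 12, section_size);
		off += 4 + section_size;	/* advance to the next header */
	}
}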
+ */ + ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file, + mvm->trans->dev); + if (ret) { + IWL_ERR(mvm, "ERROR: %s isn't available %d\n", + iwlwifi_mod_params.nvm_file, ret); + return ret; + } + + IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", + iwlwifi_mod_params.nvm_file, fw_entry->size); + + if (fw_entry->size < sizeof(*file_sec)) { + IWL_ERR(mvm, "NVM file too small\n"); + ret = -EINVAL; + goto out; + } + + if (fw_entry->size > MAX_NVM_FILE_LEN) { + IWL_ERR(mvm, "NVM file too large\n"); + ret = -EINVAL; + goto out; + } + + eof = fw_entry->data + fw_entry->size; + + file_sec = (void *)fw_entry->data; + + while (true) { + if (file_sec->data > eof) { + IWL_ERR(mvm, + "ERROR - NVM file too short for section header\n"); + ret = -EINVAL; + break; + } + + /* check for EOF marker */ + if (!file_sec->word1 && !file_sec->word2) { + ret = 0; + break; + } + + section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); + section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); + + if (section_size > IWL_MAX_NVM_SECTION_SIZE) { + IWL_ERR(mvm, "ERROR - section too large (%d)\n", + section_size); + ret = -EINVAL; + break; + } + + if (!section_size) { + IWL_ERR(mvm, "ERROR - section empty\n"); + ret = -EINVAL; + break; + } + + if (file_sec->data + section_size > eof) { + IWL_ERR(mvm, + "ERROR - NVM file too short for section (%d bytes)\n", + section_size); + ret = -EINVAL; + break; + } + + ret = iwl_nvm_write_section(mvm, section_id, file_sec->data, + section_size); + if (ret < 0) { + IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret); + break; + } + + /* advance to the next section */ + file_sec = (void *)(file_sec->data + section_size); + } +out: + release_firmware(fw_entry); + return ret; } int iwl_nvm_init(struct iwl_mvm *mvm) @@ -208,6 +382,17 @@ int iwl_nvm_init(struct iwl_mvm *mvm) int ret, i, section; u8 *nvm_buffer, *temp; + /* load external NVM if configured */ + if (iwlwifi_mod_params.nvm_file) { + /* move to External NVM flow */ + ret = iwl_mvm_load_external_nvm(mvm); + if (ret) + return ret; + } + + /* Read From FW NVM */ + IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); + /* TODO: find correct NVM max size for a section */ nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, GFP_KERNEL); @@ -231,8 +416,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm) if (ret < 0) return ret; - ret = 0; mvm->nvm_data = iwl_parse_nvm_sections(mvm); + if (!mvm->nvm_data) + return -ENODATA; - return ret; + return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index fe031d304d1e..d7a199b1cdef 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -222,10 +222,14 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false), + RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false), RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), + RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, + false), + RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), }; #undef RX_HANDLER @@ -288,10 +292,14 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(NET_DETECT_HOTSPOTS_CMD), CMD(NET_DETECT_HOTSPOTS_QUERY_CMD), CMD(CARD_STATE_NOTIFICATION), + CMD(MISSED_BEACONS_NOTIFICATION), CMD(BT_COEX_PRIO_TABLE), CMD(BT_COEX_PROT_ENV), 
CMD(BT_PROFILE_NOTIFICATION), CMD(BT_CONFIG), + CMD(MCAST_FILTER_CMD), + CMD(REPLY_BEACON_FILTERING_CMD), + CMD(REPLY_THERMAL_MNG_BACKOFF), }; #undef CMD @@ -392,10 +400,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (err) goto out_free; + iwl_mvm_tt_initialize(mvm); + mutex_lock(&mvm->mutex); err = iwl_run_init_mvm_ucode(mvm, true); mutex_unlock(&mvm->mutex); - if (err && !iwlmvm_mod_params.init_dbg) { + /* returns 0 if successful, 1 if success but in rfkill */ + if (err < 0 && !iwlmvm_mod_params.init_dbg) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); goto out_free; } @@ -438,10 +449,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_mvm_leds_exit(mvm); + iwl_mvm_tt_exit(mvm); + ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); +#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) + kfree(mvm->d3_resume_sram); +#endif + iwl_trans_stop_hw(mvm->trans, true); iwl_phy_db_free(mvm->phy_db); @@ -588,6 +605,16 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) ieee80211_wake_queue(mvm->hw, mq); } +void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) +{ + if (state) + set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); + else + clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); + + wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); +} + static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -597,7 +624,7 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) else clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); - wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); + wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c index a28a1d1f23eb..1b4db25d53fb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c @@ -195,21 +195,6 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, return ret; } - -struct phy_ctx_used_data { - unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)]; -}; - -static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx, - void *_data) -{ - struct phy_ctx_used_data *data = _data; - struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv; - - __set_bit(phy_ctxt->id, data->used); -} - /* * Send a command to add a PHY context based on the current HW configuration. */ @@ -217,34 +202,27 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { - struct phy_ctx_used_data data = { - .used = { }, - }; - - /* - * If this is a regular PHY context (not the ROC one) - * skip the ROC PHY context's ID. 
- */ - if (ctxt != &mvm->phy_ctxt_roc) - __set_bit(mvm->phy_ctxt_roc.id, data.used); + int ret; + WARN_ON(ctxt->ref); lockdep_assert_held(&mvm->mutex); - ctxt->color++; - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - ieee80211_iter_chan_contexts_atomic( - mvm->hw, iwl_mvm_phy_ctx_used_iter, &data); + ctxt->channel = chandef->chan; + ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, + chains_static, chains_dynamic, + FW_CTXT_ACTION_ADD, 0); - ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX); - if (WARN_ONCE(ctxt->id == NUM_PHY_CTX, - "Failed to init PHY context - no free ID!\n")) - return -EIO; - } + return ret; +} - ctxt->channel = chandef->chan; - return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, - chains_static, chains_dynamic, - FW_CTXT_ACTION_ADD, 0); +/* + * Update the number of references to the given PHY context. This is valid only + * in case the PHY context was already created, i.e., its reference count > 0. + */ +void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) +{ + lockdep_assert_held(&mvm->mutex); + ctxt->ref++; } /* @@ -264,23 +242,12 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, FW_CTXT_ACTION_MODIFY, 0); } -/* - * Send a command to the FW to remove the given phy context. - * Once the command is sent, regardless of success or failure, the context is - * marked as invalid - */ -void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) +void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { - struct iwl_phy_context_cmd cmd; - int ret; - lockdep_assert_held(&mvm->mutex); - iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0); - ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC, - sizeof(struct iwl_phy_context_cmd), - &cmd); - if (ret) - IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n", - ctxt->id); + if (WARN_ON_ONCE(!ctxt)) + return; + + ctxt->ref--; } diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index ed77e437aac4..67cf24aa72f9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -75,6 +75,54 @@ #define POWER_KEEP_ALIVE_PERIOD_SEC 25 +static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, + struct iwl_beacon_filter_cmd *cmd) +{ + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC, + sizeof(struct iwl_beacon_filter_cmd), cmd); + + if (!ret) { + IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", + cmd->ba_enable_beacon_abort); + IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", + cmd->ba_escape_timer); + IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", + cmd->bf_debug_flag); + IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", + cmd->bf_enable_beacon_filter); + IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", + cmd->bf_energy_delta); + IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", + cmd->bf_escape_timer); + IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", + cmd->bf_roaming_energy_delta); + IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", + cmd->bf_roaming_state); + IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n", + cmd->bf_temperature_delta); + } + return ret; +} + +static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, bool enable) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = 1, + .ba_enable_beacon_abort 
= enable, + }; + + if (!mvmvif->bf_enabled) + return 0; + + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); + return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); +} + static void iwl_mvm_power_log(struct iwl_mvm *mvm, struct iwl_powertable_cmd *cmd) { @@ -91,6 +139,9 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm, le32_to_cpu(cmd->tx_data_timeout)); IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", cmd->lprx_rssi_threshold); + if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) + IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", + le32_to_cpu(cmd->skip_dtim_periods)); } } @@ -103,6 +154,8 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int dtimper, dtimper_msec; int keep_alive; bool radar_detect = false; + struct iwl_mvm_vif *mvmvif __maybe_unused = + iwl_mvm_vif_from_mac80211(vif); /* * Regardless of power management state the driver must set @@ -116,6 +169,11 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && + mvmvif->dbgfs_pm.disable_power_off) + cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK); +#endif if (!vif->bss_conf.ps) return; @@ -135,8 +193,11 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* Check skip over DTIM conditions */ if (!radar_detect && (dtimper <= 10) && - (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP)) + (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || + mvm->cur_ucode == IWL_UCODE_WOWLAN)) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + cmd->skip_dtim_periods = cpu_to_le32(3); + } /* Check that keep alive period is at least 3 * DTIM */ dtimper_msec = dtimper * vif->bss_conf.beacon_int; @@ -145,12 +206,41 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC); cmd->keep_alive_seconds = keep_alive; - cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); - cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { + cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC); + } else { + cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); + cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC); + } + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) + cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds; + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { + if (mvmvif->dbgfs_pm.skip_over_dtim) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + else + cmd->flags &= + cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK); + } + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) + cmd->rx_data_timeout = + cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) + cmd->tx_data_timeout = + cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) + cmd->skip_dtim_periods = + cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods); +#endif /* CONFIG_IWLWIFI_DEBUGFS */ } int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { + int ret; + bool ba_enable; struct iwl_powertable_cmd cmd = {}; if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) @@ -159,13 +249,22 @@ int 
iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_power_build_cmd(mvm, vif, &cmd); iwl_mvm_power_log(mvm, &cmd); - return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, - sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, + sizeof(cmd), &cmd); + if (ret) + return ret; + + ba_enable = !!(cmd.flags & + cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)); + + return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable); } int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_powertable_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif __maybe_unused = + iwl_mvm_vif_from_mac80211(vif); if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; @@ -173,8 +272,82 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF && + mvmvif->dbgfs_pm.disable_power_off) + cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK); +#endif iwl_mvm_power_log(mvm, &cmd); return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC, sizeof(cmd), &cmd); } + +#ifdef CONFIG_IWLWIFI_DEBUGFS +void +iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, + struct iwl_beacon_filter_cmd *cmd) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; + + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) + cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) + cmd->bf_roaming_energy_delta = + dbgfs_bf->bf_roaming_energy_delta; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) + cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA) + cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) + cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag; + if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) + cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer); + if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) + cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer); + if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) + cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort; +} +#endif + +int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = 1, + }; + int ret; + + if (mvmvif != mvm->bf_allowed_vif || + vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); + ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); + + if (!ret) + mvmvif->bf_enabled = true; + + return ret; +} + +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_beacon_filter_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) + return 0; + + ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); + + if (!ret) + mvmvif->bf_enabled = false; + + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 55334d542e26..6a050c69e7d0 100644 --- 
a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -3080,3 +3080,29 @@ void iwl_mvm_rate_control_unregister(void) { ieee80211_rate_control_unregister(&rs_mvm_ops); } + +/** + * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable + * Tx protection, according to this rquest and previous requests, + * and send the LQ command. + * @lq: The LQ command + * @mvmsta: The station + * @enable: Enable Tx protection? + */ +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, + struct iwl_mvm_sta *mvmsta, bool enable) +{ + lockdep_assert_held(&mvm->mutex); + + if (enable) { + if (mvmsta->tx_protection == 0) + lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK; + mvmsta->tx_protection++; + } else { + mvmsta->tx_protection--; + if (mvmsta->tx_protection == 0) + lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK; + } + + return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 219c6857cc0f..f66155a57238 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -390,4 +390,9 @@ extern int iwl_mvm_rate_control_register(void); */ extern void iwl_mvm_rate_control_unregister(void); +struct iwl_mvm_sta; + +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, + struct iwl_mvm_sta *mvmsta, bool enable); + #endif /* __rs__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 4dfc21a3e83e..e4930d5027d2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -363,3 +363,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, rxb, &rx_status); return 0; } + +/* + * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler + * + * TODO: This handler is implemented partially. + * It only gets the NIC's temperature. + */ +int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_notif_statistics *stats = (void *)&pkt->data; + struct mvm_statistics_general_common *common = &stats->general.common; + + if (mvm->temperature != le32_to_cpu(common->temperature)) { + mvm->temperature = le32_to_cpu(common->temperature); + iwl_mvm_tt_handler(mvm); + } + + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 0fd96e4da461..2278858d5658 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -64,6 +64,7 @@ #include "mvm.h" #include "sta.h" +#include "rs.h" static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm) { @@ -217,9 +218,11 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvmvif->color); mvm_sta->vif = vif; mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + mvm_sta->tx_protection = 0; + mvm_sta->tt_tx_protection = false; /* HW restart, don't assume the memory has been zeroed */ - atomic_set(&mvm_sta->pending_frames, 0); + atomic_set(&mvm->pending_frames[sta_id], 0); mvm_sta->tid_disable_agg = 0; mvm_sta->tfd_queue_msk = 0; for (i = 0; i < IEEE80211_NUM_ACS; i++) @@ -407,14 +410,21 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, } /* + * Make sure that the tx response code sees the station as -EBUSY and + * calls the drain worker. + */ + spin_lock_bh(&mvm_sta->lock); + /* * There are frames pending on the AC queues for this station. * We need to wait until all the frames are drained... 
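/*
 * Editor's sketch (not part of the patch): a standalone model of the
 * reference-counted Tx protection introduced in rs.c above.  The LQ RTS flag
 * is set when the first user enables protection and cleared only when the
 * last user disables it; intermediate calls just move the counter.  The names
 * here (tx_prot_model, rts) are illustrative, not driver identifiers.
 */
#include <assert.h>
#include <stdbool.h>

struct tx_prot_model {
	int refcount;	/* mirrors mvmsta->tx_protection */
	bool rts;	/* mirrors LQ_FLAG_SET_STA_TLC_RTS_MSK in the LQ flags */
};

static void tx_prot_set(struct tx_prot_model *m, bool enable)
{
	if (enable) {
		if (m->refcount++ == 0)
			m->rts = true;	/* first user turns RTS/CTS on */
	} else {
		if (--m->refcount == 0)
			m->rts = false;	/* last user turns it back off */
	}
}

int main(void)
{
	struct tx_prot_model m = { 0, false };

	tx_prot_set(&m, true);	/* e.g. aggregation enabled */
	tx_prot_set(&m, true);	/* e.g. thermal throttling kicks in */
	tx_prot_set(&m, false);
	assert(m.refcount == 1 && m.rts);	/* still protected */
	tx_prot_set(&m, false);
	assert(m.refcount == 0 && !m.rts);	/* protection dropped */
	return 0;
}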
*/ - if (atomic_read(&mvm_sta->pending_frames)) { - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-EBUSY)); + spin_unlock_bh(&mvm_sta->lock); + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); } else { + spin_unlock_bh(&mvm_sta->lock); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); } @@ -791,21 +801,23 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, min(mvmsta->max_agg_bufsize, buf_size); mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", + sta->addr, tid); + if (mvm->cfg->ht_params->use_rts_for_aggregation) { /* * switch to RTS/CTS if it is the preferred protection * method for HT traffic + * this function also sends the LQ command */ - mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK; + return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, + mvmsta, true); /* * TODO: remove the TLC_RTS flag when we tear down the last * AGG session (agg_tids_count in DVM) */ } - IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", - sta->addr, tid); - return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false); } diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index 12abd2d71835..3efa0a0cc987 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -274,8 +274,9 @@ struct iwl_mvm_tid_data { * @bt_reduced_txpower: is reduced tx power enabled for this station * @lock: lock to protect the whole struct. Since %tid_data is accessed from Tx * and from Tx response flow, it needs a spinlock. - * @pending_frames: number of frames for this STA on the shared Tx queues. * @tid_data: per tid data. Look at %iwl_mvm_tid_data. + * @tx_protection: reference counter for controlling the Tx protection. + * @tt_tx_protection: is Tx protection enabled because of thermal throttling? * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -290,7 +291,6 @@ struct iwl_mvm_sta { u8 max_agg_bufsize; bool bt_reduced_txpower; spinlock_t lock; - atomic_t pending_frames; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; @@ -298,6 +298,10 @@ struct iwl_mvm_sta { #ifdef CONFIG_PM_SLEEP u16 last_seq_ctl; #endif + + /* Temporary, until the new TLC controls the Tx protection */ + s8 tx_protection; + bool tt_tx_protection; }; /** diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c new file mode 100644 index 000000000000..4665fc033c17 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -0,0 +1,509 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#include "mvm.h" +#include "iwl-config.h" +#include "iwl-io.h" +#include "iwl-csr.h" +#include "iwl-prph.h" + +#define OTP_DTS_DIODE_DEVIATION 96 /*in words*/ +/* VBG - Voltage Band Gap error data (temperature offset) */ +#define OTP_WP_DTS_VBG (OTP_DTS_DIODE_DEVIATION + 2) +#define MEAS_VBG_MIN_VAL 2300 +#define MEAS_VBG_MAX_VAL 3000 +#define MEAS_VBG_DEFAULT_VAL 2700 +#define DTS_DIODE_VALID(flags) (flags & DTS_DIODE_REG_FLAGS_PASS_ONCE) +#define MIN_TEMPERATURE 0 +#define MAX_TEMPERATURE 125 +#define TEMPERATURE_ERROR (MAX_TEMPERATURE + 1) +#define PTAT_DIGITAL_VALUE_MIN_VALUE 0 +#define PTAT_DIGITAL_VALUE_MAX_VALUE 0xFF +#define DTS_VREFS_NUM 5 +static inline u32 DTS_DIODE_GET_VREFS_ID(u32 flags) +{ + return (flags & DTS_DIODE_REG_FLAGS_VREFS_ID) >> + DTS_DIODE_REG_FLAGS_VREFS_ID_POS; +} + +#define CALC_VREFS_MIN_DIFF 43 +#define CALC_VREFS_MAX_DIFF 51 +#define CALC_LUT_SIZE (1 + CALC_VREFS_MAX_DIFF - CALC_VREFS_MIN_DIFF) +#define CALC_LUT_INDEX_OFFSET CALC_VREFS_MIN_DIFF +#define CALC_TEMPERATURE_RESULT_SHIFT_OFFSET 23 + +/* + * @digital_value: The diode's digital-value sampled (temperature/voltage) + * @vref_low: The lower voltage-reference (the vref just below the diode's + * sampled digital-value) + * @vref_high: The higher voltage-reference (the vref just above the diode's + * sampled digital-value) + * @flags: bits[1:0]: The ID of the Vrefs pair (lowVref,highVref) + * bits[6:2]: Reserved. + * bits[7:7]: Indicates completion of at least 1 successful sample + * since last DTS reset. + */ +struct iwl_mvm_dts_diode_bits { + u8 digital_value; + u8 vref_low; + u8 vref_high; + u8 flags; +} __packed; + +union dts_diode_results { + u32 reg_value; + struct iwl_mvm_dts_diode_bits bits; +} __packed; + +static s16 iwl_mvm_dts_get_volt_band_gap(struct iwl_mvm *mvm) +{ + struct iwl_nvm_section calib_sec; + const __le16 *calib; + u16 vbg; + + /* TODO: move parsing to NVM code */ + calib_sec = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION]; + calib = (__le16 *)calib_sec.data; + + vbg = le16_to_cpu(calib[OTP_WP_DTS_VBG]); + + if (vbg < MEAS_VBG_MIN_VAL || vbg > MEAS_VBG_MAX_VAL) + vbg = MEAS_VBG_DEFAULT_VAL; + + return vbg; +} + +static u16 iwl_mvm_dts_get_ptat_deviation_offset(struct iwl_mvm *mvm) +{ + const u8 *calib; + u8 ptat, pa1, pa2, median; + + /* TODO: move parsing to NVM code */ + calib = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION].data; + ptat = calib[OTP_DTS_DIODE_DEVIATION]; + pa1 = calib[OTP_DTS_DIODE_DEVIATION + 1]; + pa2 = calib[OTP_DTS_DIODE_DEVIATION + 2]; + + /* get the median: */ + if (ptat > pa1) { + if (ptat > pa2) + median = (pa1 > pa2) ? pa1 : pa2; + else + median = ptat; + } else { + if (pa1 > pa2) + median = (ptat > pa2) ? 
ptat : pa2; + else + median = pa1; + } + + return ptat - median; +} + +static u8 iwl_mvm_dts_calibrate_ptat_deviation(struct iwl_mvm *mvm, u8 value) +{ + /* Calibrate the PTAT digital value, based on PTAT deviation data: */ + s16 new_val = value - iwl_mvm_dts_get_ptat_deviation_offset(mvm); + + if (new_val > PTAT_DIGITAL_VALUE_MAX_VALUE) + new_val = PTAT_DIGITAL_VALUE_MAX_VALUE; + else if (new_val < PTAT_DIGITAL_VALUE_MIN_VALUE) + new_val = PTAT_DIGITAL_VALUE_MIN_VALUE; + + return new_val; +} + +static bool dts_get_adjacent_vrefs(struct iwl_mvm *mvm, + union dts_diode_results *avg_ptat) +{ + u8 vrefs_results[DTS_VREFS_NUM]; + u8 low_vref_index = 0, flags; + u32 reg; + + reg = iwl_read_prph(mvm->trans, DTSC_VREF_AVG); + memcpy(vrefs_results, &reg, sizeof(reg)); + reg = iwl_read_prph(mvm->trans, DTSC_VREF5_AVG); + vrefs_results[4] = reg & 0xff; + + if (avg_ptat->bits.digital_value < vrefs_results[0] || + avg_ptat->bits.digital_value > vrefs_results[4]) + return false; + + if (avg_ptat->bits.digital_value > vrefs_results[3]) + low_vref_index = 3; + else if (avg_ptat->bits.digital_value > vrefs_results[2]) + low_vref_index = 2; + else if (avg_ptat->bits.digital_value > vrefs_results[1]) + low_vref_index = 1; + + avg_ptat->bits.vref_low = vrefs_results[low_vref_index]; + avg_ptat->bits.vref_high = vrefs_results[low_vref_index + 1]; + flags = avg_ptat->bits.flags; + avg_ptat->bits.flags = + (flags & ~DTS_DIODE_REG_FLAGS_VREFS_ID) | + (low_vref_index & DTS_DIODE_REG_FLAGS_VREFS_ID); + return true; +} + +/* + * return true if the results are valid, and false otherwise. + */ +static bool dts_read_ptat_avg_results(struct iwl_mvm *mvm, + union dts_diode_results *avg_ptat) +{ + u32 reg; + u8 tmp; + + /* fill the diode value and pass_once with avg-reg results */ + reg = iwl_read_prph(mvm->trans, DTSC_PTAT_AVG); + reg &= DTS_DIODE_REG_DIG_VAL | DTS_DIODE_REG_PASS_ONCE; + avg_ptat->reg_value = reg; + + /* calibrate the PTAT digital value */ + tmp = avg_ptat->bits.digital_value; + tmp = iwl_mvm_dts_calibrate_ptat_deviation(mvm, tmp); + avg_ptat->bits.digital_value = tmp; + + /* + * fill vrefs fields, based on the avgVrefs results + * and the diode value + */ + return dts_get_adjacent_vrefs(mvm, avg_ptat) && + DTS_DIODE_VALID(avg_ptat->bits.flags); +} + +static s32 calculate_nic_temperature(union dts_diode_results avg_ptat, + u16 volt_band_gap) +{ + u32 tmp_result; + u8 vrefs_diff; + /* + * For temperature calculation (at the end, shift right by 23) + * LUT[(D2-D1)] = ROUND{ 2^23 / ((D2-D1)*9*10) } + * (D2-D1) == 43 44 45 46 47 48 49 50 51 + */ + static const u16 calc_lut[CALC_LUT_SIZE] = { + 2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828, + }; + + /* + * The diff between the high and low voltage-references is assumed + * to be strictly in the range [43, 51] + */ + vrefs_diff = avg_ptat.bits.vref_high - avg_ptat.bits.vref_low; + + if (vrefs_diff < CALC_VREFS_MIN_DIFF || + vrefs_diff > CALC_VREFS_MAX_DIFF) + return TEMPERATURE_ERROR; + + /* calculate the result: */ + tmp_result = + vrefs_diff * (DTS_DIODE_GET_VREFS_ID(avg_ptat.bits.flags) + 9); + tmp_result += avg_ptat.bits.digital_value; + tmp_result -= avg_ptat.bits.vref_high; + + /* multiply by the LUT value (based on the diff) */ + tmp_result *= calc_lut[vrefs_diff - CALC_LUT_INDEX_OFFSET]; + + /* + * Get the BandGap (the voltage references source) error data + * (temperature offset) + */ + tmp_result *= volt_band_gap; + + /* + * here, the tmp_result value can be up to 32 bits. We want to right-shift + * it *without* sign-extension.
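To make the arithmetic in calculate_nic_temperature() concrete, here is a small stand-alone program that walks the same steps with made-up diode readings (the real inputs come from the DTSC_* PRPH registers and the NVM calibration section): with a vref diff of 47, vref pair 1, a calibrated diode value of 130 and the default band-gap value of 2700, the result works out to 47 degrees C.

/* Worked example of the DTS temperature formula above; inputs are invented. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* LUT[d] = round(2^23 / (d * 9 * 10)) for d = 43..51 */
	static const uint16_t calc_lut[] = {
		2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828,
	};
	uint32_t digital_value = 130;	/* calibrated diode reading */
	uint32_t vref_low = 103, vref_high = 150;
	uint32_t vrefs_id = 1;		/* which (low, high) vref pair was used */
	uint32_t vbg = 2700;		/* MEAS_VBG_DEFAULT_VAL */
	uint32_t diff = vref_high - vref_low;	/* 47, inside [43, 51] */
	uint32_t tmp;

	tmp = diff * (vrefs_id + 9);		/* 470 */
	tmp += digital_value;			/* 600 */
	tmp -= vref_high;			/* 450 */
	tmp *= calc_lut[diff - 43];		/* 450 * 1983 = 892350 */
	tmp *= vbg;				/* 2409345000, still fits in u32 */
	tmp >>= 23;				/* 287, i.e. inside [200, 365] */
	assert((int)tmp - 240 == 47);		/* final NIC temperature in C */
	return 0;
}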
+ */ + tmp_result = tmp_result >> CALC_TEMPERATURE_RESULT_SHIFT_OFFSET; + + /* + * at this point, tmp_result should be in the range: + * 200 <= tmp_result <= 365 + */ + return (s16)tmp_result - 240; +} + +static s32 check_nic_temperature(struct iwl_mvm *mvm) +{ + u16 volt_band_gap; + union dts_diode_results avg_ptat; + + volt_band_gap = iwl_mvm_dts_get_volt_band_gap(mvm); + + /* disable DTS */ + iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0); + + /* SV initialization */ + iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 1); + iwl_write_prph(mvm->trans, DTSC_CFG_MODE, + DTSC_CFG_MODE_PERIODIC); + + /* wait for results */ + msleep(100); + if (!dts_read_ptat_avg_results(mvm, &avg_ptat)) + return TEMPERATURE_ERROR; + + /* disable DTS */ + iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0); + + return calculate_nic_temperature(avg_ptat, volt_band_gap); +} + +static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) +{ + u32 duration = mvm->thermal_throttle.params->ct_kill_duration; + + IWL_ERR(mvm, "Enter CT Kill\n"); + iwl_mvm_set_hw_ctkill_state(mvm, true); + schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, + round_jiffies_relative(duration * HZ)); +} + +static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) +{ + IWL_ERR(mvm, "Exit CT Kill\n"); + iwl_mvm_set_hw_ctkill_state(mvm, false); +} + +static void check_exit_ctkill(struct work_struct *work) +{ + struct iwl_mvm_tt_mgmt *tt; + struct iwl_mvm *mvm; + u32 duration; + s32 temp; + + tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); + mvm = container_of(tt, struct iwl_mvm, thermal_throttle); + + duration = tt->params->ct_kill_duration; + + iwl_trans_start_hw(mvm->trans); + temp = check_nic_temperature(mvm); + iwl_trans_stop_hw(mvm->trans, false); + + if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) { + IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n"); + goto reschedule; + } + IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); + + if (temp <= tt->params->ct_kill_exit) { + iwl_mvm_exit_ctkill(mvm); + return; + } + +reschedule: + schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, + round_jiffies(duration * HZ)); +} + +static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = _data; + enum ieee80211_smps_mode smps_mode; + + lockdep_assert_held(&mvm->mutex); + + if (mvm->thermal_throttle.dynamic_smps) + smps_mode = IEEE80211_SMPS_DYNAMIC; + else + smps_mode = IEEE80211_SMPS_AUTOMATIC; + + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode); +} + +static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + int i, err; + + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(sta)) + continue; + mvmsta = (void *)sta->drv_priv; + if (enable == mvmsta->tt_tx_protection) + continue; + err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq, + mvmsta, enable); + if (err) { + IWL_ERR(mvm, "Failed to %s Tx protection\n", + enable ? "enable" : "disable"); + } else { + IWL_DEBUG_TEMP(mvm, "%s Tx protection\n", + enable ? 
"Enable" : "Disable"); + mvmsta->tt_tx_protection = enable; + } + } +} + +static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_THERMAL_MNG_BACKOFF, + .len = { sizeof(u32), }, + .data = { &backoff, }, + .flags = CMD_SYNC, + }; + + if (iwl_mvm_send_cmd(mvm, &cmd) == 0) { + IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n", + backoff); + mvm->thermal_throttle.tx_backoff = backoff; + } else { + IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n"); + } +} + +void iwl_mvm_tt_handler(struct iwl_mvm *mvm) +{ + const struct iwl_tt_params *params = mvm->thermal_throttle.params; + struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; + s32 temperature = mvm->temperature; + int i; + u32 tx_backoff; + + IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature); + + if (params->support_ct_kill && temperature >= params->ct_kill_entry) { + iwl_mvm_enter_ctkill(mvm); + return; + } + + if (params->support_dynamic_smps) { + if (!tt->dynamic_smps && + temperature >= params->dynamic_smps_entry) { + IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n"); + tt->dynamic_smps = true; + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tt_smps_iterator, mvm); + } else if (tt->dynamic_smps && + temperature <= params->dynamic_smps_exit) { + IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n"); + tt->dynamic_smps = false; + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tt_smps_iterator, mvm); + } + } + + if (params->support_tx_protection) { + if (temperature >= params->tx_protection_entry) + iwl_mvm_tt_tx_protection(mvm, true); + else if (temperature <= params->tx_protection_exit) + iwl_mvm_tt_tx_protection(mvm, false); + } + + if (params->support_tx_backoff) { + tx_backoff = 0; + for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { + if (temperature < params->tx_backoff[i].temperature) + break; + tx_backoff = params->tx_backoff[i].backoff; + } + if (tt->tx_backoff != tx_backoff) + iwl_mvm_tt_tx_backoff(mvm, tx_backoff); + } +} + +static const struct iwl_tt_params iwl7000_tt_params = { + .ct_kill_entry = 118, + .ct_kill_exit = 96, + .ct_kill_duration = 5, + .dynamic_smps_entry = 114, + .dynamic_smps_exit = 110, + .tx_protection_entry = 114, + .tx_protection_exit = 108, + .tx_backoff = { + {.temperature = 112, .backoff = 200}, + {.temperature = 113, .backoff = 600}, + {.temperature = 114, .backoff = 1200}, + {.temperature = 115, .backoff = 2000}, + {.temperature = 116, .backoff = 4000}, + {.temperature = 117, .backoff = 10000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + +void iwl_mvm_tt_initialize(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; + + IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n"); + tt->params = &iwl7000_tt_params; + INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); +} + +void iwl_mvm_tt_exit(struct iwl_mvm *mvm) +{ + cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); + IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); +} diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 479074303bd7..f212f16502ff 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -416,9 +416,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - if (mvmsta->vif->type == NL80211_IFTYPE_AP && - txq_id < IWL_MVM_FIRST_AGG_QUEUE) 
- atomic_inc(&mvmsta->pending_frames); + if (txq_id < IWL_MVM_FIRST_AGG_QUEUE) + atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; @@ -680,16 +679,41 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* * If the txq is not an AMPDU queue, there is no chance we freed * several skbs. Check that out... - * If there are no pending frames for this STA, notify mac80211 that - * this station can go to sleep in its STA table. */ - if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && mvmsta && - !WARN_ON(skb_freed > 1) && - mvmsta->vif->type == NL80211_IFTYPE_AP && - atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) { - ieee80211_sta_block_awake(mvm->hw, sta, false); - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); + if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) && + atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) { + if (mvmsta) { + /* + * If there are no pending frames for this STA, notify + * mac80211 that this station can go to sleep in its + * STA table. + */ + if (mvmsta->vif->type == NL80211_IFTYPE_AP) + ieee80211_sta_block_awake(mvm->hw, sta, false); + /* + * We might very well have taken the mvmsta pointer while + * the station was being removed. The remove flow might + * have seen a pending_frame (because we didn't take + * the lock) even if the queues are now drained. So make + * really sure now that this station is not being + * removed. If it is, run the drain worker to remove it. + */ + spin_lock_bh(&mvmsta->lock); + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (IS_ERR_OR_NULL(sta)) { + /* + * Station disappeared in the meantime: + * so we are draining. + */ + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); + } + spin_unlock_bh(&mvmsta->lock); + } else if (!mvmsta) { + /* Tx response without STA, so we are draining */ + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); + } } rcu_read_unlock(); diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index 687b34e387ac..c9b44ab4af07 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -471,3 +471,34 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, return iwl_mvm_send_cmd(mvm, &cmd); } + +/** + * iwl_mvm_update_smps - Get a request to change the SMPS mode + * @req_type: The part of the driver that called for the change. + * @smps_request: The requested SMPS mode. + * + * Get a request to change the SMPS mode, + * and change it according to all other requests in the driver.
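The iwl_mvm_update_smps() helper documented above (its body follows in the utils.c hunk) folds the per-requester votes into one mode: any STATIC request wins outright, otherwise any DYNAMIC request, otherwise AUTOMATIC. Below is a minimal stand-alone model of that precedence; the enum values and the "requester slot" names are stand-ins for illustration, not the mac80211 or mvm definitions.

/* Toy model of the SMPS request aggregation; not the driver's types. */
#include <assert.h>

enum smps { SMPS_AUTOMATIC, SMPS_DYNAMIC, SMPS_STATIC };
#define NUM_REQ 2	/* e.g. one slot per requester, such as thermal throttling */

static enum smps combine_smps(const enum smps req[NUM_REQ])
{
	enum smps mode = SMPS_AUTOMATIC;
	int i;

	for (i = 0; i < NUM_REQ; i++) {
		if (req[i] == SMPS_STATIC)
			return SMPS_STATIC;	/* strictest request, stop here */
		if (req[i] == SMPS_DYNAMIC)
			mode = SMPS_DYNAMIC;
	}
	return mode;
}

int main(void)
{
	enum smps req[NUM_REQ] = { SMPS_AUTOMATIC, SMPS_AUTOMATIC };

	assert(combine_smps(req) == SMPS_AUTOMATIC);
	req[1] = SMPS_DYNAMIC;		/* thermal throttling kicks in */
	assert(combine_smps(req) == SMPS_DYNAMIC);
	req[0] = SMPS_STATIC;
	assert(combine_smps(req) == SMPS_STATIC);
	return 0;
}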
+ */ +void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request) +{ + struct iwl_mvm_vif *mvmvif; + enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; + int i; + + lockdep_assert_held(&mvm->mutex); + mvmvif = iwl_mvm_vif_from_mac80211(vif); + mvmvif->smps_requests[req_type] = smps_request; + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { + if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { + smps_mode = IEEE80211_SMPS_STATIC; + break; + } + if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) + smps_mode = IEEE80211_SMPS_DYNAMIC; + } + + ieee80211_request_smps(vif, smps_mode); +} diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 8cb53ec2b77b..db7bdd35a9c5 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -256,10 +256,54 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { /* 7000 Series */ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)}, - {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, + +/* 3160 Series */ + {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, + 
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, {0} }; diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 50ba0a468f94..0b021305eedf 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -405,20 +405,27 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, { u8 *v_addr; dma_addr_t p_addr; - u32 offset; + u32 offset, chunk_sz = section->len; int ret = 0; IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", section_num); - v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL); - if (!v_addr) - return -ENOMEM; + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!v_addr) { + IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); + chunk_sz = PAGE_SIZE; + v_addr = dma_alloc_coherent(trans->dev, chunk_sz, + &p_addr, GFP_KERNEL); + if (!v_addr) + return -ENOMEM; + } - for (offset = 0; offset < section->len; offset += PAGE_SIZE) { + for (offset = 0; offset < section->len; offset += chunk_sz) { u32 copy_size; - copy_size = min_t(u32, PAGE_SIZE, section->len - offset); + copy_size = min_t(u32, chunk_sz, section->len - offset); memcpy(v_addr, (u8 *)section->data + offset, copy_size); ret = iwl_pcie_load_firmware_chunk(trans, @@ -432,7 +439,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, } } - dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr); + dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); return ret; } @@ -573,10 +580,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans) { - /* let the ucode operate on its own */ - iwl_write32(trans, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - iwl_disable_interrupts(trans); iwl_pcie_disable_ict(trans); @@ -636,9 +639,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } - iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - *status = IWL_D3_STATUS_ALIVE; return 0; } @@ -917,11 +917,11 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, } static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) + const void *buf, int dwords) { unsigned long flags; int offs, ret = 0; - u32 *vals = buf; + const u32 *vals = buf; if (iwl_trans_grab_nic_access(trans, false, &flags)) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index c5e30294c5ac..a35c6aefbc3e 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -224,13 +224,13 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, switch (sec_ctl & TX_CMD_SEC_MSK) { case TX_CMD_SEC_CCM: - len += CCMP_MIC_LEN; + len += IEEE80211_CCMP_MIC_LEN; break; case TX_CMD_SEC_TKIP: - len += TKIP_ICV_LEN; + len += IEEE80211_TKIP_ICV_LEN; break; case TX_CMD_SEC_WEP: - len += WEP_IV_LEN + WEP_ICV_LEN; + len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; break; } @@ -1045,6 +1045,10 @@ static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans, (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); } +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define 
BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { @@ -1518,6 +1522,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { IWL_ERR(trans, "FW error in SYNC CMD %s\n", get_cmd_string(trans_pcie, cmd->id)); + dump_stack(); ret = -EIO; goto cancel; } @@ -1564,7 +1569,8 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) return -EIO; - if (test_bit(STATUS_RFKILL, &trans_pcie->status)) { + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL, &trans_pcie->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id); return -ERFKILL; diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index 4f614aad9ded..f7ff4725506a 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -3,13 +3,13 @@ config MWIFIEX depends on CFG80211 ---help--- This adds support for wireless adapters based on Marvell - 802.11n chipsets. + 802.11n/ac chipsets. If you choose to build it as a module, it will be called mwifiex. config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797" + tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8897" depends on MWIFIEX && MMC select FW_LOADER ---help--- diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index e42b266a023a..856aea25052b 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -1231,6 +1231,51 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy, return 0; } +/* cfg80211 operation handler for del_station. + * This function deauthenticates the station whose MAC address is passed in + * the mac parameter. If mac is NULL/broadcast, all stations in the associated + * station list are deauthenticated. If the BSS is not started or the + * associated station list is empty, no action is taken.
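The handler described above treats a NULL or broadcast MAC as "deauthenticate everyone" and a unicast MAC as a single-station lookup. The following stand-alone sketch shows only that dispatch shape; the list type and the deauth callback are simplified stand-ins for illustration, not the mwifiex structures or firmware command.

/* Sketch of the NULL/broadcast-vs-unicast dispatch; not mwifiex code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct sta_node {
	uint8_t mac[ETH_ALEN];
	struct sta_node *next;
};

static bool is_broadcast_mac(const uint8_t *mac)
{
	static const uint8_t bc[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return !memcmp(mac, bc, ETH_ALEN);
}

static void deauth(struct sta_node *sta)
{
	printf("deauth station ending in %02x\n", sta->mac[ETH_ALEN - 1]);
}

static void del_station(struct sta_node *list, const uint8_t *mac)
{
	struct sta_node *sta;

	for (sta = list; sta; sta = sta->next) {
		/* NULL or broadcast hits every entry, unicast only the match */
		if (!mac || is_broadcast_mac(mac) ||
		    !memcmp(sta->mac, mac, ETH_ALEN))
			deauth(sta);
	}
}

int main(void)
{
	struct sta_node b = { { 0, 0, 0, 0, 0, 2 }, NULL };
	struct sta_node a = { { 0, 0, 0, 0, 0, 1 }, &b };

	del_station(&a, a.mac);		/* one station */
	del_station(&a, NULL);		/* everyone */
	return 0;
}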
+ */ +static int +mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct mwifiex_sta_node *sta_node; + unsigned long flags; + + if (list_empty(&priv->sta_list) || !priv->bss_started) + return 0; + + if (!mac || is_broadcast_ether_addr(mac)) { + wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__); + list_for_each_entry(sta_node, &priv->sta_list, list) { + if (mwifiex_send_cmd_sync(priv, + HostCmd_CMD_UAP_STA_DEAUTH, + HostCmd_ACT_GEN_SET, 0, + sta_node->mac_addr)) + return -1; + mwifiex_uap_del_sta_data(priv, sta_node); + } + } else { + wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, mac); + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_node = mwifiex_get_sta_entry(priv, mac); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + if (sta_node) { + if (mwifiex_send_cmd_sync(priv, + HostCmd_CMD_UAP_STA_DEAUTH, + HostCmd_ACT_GEN_SET, 0, + sta_node->mac_addr)) + return -1; + mwifiex_uap_del_sta_data(priv, sta_node); + } + } + + return 0; +} + static int mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) { @@ -1859,6 +1904,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, int i, offset, ret; struct ieee80211_channel *chan; struct ieee_types_header *ie; + struct mwifiex_user_scan_cfg *user_scan_cfg; wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); @@ -1869,20 +1915,22 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, return -EBUSY; } - if (priv->user_scan_cfg) { + /* Block the scan request if a scan operation, or the scan cleanup done + * when the interface is disabled, is still in progress + */ + if (priv->scan_request || priv->scan_aborting) { dev_err(priv->adapter->dev, "cmd: Scan already in process..\n"); return -EBUSY; } - priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), - GFP_KERNEL); - if (!priv->user_scan_cfg) + user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL); + if (!user_scan_cfg) return -ENOMEM; priv->scan_request = request; - priv->user_scan_cfg->num_ssids = request->n_ssids; - priv->user_scan_cfg->ssid_list = request->ssids; + user_scan_cfg->num_ssids = request->n_ssids; + user_scan_cfg->ssid_list = request->ssids; if (request->ie && request->ie_len) { offset = 0; @@ -1902,25 +1950,25 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, for (i = 0; i < min_t(u32, request->n_channels, MWIFIEX_USER_SCAN_CHAN_MAX); i++) { chan = request->channels[i]; - priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; - priv->user_scan_cfg->chan_list[i].radio_type = chan->band; + user_scan_cfg->chan_list[i].chan_number = chan->hw_value; + user_scan_cfg->chan_list[i].radio_type = chan->band; if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) - priv->user_scan_cfg->chan_list[i].scan_type = + user_scan_cfg->chan_list[i].scan_type = MWIFIEX_SCAN_TYPE_PASSIVE; else - priv->user_scan_cfg->chan_list[i].scan_type = + user_scan_cfg->chan_list[i].scan_type = MWIFIEX_SCAN_TYPE_ACTIVE; - priv->user_scan_cfg->chan_list[i].scan_time = 0; + user_scan_cfg->chan_list[i].scan_time = 0; } - ret = mwifiex_scan_networks(priv, priv->user_scan_cfg); + ret = mwifiex_scan_networks(priv, user_scan_cfg); + kfree(user_scan_cfg); if (ret) { dev_err(priv->adapter->dev, "scan failed: %d\n", ret); + priv->scan_aborting = false; priv->scan_request = NULL; - kfree(priv->user_scan_cfg); - priv->user_scan_cfg = NULL; return ret; } @@ -2419,6 +2467,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .change_beacon = mwifiex_cfg80211_change_beacon,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, .set_antenna = mwifiex_cfg80211_set_antenna, + .del_station = mwifiex_cfg80211_del_station, #ifdef CONFIG_PM .suspend = mwifiex_cfg80211_suspend, .resume = mwifiex_cfg80211_resume, @@ -2426,6 +2475,16 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { #endif }; +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support mwifiex_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT, + .n_patterns = MWIFIEX_MAX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, + .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, +}; +#endif + /* * This function registers the device with CFG802.11 subsystem. * @@ -2483,11 +2542,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom); #ifdef CONFIG_PM - wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT; - wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS; - wiphy->wowlan.pattern_min_len = 1; - wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN; - wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN; + wiphy->wowlan = &mwifiex_wowlan_support; #endif wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 26755d9acb55..2d761477d15e 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -570,6 +570,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_UAP_SYS_CONFIG: case HostCmd_CMD_UAP_BSS_START: case HostCmd_CMD_UAP_BSS_STOP: + case HostCmd_CMD_UAP_STA_DEAUTH: ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action, cmd_oid, data_buf, cmd_ptr); diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 1f7578d553ec..d6ada7354c14 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -271,6 +271,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075 #define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 +#define HostCmd_CMD_CFG_DATA 0x008f #define HostCmd_CMD_VERSION_EXT 0x0097 #define HostCmd_CMD_MEF_CFG 0x009a #define HostCmd_CMD_RSSI_INFO 0x00a4 @@ -279,6 +280,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_UAP_SYS_CONFIG 0x00b0 #define HostCmd_CMD_UAP_BSS_START 0x00b1 #define HostCmd_CMD_UAP_BSS_STOP 0x00b2 +#define HostCmd_CMD_UAP_STA_DEAUTH 0x00b5 #define HostCmd_CMD_11N_CFG 0x00cd #define HostCmd_CMD_11N_ADDBA_REQ 0x00ce #define HostCmd_CMD_11N_ADDBA_RSP 0x00cf @@ -464,6 +466,8 @@ enum P2P_MODES { #define MWIFIEX_CRITERIA_UNICAST BIT(1) #define MWIFIEX_CRITERIA_MULTICAST BIT(3) +#define CFG_DATA_TYPE_CAL 2 + struct mwifiex_ie_types_header { __le16 type; __le16 len; @@ -1197,6 +1201,11 @@ struct host_cmd_ds_amsdu_aggr_ctrl { __le16 curr_buf_size; } __packed; +struct host_cmd_ds_sta_deauth { + u8 mac[ETH_ALEN]; + __le16 reason; +} __packed; + struct mwifiex_ie_types_wmm_param_set { struct mwifiex_ie_types_header header; u8 wmm_ie[1]; @@ -1573,6 +1582,12 @@ struct mwifiex_ie_list { struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX]; } __packed; +struct host_cmd_ds_802_11_cfg_data { + __le16 action; + __le16 type; + __le16 data_len; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -1630,7 +1645,9 @@ struct host_cmd_ds_command { struct host_cmd_ds_802_11_eeprom_access eeprom; struct host_cmd_ds_802_11_subsc_evt subsc_evt; 
struct host_cmd_ds_sys_config uap_sys_config; + struct host_cmd_ds_sta_deauth sta_deauth; struct host_cmd_11ac_vht_cfg vht_cfg; + struct host_cmd_ds_802_11_cfg_data cfg_data; } params; } __packed; diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 9f44fda19db9..c7f11c0c3bb7 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -59,6 +59,9 @@ static void scan_delay_timer_fn(unsigned long data) struct cmd_ctrl_node *cmd_node, *tmp_node; unsigned long flags; + if (adapter->surprise_removed) + return; + if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { /* * Abort scan operation by cancelling all pending scan @@ -78,19 +81,13 @@ static void scan_delay_timer_fn(unsigned long data) adapter->empty_tx_q_cnt = 0; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); - if (priv->user_scan_cfg) { - if (priv->scan_request) { - dev_dbg(priv->adapter->dev, - "info: aborting scan\n"); - cfg80211_scan_done(priv->scan_request, 1); - priv->scan_request = NULL; - } else { - dev_dbg(priv->adapter->dev, - "info: scan already aborted\n"); - } - - kfree(priv->user_scan_cfg); - priv->user_scan_cfg = NULL; + if (priv->scan_request) { + dev_dbg(adapter->dev, "info: aborting scan\n"); + cfg80211_scan_done(priv->scan_request, 1); + priv->scan_request = NULL; + } else { + priv->scan_aborting = false; + dev_dbg(adapter->dev, "info: scan already aborted\n"); } goto done; } @@ -447,23 +444,29 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) } /* - * This function frees the adapter structure. + * This function performs cleanup for adapter structure. * - * The freeing operation is done recursively, by canceling all - * pending commands, freeing the member buffers previously - * allocated (command buffers, scan table buffer, sleep confirm - * command buffer), stopping the timers and calling the cleanup - * routines for every interface, before the actual adapter - * structure is freed. + * The cleanup is done recursively, by canceling all pending + * commands, freeing the member buffers previously allocated + * (command buffers, scan table buffer, sleep confirm command + * buffer), stopping the timers and calling the cleanup routines + * for every interface. 
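Several of the mwifiex hunks in this patch (the cfg80211 scan handler, the scan-delay timer above, mwifiex_close and the scan completion path) replace the old user_scan_cfg bookkeeping with a scan_request pointer plus a scan_aborting flag: closing the interface mid-scan reports the scan as aborted and sets the flag, the completion paths clear it, and new scans are refused while either is set. The following is a stand-alone toy model of that life cycle, with illustrative names only and no claim to match the driver's exact structures.

/* Toy model of the scan_request/scan_aborting life cycle; not mwifiex code. */
#include <assert.h>
#include <stdbool.h>

struct scan_state {
	bool request_pending;	/* stands in for priv->scan_request != NULL */
	bool scan_aborting;
};

static int start_scan(struct scan_state *s)
{
	if (s->request_pending || s->scan_aborting)
		return -1;			/* -EBUSY in the driver */
	s->request_pending = true;
	return 0;
}

static void iface_stop(struct scan_state *s)
{
	if (s->request_pending) {		/* report "aborted" to cfg80211 */
		s->request_pending = false;
		s->scan_aborting = true;	/* fw-side cleanup still in flight */
	}
}

static void scan_complete(struct scan_state *s)
{
	if (s->request_pending)
		s->request_pending = false;	/* normal completion */
	else
		s->scan_aborting = false;	/* aborted scan finally drained */
}

int main(void)
{
	struct scan_state s = { false, false };

	assert(start_scan(&s) == 0);
	iface_stop(&s);
	assert(start_scan(&s) == -1);	/* still draining: refused */
	scan_complete(&s);
	assert(start_scan(&s) == 0);	/* flag cleared, scans allowed again */
	return 0;
}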
*/ static void -mwifiex_free_adapter(struct mwifiex_adapter *adapter) +mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) { + int i; + if (!adapter) { pr_err("%s: adapter is NULL\n", __func__); return; } + for (i = 0; i < adapter->priv_num; i++) { + if (adapter->priv[i]) + del_timer_sync(&adapter->priv[i]->scan_delay_timer); + } + mwifiex_cancel_all_pending_cmd(adapter); /* Free lock variables */ @@ -684,7 +687,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) int ret = -EINPROGRESS; struct mwifiex_private *priv; s32 i; - unsigned long flags; struct sk_buff *skb; /* mwifiex already shutdown */ @@ -719,7 +721,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) } } - spin_lock_irqsave(&adapter->mwifiex_lock, flags); + spin_lock(&adapter->mwifiex_lock); if (adapter->if_ops.data_complete) { while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) { @@ -733,10 +735,9 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) } } - /* Free adapter structure */ - mwifiex_free_adapter(adapter); + mwifiex_adapter_cleanup(adapter); - spin_unlock_irqrestore(&adapter->mwifiex_lock, flags); + spin_unlock(&adapter->mwifiex_lock); /* Notify completion */ ret = mwifiex_shutdown_fw_complete(adapter); diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 6bcb66e6e97c..122175af18c6 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -919,9 +919,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memcpy(&priv->curr_bss_params.data_rates, &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates); - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n", - adhoc_start->data_rate[0], adhoc_start->data_rate[1], - adhoc_start->data_rate[2], adhoc_start->data_rate[3]); + dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n", + adhoc_start->data_rate); dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n"); diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 2eb88ea9acf7..5bc7ef8d04d6 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -25,6 +25,8 @@ #define VERSION "1.0" const char driver_version[] = "mwifiex " VERSION " (%s) "; +static char *cal_data_cfg; +module_param(cal_data_cfg, charp, 0); /* * This function registers the device and performs all the necessary @@ -336,6 +338,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) dev_notice(adapter->dev, "WLAN FW is active\n"); + if (cal_data_cfg) { + if ((request_firmware(&adapter->cal_data, cal_data_cfg, + adapter->dev)) < 0) + dev_err(adapter->dev, + "Cal data request_firmware() failed\n"); + } + adapter->init_wait_q_woken = false; ret = mwifiex_init_fw(adapter); if (ret == -1) { @@ -390,6 +399,10 @@ err_init_fw: pr_debug("info: %s: unregister device\n", __func__); adapter->if_ops.unregister_dev(adapter); done: + if (adapter->cal_data) { + release_firmware(adapter->cal_data); + adapter->cal_data = NULL; + } release_firmware(adapter->firmware); complete(&adapter->fw_load); return; @@ -436,6 +449,7 @@ mwifiex_close(struct net_device *dev) dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; + priv->scan_aborting = true; } return 0; diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 4ef67fca06d3..0832c2437daf 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ 
b/drivers/net/wireless/mwifiex/main.h @@ -492,7 +492,6 @@ struct mwifiex_private { struct semaphore async_sem; u8 report_scan_result; struct cfg80211_scan_request *scan_request; - struct mwifiex_user_scan_cfg *user_scan_cfg; u8 cfg_bssid[6]; struct wps wps; u8 scan_block; @@ -510,6 +509,7 @@ struct mwifiex_private { u8 ap_11ac_enabled; u32 mgmt_frame_mask; struct mwifiex_roc_cfg roc_cfg; + bool scan_aborting; }; enum mwifiex_ba_status { @@ -730,6 +730,7 @@ struct mwifiex_adapter { u16 max_mgmt_ie_index; u8 scan_delay_cnt; u8 empty_tx_q_cnt; + const struct firmware *cal_data; /* 11AC */ u32 is_hw_11ac_capable; @@ -1115,6 +1116,8 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, struct cfg80211_beacon_data *data); int mwifiex_del_mgmt_ies(struct mwifiex_private *priv); u8 *mwifiex_11d_code_2_region(u8 code); +void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, + struct mwifiex_sta_node *node); extern const struct ethtool_ops mwifiex_ethtool_ops; diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 9cf5d8f07df8..801b6b728379 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1784,22 +1784,17 @@ check_next_scan: if (priv->report_scan_result) priv->report_scan_result = false; - if (priv->user_scan_cfg) { - if (priv->scan_request) { - dev_dbg(priv->adapter->dev, - "info: notifying scan done\n"); - cfg80211_scan_done(priv->scan_request, 0); - priv->scan_request = NULL; - } else { - dev_dbg(priv->adapter->dev, - "info: scan already aborted\n"); - } - - kfree(priv->user_scan_cfg); - priv->user_scan_cfg = NULL; + if (priv->scan_request) { + dev_dbg(adapter->dev, "info: notifying scan done\n"); + cfg80211_scan_done(priv->scan_request, 0); + priv->scan_request = NULL; + } else { + priv->scan_aborting = false; + dev_dbg(adapter->dev, "info: scan already aborted\n"); } } else { - if (priv->user_scan_cfg && !priv->scan_request) { + if ((priv->scan_aborting && !priv->scan_request) || + priv->scan_block) { spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT; diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 363ba31b58bf..5ee5ed02eccd 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -77,6 +77,17 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; + if (id->driver_data) { + struct mwifiex_sdio_device *data = (void *)id->driver_data; + + card->firmware = data->firmware; + card->reg = data->reg; + card->max_ports = data->max_ports; + card->mp_agg_pkt_limit = data->mp_agg_pkt_limit; + card->supports_sdio_new_mode = data->supports_sdio_new_mode; + card->has_control_mask = data->has_control_mask; + } + sdio_claim_host(func); ret = sdio_enable_func(func); sdio_release_host(func); @@ -251,12 +262,19 @@ static int mwifiex_sdio_resume(struct device *dev) #define SDIO_DEVICE_ID_MARVELL_8787 (0x9119) /* Device ID for SD8797 */ #define SDIO_DEVICE_ID_MARVELL_8797 (0x9129) +/* Device ID for SD8897 */ +#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d) /* WLAN IDs */ static const struct sdio_device_id mwifiex_ids[] = { - {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786)}, - {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)}, - {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786), + 
.driver_data = (unsigned long) &mwifiex_sdio_sd8786}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787), + .driver_data = (unsigned long) &mwifiex_sdio_sd8787}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797), + .driver_data = (unsigned long) &mwifiex_sdio_sd8797}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897), + .driver_data = (unsigned long) &mwifiex_sdio_sd8897}, {}, }; @@ -282,13 +300,13 @@ static struct sdio_driver mwifiex_sdio = { * This function writes data into SDIO card register. */ static int -mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data) +mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data) { struct sdio_mmc_card *card = adapter->card; int ret = -1; sdio_claim_host(card->func); - sdio_writeb(card->func, (u8) data, reg, &ret); + sdio_writeb(card->func, data, reg, &ret); sdio_release_host(card->func); return ret; @@ -298,7 +316,7 @@ mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data) * This function reads data from SDIO card register. */ static int -mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u32 *data) +mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data) { struct sdio_mmc_card *card = adapter->card; int ret = -1; @@ -400,7 +418,40 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) } /* - * This function initializes the IO ports. + * This function is used to initialize IO ports for the + * chipsets supporting SDIO new mode, e.g. SD8897. + */ +static int mwifiex_init_sdio_new_mode(struct mwifiex_adapter *adapter) +{ + u8 reg; + + adapter->ioport = MEM_PORT; + + /* enable sdio new mode */ + if (mwifiex_read_reg(adapter, CARD_CONFIG_2_1_REG, &reg)) + return -1; + if (mwifiex_write_reg(adapter, CARD_CONFIG_2_1_REG, + reg | CMD53_NEW_MODE)) + return -1; + + /* Configure cmd port and enable reading rx length from the register */ + if (mwifiex_read_reg(adapter, CMD_CONFIG_0, &reg)) + return -1; + if (mwifiex_write_reg(adapter, CMD_CONFIG_0, reg | CMD_PORT_RD_LEN_EN)) + return -1; + + /* Enable Dnld/Upld ready auto reset for cmd port after cmd53 is + * completed + */ + if (mwifiex_read_reg(adapter, CMD_CONFIG_1, &reg)) + return -1; + if (mwifiex_write_reg(adapter, CMD_CONFIG_1, reg | CMD_PORT_AUTO_EN)) + return -1; + + return 0; +} + +/* This function initializes the IO ports.
* * The following operations are performed - * - Read the IO ports (0, 1 and 2) @@ -409,10 +460,17 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) */ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) { - u32 reg; + u8 reg; + struct sdio_mmc_card *card = adapter->card; adapter->ioport = 0; + if (card->supports_sdio_new_mode) { + if (mwifiex_init_sdio_new_mode(adapter)) + return -1; + goto cont; + } + /* Read the IO port */ if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, &reg)) adapter->ioport |= (reg & 0xff); @@ -428,19 +486,19 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) adapter->ioport |= ((reg & 0xff) << 16); else return -1; - +cont: pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); /* Set Host interrupt reset to read to clear */ if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, &reg)) mwifiex_write_reg(adapter, HOST_INT_RSR_REG, - reg | SDIO_INT_MASK); + reg | card->reg->sdio_int_mask); else return -1; /* Dnld/Upld ready set to auto reset */ - if (!mwifiex_read_reg(adapter, CARD_MISC_CFG_REG, &reg)) - mwifiex_write_reg(adapter, CARD_MISC_CFG_REG, + if (!mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, &reg)) + mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg, reg | AUTO_RE_ENABLE_INT); else return -1; @@ -486,34 +544,42 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter, static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) { struct sdio_mmc_card *card = adapter->card; - u16 rd_bitmap = card->mp_rd_bitmap; + const struct mwifiex_sdio_card_reg *reg = card->reg; + u32 rd_bitmap = card->mp_rd_bitmap; - dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%04x\n", rd_bitmap); + dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap); - if (!(rd_bitmap & (CTRL_PORT_MASK | DATA_PORT_MASK))) - return -1; + if (card->supports_sdio_new_mode) { + if (!(rd_bitmap & reg->data_port_mask)) + return -1; + } else { + if (!(rd_bitmap & (CTRL_PORT_MASK | reg->data_port_mask))) + return -1; + } - if (card->mp_rd_bitmap & CTRL_PORT_MASK) { - card->mp_rd_bitmap &= (u16) (~CTRL_PORT_MASK); + if ((card->has_control_mask) && + (card->mp_rd_bitmap & CTRL_PORT_MASK)) { + card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK); *port = CTRL_PORT; - dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%04x\n", + dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n", *port, card->mp_rd_bitmap); - } else { - if (card->mp_rd_bitmap & (1 << card->curr_rd_port)) { - card->mp_rd_bitmap &= (u16) - (~(1 << card->curr_rd_port)); - *port = card->curr_rd_port; + return 0; + } - if (++card->curr_rd_port == MAX_PORT) - card->curr_rd_port = 1; - } else { - return -1; - } + if (!(card->mp_rd_bitmap & (1 << card->curr_rd_port))) + return -1; + + /* We are now handling the SDIO data ports */ + card->mp_rd_bitmap &= (u32)(~(1 << card->curr_rd_port)); + *port = card->curr_rd_port; + + if (++card->curr_rd_port == card->max_ports) + card->curr_rd_port = reg->start_rd_port; + + dev_dbg(adapter->dev, + "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n", + *port, rd_bitmap, card->mp_rd_bitmap); - dev_dbg(adapter->dev, - "data: port=%d mp_rd_bitmap=0x%04x -> 0x%04x\n", - *port, rd_bitmap, card->mp_rd_bitmap); - } return 0; } @@ -524,35 +590,45 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) * increased (provided it does not reach the maximum limit, in which * case it is reset to 1) */ -static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port) +static int
mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port) { struct sdio_mmc_card *card = adapter->card; - u16 wr_bitmap = card->mp_wr_bitmap; + const struct mwifiex_sdio_card_reg *reg = card->reg; + u32 wr_bitmap = card->mp_wr_bitmap; - dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%04x\n", wr_bitmap); + dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap); - if (!(wr_bitmap & card->mp_data_port_mask)) + if (card->supports_sdio_new_mode && + !(wr_bitmap & reg->data_port_mask)) { + adapter->data_sent = true; + return -EBUSY; + } else if (!card->supports_sdio_new_mode && + !(wr_bitmap & card->mp_data_port_mask)) { return -1; + } if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) { - card->mp_wr_bitmap &= (u16) (~(1 << card->curr_wr_port)); + card->mp_wr_bitmap &= (u32) (~(1 << card->curr_wr_port)); *port = card->curr_wr_port; - if (++card->curr_wr_port == card->mp_end_port) - card->curr_wr_port = 1; + if (((card->supports_sdio_new_mode) && + (++card->curr_wr_port == card->max_ports)) || + ((!card->supports_sdio_new_mode) && + (++card->curr_wr_port == card->mp_end_port))) + card->curr_wr_port = reg->start_wr_port; } else { adapter->data_sent = true; return -EBUSY; } - if (*port == CTRL_PORT) { - dev_err(adapter->dev, "invalid data port=%d cur port=%d" - " mp_wr_bitmap=0x%04x -> 0x%04x\n", + if ((card->has_control_mask) && (*port == CTRL_PORT)) { + dev_err(adapter->dev, + "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", *port, card->curr_wr_port, wr_bitmap, card->mp_wr_bitmap); return -1; } - dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%04x -> 0x%04x\n", + dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", *port, wr_bitmap, card->mp_wr_bitmap); return 0; @@ -564,11 +640,12 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port) static int mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) { + struct sdio_mmc_card *card = adapter->card; u32 tries; - u32 cs; + u8 cs; for (tries = 0; tries < MAX_POLL_TRIES; tries++) { - if (mwifiex_read_reg(adapter, CARD_STATUS_REG, &cs)) + if (mwifiex_read_reg(adapter, card->reg->poll_reg, &cs)) break; else if ((cs & bits) == bits) return 0; @@ -587,12 +664,14 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) static int mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) { - u32 fws0, fws1; + struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; + u8 fws0, fws1; - if (mwifiex_read_reg(adapter, CARD_FW_STATUS0_REG, &fws0)) + if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0)) return -1; - if (mwifiex_read_reg(adapter, CARD_FW_STATUS1_REG, &fws1)) + if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1)) return -1; *dat = (u16) ((fws1 << 8) | fws0); @@ -608,14 +687,14 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) */ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter) { - u32 host_int_mask; + u8 host_int_mask, host_int_disable = HOST_INT_DISABLE; /* Read back the host_int_mask register */ if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask)) return -1; /* Update with the mask and write back to the register */ - host_int_mask &= ~HOST_INT_DISABLE; + host_int_mask &= ~host_int_disable; if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) { dev_err(adapter->dev, "disable host interrupt failed\n"); @@ -633,8 +712,11 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter 
*adapter) */ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) { + struct sdio_mmc_card *card = adapter->card; + /* Simply write the mask to the register */ - if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, HOST_INT_ENABLE)) { + if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, + card->reg->host_int_enable)) { dev_err(adapter->dev, "enable host interrupt failed\n"); return -1; } @@ -686,11 +768,13 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter, static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, struct mwifiex_fw_image *fw) { + struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; int ret; u8 *firmware = fw->fw_buf; u32 firmware_len = fw->fw_len; u32 offset = 0; - u32 base0, base1; + u8 base0, base1; u8 *fwbuf; u16 len = 0; u32 txlen, tx_blocks = 0, tries; @@ -727,7 +811,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, break; for (tries = 0; tries < MAX_POLL_TRIES; tries++) { - ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_0, + ret = mwifiex_read_reg(adapter, reg->base_0_reg, &base0); if (ret) { dev_err(adapter->dev, @@ -736,7 +820,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, base0, base0); goto done; } - ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_1, + ret = mwifiex_read_reg(adapter, reg->base_1_reg, &base1); if (ret) { dev_err(adapter->dev, @@ -828,10 +912,11 @@ done: static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) { + struct sdio_mmc_card *card = adapter->card; int ret = 0; u16 firmware_stat; u32 tries; - u32 winner_status; + u8 winner_status; /* Wait for firmware initialization event */ for (tries = 0; tries < poll_num; tries++) { @@ -849,7 +934,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, if (ret) { if (mwifiex_read_reg - (adapter, CARD_FW_STATUS0_REG, &winner_status)) + (adapter, card->reg->status_reg_0, &winner_status)) winner_status = 0; if (winner_status) @@ -866,12 +951,12 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; - u32 sdio_ireg; + u8 sdio_ireg; unsigned long flags; - if (mwifiex_read_data_sync(adapter, card->mp_regs, MAX_MP_REGS, - REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, - 0)) { + if (mwifiex_read_data_sync(adapter, card->mp_regs, + card->reg->max_mp_regs, + REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) { dev_err(adapter->dev, "read mp_regs failed\n"); return; } @@ -880,6 +965,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) if (sdio_ireg) { /* * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS + * For SDIO new mode CMD port interrupts + * DN_LD_CMD_PORT_HOST_INT_STATUS and/or + * UP_LD_CMD_PORT_HOST_INT_STATUS * Clear the interrupt status register */ dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg); @@ -1003,11 +1091,11 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, s32 f_aggr_cur = 0; struct sk_buff *skb_deaggr; u32 pind; - u32 pkt_len, pkt_type = 0; + u32 pkt_len, pkt_type, mport; u8 *curr_ptr; u32 rx_len = skb->len; - if (port == CTRL_PORT) { + if ((card->has_control_mask) && (port == CTRL_PORT)) { /* Read the command Resp without aggr */ dev_dbg(adapter->dev, "info: %s: no aggregation for cmd " "response\n", __func__); @@ -1024,7 +1112,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, goto 
rx_curr_single; } - if (card->mp_rd_bitmap & (~((u16) CTRL_PORT_MASK))) { + if ((!card->has_control_mask && (card->mp_rd_bitmap & + card->reg->data_port_mask)) || + (card->has_control_mask && (card->mp_rd_bitmap & + (~((u32) CTRL_PORT_MASK))))) { /* Some more data RX pending */ dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__); @@ -1060,10 +1151,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, if (f_aggr_cur) { dev_dbg(adapter->dev, "info: current packet aggregation\n"); /* Curr pkt can be aggregated */ - MP_RX_AGGR_SETUP(card, skb, port); + mp_rx_aggr_setup(card, skb, port); if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) || - MP_RX_AGGR_PORT_LIMIT_REACHED(card)) { + mp_rx_aggr_port_limit_reached(card)) { dev_dbg(adapter->dev, "info: %s: aggregated packet " "limit reached\n", __func__); /* No more pkts allowed in Aggr buf, rx it */ @@ -1076,11 +1167,28 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n", card->mpa_rx.pkt_cnt); + if (card->supports_sdio_new_mode) { + int i; + u32 port_count; + + for (i = 0, port_count = 0; i < card->max_ports; i++) + if (card->mpa_rx.ports & BIT(i)) + port_count++; + + /* Reading data from "start_port + 0" to "start_port + + * port_count -1", so decrease the count by 1 + */ + port_count--; + mport = (adapter->ioport | SDIO_MPA_ADDR_BASE | + (port_count << 8)) + card->mpa_rx.start_port; + } else { + mport = (adapter->ioport | SDIO_MPA_ADDR_BASE | + (card->mpa_rx.ports << 4)) + + card->mpa_rx.start_port; + } + if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf, - card->mpa_rx.buf_len, - (adapter->ioport | 0x1000 | - (card->mpa_rx.ports << 4)) + - card->mpa_rx.start_port, 1)) + card->mpa_rx.buf_len, mport, 1)) goto error; curr_ptr = card->mpa_rx.buf; @@ -1167,6 +1275,7 @@ error: static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; int ret = 0; u8 sdio_ireg; struct sk_buff *skb; @@ -1175,6 +1284,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) u32 rx_blocks; u16 rx_len; unsigned long flags; + u32 bitmap; + u8 cr; spin_lock_irqsave(&adapter->int_lock, flags); sdio_ireg = adapter->int_status; @@ -1184,10 +1295,60 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (!sdio_ireg) return ret; + /* Following interrupt is only for SDIO new mode */ + if (sdio_ireg & DN_LD_CMD_PORT_HOST_INT_STATUS && adapter->cmd_sent) + adapter->cmd_sent = false; + + /* Following interrupt is only for SDIO new mode */ + if (sdio_ireg & UP_LD_CMD_PORT_HOST_INT_STATUS) { + u32 pkt_type; + + /* read the len of control packet */ + rx_len = card->mp_regs[CMD_RD_LEN_1] << 8; + rx_len |= (u16) card->mp_regs[CMD_RD_LEN_0]; + rx_blocks = DIV_ROUND_UP(rx_len, MWIFIEX_SDIO_BLOCK_SIZE); + if (rx_len <= INTF_HEADER_LEN || + (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) > + MWIFIEX_RX_DATA_BUF_SIZE) + return -1; + rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); + + skb = dev_alloc_skb(rx_len); + if (!skb) + return -1; + + skb_put(skb, rx_len); + + if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data, + skb->len, adapter->ioport | + CMD_PORT_SLCT)) { + dev_err(adapter->dev, + "%s: failed to card_to_host", __func__); + dev_kfree_skb_any(skb); + goto term_cmd; + } + + if ((pkt_type != MWIFIEX_TYPE_CMD) && + (pkt_type != MWIFIEX_TYPE_EVENT)) + dev_err(adapter->dev, + "%s:Received wrong packet on cmd 
port", + __func__); + + mwifiex_decode_rx_packet(adapter, skb, pkt_type); + } + if (sdio_ireg & DN_LD_HOST_INT_STATUS) { - card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8; - card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L]; - dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n", + bitmap = (u32) card->mp_regs[reg->wr_bitmap_l]; + bitmap |= ((u32) card->mp_regs[reg->wr_bitmap_u]) << 8; + if (card->supports_sdio_new_mode) { + bitmap |= + ((u32) card->mp_regs[reg->wr_bitmap_1l]) << 16; + bitmap |= + ((u32) card->mp_regs[reg->wr_bitmap_1u]) << 24; + } + card->mp_wr_bitmap = bitmap; + + dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n", card->mp_wr_bitmap); if (adapter->data_sent && (card->mp_wr_bitmap & card->mp_data_port_mask)) { @@ -1200,11 +1361,11 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) /* As firmware will not generate download ready interrupt if the port updated is command port only, cmd_sent should be done for any SDIO interrupt. */ - if (adapter->cmd_sent) { + if (card->has_control_mask && adapter->cmd_sent) { /* Check if firmware has attach buffer at command port and update just that in wr_bit_map. */ card->mp_wr_bitmap |= - (u16) card->mp_regs[WR_BITMAP_L] & CTRL_PORT_MASK; + (u32) card->mp_regs[reg->wr_bitmap_l] & CTRL_PORT_MASK; if (card->mp_wr_bitmap & CTRL_PORT_MASK) adapter->cmd_sent = false; } @@ -1212,9 +1373,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); if (sdio_ireg & UP_LD_HOST_INT_STATUS) { - card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8; - card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L]; - dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n", + bitmap = (u32) card->mp_regs[reg->rd_bitmap_l]; + bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8; + if (card->supports_sdio_new_mode) { + bitmap |= + ((u32) card->mp_regs[reg->rd_bitmap_1l]) << 16; + bitmap |= + ((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24; + } + card->mp_rd_bitmap = bitmap; + dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n", card->mp_rd_bitmap); while (true) { @@ -1224,8 +1392,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) "info: no more rd_port available\n"); break; } - len_reg_l = RD_LEN_P0_L + (port << 1); - len_reg_u = RD_LEN_P0_U + (port << 1); + len_reg_l = reg->rd_len_p0_l + (port << 1); + len_reg_u = reg->rd_len_p0_u + (port << 1); rx_len = ((u16) card->mp_regs[len_reg_u]) << 8; rx_len |= (u16) card->mp_regs[len_reg_l]; dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n", @@ -1257,37 +1425,33 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb, port)) { - u32 cr = 0; - dev_err(adapter->dev, "card_to_host_mpa failed:" " int status=%#x\n", sdio_ireg); - if (mwifiex_read_reg(adapter, - CONFIGURATION_REG, &cr)) - dev_err(adapter->dev, - "read CFG reg failed\n"); - - dev_dbg(adapter->dev, - "info: CFG reg val = %d\n", cr); - if (mwifiex_write_reg(adapter, - CONFIGURATION_REG, - (cr | 0x04))) - dev_err(adapter->dev, - "write CFG reg failed\n"); - - dev_dbg(adapter->dev, "info: write success\n"); - if (mwifiex_read_reg(adapter, - CONFIGURATION_REG, &cr)) - dev_err(adapter->dev, - "read CFG reg failed\n"); - - dev_dbg(adapter->dev, - "info: CFG reg val =%x\n", cr); - return -1; + goto term_cmd; } } } return 0; + +term_cmd: + /* terminate cmd */ + if (mwifiex_read_reg(adapter, CONFIGURATION_REG, 
&cr)) + dev_err(adapter->dev, "read CFG reg failed\n"); + else + dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr); + + if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04))) + dev_err(adapter->dev, "write CFG reg failed\n"); + else + dev_dbg(adapter->dev, "info: write success\n"); + + if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr)) + dev_err(adapter->dev, "read CFG reg failed\n"); + else + dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr); + + return -1; } /* @@ -1305,7 +1469,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) * and return. */ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, - u8 *payload, u32 pkt_len, u8 port, + u8 *payload, u32 pkt_len, u32 port, u32 next_pkt_len) { struct sdio_mmc_card *card = adapter->card; @@ -1314,8 +1478,11 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, s32 f_send_cur_buf = 0; s32 f_precopy_cur_buf = 0; s32 f_postcopy_cur_buf = 0; + u32 mport; - if ((!card->mpa_tx.enabled) || (port == CTRL_PORT)) { + if (!card->mpa_tx.enabled || + (card->has_control_mask && (port == CTRL_PORT)) || + (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) { dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n", __func__); @@ -1329,7 +1496,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, __func__); if (MP_TX_AGGR_IN_PROGRESS(card)) { - if (!MP_TX_AGGR_PORT_LIMIT_REACHED(card) && + if (!mp_tx_aggr_port_limit_reached(card) && MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) { f_precopy_cur_buf = 1; @@ -1342,7 +1509,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, /* No room in Aggr buf, send it */ f_send_aggr_buf = 1; - if (MP_TX_AGGR_PORT_LIMIT_REACHED(card) || + if (mp_tx_aggr_port_limit_reached(card) || !(card->mp_wr_bitmap & (1 << card->curr_wr_port))) f_send_cur_buf = 1; @@ -1381,7 +1548,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port); if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) || - MP_TX_AGGR_PORT_LIMIT_REACHED(card)) + mp_tx_aggr_port_limit_reached(card)) /* No more pkts allowed in Aggr buf, send it */ f_send_aggr_buf = 1; } @@ -1390,11 +1557,28 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n", __func__, card->mpa_tx.start_port, card->mpa_tx.ports); + if (card->supports_sdio_new_mode) { + u32 port_count; + int i; + + for (i = 0, port_count = 0; i < card->max_ports; i++) + if (card->mpa_tx.ports & BIT(i)) + port_count++; + + /* Writing data from "start_port + 0" to "start_port + + * port_count -1", so decrease the count by 1 + */ + port_count--; + mport = (adapter->ioport | SDIO_MPA_ADDR_BASE | + (port_count << 8)) + card->mpa_tx.start_port; + } else { + mport = (adapter->ioport | SDIO_MPA_ADDR_BASE | + (card->mpa_tx.ports << 4)) + + card->mpa_tx.start_port; + } + ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, - card->mpa_tx.buf_len, - (adapter->ioport | 0x1000 | - (card->mpa_tx.ports << 4)) + - card->mpa_tx.start_port); + card->mpa_tx.buf_len, mport); MP_TX_AGGR_BUF_RESET(card); } @@ -1434,7 +1618,7 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, int ret; u32 buf_block_len; u32 blk_size; - u8 port = CTRL_PORT; + u32 port = CTRL_PORT; u8 *payload = (u8 *)skb->data; u32 pkt_len = skb->len; @@ -1465,6 +1649,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, pkt_len > MWIFIEX_UPLD_SIZE) 
dev_err(adapter->dev, "%s: payload=%p, nb=%d\n", __func__, payload, pkt_len); + + if (card->supports_sdio_new_mode) + port = CMD_PORT_SLCT; } /* Transfer data to card */ @@ -1586,18 +1773,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->dev = &func->dev; - switch (func->device) { - case SDIO_DEVICE_ID_MARVELL_8786: - strcpy(adapter->fw_name, SD8786_DEFAULT_FW_NAME); - break; - case SDIO_DEVICE_ID_MARVELL_8797: - strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME); - break; - case SDIO_DEVICE_ID_MARVELL_8787: - default: - strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME); - break; - } + strcpy(adapter->fw_name, card->firmware); return 0; @@ -1626,8 +1802,9 @@ disable_func: static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; int ret; - u32 sdio_ireg; + u8 sdio_ireg; /* * Read the HOST_INT_STATUS_REG for ACK the first interrupt got @@ -1645,30 +1822,35 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) /* Initialize SDIO variables in card */ card->mp_rd_bitmap = 0; card->mp_wr_bitmap = 0; - card->curr_rd_port = 1; - card->curr_wr_port = 1; + card->curr_rd_port = reg->start_rd_port; + card->curr_wr_port = reg->start_wr_port; - card->mp_data_port_mask = DATA_PORT_MASK; + card->mp_data_port_mask = reg->data_port_mask; card->mpa_tx.buf_len = 0; card->mpa_tx.pkt_cnt = 0; card->mpa_tx.start_port = 0; card->mpa_tx.enabled = 1; - card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; + card->mpa_tx.pkt_aggr_limit = card->mp_agg_pkt_limit; card->mpa_rx.buf_len = 0; card->mpa_rx.pkt_cnt = 0; card->mpa_rx.start_port = 0; card->mpa_rx.enabled = 1; - card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT; + card->mpa_rx.pkt_aggr_limit = card->mp_agg_pkt_limit; /* Allocate buffers for SDIO MP-A */ - card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL); + card->mp_regs = kzalloc(reg->max_mp_regs, GFP_KERNEL); if (!card->mp_regs) return -ENOMEM; + /* Allocate skb pointer buffers */ + card->mpa_rx.skb_arr = kzalloc((sizeof(void *)) * + card->mp_agg_pkt_limit, GFP_KERNEL); + card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) * + card->mp_agg_pkt_limit, GFP_KERNEL); ret = mwifiex_alloc_sdio_mpa_buffers(adapter, SDIO_MP_TX_AGGR_DEF_BUF_SIZE, SDIO_MP_RX_AGGR_DEF_BUF_SIZE); @@ -1705,6 +1887,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter) struct sdio_mmc_card *card = adapter->card; kfree(card->mp_regs); + kfree(card->mpa_rx.skb_arr); + kfree(card->mpa_rx.len_arr); kfree(card->mpa_tx.buf); kfree(card->mpa_rx.buf); } @@ -1716,16 +1900,20 @@ static void mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) { struct sdio_mmc_card *card = adapter->card; + const struct mwifiex_sdio_card_reg *reg = card->reg; int i; card->mp_end_port = port; - card->mp_data_port_mask = DATA_PORT_MASK; + card->mp_data_port_mask = reg->data_port_mask; - for (i = 1; i <= MAX_PORT - card->mp_end_port; i++) - card->mp_data_port_mask &= ~(1 << (MAX_PORT - i)); + if (reg->start_wr_port) { + for (i = 1; i <= card->max_ports - card->mp_end_port; i++) + card->mp_data_port_mask &= + ~(1 << (card->max_ports - i)); + } - card->curr_wr_port = 1; + card->curr_wr_port = reg->start_wr_port; dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n", port, card->mp_data_port_mask); @@ -1831,3 +2019,4 @@ MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); 
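The mwifiex hunks above replace the hard-coded SD87xx constants (DATA_PORT_MASK, MAX_PORT, SDIO_MP_AGGR_DEF_PKT_LIMIT) with per-chip values read from card->reg, and mwifiex_update_mp_end_port() now trims the data-port mask relative to card->max_ports. The stand-alone C sketch below uses hypothetical values mirroring the SD87xx defaults to show how that trimming loop behaves; it is an illustration of the technique only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in values mirroring the SD87xx defaults in the patch:
 * 16 ports, ports 1..15 usable for data (bit 0 is the control port). */
#define EXAMPLE_MAX_PORTS      16
#define EXAMPLE_DATA_PORT_MASK 0x0000fffeU

/* Clear every port at or above mp_end_port from the data-port mask,
 * the same loop shape mwifiex_update_mp_end_port() uses above. */
static uint32_t trim_port_mask(uint32_t mask, unsigned int max_ports,
			       unsigned int mp_end_port)
{
	unsigned int i;

	for (i = 1; i <= max_ports - mp_end_port; i++)
		mask &= ~(1U << (max_ports - i));

	return mask;
}

int main(void)
{
	/* e.g. firmware reports that only ports below 12 may carry data */
	printf("mask=0x%08x\n",
	       trim_port_mask(EXAMPLE_DATA_PORT_MASK, EXAMPLE_MAX_PORTS, 12));
	return 0;
}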
+MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index 8cc5468654b4..6d51dfdd8251 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h @@ -32,30 +32,37 @@ #define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin" #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" #define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" +#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 #define REG_PORT 0 -#define RD_BITMAP_L 0x04 -#define RD_BITMAP_U 0x05 -#define WR_BITMAP_L 0x06 -#define WR_BITMAP_U 0x07 -#define RD_LEN_P0_L 0x08 -#define RD_LEN_P0_U 0x09 #define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff #define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000 +#define SDIO_MPA_ADDR_BASE 0x1000 #define CTRL_PORT 0 #define CTRL_PORT_MASK 0x0001 -#define DATA_PORT_MASK 0xfffe -#define MAX_MP_REGS 64 -#define MAX_PORT 16 - -#define SDIO_MP_AGGR_DEF_PKT_LIMIT 8 +#define CMD_PORT_UPLD_INT_MASK (0x1U<<6) +#define CMD_PORT_DNLD_INT_MASK (0x1U<<7) +#define HOST_TERM_CMD53 (0x1U << 2) +#define REG_PORT 0 +#define MEM_PORT 0x10000 +#define CMD_RD_LEN_0 0xB4 +#define CMD_RD_LEN_1 0xB5 +#define CARD_CONFIG_2_1_REG 0xCD +#define CMD53_NEW_MODE (0x1U << 0) +#define CMD_CONFIG_0 0xB8 +#define CMD_PORT_RD_LEN_EN (0x1U << 2) +#define CMD_CONFIG_1 0xB9 +#define CMD_PORT_AUTO_EN (0x1U << 0) +#define CMD_PORT_SLCT 0x8000 +#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U) +#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U) #define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */ @@ -75,14 +82,8 @@ /* Host Control Registers : Configuration */ #define CONFIGURATION_REG 0x00 -/* Host Control Registers : Host without Command 53 finish host*/ -#define HOST_TO_CARD_EVENT (0x1U << 3) -/* Host Control Registers : Host without Command 53 finish host */ -#define HOST_WO_CMD53_FINISH_HOST (0x1U << 2) /* Host Control Registers : Host power up */ #define HOST_POWER_UP (0x1U << 1) -/* Host Control Registers : Host power down */ -#define HOST_POWER_DOWN (0x1U << 0) /* Host Control Registers : Host interrupt mask */ #define HOST_INT_MASK_REG 0x02 @@ -90,8 +91,7 @@ #define UP_LD_HOST_INT_MASK (0x1U) /* Host Control Registers : Download host interrupt mask */ #define DN_LD_HOST_INT_MASK (0x2U) -/* Enable Host interrupt mask */ -#define HOST_INT_ENABLE (UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK) + /* Disable Host interrupt mask */ #define HOST_INT_DISABLE 0xff @@ -104,74 +104,15 @@ /* Host Control Registers : Host interrupt RSR */ #define HOST_INT_RSR_REG 0x01 -/* Host Control Registers : Upload host interrupt RSR */ -#define UP_LD_HOST_INT_RSR (0x1U) -#define SDIO_INT_MASK 0x3F /* Host Control Registers : Host interrupt status */ #define HOST_INT_STATUS_REG 0x28 -/* Host Control Registers : Upload CRC error */ -#define UP_LD_CRC_ERR (0x1U << 2) -/* Host Control Registers : Upload restart */ -#define UP_LD_RESTART (0x1U << 1) -/* Host Control Registers : Download restart */ -#define DN_LD_RESTART (0x1U << 0) - -/* Card Control Registers : Card status register */ -#define CARD_STATUS_REG 0x30 + /* Card Control Registers : Card I/O ready */ #define CARD_IO_READY (0x1U << 3) -/* Card Control Registers : CIS card ready */ -#define CIS_CARD_RDY (0x1U << 2) -/* Card Control Registers : Upload card ready */ -#define UP_LD_CARD_RDY (0x1U << 1) /* Card Control Registers : Download card ready */ #define DN_LD_CARD_RDY (0x1U << 0) -/* Card Control Registers : Host interrupt mask register */ -#define 
HOST_INTERRUPT_MASK_REG 0x34 -/* Card Control Registers : Host power interrupt mask */ -#define HOST_POWER_INT_MASK (0x1U << 3) -/* Card Control Registers : Abort card interrupt mask */ -#define ABORT_CARD_INT_MASK (0x1U << 2) -/* Card Control Registers : Upload card interrupt mask */ -#define UP_LD_CARD_INT_MASK (0x1U << 1) -/* Card Control Registers : Download card interrupt mask */ -#define DN_LD_CARD_INT_MASK (0x1U << 0) - -/* Card Control Registers : Card interrupt status register */ -#define CARD_INTERRUPT_STATUS_REG 0x38 -/* Card Control Registers : Power up interrupt */ -#define POWER_UP_INT (0x1U << 4) -/* Card Control Registers : Power down interrupt */ -#define POWER_DOWN_INT (0x1U << 3) - -/* Card Control Registers : Card interrupt RSR register */ -#define CARD_INTERRUPT_RSR_REG 0x3c -/* Card Control Registers : Power up RSR */ -#define POWER_UP_RSR (0x1U << 4) -/* Card Control Registers : Power down RSR */ -#define POWER_DOWN_RSR (0x1U << 3) - -/* Card Control Registers : Miscellaneous Configuration Register */ -#define CARD_MISC_CFG_REG 0x6C - -/* Host F1 read base 0 */ -#define HOST_F1_RD_BASE_0 0x0040 -/* Host F1 read base 1 */ -#define HOST_F1_RD_BASE_1 0x0041 -/* Host F1 card ready */ -#define HOST_F1_CARD_RDY 0x0020 - -/* Firmware status 0 register */ -#define CARD_FW_STATUS0_REG 0x60 -/* Firmware status 1 register */ -#define CARD_FW_STATUS1_REG 0x61 -/* Rx length register */ -#define CARD_RX_LEN_REG 0x62 -/* Rx unit register */ -#define CARD_RX_UNIT_REG 0x63 - /* Max retry number of CMD53 write */ #define MAX_WRITE_IOMEM_RETRY 2 @@ -192,7 +133,8 @@ if (a->mpa_tx.start_port <= port) \ a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt)); \ else \ - a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \ + a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+ \ + (a->max_ports - \ a->mp_end_port))); \ a->mpa_tx.pkt_cnt++; \ } while (0) @@ -201,12 +143,6 @@ #define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \ (a->mpa_tx.pkt_cnt == a->mpa_tx.pkt_aggr_limit) -/* SDIO Tx aggregation port limit ? */ -#define MP_TX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_wr_port < \ - a->mpa_tx.start_port) && (((MAX_PORT - \ - a->mpa_tx.start_port) + a->curr_wr_port) >= \ - SDIO_MP_AGGR_DEF_PKT_LIMIT)) - /* Reset SDIO Tx aggregation buffer parameters */ #define MP_TX_AGGR_BUF_RESET(a) do { \ a->mpa_tx.pkt_cnt = 0; \ @@ -219,12 +155,6 @@ #define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \ (a->mpa_rx.pkt_cnt == a->mpa_rx.pkt_aggr_limit) -/* SDIO Tx aggregation port limit ? */ -#define MP_RX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_rd_port < \ - a->mpa_rx.start_port) && (((MAX_PORT - \ - a->mpa_rx.start_port) + a->curr_rd_port) >= \ - SDIO_MP_AGGR_DEF_PKT_LIMIT)) - /* SDIO Rx aggregation in progress ? 
*/ #define MP_RX_AGGR_IN_PROGRESS(a) (a->mpa_rx.pkt_cnt > 0) @@ -232,20 +162,6 @@ #define MP_RX_AGGR_BUF_HAS_ROOM(a, rx_len) \ ((a->mpa_rx.buf_len+rx_len) <= a->mpa_rx.buf_size) -/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */ -#define MP_RX_AGGR_SETUP(a, skb, port) do { \ - a->mpa_rx.buf_len += skb->len; \ - if (!a->mpa_rx.pkt_cnt) \ - a->mpa_rx.start_port = port; \ - if (a->mpa_rx.start_port <= port) \ - a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt)); \ - else \ - a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt+1)); \ - a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \ - a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \ - a->mpa_rx.pkt_cnt++; \ -} while (0) - /* Reset SDIO Rx aggregation buffer parameters */ #define MP_RX_AGGR_BUF_RESET(a) do { \ a->mpa_rx.pkt_cnt = 0; \ @@ -254,14 +170,13 @@ a->mpa_rx.start_port = 0; \ } while (0) - /* data structure for SDIO MPA TX */ struct mwifiex_sdio_mpa_tx { /* multiport tx aggregation buffer pointer */ u8 *buf; u32 buf_len; u32 pkt_cnt; - u16 ports; + u32 ports; u16 start_port; u8 enabled; u32 buf_size; @@ -272,11 +187,11 @@ struct mwifiex_sdio_mpa_rx { u8 *buf; u32 buf_len; u32 pkt_cnt; - u16 ports; + u32 ports; u16 start_port; - struct sk_buff *skb_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT]; - u32 len_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT]; + struct sk_buff **skb_arr; + u32 *len_arr; u8 enabled; u32 buf_size; @@ -286,15 +201,47 @@ struct mwifiex_sdio_mpa_rx { int mwifiex_bus_register(void); void mwifiex_bus_unregister(void); +struct mwifiex_sdio_card_reg { + u8 start_rd_port; + u8 start_wr_port; + u8 base_0_reg; + u8 base_1_reg; + u8 poll_reg; + u8 host_int_enable; + u8 status_reg_0; + u8 status_reg_1; + u8 sdio_int_mask; + u32 data_port_mask; + u8 max_mp_regs; + u8 rd_bitmap_l; + u8 rd_bitmap_u; + u8 rd_bitmap_1l; + u8 rd_bitmap_1u; + u8 wr_bitmap_l; + u8 wr_bitmap_u; + u8 wr_bitmap_1l; + u8 wr_bitmap_1u; + u8 rd_len_p0_l; + u8 rd_len_p0_u; + u8 card_misc_cfg_reg; +}; + struct sdio_mmc_card { struct sdio_func *func; struct mwifiex_adapter *adapter; - u16 mp_rd_bitmap; - u16 mp_wr_bitmap; + const char *firmware; + const struct mwifiex_sdio_card_reg *reg; + u8 max_ports; + u8 mp_agg_pkt_limit; + bool supports_sdio_new_mode; + bool has_control_mask; + + u32 mp_rd_bitmap; + u32 mp_wr_bitmap; u16 mp_end_port; - u16 mp_data_port_mask; + u32 mp_data_port_mask; u8 curr_rd_port; u8 curr_wr_port; @@ -305,6 +252,98 @@ struct sdio_mmc_card { struct mwifiex_sdio_mpa_rx mpa_rx; }; +struct mwifiex_sdio_device { + const char *firmware; + const struct mwifiex_sdio_card_reg *reg; + u8 max_ports; + u8 mp_agg_pkt_limit; + bool supports_sdio_new_mode; + bool has_control_mask; +}; + +static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = { + .start_rd_port = 1, + .start_wr_port = 1, + .base_0_reg = 0x0040, + .base_1_reg = 0x0041, + .poll_reg = 0x30, + .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK, + .status_reg_0 = 0x60, + .status_reg_1 = 0x61, + .sdio_int_mask = 0x3f, + .data_port_mask = 0x0000fffe, + .max_mp_regs = 64, + .rd_bitmap_l = 0x04, + .rd_bitmap_u = 0x05, + .wr_bitmap_l = 0x06, + .wr_bitmap_u = 0x07, + .rd_len_p0_l = 0x08, + .rd_len_p0_u = 0x09, + .card_misc_cfg_reg = 0x6c, +}; + +static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { + .start_rd_port = 0, + .start_wr_port = 0, + .base_0_reg = 0x60, + .base_1_reg = 0x61, + .poll_reg = 0x50, + .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK | + CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK, + .status_reg_0 = 0xc0, + .status_reg_1 = 0xc1, + .sdio_int_mask = 
0xff, + .data_port_mask = 0xffffffff, + .max_mp_regs = 184, + .rd_bitmap_l = 0x04, + .rd_bitmap_u = 0x05, + .rd_bitmap_1l = 0x06, + .rd_bitmap_1u = 0x07, + .wr_bitmap_l = 0x08, + .wr_bitmap_u = 0x09, + .wr_bitmap_1l = 0x0a, + .wr_bitmap_1u = 0x0b, + .rd_len_p0_l = 0x0c, + .rd_len_p0_u = 0x0d, + .card_misc_cfg_reg = 0xcc, +}; + +static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = { + .firmware = SD8786_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd87xx, + .max_ports = 16, + .mp_agg_pkt_limit = 8, + .supports_sdio_new_mode = false, + .has_control_mask = true, +}; + +static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { + .firmware = SD8787_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd87xx, + .max_ports = 16, + .mp_agg_pkt_limit = 8, + .supports_sdio_new_mode = false, + .has_control_mask = true, +}; + +static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { + .firmware = SD8797_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd87xx, + .max_ports = 16, + .mp_agg_pkt_limit = 8, + .supports_sdio_new_mode = false, + .has_control_mask = true, +}; + +static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { + .firmware = SD8897_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd8897, + .max_ports = 32, + .mp_agg_pkt_limit = 16, + .supports_sdio_new_mode = true, + .has_control_mask = false, +}; + /* * .cmdrsp_complete handler */ @@ -325,4 +364,77 @@ static inline int mwifiex_sdio_event_complete(struct mwifiex_adapter *adapter, return 0; } +static inline bool +mp_rx_aggr_port_limit_reached(struct sdio_mmc_card *card) +{ + u8 tmp; + + if (card->curr_rd_port < card->mpa_rx.start_port) { + if (card->supports_sdio_new_mode) + tmp = card->mp_end_port >> 1; + else + tmp = card->mp_agg_pkt_limit; + + if (((card->max_ports - card->mpa_rx.start_port) + + card->curr_rd_port) >= tmp) + return true; + } + + if (!card->supports_sdio_new_mode) + return false; + + if ((card->curr_rd_port - card->mpa_rx.start_port) >= + (card->mp_end_port >> 1)) + return true; + + return false; +} + +static inline bool +mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card) +{ + u16 tmp; + + if (card->curr_wr_port < card->mpa_tx.start_port) { + if (card->supports_sdio_new_mode) + tmp = card->mp_end_port >> 1; + else + tmp = card->mp_agg_pkt_limit; + + if (((card->max_ports - card->mpa_tx.start_port) + + card->curr_wr_port) >= tmp) + return true; + } + + if (!card->supports_sdio_new_mode) + return false; + + if ((card->curr_wr_port - card->mpa_tx.start_port) >= + (card->mp_end_port >> 1)) + return true; + + return false; +} + +/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */ +static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card, + struct sk_buff *skb, u8 port) +{ + card->mpa_rx.buf_len += skb->len; + + if (!card->mpa_rx.pkt_cnt) + card->mpa_rx.start_port = port; + + if (card->supports_sdio_new_mode) { + card->mpa_rx.ports |= (1 << port); + } else { + if (card->mpa_rx.start_port <= port) + card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt); + else + card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1); + } + card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb; + card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len; + card->mpa_rx.pkt_cnt++; +} #endif /* _MWIFIEX_SDIO_H */ diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index b193e25977d2..8ece48580642 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -1134,6 +1134,55 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv, return 0; } +/* This 
function parse cal data from ASCII to hex */ +static u32 mwifiex_parse_cal_cfg(u8 *src, size_t len, u8 *dst) +{ + u8 *s = src, *d = dst; + + while (s - src < len) { + if (*s && (isspace(*s) || *s == '\t')) { + s++; + continue; + } + if (isxdigit(*s)) { + *d++ = simple_strtol(s, NULL, 16); + s += 2; + } else { + s++; + } + } + + return d - dst; +} + +/* This function prepares command of set_cfg_data. */ +static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + u16 cmd_action) +{ + struct host_cmd_ds_802_11_cfg_data *cfg_data = &cmd->params.cfg_data; + struct mwifiex_adapter *adapter = priv->adapter; + u32 len, cal_data_offset; + u8 *tmp_cmd = (u8 *)cmd; + + cal_data_offset = S_DS_GEN + sizeof(*cfg_data); + if ((adapter->cal_data->data) && (adapter->cal_data->size > 0)) + len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data, + adapter->cal_data->size, + (u8 *)(tmp_cmd + cal_data_offset)); + else + return -1; + + cfg_data->action = cpu_to_le16(cmd_action); + cfg_data->type = cpu_to_le16(CFG_DATA_TYPE_CAL); + cfg_data->data_len = cpu_to_le16(len); + + cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA); + cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*cfg_data) + len); + + return 0; +} + /* * This function prepares the commands before sending them to the firmware. * @@ -1152,6 +1201,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_GET_HW_SPEC: ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr); break; + case HostCmd_CMD_CFG_DATA: + ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, cmd_action); + break; case HostCmd_CMD_MAC_CONTROL: ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action, data_buf); @@ -1384,6 +1436,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, */ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta) { + struct mwifiex_adapter *adapter = priv->adapter; int ret; u16 enable = true; struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl; @@ -1404,6 +1457,15 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta) HostCmd_ACT_GEN_SET, 0, NULL); if (ret) return -1; + + /* Download calibration data to firmware */ + if (adapter->cal_data) { + ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA, + HostCmd_ACT_GEN_SET, 0, NULL); + if (ret) + return -1; + } + /* Read MAC address from HW */ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC, HostCmd_ACT_GEN_GET, 0, NULL); diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 9f990e14966e..d85df158cc6c 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -818,6 +818,18 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv, return 0; } +/* This function handles the command response of set_cfg_data */ +static int mwifiex_ret_cfg_data(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp) +{ + if (resp->result != HostCmd_RESULT_OK) { + dev_err(priv->adapter->dev, "Cal data cmd resp failed\n"); + return -1; + } + + return 0; +} + /* * This function handles the command responses. 
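mwifiex_cmd_cfg_data() above relies on mwifiex_parse_cal_cfg() to turn an ASCII calibration file into raw bytes before appending them to the HostCmd_CMD_CFG_DATA payload. A userspace approximation of that conversion is sketched below; the sample input and the strtol() call are illustrative assumptions, while the driver itself uses the kernel's isxdigit()/simple_strtol() helpers exactly as shown in the hunk.

#include <ctype.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Skip whitespace, turn each run of hex digits into one byte.
 * Assumes well-formed "xx xx xx" style input. */
static size_t parse_cal_ascii(const char *src, size_t len, unsigned char *dst)
{
	const char *s = src;
	unsigned char *d = dst;

	while ((size_t)(s - src) < len) {
		if (isspace((unsigned char)*s)) {
			s++;
			continue;
		}
		if (isxdigit((unsigned char)*s)) {
			*d++ = (unsigned char)strtol(s, NULL, 16);
			s += 2;		/* one byte consumed as two hex digits */
		} else {
			s++;		/* ignore anything else */
		}
	}

	return (size_t)(d - dst);
}

int main(void)
{
	const char text[] = "01 ff 7a\n0c";
	unsigned char buf[8];
	size_t i, n = parse_cal_ascii(text, sizeof(text) - 1, buf);

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("(%zu bytes)\n", n);
	return 0;
}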
* @@ -841,6 +853,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_GET_HW_SPEC: ret = mwifiex_ret_get_hw_spec(priv, resp); break; + case HostCmd_CMD_CFG_DATA: + ret = mwifiex_ret_cfg_data(priv, resp); + break; case HostCmd_CMD_MAC_CONTROL: break; case HostCmd_CMD_802_11_MAC_ADDRESS: @@ -978,6 +993,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_UAP_BSS_STOP: priv->bss_started = 0; break; + case HostCmd_CMD_UAP_STA_DEAUTH: + break; case HostCmd_CMD_MEF_CFG: break; default: diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index b04b1db29100..2de882dead0f 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -689,6 +689,23 @@ mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action, return 0; } +/* This function prepares AP specific deauth command with mac supplied in + * function parameter. + */ +static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, u8 *mac) +{ + struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth; + + cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH); + memcpy(sta_deauth->mac, mac, ETH_ALEN); + sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING); + + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) + + S_DS_GEN); + return 0; +} + /* This function prepares the AP specific commands before sending them * to the firmware. * This is a generic function which calls specific command preparation @@ -710,6 +727,10 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no, cmd->command = cpu_to_le16(cmd_no); cmd->size = cpu_to_le16(S_DS_GEN); break; + case HostCmd_CMD_UAP_STA_DEAUTH: + if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf)) + return -1; + break; default: dev_err(priv->adapter->dev, "PREP_CMD: unknown cmd %#x\n", cmd_no); diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c index 21c640d3b579..718066577c6c 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ b/drivers/net/wireless/mwifiex/uap_event.c @@ -107,18 +107,15 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies, */ static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac) { - struct mwifiex_sta_node *node, *tmp; + struct mwifiex_sta_node *node; unsigned long flags; spin_lock_irqsave(&priv->sta_list_spinlock, flags); node = mwifiex_get_sta_entry(priv, mac); if (node) { - list_for_each_entry_safe(node, tmp, &priv->sta_list, - list) { - list_del(&node->list); - kfree(node); - } + list_del(&node->list); + kfree(node); } spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); @@ -295,3 +292,19 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) return 0; } + +/* This function deletes station entry from associated station list. + * Also if both AP and STA are 11n enabled, RxReorder tables and TxBA stream + * tables created for this station are deleted. 
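The uap_event.c change above narrows mwifiex_del_sta_entry() so that only the matching station node is unlinked and freed, instead of tearing down the whole sta_list once any match is found. The minimal userspace sketch below shows that "delete exactly one match" pattern; struct sta_node and the pointer-to-pointer walk are illustrative stand-ins for the kernel's list_head and spinlock machinery.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

/* Illustrative stand-in for the driver's station list entry. */
struct sta_node {
	unsigned char mac[ETH_ALEN];
	struct sta_node *next;
};

/* Unlink and free only the entry matching @mac; the pre-patch code
 * freed every node on the list as soon as one match was found. */
static void del_sta_entry(struct sta_node **head, const unsigned char *mac)
{
	struct sta_node **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if (!memcmp((*pp)->mac, mac, ETH_ALEN)) {
			struct sta_node *victim = *pp;

			*pp = victim->next;	/* unlink this node only */
			free(victim);
			return;
		}
	}
}

int main(void)
{
	struct sta_node *head, *a, *b;
	unsigned char mac_a[ETH_ALEN] = { 1, 2, 3, 4, 5, 6 };
	unsigned char mac_b[ETH_ALEN] = { 6, 5, 4, 3, 2, 1 };

	a = calloc(1, sizeof(*a));
	b = calloc(1, sizeof(*b));
	if (!a || !b)
		return 1;
	memcpy(a->mac, mac_a, ETH_ALEN);
	memcpy(b->mac, mac_b, ETH_ALEN);
	a->next = b;
	head = a;

	del_sta_entry(&head, mac_b);	/* removes b, keeps a */
	printf("remaining head mac[0]=%u\n", head->mac[0]);
	free(head);
	return 0;
}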
+ */ +void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, + struct mwifiex_sta_node *node) +{ + if (priv->ap_11n_enabled && node->is_11n_enabled) { + mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr); + mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr); + } + mwifiex_del_sta_entry(priv, node->mac_addr); + + return; +} diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 6820fce4016b..a3707fd4ef62 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1548,7 +1548,7 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) if (!priv->pending_tx_pkts) return 0; - retry = 0; + retry = 1; rc = 0; spin_lock_bh(&priv->tx_lock); @@ -1572,13 +1572,19 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) spin_lock_bh(&priv->tx_lock); - if (timeout) { + if (timeout || !priv->pending_tx_pkts) { WARN_ON(priv->pending_tx_pkts); if (retry) wiphy_notice(hw->wiphy, "tx rings drained\n"); break; } + if (retry) { + mwl8k_tx_start(priv); + retry = 0; + continue; + } + if (priv->pending_tx_pkts < oldcount) { wiphy_notice(hw->wiphy, "waiting for tx rings to drain (%d -> %d pkts)\n", @@ -2055,6 +2061,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, mwl8k_remove_stream(hw, stream); spin_unlock(&priv->stream_lock); } + mwl8k_tx_start(priv); spin_unlock_bh(&priv->tx_lock); pci_unmap_single(priv->pdev, dma, skb->len, PCI_DMA_TODEVICE); diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index 978e7eb26567..7fc46f26cf2b 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -42,8 +42,7 @@ MODULE_FIRMWARE("3826.arm"); -/* - * gpios should be handled in board files and provided via platform data, +/* gpios should be handled in board files and provided via platform data, * but because it's currently impossible for p54spi to have a header file * in include/linux, let's use module paramaters for now */ @@ -191,8 +190,7 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev) const struct firmware *eeprom; int ret; - /* - * allow users to customize their eeprom. + /* allow users to customize their eeprom. */ ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev); @@ -285,8 +283,7 @@ static void p54spi_power_on(struct p54s_priv *priv) gpio_set_value(p54spi_gpio_power, 1); enable_irq(gpio_to_irq(p54spi_gpio_irq)); - /* - * need to wait a while before device can be accessed, the length + /* need to wait a while before device can be accessed, the length * is just a guess */ msleep(10); @@ -365,7 +362,8 @@ static int p54spi_rx(struct p54s_priv *priv) /* Firmware may insert up to 4 padding bytes after the lmac header, * but it does not amend the size of SPI data transfer. * Such packets has correct data size in header, thus referencing - * past the end of allocated skb. Reserve extra 4 bytes for this case */ + * past the end of allocated skb. 
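The p54spi hunk above drops the hand-written p54spi_init()/p54spi_exit() pair in favour of module_spi_driver(), which generates equivalent module init/exit code: register the driver on load, unregister it on unload. The skeleton below is a hypothetical example driver (not part of the patch, 3.10-era signatures assumed) showing the resulting shape.

#include <linux/module.h>
#include <linux/spi/spi.h>

/* Hypothetical example driver used only to illustrate the pattern. */
static int example_spi_probe(struct spi_device *spi)
{
	return 0;
}

static int example_spi_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_spi_driver = {
	.driver = {
		.name	= "example-spi",
		.owner	= THIS_MODULE,
	},
	.probe	= example_spi_probe,
	.remove	= example_spi_remove,
};

/* Replaces the open-coded module_init()/module_exit() boilerplate that
 * called spi_register_driver()/spi_unregister_driver() by hand. */
module_spi_driver(example_spi_driver);

MODULE_LICENSE("GPL");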
Reserve extra 4 bytes for this case + */ skb = dev_alloc_skb(len + 4); if (!skb) { p54spi_sleep(priv); @@ -383,7 +381,8 @@ static int p54spi_rx(struct p54s_priv *priv) } p54spi_sleep(priv); /* Put additional bytes to compensate for the possible - * alignment-caused truncation */ + * alignment-caused truncation + */ skb_put(skb, 4); if (p54_rx(priv->hw, skb) == 0) @@ -713,27 +712,7 @@ static struct spi_driver p54spi_driver = { .remove = p54spi_remove, }; -static int __init p54spi_init(void) -{ - int ret; - - ret = spi_register_driver(&p54spi_driver); - if (ret < 0) { - printk(KERN_ERR "failed to register SPI driver: %d", ret); - goto out; - } - -out: - return ret; -} - -static void __exit p54spi_exit(void) -{ - spi_unregister_driver(&p54spi_driver); -} - -module_init(p54spi_init); -module_exit(p54spi_exit); +module_spi_driver(p54spi_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index b52d70c75e1a..ead3a3e746a2 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -840,7 +840,7 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev, unsigned int beacon_base) { int i; - const int txwi_desc_size = rt2x00dev->ops->bcn->winfo_size; + const int txwi_desc_size = rt2x00dev->bcn->winfo_size; /* * For the Beacon base registers we only need to clear @@ -3953,379 +3953,577 @@ static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 106, 0x35); } -static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) +static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev) { - int ant, div_mode; u16 eeprom; u8 value; - rt2800_init_bbp_early(rt2x00dev); + rt2800_bbp_read(rt2x00dev, 138, &value); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) + value |= 0x20; + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) + value &= ~0x02; + rt2800_bbp_write(rt2x00dev, 138, value); +} - rt2800_bbp_read(rt2x00dev, 105, &value); - rt2x00_set_field8(&value, BBP105_MLD, - rt2x00dev->default_ant.rx_chain_num == 2); - rt2800_bbp_write(rt2x00dev, 105, value); +static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 105, 0x01); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); +} + +static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { + rt2800_bbp_write(rt2x00dev, 69, 0x16); + rt2800_bbp_write(rt2x00dev, 73, 0x12); + } else { + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + } + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + 
rt2800_bbp_write(rt2x00dev, 81, 0x37); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) + rt2800_bbp_write(rt2x00dev, 84, 0x19); + else + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); +} + +static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || + rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || + rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E)) + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + else + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + if (rt2x00_rt(rt2x00dev, RT3071) || + rt2x00_rt(rt2x00dev, RT3090)) + rt2800_disable_unused_dac_adc(rt2x00dev); +} + +static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev) +{ + u8 value; rt2800_bbp4_mac_if_ctrl(rt2x00dev); - rt2800_bbp_write(rt2x00dev, 20, 0x06); rt2800_bbp_write(rt2x00dev, 31, 0x08); - rt2800_bbp_write(rt2x00dev, 65, 0x2C); - rt2800_bbp_write(rt2x00dev, 68, 0xDD); - rt2800_bbp_write(rt2x00dev, 69, 0x1A); - rt2800_bbp_write(rt2x00dev, 70, 0x05); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 74, 0x0F); - rt2800_bbp_write(rt2x00dev, 75, 0x4F); + rt2800_bbp_write(rt2x00dev, 75, 0x46); rt2800_bbp_write(rt2x00dev, 76, 0x28); + + rt2800_bbp_write(rt2x00dev, 77, 0x58); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 74, 0x0b); + rt2800_bbp_write(rt2x00dev, 79, 0x18); + rt2800_bbp_write(rt2x00dev, 80, 0x09); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x7a); + + rt2800_bbp_write(rt2x00dev, 84, 0x9a); + + rt2800_bbp_write(rt2x00dev, 86, 0x38); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x02); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 104, 0x92); + + rt2800_bbp_write(rt2x00dev, 105, 0x1c); + + rt2800_bbp_write(rt2x00dev, 106, 0x03); + + rt2800_bbp_write(rt2x00dev, 128, 0x12); + + rt2800_bbp_write(rt2x00dev, 67, 0x24); + rt2800_bbp_write(rt2x00dev, 143, 0x04); + rt2800_bbp_write(rt2x00dev, 142, 0x99); + rt2800_bbp_write(rt2x00dev, 150, 0x30); + rt2800_bbp_write(rt2x00dev, 151, 0x2e); + rt2800_bbp_write(rt2x00dev, 152, 0x20); + rt2800_bbp_write(rt2x00dev, 153, 0x34); + rt2800_bbp_write(rt2x00dev, 154, 0x40); + rt2800_bbp_write(rt2x00dev, 155, 0x3b); + rt2800_bbp_write(rt2x00dev, 253, 0x04); + + 
rt2800_bbp_read(rt2x00dev, 47, &value); + rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); + rt2800_bbp_write(rt2x00dev, 47, value); + + /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ + rt2800_bbp_read(rt2x00dev, 3, &value); + rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); + rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); + rt2800_bbp_write(rt2x00dev, 3, value); +} + +static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 3, 0x00); + rt2800_bbp_write(rt2x00dev, 4, 0x50); + + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + rt2800_bbp_write(rt2x00dev, 47, 0x48); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); - rt2800_bbp_write(rt2x00dev, 84, 0x9A); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + rt2800_bbp_write(rt2x00dev, 81, 0x37); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); - rt2800_bbp_write(rt2x00dev, 95, 0x9a); - rt2800_bbp_write(rt2x00dev, 98, 0x12); - rt2800_bbp_write(rt2x00dev, 103, 0xC0); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); - /* FIXME BBP105 owerwrite */ - rt2800_bbp_write(rt2x00dev, 105, 0x3C); - rt2800_bbp_write(rt2x00dev, 106, 0x35); - rt2800_bbp_write(rt2x00dev, 128, 0x12); - rt2800_bbp_write(rt2x00dev, 134, 0xD0); - rt2800_bbp_write(rt2x00dev, 135, 0xF6); - rt2800_bbp_write(rt2x00dev, 137, 0x0F); - /* Initialize GLRT (Generalized Likehood Radio Test) */ - rt2800_init_bbp_5592_glrt(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 105, 0x34); + + rt2800_bbp_write(rt2x00dev, 106, 0x05); + + rt2800_bbp_write(rt2x00dev, 120, 0x50); + + rt2800_bbp_write(rt2x00dev, 137, 0x0f); + + rt2800_bbp_write(rt2x00dev, 163, 0xbd); + /* Set ITxBF timeout to 0x9c40=1000msec */ + rt2800_bbp_write(rt2x00dev, 179, 0x02); + rt2800_bbp_write(rt2x00dev, 180, 0x00); + rt2800_bbp_write(rt2x00dev, 182, 0x40); + rt2800_bbp_write(rt2x00dev, 180, 0x01); + rt2800_bbp_write(rt2x00dev, 182, 0x9c); + rt2800_bbp_write(rt2x00dev, 179, 0x00); + /* Reprogram the inband interface to put right values in RXWI */ + rt2800_bbp_write(rt2x00dev, 142, 0x04); + rt2800_bbp_write(rt2x00dev, 143, 0x3b); + rt2800_bbp_write(rt2x00dev, 142, 0x06); + rt2800_bbp_write(rt2x00dev, 143, 0xa0); + rt2800_bbp_write(rt2x00dev, 142, 0x07); + rt2800_bbp_write(rt2x00dev, 143, 0xa1); + rt2800_bbp_write(rt2x00dev, 142, 0x08); + rt2800_bbp_write(rt2x00dev, 143, 0xa2); + + rt2800_bbp_write(rt2x00dev, 148, 0xc8); +} - rt2800_bbp4_mac_if_ctrl(rt2x00dev); +static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); - div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); - ant = (div_mode == 3) ? 
1 : 0; - rt2800_bbp_read(rt2x00dev, 152, &value); - if (ant == 0) { - /* Main antenna */ - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); - } else { - /* Auxiliary antenna */ - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); - } - rt2800_bbp_write(rt2x00dev, 152, value); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { - rt2800_bbp_read(rt2x00dev, 254, &value); - rt2x00_set_field8(&value, BBP254_BIT7, 1); - rt2800_bbp_write(rt2x00dev, 254, value); - } + rt2800_bbp_write(rt2x00dev, 70, 0x0a); - rt2800_init_freq_calibration(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); - rt2800_bbp_write(rt2x00dev, 84, 0x19); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); + else + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + rt2800_disable_unused_dac_adc(rt2x00dev); } -static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) +static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev) { - unsigned int i; - u16 eeprom; - u8 reg_id; - u8 value; + rt2800_bbp_write(rt2x00dev, 31, 0x08); - if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || - rt2800_wait_bbp_ready(rt2x00dev))) - return -EACCES; + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); - if (rt2x00_rt(rt2x00dev, RT5592)) { - rt2800_init_bbp_5592(rt2x00dev); - return 0; - } + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); - if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 3, 0x00); - rt2800_bbp_write(rt2x00dev, 4, 0x50); - } + rt2800_bbp_write(rt2x00dev, 70, 0x0a); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp4_mac_if_ctrl(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); - if (rt2800_is_305x_soc(rt2x00dev) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 82, 0x62); - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 47, 0x48); + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + rt2800_disable_unused_dac_adc(rt2x00dev); +} + +static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) +{ + int ant, div_mode; + u16 eeprom; + u8 value; + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, 
RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 68, 0x0b); + rt2800_bbp_write(rt2x00dev, 68, 0x0b); - if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { - rt2800_bbp_write(rt2x00dev, 69, 0x16); - rt2800_bbp_write(rt2x00dev, 73, 0x12); - } else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 75, 0x46); - rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28); - if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 77, 0x58); - else - rt2800_bbp_write(rt2x00dev, 77, 0x59); - } else { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x10); - } + rt2800_bbp_write(rt2x00dev, 77, 0x59); rt2800_bbp_write(rt2x00dev, 70, 0x0a); - if (rt2x00_rt(rt2x00dev, RT3070) || - rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 79, 0x13); - rt2800_bbp_write(rt2x00dev, 80, 0x05); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2800_is_305x_soc(rt2x00dev)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - } else if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 74, 0x0b); - rt2800_bbp_write(rt2x00dev, 79, 0x18); - rt2800_bbp_write(rt2x00dev, 80, 0x09); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } else { - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); rt2800_bbp_write(rt2x00dev, 82, 0x62); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 83, 0x7a); - else - rt2800_bbp_write(rt2x00dev, 83, 0x6a); - if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) - rt2800_bbp_write(rt2x00dev, 84, 0x19); - else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 84, 0x9a); - else - rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 83, 0x7a); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 86, 0x38); - else - rt2800_bbp_write(rt2x00dev, 86, 0x00); + rt2800_bbp_write(rt2x00dev, 84, 0x9a); - if (rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5392)) + rt2800_bbp_write(rt2x00dev, 86, 0x38); + + if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 91, 0x04); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 92, 0x02); - else - rt2800_bbp_write(rt2x00dev, 92, 0x00); + rt2800_bbp_write(rt2x00dev, 92, 0x02); if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 95, 0x9a); rt2800_bbp_write(rt2x00dev, 98, 0x12); } - if 
(rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || - rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || - rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || - rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392) || - rt2800_is_305x_soc(rt2x00dev)) - rt2800_bbp_write(rt2x00dev, 103, 0xc0); - else - rt2800_bbp_write(rt2x00dev, 103, 0x00); + rt2800_bbp_write(rt2x00dev, 103, 0xc0); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 104, 0x92); + rt2800_bbp_write(rt2x00dev, 104, 0x92); - if (rt2800_is_305x_soc(rt2x00dev)) - rt2800_bbp_write(rt2x00dev, 105, 0x01); - else if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 105, 0x1c); - else if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 105, 0x34); - else if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 105, 0x3c); - else - rt2800_bbp_write(rt2x00dev, 105, 0x05); + rt2800_bbp_write(rt2x00dev, 105, 0x3c); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390)) + if (rt2x00_rt(rt2x00dev, RT5390)) rt2800_bbp_write(rt2x00dev, 106, 0x03); - else if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 106, 0x05); else if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 106, 0x12); else - rt2800_bbp_write(rt2x00dev, 106, 0x35); - - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 120, 0x50); + WARN_ON(1); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 128, 0x12); + rt2800_bbp_write(rt2x00dev, 128, 0x12); if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 134, 0xd0); rt2800_bbp_write(rt2x00dev, 135, 0xf6); } - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 137, 0x0f); + rt2800_disable_unused_dac_adc(rt2x00dev); - if (rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_read(rt2x00dev, 138, &value); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 
1 : 0; - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) - value |= 0x20; - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) - value &= ~0x02; + /* check if this is a Bluetooth combo card */ + if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { + u32 reg; - rt2800_bbp_write(rt2x00dev, 138, value); + rt2800_register_read(rt2x00dev, GPIO_CTRL, ®); + rt2x00_set_field32(®, GPIO_CTRL_DIR3, 0); + rt2x00_set_field32(®, GPIO_CTRL_DIR6, 0); + rt2x00_set_field32(®, GPIO_CTRL_VAL3, 0); + rt2x00_set_field32(®, GPIO_CTRL_VAL6, 0); + if (ant == 0) + rt2x00_set_field32(®, GPIO_CTRL_VAL3, 1); + else if (ant == 1) + rt2x00_set_field32(®, GPIO_CTRL_VAL6, 1); + rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); } - if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 67, 0x24); - rt2800_bbp_write(rt2x00dev, 143, 0x04); - rt2800_bbp_write(rt2x00dev, 142, 0x99); - rt2800_bbp_write(rt2x00dev, 150, 0x30); - rt2800_bbp_write(rt2x00dev, 151, 0x2e); - rt2800_bbp_write(rt2x00dev, 152, 0x20); - rt2800_bbp_write(rt2x00dev, 153, 0x34); - rt2800_bbp_write(rt2x00dev, 154, 0x40); - rt2800_bbp_write(rt2x00dev, 155, 0x3b); - rt2800_bbp_write(rt2x00dev, 253, 0x04); - - rt2800_bbp_read(rt2x00dev, 47, &value); - rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); - rt2800_bbp_write(rt2x00dev, 47, value); - - /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ - rt2800_bbp_read(rt2x00dev, 3, &value); - rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); - rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); - rt2800_bbp_write(rt2x00dev, 3, value); + /* This chip has hardware antenna diversity*/ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ + rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ + rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ } - if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 163, 0xbd); - /* Set ITxBF timeout to 0x9c40=1000msec */ - rt2800_bbp_write(rt2x00dev, 179, 0x02); - rt2800_bbp_write(rt2x00dev, 180, 0x00); - rt2800_bbp_write(rt2x00dev, 182, 0x40); - rt2800_bbp_write(rt2x00dev, 180, 0x01); - rt2800_bbp_write(rt2x00dev, 182, 0x9c); - rt2800_bbp_write(rt2x00dev, 179, 0x00); - /* Reprogram the inband interface to put right values in RXWI */ - rt2800_bbp_write(rt2x00dev, 142, 0x04); - rt2800_bbp_write(rt2x00dev, 143, 0x3b); - rt2800_bbp_write(rt2x00dev, 142, 0x06); - rt2800_bbp_write(rt2x00dev, 143, 0xa0); - rt2800_bbp_write(rt2x00dev, 142, 0x07); - rt2800_bbp_write(rt2x00dev, 143, 0xa1); - rt2800_bbp_write(rt2x00dev, 142, 0x08); - rt2800_bbp_write(rt2x00dev, 143, 0xa2); - - rt2800_bbp_write(rt2x00dev, 148, 0xc8); + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + else + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); + rt2800_bbp_write(rt2x00dev, 152, value); + + rt2800_init_freq_calibration(rt2x00dev); +} + +static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) +{ + int ant, div_mode; + u16 eeprom; + u8 value; + + rt2800_init_bbp_early(rt2x00dev); + + rt2800_bbp_read(rt2x00dev, 105, &value); + rt2x00_set_field8(&value, BBP105_MLD, + rt2x00dev->default_ant.rx_chain_num == 2); + rt2800_bbp_write(rt2x00dev, 105, value); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 20, 0x06); + rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 
65, 0x2C); + rt2800_bbp_write(rt2x00dev, 68, 0xDD); + rt2800_bbp_write(rt2x00dev, 69, 0x1A); + rt2800_bbp_write(rt2x00dev, 70, 0x05); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 74, 0x0F); + rt2800_bbp_write(rt2x00dev, 75, 0x4F); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); + rt2800_bbp_write(rt2x00dev, 84, 0x9A); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); + rt2800_bbp_write(rt2x00dev, 95, 0x9a); + rt2800_bbp_write(rt2x00dev, 98, 0x12); + rt2800_bbp_write(rt2x00dev, 103, 0xC0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); + /* FIXME BBP105 owerwrite */ + rt2800_bbp_write(rt2x00dev, 105, 0x3C); + rt2800_bbp_write(rt2x00dev, 106, 0x35); + rt2800_bbp_write(rt2x00dev, 128, 0x12); + rt2800_bbp_write(rt2x00dev, 134, 0xD0); + rt2800_bbp_write(rt2x00dev, 135, 0xF6); + rt2800_bbp_write(rt2x00dev, 137, 0x0F); + + /* Initialize GLRT (Generalized Likehood Radio Test) */ + rt2800_init_bbp_5592_glrt(rt2x00dev); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 1 : 0; + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) { + /* Main antenna */ + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + } else { + /* Auxiliary antenna */ + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); } + rt2800_bbp_write(rt2x00dev, 152, value); - if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - int ant, div_mode; + if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { + rt2800_bbp_read(rt2x00dev, 254, &value); + rt2x00_set_field8(&value, BBP254_BIT7, 1); + rt2800_bbp_write(rt2x00dev, 254, value); + } - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); - div_mode = rt2x00_get_field16(eeprom, - EEPROM_NIC_CONF1_ANT_DIVERSITY); - ant = (div_mode == 3) ? 
1 : 0; + rt2800_init_freq_calibration(rt2x00dev); - /* check if this is a Bluetooth combo card */ - if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { - u32 reg; - - rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); - rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0); - if (ant == 0) - rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1); - else if (ant == 1) - rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1); - rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); - } + rt2800_bbp_write(rt2x00dev, 84, 0x19); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) + rt2800_bbp_write(rt2x00dev, 103, 0xc0); +} - /* This chip has hardware antenna diversity*/ - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { - rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ - rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ - rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ - } +static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; - rt2800_bbp_read(rt2x00dev, 152, &value); - if (ant == 0) - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); - else - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); - rt2800_bbp_write(rt2x00dev, 152, value); + if (rt2800_is_305x_soc(rt2x00dev)) + rt2800_init_bbp_305x_soc(rt2x00dev); - rt2800_init_freq_calibration(rt2x00dev); + switch (rt2x00dev->chip.rt) { + case RT2860: + case RT2872: + case RT2883: + rt2800_init_bbp_28xx(rt2x00dev); + break; + case RT3070: + case RT3071: + case RT3090: + rt2800_init_bbp_30xx(rt2x00dev); + break; + case RT3290: + rt2800_init_bbp_3290(rt2x00dev); + break; + case RT3352: + rt2800_init_bbp_3352(rt2x00dev); + break; + case RT3390: + rt2800_init_bbp_3390(rt2x00dev); + break; + case RT3572: + rt2800_init_bbp_3572(rt2x00dev); + break; + case RT5390: + case RT5392: + rt2800_init_bbp_53xx(rt2x00dev); + break; + case RT5592: + rt2800_init_bbp_5592(rt2x00dev); + return; } for (i = 0; i < EEPROM_BBP_SIZE; i++) { @@ -4337,8 +4535,6 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, reg_id, value); } } - - return 0; } static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev) @@ -5189,9 +5385,11 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev) } msleep(1); - if (unlikely(rt2800_init_bbp(rt2x00dev))) + if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || + rt2800_wait_bbp_ready(rt2x00dev))) return -EIO; + rt2800_init_bbp(rt2x00dev); rt2800_init_rfcsr(rt2x00dev); if (rt2x00_is_usb(rt2x00dev) && diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 6f4a861af336..330f1d25726d 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -1014,7 +1014,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) * Since we have only one producer and one consumer we don't * need to lock the kfifo.
*/ - for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { + for (i = 0; i < rt2x00dev->tx->limit; i++) { rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status); if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index ac854d75bd6c..c71a48da9a31 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -327,7 +327,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) * this limit so reduce the number to prevent errors. */ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT, - ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE) + ((rt2x00dev->rx->limit * DATA_FRAME_SIZE) / 1024) - 3); rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 90dc14336980..6a201725bc50 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -1077,7 +1077,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) */ int kfifo_size = roundup_pow_of_two(rt2x00dev->ops->tx_queues * - rt2x00dev->ops->tx->entry_num * + rt2x00dev->tx->limit * sizeof(u32)); status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size, @@ -1301,23 +1301,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) (rt2x00dev->ops->max_ap_intf - 1); /* - * Determine which operating modes are supported, all modes - * which require beaconing, depend on the availability of - * beacon entries. - */ - rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); - if (rt2x00dev->ops->bcn->entry_num > 0) - rt2x00dev->hw->wiphy->interface_modes |= - BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_WDS); - - rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; - - /* * Initialize work. */ rt2x00dev->workqueue = @@ -1348,6 +1331,23 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) goto exit; /* + * Determine which operating modes are supported, all modes + * which require beaconing, depend on the availability of + * beacon entries. + */ + rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + if (rt2x00dev->ops->bcn->entry_num > 0) + rt2x00dev->hw->wiphy->interface_modes |= + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_WDS); + + rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; + + /* + * Initialize ieee80211 structure.
*/ retval = rt2x00lib_probe_hw(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index dc49e525ae5e..76d95deb274b 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -105,11 +105,13 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops) goto exit_release_regions; } + pci_enable_msi(pci_dev); + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); retval = -ENOMEM; - goto exit_release_regions; + goto exit_disable_msi; } pci_set_drvdata(pci_dev, hw); @@ -150,6 +152,9 @@ exit_free_reg: exit_free_device: ieee80211_free_hw(hw); +exit_disable_msi: + pci_disable_msi(pci_dev); + exit_release_regions: pci_release_regions(pci_dev); @@ -174,6 +179,8 @@ void rt2x00pci_remove(struct pci_dev *pci_dev) rt2x00pci_free_reg(rt2x00dev); ieee80211_free_hw(hw); + pci_disable_msi(pci_dev); + /* * Free the PCI device data. */ diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 0dc8180e251b..7e1759b3e49a 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -2175,7 +2175,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) * that the TX_STA_FIFO stack has a size of 16. We stick to our * tx ring size for now. */ - for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) { + for (i = 0; i < rt2x00dev->tx->limit; i++) { rt2x00mmio_register_read(rt2x00dev, STA_CSR4, &reg); if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) break; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 953111a502ee..796928ba875f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6018,6 +6018,15 @@ int wlcore_free_hw(struct wl1271 *wl) } EXPORT_SYMBOL_GPL(wlcore_free_hw); +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support wlcore_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY, + .n_patterns = WL1271_MAX_RX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE, +}; +#endif + static void wlcore_nvs_cb(const struct firmware *fw, void *context) { struct wl1271 *wl = context; @@ -6071,14 +6080,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) if (!ret) { wl->irq_wake_enabled = true; device_init_wakeup(wl->dev, 1); - if (pdata->pwr_in_suspend) { - wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; - wl->hw->wiphy->wowlan.n_patterns = - WL1271_MAX_RX_FILTERS; - wl->hw->wiphy->wowlan.pattern_min_len = 1; - wl->hw->wiphy->wowlan.pattern_max_len = - WL1271_RX_FILTER_MAX_PATTERN_SIZE; - } + if (pdata->pwr_in_suspend) + wl->hw->wiphy->wowlan = &wlcore_wowlan_support; } #endif disable_irq(wl->irq); diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index a3a851e49321..18c0d8d1ddf7 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c @@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL"); #if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) -/* The RPX series use SLOT_B */ -#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) -#define CONFIG_PCMCIA_SLOT_B -#define CONFIG_BD_IS_MHZ -#endif - /* The ADS board use SLOT_A */ #ifdef CONFIG_ADS #define CONFIG_PCMCIA_SLOT_A @@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev); #define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ -/*
------------------------------------------------------------------------- */ -/* board specific stuff: */ -/* voltage_set(), hardware_enable() and hardware_disable() */ -/* ------------------------------------------------------------------------- */ -/* RPX Boards from Embedded Planet */ - -#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) - -/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks. - * SYPCR is write once only, therefore must the slowest memory be faster - * than the bus monitor or we will get a machine check due to the bus timeout. - */ - -#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE" - -#undef PCMCIA_BMT_LIMIT -#define PCMCIA_BMT_LIMIT (6*8) - -static int voltage_set(int slot, int vcc, int vpp) -{ - u32 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= BCSR1_PCVCTL4; - break; - case 50: - reg |= BCSR1_PCVCTL5; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= BCSR1_PCVCTL6; - else - return 1; - break; - case 120: - reg |= BCSR1_PCVCTL7; - default: - return 1; - } - - if (!((vcc == 50) || (vcc == 0))) - return 1; - - /* first, turn off all power */ - - out_be32(((u32 *) RPX_CSR_ADDR), - in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | - BCSR1_PCVCTL5 | - BCSR1_PCVCTL6 | - BCSR1_PCVCTL7)); - - /* enable new powersettings */ - - out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_RPXCLASSIC */ - /* FADS Boards from Motorola */ #if defined(CONFIG_FADS) @@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp) #endif -/* ------------------------------------------------------------------------- */ -/* Motorola MBX860 */ - -#if defined(CONFIG_MBX) - -#define PCMCIA_BOARD_MSG "MBX" - -static int voltage_set(int slot, int vcc, int vpp) -{ - u8 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= CSR2_VCC_33; - break; - case 50: - reg |= CSR2_VCC_50; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= CSR2_VPP_VCC; - else - return 1; - break; - case 120: - if ((vcc == 33) || (vcc == 50)) - reg |= CSR2_VPP_12; - else - return 1; - default: - return 1; - } - - /* first, turn off all power */ - out_8((u8 *) MBX_CSR2_ADDR, - in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK)); - - /* enable new powersettings */ - out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_MBX */ - #if defined(CONFIG_PRxK) #include <asm/cpld.h> extern volatile fpga_pc_regs *fpga_pc; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 3338437b559b..85772616efbf 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -781,4 +781,12 @@ config APPLE_GMUX graphics as well as the backlight. Currently only backlight control is supported by the driver. +config PVPANIC + tristate "pvpanic device support" + depends on ACPI + ---help--- + This driver provides support for the pvpanic device. 
pvpanic is + a paravirtualized device provided by QEMU; it lets a virtual machine + (guest) communicate panic events to the host. + endif # X86_PLATFORM_DEVICES diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index ace2b38942fe..ef0ec746f78c 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -51,3 +51,5 @@ obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o + +obj-$(CONFIG_PVPANIC) += pvpanic.o diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 210b5b872125..8fcb41e18b9c 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -171,6 +171,15 @@ static struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_x401u, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X75A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X75A"), + }, + .driver_data = &quirk_asus_x401u, + }, {}, }; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index fa3ee6209572..1134119521ac 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -284,6 +284,7 @@ static void __init parse_da_table(const struct dmi_header *dm) { /* Final token is a terminator, so we don't want to copy it */ int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; + struct calling_interface_token *new_da_tokens; struct calling_interface_structure *table = container_of(dm, struct calling_interface_structure, header); @@ -296,12 +297,13 @@ static void __init parse_da_table(const struct dmi_header *dm) da_command_address = table->cmdIOAddress; da_command_code = table->cmdIOCode; - da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * - sizeof(struct calling_interface_token), - GFP_KERNEL); + new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * + sizeof(struct calling_interface_token), + GFP_KERNEL); - if (!da_tokens) + if (!new_da_tokens) return; + da_tokens = new_da_tokens; memcpy(da_tokens+da_num_tokens, table->tokens, sizeof(struct calling_interface_token) * tokens); diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c index 3f945457f71c..bcf8cc6b5537 100644 --- a/drivers/platform/x86/dell-wmi-aio.c +++ b/drivers/platform/x86/dell-wmi-aio.c @@ -34,6 +34,14 @@ MODULE_LICENSE("GPL"); #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" +struct dell_wmi_event { + u16 length; + /* 0x000: A hot key pressed or an event occurred + * 0x00F: A sequence of hot keys are pressed */ + u16 type; + u16 event[]; +}; + static const char *dell_wmi_aio_guids[] = { EVENT_GUID1, EVENT_GUID2, @@ -46,15 +54,41 @@ MODULE_ALIAS("wmi:"EVENT_GUID2); static const struct key_entry dell_wmi_aio_keymap[] = { { KE_KEY, 0xc0, { KEY_VOLUMEUP } }, { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } }, + { KE_KEY, 0xe030, { KEY_VOLUMEUP } }, + { KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } }, + { KE_KEY, 0xe020, { KEY_MUTE } }, + { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } }, + { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } }, + { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } }, + { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } }, { KE_END, 0 } }; static struct input_dev *dell_wmi_aio_input_dev; +/* + * The new WMI event data format will follow the dell_wmi_event structure + * So, we will 
check if the buffer matches the format + */ +static bool dell_wmi_aio_event_check(u8 *buffer, int length) +{ + struct dell_wmi_event *event = (struct dell_wmi_event *)buffer; + + if (event == NULL || length < 6) + return false; + + if ((event->type == 0 || event->type == 0xf) && + event->length >= 2) + return true; + + return false; +} + static void dell_wmi_aio_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; + struct dell_wmi_event *event; acpi_status status; status = wmi_get_event_data(value, &response); @@ -65,7 +99,7 @@ static void dell_wmi_aio_notify(u32 value, void *context) obj = (union acpi_object *)response.pointer; if (obj) { - unsigned int scancode; + unsigned int scancode = 0; switch (obj->type) { case ACPI_TYPE_INTEGER: @@ -75,13 +109,22 @@ static void dell_wmi_aio_notify(u32 value, void *context) scancode, 1, true); break; case ACPI_TYPE_BUFFER: - /* Broken machines return the scancode in a buffer */ - if (obj->buffer.pointer && obj->buffer.length > 0) { - scancode = obj->buffer.pointer[0]; + if (dell_wmi_aio_event_check(obj->buffer.pointer, + obj->buffer.length)) { + event = (struct dell_wmi_event *) + obj->buffer.pointer; + scancode = event->event[0]; + } else { + /* Broken machines return the scancode in a + buffer */ + if (obj->buffer.pointer && + obj->buffer.length > 0) + scancode = obj->buffer.pointer[0]; + } + if (scancode) sparse_keymap_report_event( dell_wmi_aio_input_dev, scancode, 1, true); - } break; } } diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 1a779bbfb87d..8df0c5a21be2 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -71,6 +71,14 @@ enum hp_wmi_event_ids { HPWMI_WIRELESS = 5, HPWMI_CPU_BATTERY_THROTTLE = 6, HPWMI_LOCK_SWITCH = 7, + HPWMI_LID_SWITCH = 8, + HPWMI_SCREEN_ROTATION = 9, + HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A, + HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B, + HPWMI_PROXIMITY_SENSOR = 0x0C, + HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D, + HPWMI_PEAKSHIFT_PERIOD = 0x0F, + HPWMI_BATTERY_CHARGE_PERIOD = 0x10, }; struct bios_args { @@ -536,6 +544,22 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_LOCK_SWITCH: break; + case HPWMI_LID_SWITCH: + break; + case HPWMI_SCREEN_ROTATION: + break; + case HPWMI_COOLSENSE_SYSTEM_MOBILE: + break; + case HPWMI_COOLSENSE_SYSTEM_HOT: + break; + case HPWMI_PROXIMITY_SENSOR: + break; + case HPWMI_BACKLIT_KB_BRIGHTNESS: + break; + case HPWMI_PEAKSHIFT_PERIOD: + break; + case HPWMI_BATTERY_CHARGE_PERIOD: + break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index e64a7a870d42..a8e43cf70fac 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev) static int lis3lv02d_resume(struct device *dev) { - return lis3lv02d_poweron(&lis3_dev); + lis3lv02d_poweron(&lis3_dev); + return 0; } static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 17f00b8dc5cb..89c4519d48ac 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -640,7 +640,8 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) for (bit = 0; bit < 16; bit++) { if (test_bit(bit, &value)) { switch (bit) { - case 6: + case 0: /* Z580 */ + case 
6: /* Z570 */ /* Thermal Management button */ ideapad_input_report(priv, 65); break; @@ -648,6 +649,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) /* OneKey Theater button */ ideapad_input_report(priv, 64); break; + default: + pr_info("Unknown special button: %lu\n", bit); + break; } } } diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c new file mode 100644 index 000000000000..47ae0c47d4b5 --- /dev/null +++ b/drivers/platform/x86/pvpanic.c @@ -0,0 +1,124 @@ +/* + * pvpanic.c - pvpanic Device Support + * + * Copyright (C) 2013 Fujitsu. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>"); +MODULE_DESCRIPTION("pvpanic device driver"); +MODULE_LICENSE("GPL"); + +static int pvpanic_add(struct acpi_device *device); +static int pvpanic_remove(struct acpi_device *device); + +static const struct acpi_device_id pvpanic_device_ids[] = { + { "QEMU0001", 0 }, + { "", 0 }, +}; +MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids); + +#define PVPANIC_PANICKED (1 << 0) + +static u16 port; + +static struct acpi_driver pvpanic_driver = { + .name = "pvpanic", + .class = "QEMU", + .ids = pvpanic_device_ids, + .ops = { + .add = pvpanic_add, + .remove = pvpanic_remove, + }, + .owner = THIS_MODULE, +}; + +static void +pvpanic_send_event(unsigned int event) +{ + outb(event, port); +} + +static int +pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, + void *unused) +{ + pvpanic_send_event(PVPANIC_PANICKED); + return NOTIFY_DONE; +} + +static struct notifier_block pvpanic_panic_nb = { + .notifier_call = pvpanic_panic_notify, +}; + + +static acpi_status +pvpanic_walk_resources(struct acpi_resource *res, void *context) +{ + switch (res->type) { + case ACPI_RESOURCE_TYPE_END_TAG: + return AE_OK; + + case ACPI_RESOURCE_TYPE_IO: + port = res->data.io.minimum; + return AE_OK; + + default: + return AE_ERROR; + } +} + +static int pvpanic_add(struct acpi_device *device) +{ + acpi_status status; + u64 ret; + + status = acpi_evaluate_integer(device->handle, "_STA", NULL, + &ret); + + if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B) + return -ENODEV; + + acpi_walk_resources(device->handle, METHOD_NAME__CRS, + pvpanic_walk_resources, NULL); + + if (!port) + return -ENODEV; + + atomic_notifier_chain_register(&panic_notifier_list, + &pvpanic_panic_nb); + + return 0; +} + +static int pvpanic_remove(struct acpi_device *device) +{ + + atomic_notifier_chain_unregister(&panic_notifier_list, + &pvpanic_panic_nb); + return 0; +} + +module_acpi_driver(pvpanic_driver); diff --git a/drivers/platform/x86/samsung-q10.c 
b/drivers/platform/x86/samsung-q10.c index 5f770059fd4d..1a90b62a71c6 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c @@ -176,10 +176,7 @@ static int __init samsungq10_init(void) samsungq10_probe, NULL, 0, NULL, 0); - if (IS_ERR(samsungq10_device)) - return PTR_ERR(samsungq10_device); - - return 0; + return PTR_RET(samsungq10_device); } static void __exit samsungq10_exit(void) diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index d544e3aaf761..2ac045f27f10 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1255,6 +1255,11 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) real_ev = __sony_nc_gfx_switch_status_get(); break; + case 0x015B: + /* Hybrid GFX switching SVS151290S */ + ev_type = GFX_SWITCH; + real_ev = __sony_nc_gfx_switch_status_get(); + break; default: dprintk("Unknown event 0x%x for handle 0x%x\n", event, handle); @@ -1353,6 +1358,7 @@ static void sony_nc_function_setup(struct acpi_device *device, break; case 0x0128: case 0x0146: + case 0x015B: result = sony_nc_gfx_switch_setup(pf_device, handle); if (result) pr_err("couldn't set up GFX Switch status (%d)\n", @@ -1375,6 +1381,7 @@ static void sony_nc_function_setup(struct acpi_device *device, case 0x0143: case 0x014b: case 0x014c: + case 0x0163: result = sony_nc_kbd_backlight_setup(pf_device, handle); if (result) pr_err("couldn't set up keyboard backlight function (%d)\n", @@ -1426,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) break; case 0x0128: case 0x0146: + case 0x015B: sony_nc_gfx_switch_cleanup(pd); break; case 0x0131: @@ -1439,6 +1447,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) case 0x0143: case 0x014b: case 0x014c: + case 0x0163: sony_nc_kbd_backlight_cleanup(pd); break; default: @@ -1485,6 +1494,7 @@ static void sony_nc_function_resume(void) case 0x0143: case 0x014b: case 0x014c: + case 0x0163: sony_nc_kbd_backlight_resume(); break; default: @@ -2390,7 +2400,9 @@ static int __sony_nc_gfx_switch_status_get(void) { unsigned int result; - if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result)) + if (sony_call_snc_handle(gfxs_ctl->handle, + gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100, + &result)) return -EIO; switch (gfxs_ctl->handle) { @@ -2400,6 +2412,12 @@ static int __sony_nc_gfx_switch_status_get(void) */ return result & 0x1 ? SPEED : STAMINA; break; + case 0x015B: + /* 0: discrete GFX (speed) + * 1: integrated GFX (stamina) + */ + return result & 0x1 ? STAMINA : SPEED; + break; case 0x0128: /* it's a more elaborated bitmask, for now: * 2: integrated GFX (stamina) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index db95c547c09d..86af29f53bbe 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1353,6 +1353,8 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI select SCSI_FC_ATTRS + select GENERIC_CSUM + select CRC_T10DIF help This lpfc driver supports the Emulex LightPulse Family of Fibre Channel PCI host adapters. 
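The pvpanic driver added above (drivers/platform/x86/pvpanic.c) reduces guest-to-host panic reporting to one step: a panic notifier writes a one-byte event mask to the I/O port advertised by the QEMU0001 ACPI device. The sketch below restates just that signalling path as a self-contained module; the fixed port 0x505 (QEMU's usual default) and the demo_* names are assumptions made for the sketch, since the real driver discovers the port from the device's _CRS as shown above.

/*
 * Minimal sketch of the pvpanic guest-side signalling, assuming a fixed
 * I/O port (0x505, QEMU's usual default). The real driver above instead
 * reads the port from the ACPI _CRS of the QEMU0001 device.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/io.h>

#define DEMO_PVPANIC_PORT	0x505		/* assumed port, see note above */
#define DEMO_PVPANIC_PANICKED	(1 << 0)	/* same event bit as the driver */

static int demo_panic_notify(struct notifier_block *nb, unsigned long code,
			     void *unused)
{
	/* One byte written to the port tells the hypervisor we panicked. */
	outb(DEMO_PVPANIC_PANICKED, DEMO_PVPANIC_PORT);
	return NOTIFY_DONE;
}

static struct notifier_block demo_panic_nb = {
	.notifier_call = demo_panic_notify,
};

static int __init demo_pvpanic_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &demo_panic_nb);
	return 0;
}

static void __exit demo_pvpanic_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &demo_panic_nb);
}

module_init(demo_pvpanic_init);
module_exit(demo_pvpanic_exit);
MODULE_LICENSE("GPL");

Keeping the event to a single byte on a known port is what lets the host side treat any write there as "the guest has panicked" without further parsing.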
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c index 64136c56e706..33072388ea16 100644 --- a/drivers/scsi/aic94xx/aic94xx_dev.c +++ b/drivers/scsi/aic94xx/aic94xx_dev.c @@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev) struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; - if (dev->dev_type == SATA_PM_PORT) + if (dev->dev_type == SAS_SATA_PM_PORT) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); else if (dev->tproto) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); @@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; u32 qdepth = 0; - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) { if (ata_id_has_ncq(ata_dev->id)) qdepth = ata_id_queue_depth(ata_dev->id); asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, @@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || - dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || + dev->dev_type == SAS_SATA_PM_PORT) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); @@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask); if (dev->port->oob_mode != SATA_OOB_MODE) { flags |= OPEN_REQUIRED; - if ((dev->dev_type == SATA_DEV) || + if ((dev->dev_type == SAS_SATA_DEV) || (dev->tproto & SAS_PROTOCOL_STP)) { struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; if (rps_resp->frame_type == SMP_RESPONSE && @@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev) } else { flags |= CONCURRENT_CONN_SUPP; if (!dev->parent && - (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV)) + (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)) asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 4); else @@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); } } - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) flags |= SATA_MULTIPORT; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); @@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { i = asd_init_sata(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); @@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev) } } - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); if (rdev->I_T_nexus_loss_timeout > 0) asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, @@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev) spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); switch (dev->dev_type) { - case SATA_PM: + case SAS_SATA_PM: res = asd_init_sata_pm_ddb(dev); break; - case 
SATA_PM_PORT: + case SAS_SATA_PM_PORT: res = asd_init_sata_pm_port_ddb(dev); break; default: diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 81b736c76fff..4df867e07b20 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy) memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); - phy->identify_frame->dev_type = SAS_END_DEV; + phy->identify_frame->dev_type = SAS_END_DEVICE; if (phy->sas_phy.role & PHY_ROLE_INITIATOR) phy->identify_frame->initiator_bits = phy->sas_phy.iproto; if (phy->sas_phy.role & PHY_ROLE_TARGET) diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index cf9040933da6..d4c35df3d4ae 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev) struct sas_phy *phy = sas_get_local_phy(dev); /* Standard mandates link reset for ATA (type 0) and * hard reset for SSP (type 1) */ - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index f1733dfa3ae2..777e7c0bbb4b 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 5c87768c109c..e66aa7c11a8a 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint16_t status = 0, addl_status = 0, wrb_num = 0; struct be_mcc_wrb *temp_wrb; struct be_cmd_req_hdr *ioctl_hdr; + struct be_cmd_resp_hdr *ioctl_resp_hdr; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; if (beiscsi_error(phba)) @@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, ioctl_hdr->subsystem, ioctl_hdr->opcode, status, addl_status); + + if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { + ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr; + if (ioctl_resp_hdr->response_length) + goto release_mcc_tag; + } rc = -EAGAIN; } @@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); struct be_cmd_req_hdr *hdr = embedded_payload(wrb); + struct be_cmd_resp_hdr *resp_hdr; be_dws_le_to_cpu(compl, 4); @@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, hdr->subsystem, hdr->opcode, compl_status, extd_status); + if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) { + resp_hdr = (struct be_cmd_resp_hdr *) hdr; + if (resp_hdr->response_length) + return 0; + } return -EBUSY; } return 0; @@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) void beiscsi_async_link_state_process(struct beiscsi_hba *phba, struct be_async_event_link_state *evt) { - switch (evt->port_link_status) { - case ASYNC_EVENT_LINK_DOWN: + if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) || + ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && + (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) { + phba->state = BE_ADAPTER_LINK_DOWN; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link Down on Physical Port %d\n", + "BC_%d : Link Down on Port %d\n", evt->physical_port); - phba->state |= BE_ADAPTER_LINK_DOWN; iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); - break; - case ASYNC_EVENT_LINK_UP: + } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || + ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && + (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { phba->state = BE_ADAPTER_UP; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link UP on Physical Port %d\n", - evt->physical_port); - break; - default: - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Unexpected Async Notification %d on" - "Physical Port %d\n", - evt->port_link_status, + "BC_%d : Link UP on Port %d\n", evt->physical_port); } } @@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) { void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); - int wait = 0; + uint32_t wait = 0; u32 ready; do { @@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) struct be_mcc_compl *compl = &mbox->compl; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + status = be_mbox_db_ready_wait(ctrl); + if (status) + return status; + val &= ~MPU_MAILBOX_DB_RDY_MASK; val |= MPU_MAILBOX_DB_HI_MASK; val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; @@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba) struct be_mcc_compl *compl = &mbox->compl; struct be_ctrl_info *ctrl = &phba->ctrl; + status = be_mbox_db_ready_wait(ctrl); + if (status) + return status; + val |= MPU_MAILBOX_DB_HI_MASK; /* at bits 2 - 31 
place mbox dma addr msb bits 34 - 63 */ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; @@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, return status; } +/** + * be_cmd_fw_initialize()- Initialize FW + * @ctrl: Pointer to function control structure + * + * Send FW initialize pattern for the function. + * + * return + * Success: 0 + * Failure: Non-Zero value + **/ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); @@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) return status; } +/** + * be_cmd_fw_uninit()- Uinitialize FW + * @ctrl: Pointer to function control structure + * + * Send FW uninitialize pattern for the function + * + * return + * Success: 0 + * Failure: Non-Zero value + **/ +int be_cmd_fw_uninit(struct be_ctrl_info *ctrl) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + int status; + u8 *endian_check; + + spin_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + endian_check = (u8 *) wrb; + *endian_check++ = 0xFF; + *endian_check++ = 0xAA; + *endian_check++ = 0xBB; + *endian_check++ = 0xFF; + *endian_check++ = 0xFF; + *endian_check++ = 0xCC; + *endian_check++ = 0xDD; + *endian_check = 0xFF; + + be_dws_cpu_to_le(wrb, sizeof(*wrb)); + + status = be_mbox_notify(ctrl); + if (status) + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BC_%d : be_cmd_fw_uninit Failed\n"); + + spin_unlock(&ctrl->mbox_lock); + return status; +} + int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, struct be_queue_info *cq, struct be_queue_info *eq, bool sol_evts, bool no_delay, int coalesce_wm) @@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, OPCODE_COMMON_CQ_CREATE, sizeof(*req)); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); - if (chip_skh_r(ctrl->pdev)) { - req->hdr.version = MBX_CMD_VER2; - req->page_size = 1; - AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, - ctxt, coalesce_wm); - AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, - ctxt, no_delay); - AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, - __ilog2_u32(cq->len / 256)); - AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); - AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); @@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, func, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + } else { + req->hdr.version = MBX_CMD_VER2; + req->page_size = 1; + AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, + ctxt, coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, + ctxt, no_delay); + AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, + __ilog2_u32(cq->len / 256)); + AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); + AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); @@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = 
wrb_from_mbox(&ctrl->mbox_mem); struct be_defq_create_req *req = embedded_payload(wrb); struct be_dma_mem *q_mem = &dq->dma_mem; + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); void *ctxt = &req->context; int status; @@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); - AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0); - AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt, - 1); - AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt, - PCI_FUNC(ctrl->pdev->devfn)); - AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt, - be_encoded_q_len(length / sizeof(struct phys_addr))); - AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size, - ctxt, entry_size); - AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt, - cq->id); + + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + cq_id_recv, ctxt, cq->id); + } else { + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + cq_id_recv, ctxt, cq->id); + } be_dws_cpu_to_le(ctxt, sizeof(req->context)); diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 23397d51ac54..99073086dfe0 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -52,6 +52,10 @@ struct be_mcc_wrb { /* Completion Status */ #define MCC_STATUS_SUCCESS 0x0 +#define MCC_STATUS_FAILED 0x1 +#define MCC_STATUS_ILLEGAL_REQUEST 0x2 +#define MCC_STATUS_ILLEGAL_FIELD 0x3 +#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4 #define CQE_STATUS_COMPL_MASK 0xFFFF #define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ @@ -118,7 +122,8 @@ struct be_async_event_trailer { enum { ASYNC_EVENT_LINK_DOWN = 0x0, - ASYNC_EVENT_LINK_UP = 0x1 + ASYNC_EVENT_LINK_UP = 0x1, + ASYNC_EVENT_LOGICAL = 0x2 }; /** @@ -130,6 +135,9 @@ struct be_async_event_link_state { u8 port_link_status; u8 port_duplex; u8 port_speed; +#define BEISCSI_PHY_LINK_FAULT_NONE 0x00 +#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01 +#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02 u8 port_fault; u8 rsvd0[7]; struct be_async_event_trailer trailer; @@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va); /*ISCSI Functuions */ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); +int be_cmd_fw_uninit(struct be_ctrl_info *ctrl); struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); @@ -751,6 +760,18 @@ struct amap_be_default_pdu_context { u8 rsvd4[32]; /* dword 3 */ } __packed; +struct amap_default_pdu_context_ext { + u8 rsvd0[16]; /* dword 0 */ + u8 ring_size[4]; /* dword 0 */ + u8 rsvd1[12]; /* dword 0 */ + u8 rsvd2[22]; /* dword 1 */ + u8 rx_pdid[9]; /* dword 1 */ + u8 rx_pdid_valid; /* dword 1 */ + u8 default_buffer_size[16]; /* dword 2 */ + u8 cq_id_recv[16]; /* dword 2 */ + u8 rsvd3[32]; /* dword 3 */ +} __packed; + struct be_defq_create_req { struct be_cmd_req_hdr hdr; u16 num_pages; @@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 { * stack to notify the * controller of a posted Work Request Block */ -#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */ +#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 16 */ #define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */ #define DB_DEF_PDU_WRB_INDEX_SHIFT 16 diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 9014690fe841..ef36be003f67 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, struct beiscsi_conn *beiscsi_conn, unsigned int cid) { - if (phba->conn_table[cid]) { + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Connection table already occupied. 
Detected clash\n"); @@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, } else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n", - cid, beiscsi_conn); + cri_index, beiscsi_conn); - phba->conn_table[cid] = beiscsi_conn; + phba->conn_table[cri_index] = beiscsi_conn; } return 0; } @@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) { struct beiscsi_hba *phba = beiscsi_ep->phba; + struct beiscsi_conn *beiscsi_conn; beiscsi_put_cid(phba, beiscsi_ep->ep_cid); beiscsi_ep->phba = NULL; + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = NULL; + + /** + * Check if any connection resource allocated by driver + * is to be freed.This case occurs when target redirection + * or connection retry is done. + **/ + if (!beiscsi_ep->conn) + return; + + beiscsi_conn = beiscsi_ep->conn; + if (beiscsi_conn->login_in_progress) { + beiscsi_free_mgmt_task_handles(beiscsi_conn, + beiscsi_conn->task); + beiscsi_conn->login_in_progress = 0; + } } /** @@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, { struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; struct beiscsi_hba *phba = beiscsi_ep->phba; - struct be_mcc_wrb *wrb; struct tcp_connect_and_offload_out *ptcpcnct_out; struct be_dma_mem nonemb_cmd; unsigned int tag; @@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", beiscsi_ep->ep_cid); - phba->ep_array[beiscsi_ep->ep_cid - - phba->fw_config.iscsi_cid_start] = ep; - if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + - phba->params.cxns_per_ctrl * 2)) { - - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BS_%d : Failed in allocate iscsi cid\n"); - goto free_ep; - } + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = ep; beiscsi_ep->cid_vld = 0; nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, @@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : Failed to allocate memory for" " mgmt_open_connection\n"); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); + beiscsi_free_ep(beiscsi_ep); return -ENOMEM; } nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); - if (!tag) { + if (tag <= 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : mgmt_open_connection Failed for cid=%d\n", beiscsi_ep->ep_cid); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); + beiscsi_free_ep(beiscsi_ep); return -EAGAIN; } - ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); - goto free_ep; + beiscsi_free_ep(beiscsi_ep); + return -EBUSY; } - ptcpcnct_out = embedded_payload(wrb); + ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va; beiscsi_ep = ep->dd_data; beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; beiscsi_ep->cid_vld = 1; @@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, 
nonemb_cmd.dma); return 0; - -free_ep: - beiscsi_free_ep(beiscsi_ep); - return -EBUSY; } /** @@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, return ERR_PTR(ret); } + if (beiscsi_error(phba)) { + ret = -EIO; + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BS_%d : The FW state Not Stable!!!\n"); + return ERR_PTR(ret); + } + if (phba->state != BE_ADAPTER_UP) { ret = -EBUSY; beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, @@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba, unsigned int cid) { - if (phba->conn_table[cid]) - phba->conn_table[cid] = NULL; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) + phba->conn_table[cri_index] = NULL; else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : Connection table Not occupied.\n"); diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index 38eab7232159..31ddc8494398 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 4e2733d23003..d24a2867bc21 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00, DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); +DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); +DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL); struct device_attribute *beiscsi_attrs[] = { &dev_attr_beiscsi_log_enable, &dev_attr_beiscsi_drvr_ver, &dev_attr_beiscsi_adapter_family, + &dev_attr_beiscsi_fw_ver, + &dev_attr_beiscsi_active_cid_count, NULL, }; @@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba) + BE2_TMFS + BE2_NOPOUT_REQ)); phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; - phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; + phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count; phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; @@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba, static unsigned int beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, - unsigned short cid, struct pdu_base *ppdu, unsigned long pdu_len, void *pbuffer, unsigned long buf_len) @@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[cid]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (pwrb_context->wrb_handles_available >= 2) { pwrb_handle = pwrb_context->pwrb_handle_base[ pwrb_context->alloc_index]; @@ 
-1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, hdr->t2retain = 0; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->dlength[0] = 0; hdr->dlength[1] = 0; @@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); @@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; struct beiscsi_io_task *io_task; - struct iscsi_conn *conn = beiscsi_conn->conn; - struct iscsi_session *session = conn->session; - uint16_t wrb_index, cid; + uint16_t wrb_index, cid, cri_index; phwi_ctrlr = phba->phwi_ctrlr; - if (chip_skh_r(phba->pcidev)) { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + if (is_chip_be2_be3r(phba)) { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, cid, psol); } else { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, cid, psol); } - pwrb_context = &phwi_ctrlr->wrb_context[ - cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; task = pwrb_handle->pio_handle; io_task = task->dd_data; - spin_lock_bh(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, io_task->psgl_handle); - spin_unlock_bh(&phba->mgmt_sgl_lock); - spin_lock_bh(&session->lock); - free_wrb_handle(phba, pwrb_context, pwrb_handle); - spin_unlock_bh(&session->lock); + memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); + iscsi_put_task(task); } static void @@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, hdr = (struct iscsi_nopin *)task->hdr; hdr->flags = csol_cqe->i_flags; hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); - hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->opcode = ISCSI_OP_NOOP_IN; hdr->itt = io_task->libiscsi_itt; @@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, struct sol_cqe *psol, struct common_sol_cqe *csol_cqe) { - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, + i_res_cnt, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + i_cmd_wnd, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, + wrb_index, psol); + csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, + cid, psol); + csol_cqe->hw_sts = 
AMAP_GET_BITS(struct amap_sol_cqe, + hw_sts, psol); + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, + i_resp, psol); + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, + i_sts, psol); + csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, + i_flags, psol); + } else { csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_exp_cmd_sn, psol); csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, @@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, cid, psol); csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, hw_sts, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_cmd_wnd, psol); if (AMAP_GET_BITS(struct amap_sol_cqe_v2, cmd_cmpl, psol)) @@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, if (AMAP_GET_BITS(struct amap_sol_cqe_v2, o, psol)) csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; - } else { - csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, - i_exp_cmd_sn, psol); - csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, - i_res_cnt, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, - i_cmd_wnd, psol); - csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, - wrb_index, psol); - csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, - cid, psol); - csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, - hw_sts, psol); - csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, - i_resp, psol); - csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, - i_sts, psol); - csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, - i_flags, psol); } } @@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; struct common_sol_cqe csol_cqe = {0}; + uint16_t cri_index = 0; phwi_ctrlr = phba->phwi_ctrlr; /* Copy the elements to a common structure */ adapter_get_sol_cqe(phba, psol, &csol_cqe); - pwrb_context = &phwi_ctrlr->wrb_context[ - csol_cqe.cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[ csol_cqe.wrb_index]; @@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba, unsigned char is_header = 0; unsigned int index, dpl; - if (chip_skh_r(phba->pcidev)) { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + if (is_chip_be2_be3r(phba)) { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, index, pdpdu_cqe); } else { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, index, pdpdu_cqe); } @@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba, WARN_ON(!pasync_handle); - pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start; + pasync_handle->cri = + BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); pasync_handle->is_header = is_header; pasync_handle->buffer_len = dpl; *pcq_index = index; @@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, } status = beiscsi_process_async_pdu(beiscsi_conn, phba, - (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start), phdr, hdr_len, 
pfirst_buffer, offset); @@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) unsigned int num_processed = 0; unsigned int tot_nump = 0; unsigned short code = 0, cid = 0; + uint16_t cri_index = 0; struct beiscsi_conn *beiscsi_conn; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; @@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 32] & CQE_CODE_MASK); /* Get the CID */ - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } else { if ((code == DRIVERMSG_NOTIFY) || (code == UNSOL_HDR_NOTIFY) || (code == UNSOL_DATA_NOTIFY)) @@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) else cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, cid, sol); - } else - cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } - ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + ep = phba->ep_array[cri_index]; beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; @@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work) static int be_iopoll(struct blk_iopoll *iop, int budget) { - static unsigned int ret; + unsigned int ret; struct beiscsi_hba *phba; struct be_eq_obj *pbe_eq; @@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) /* Check for the data_count */ dsp_value = (task->data_count) ? 1 : 0; - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, dsp_value); else - AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, dsp_value); /* Map addr only if there is data_count */ @@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) static int beiscsi_alloc_mem(struct beiscsi_hba *phba) { - struct be_mem_descriptor *mem_descr; dma_addr_t bus_add; + struct hwi_controller *phwi_ctrlr; + struct be_mem_descriptor *mem_descr; struct mem_array *mem_arr, *mem_arr_orig; unsigned int i, j, alloc_size, curr_alloc_size; @@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) if (!phba->phwi_ctrlr) return -ENOMEM; + /* Allocate memory for wrb_context */ + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * + phba->params.cxns_per_ctrl, + GFP_KERNEL); + if (!phwi_ctrlr->wrb_context) + return -ENOMEM; + phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), GFP_KERNEL); if (!phba->init_mem) { + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) GFP_KERNEL); if (!mem_arr_orig) { kfree(phba->init_mem); + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2628,6 +2640,7 @@ free_mem: } kfree(mem_arr_orig); kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba) static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; + struct hwi_context_memory *phwi_ctxt; struct wrb_handle *pwrb_handle = NULL; struct hwi_controller *phwi_ctrlr; struct hwi_wrb_context *pwrb_context; @@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) mem_descr_wrb += 
HWI_MEM_WRB; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + /* Allocate memory for WRBQ */ + phwi_ctxt = phwi_ctrlr->phwi_ctxt; + phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * + phba->fw_config.iscsi_cid_count, + GFP_KERNEL); + if (!phwi_ctxt->be_wrbq) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : WRBQ Mem Alloc Failed\n"); + return -ENOMEM; + } + + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; pwrb_context->pwrb_handle_base = kzalloc(sizeof(struct wrb_handle *) * @@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) } } idx = 0; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; if (!num_cxn_wrb) { pwrb = mem_descr_wrb->mem_array[idx].virtual_address; @@ -2752,7 +2777,7 @@ init_wrb_hndl_failed: return -ENOMEM; } -static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) +static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) { struct hwi_controller *phwi_ctrlr; struct hba_parameters *p = &phba->params; @@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; memset(pasync_ctx, 0, sizeof(*pasync_ctx)); + pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) * + phba->fw_config.iscsi_cid_count, + GFP_KERNEL); + if (!pasync_ctx->async_entry) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n"); + return -ENOMEM; + } + pasync_ctx->num_entries = p->asyncpdus_per_ctrl; pasync_ctx->buffer_size = p->defpdu_hdr_sz; @@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) pasync_ctx->async_header.ep_read_ptr = -1; pasync_ctx->async_data.host_write_ptr = 0; pasync_ctx->async_data.ep_read_ptr = -1; + + return 0; } static int @@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, void *wrb_vaddr; struct be_dma_mem sgl; struct be_mem_descriptor *mem_descr; + struct hwi_wrb_context *pwrb_context; int status; idx = 0; @@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, kfree(pwrb_arr); return status; } - phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 
- id; + pwrb_context = &phwi_ctrlr->wrb_context[i]; + pwrb_context->cid = phwi_context->be_wrbq[i].id; + BE_SET_CID_TO_CRI(i, pwrb_context->cid); } kfree(pwrb_arr); return 0; @@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba) struct hwi_wrb_context *pwrb_context; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); @@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; + struct hwi_async_pdu_context *pasync_ctx; int i, eq_num; phwi_ctrlr = phba->phwi_ctrlr; @@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); } + kfree(phwi_context->be_wrbq); free_wrb_handles(phba); q = &phwi_context->be_def_hdrq; @@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } be_mcc_queues_destroy(phba); + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; + kfree(pasync_ctx->async_entry); + be_cmd_fw_uninit(ctrl); } static int be_mcc_queues_create(struct beiscsi_hba *phba, @@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba) if (beiscsi_init_wrb_handle(phba)) return -ENOMEM; - hwi_init_async_pdu_ctx(phba); + if (hwi_init_async_pdu_ctx(phba)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx failed\n"); + return -ENOMEM; + } + if (hwi_init_port(phba) != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_controller failed\n"); @@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) mem_descr++; } kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); } @@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) static int hba_setup_cid_tbls(struct beiscsi_hba *phba) { - int i, new_cid; + int i; phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, GFP_KERNEL); @@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) return -ENOMEM; } phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * - phba->params.cxns_per_ctrl * 2, GFP_KERNEL); + phba->params.cxns_per_ctrl, GFP_KERNEL); if (!phba->ep_array) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Failed to allocate memory in " "hba_setup_cid_tbls\n"); kfree(phba->cid_array); + phba->cid_array = NULL; return -ENOMEM; } - new_cid = phba->fw_config.iscsi_cid_start; - for (i = 0; i < phba->params.cxns_per_ctrl; i++) { - phba->cid_array[i] = new_cid; - new_cid += 2; + + phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * + phba->params.cxns_per_ctrl, GFP_KERNEL); + if (!phba->conn_table) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory in" + "hba_setup_cid_tbls\n"); + + kfree(phba->cid_array); + kfree(phba->ep_array); + phba->cid_array = NULL; + phba->ep_array = NULL; + return -ENOMEM; } + + for (i = 0; i < phba->params.cxns_per_ctrl; i++) + phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; + phba->avlbl_cids = phba->params.cxns_per_ctrl; return 0; } @@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba) kfree(phba->eh_sgl_hndl_base); 
kfree(phba->cid_array); kfree(phba->ep_array); + kfree(phba->conn_table); +} + +/** + * beiscsi_free_mgmt_task_handles()- Free driver CXN resources + * @beiscsi_conn: ptr to the conn to be cleaned up + * @task: ptr to iscsi_task resource to be freed. + * + * Free driver mgmt resources binded to CXN. + **/ +void +beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + io_task = task->dd_data; + + if (io_task->pwrb_handle) { + memset(io_task->pwrb_handle->pwrb, 0, + sizeof(struct iscsi_wrb)); + free_wrb_handle(phba, pwrb_context, + io_task->pwrb_handle); + io_task->pwrb_handle = NULL; + } + + if (io_task->psgl_handle) { + spin_lock_bh(&phba->mgmt_sgl_lock); + free_mgmt_sgl_handle(phba, + io_task->psgl_handle); + io_task->psgl_handle = NULL; + spin_unlock_bh(&phba->mgmt_sgl_lock); + } + + if (io_task->mtask_addr) + pci_unmap_single(phba->pcidev, + io_task->mtask_addr, + io_task->mtask_data_count, + PCI_DMA_TODEVICE); } /** @@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->cmd_bhs) { pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, @@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) io_task->psgl_handle = NULL; } } else { - if (!beiscsi_conn->login_in_progress) { - if (io_task->pwrb_handle) { - free_wrb_handle(phba, pwrb_context, - io_task->pwrb_handle); - io_task->pwrb_handle = NULL; - } - if (io_task->psgl_handle) { - spin_lock(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, - io_task->psgl_handle); - spin_unlock(&phba->mgmt_sgl_lock); - io_task->psgl_handle = NULL; - } - if (io_task->mtask_addr) { - pci_unmap_single(phba->pcidev, - io_task->mtask_addr, - io_task->mtask_data_count, - PCI_DMA_TODEVICE); - io_task->mtask_addr = 0; - } - } + if (!beiscsi_conn->login_in_progress) + beiscsi_free_mgmt_task_handles(beiscsi_conn, task); } } @@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, beiscsi_cleanup_task(task); spin_unlock_bh(&session->lock); - pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start)); + pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); /* Check for the adapter family */ - if (chip_skh_r(phba->pcidev)) - beiscsi_offload_cxn_v2(params, pwrb_handle); - else + if (is_chip_be2_be3r(phba)) beiscsi_offload_cxn_v0(params, pwrb_handle, phba->init_mem); + else + beiscsi_offload_cxn_v2(params, pwrb_handle); be_dws_le_to_cpu(pwrb_handle->pwrb, sizeof(struct iscsi_target_context_update_wrb)); @@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; itt_t itt; + uint16_t cri_index = 0; struct beiscsi_session *beiscsi_sess = 
beiscsi_conn->beiscsi_sess; dma_addr_t paddr; @@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) goto free_hndls; } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } else { io_task->scsi_cmnd = NULL; if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { + beiscsi_conn->task = task; if (!beiscsi_conn->login_in_progress) { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = (struct sgl_handle *) @@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->psgl_handle; io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | @@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->pwrb_handle = beiscsi_conn->plogin_wrb_handle; } - beiscsi_conn->task = task; } else { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); @@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4324,12 +4414,13 @@ free_io_hndls: free_mgmt_hndls: spin_lock(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; spin_unlock(&phba->mgmt_sgl_lock); free_hndls: phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[ - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->pwrb_handle) free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; @@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, unsigned int doorbell = 0; pwrb = io_task->pwrb_handle->pwrb; - memset(pwrb, 0, sizeof(*pwrb)); io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; io_task->bhs_len = sizeof(struct be_cmd_bhs); @@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task) pwrb = io_task->pwrb_handle->pwrb; memset(pwrb, 0, sizeof(*pwrb)); - if (chip_skh_r(phba->pcidev)) { - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, - be32_to_cpu(task->cmdsn)); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, - io_task->pwrb_handle->wrb_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, - io_task->psgl_handle->sgl_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, - task->data_count); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, - io_task->pwrb_handle->nxt_wrb_index); - pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, @@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task) AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 
io_task->pwrb_handle->nxt_wrb_index); pwrb_typeoffset = BE_WRB_TYPE_OFFSET; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->nxt_wrb_index); + pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; } @@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task) case ISCSI_OP_NOOP_OUT: if (task->hdr->ttt != ISCSI_RESERVED_TAG) { ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 1); } else { ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 0); } hwi_write_buffer(pwrb, task); @@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task) } /* Set the task type */ - io_task->wrb_type = (chip_skh_r(phba->pcidev)) ? - AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) : - AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb); + io_task->wrb_type = (is_chip_be2_be3r(phba)) ? + AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : + AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); doorbell |= cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & @@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, case OC_SKH_ID1: phba->generation = BE_GEN4; phba->iotask_fn = beiscsi_iotask_v2; + break; default: phba->generation = 0; } diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 5946577d79d6..2c06ef3c02ac 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
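The beiscsi_mtask() and beiscsi_offload_connection() hunks above flip the chip test from the PCI-ID based chip_skh_r() to the generation-based is_chip_be2_be3r(), so the BE2/BE3-R families program the legacy descriptor layout and only Skyhawk-R uses the v2 layout. A minimal userspace sketch of that dispatch pattern follows; fill_wrb_v0()/fill_wrb_v2() are hypothetical stand-ins for the driver's AMAP_SET_BITS sequences, not its real helpers.

/* Standalone model (userspace C) of the generation-based dispatch: the
 * branch ordering now tests the older BE2/BE3-R families first via
 * is_chip_be2_be3r() instead of probing the PCI device ID. */
#include <stdio.h>

enum be_generation { BE_GEN2 = 2, BE_GEN3 = 3, BE_GEN4 = 4 };

struct hba_model { enum be_generation generation; };

#define chip_be2(phba)         ((phba)->generation == BE_GEN2)
#define chip_be3_r(phba)       ((phba)->generation == BE_GEN3)
#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || chip_be2(phba))

static void fill_wrb_v0(void) { puts("program legacy (v0) WRB layout"); }
static void fill_wrb_v2(void) { puts("program SKH-R (v2) WRB layout"); }

static void build_mgmt_wrb(const struct hba_model *phba)
{
    if (is_chip_be2_be3r(phba))   /* BE2/BE3-R: original descriptor format */
        fill_wrb_v0();
    else                          /* BE_GEN4 (Skyhawk-R): v2 format */
        fill_wrb_v2();
}

int main(void)
{
    struct hba_model be3 = { .generation = BE_GEN3 };
    struct hba_model skh = { .generation = BE_GEN4 };

    build_mgmt_wrb(&be3);
    build_mgmt_wrb(&skh);
    return 0;
}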
* * This program is free software; you can redistribute it and/or @@ -36,7 +36,7 @@ #include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.0.272.0" +#define BUILD_STR "10.0.467.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -66,8 +66,9 @@ #define MAX_CPUS 64 #define BEISCSI_MAX_NUM_CPUS 7 -#define OC_SKH_MAX_NUM_CPUS 63 +#define OC_SKH_MAX_NUM_CPUS 31 +#define BEISCSI_VER_STRLEN 32 #define BEISCSI_SGLIST_ELEMENTS 30 @@ -265,7 +266,9 @@ struct invalidate_command_table { unsigned short cid; } __packed; -#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1) +#define chip_be2(phba) (phba->generation == BE_GEN2) +#define chip_be3_r(phba) (phba->generation == BE_GEN3) +#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) struct beiscsi_hba { struct hba_parameters params; struct hwi_controller *phwi_ctrlr; @@ -304,10 +307,15 @@ struct beiscsi_hba { unsigned short avlbl_cids; unsigned short cid_alloc; unsigned short cid_free; - struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2]; struct list_head hba_queue; +#define BE_MAX_SESSION 2048 +#define BE_SET_CID_TO_CRI(cri_index, cid) \ + (phba->cid_to_cri_map[cid] = cri_index) +#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) + unsigned short cid_to_cri_map[BE_MAX_SESSION]; unsigned short *cid_array; struct iscsi_endpoint **ep_array; + struct beiscsi_conn **conn_table; struct iscsi_boot_kset *boot_kset; struct Scsi_Host *shost; struct iscsi_iface *ipv4_iface; @@ -339,6 +347,7 @@ struct beiscsi_hba { struct delayed_work beiscsi_hw_check_task; u8 mac_address[ETH_ALEN]; + char fw_ver_str[BEISCSI_VER_STRLEN]; char wq_name[20]; struct workqueue_struct *wq; /* The actuak work queue */ struct be_ctrl_info ctrl; @@ -563,7 +572,7 @@ struct hwi_async_pdu_context { * This is a varying size list! Do not add anything * after this entry!! */ - struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2]; + struct hwi_async_entry *async_entry; }; #define PDUCQE_CODE_MASK 0x0000003F @@ -749,6 +758,8 @@ void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); void beiscsi_process_all_cqs(struct work_struct *work); +void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task); static inline bool beiscsi_error(struct beiscsi_hba *phba) { @@ -933,7 +944,7 @@ struct hwi_controller { struct sgl_handle *psgl_handle_base; unsigned int wrb_mem_index; - struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2]; + struct hwi_wrb_context *wrb_context; struct mcc_wrb *pmcc_wrb_base; struct be_ring default_pdu_hdr; struct be_ring default_pdu_data; @@ -970,9 +981,7 @@ struct hwi_context_memory { struct be_queue_info be_def_hdrq; struct be_queue_info be_def_dataq; - struct be_queue_info be_wrbq[BE2_MAX_SESSIONS]; - struct be_mcc_wrb_context *pbe_mcc_context; - + struct be_queue_info *be_wrbq; struct hwi_async_pdu_context *pasync_ctx; }; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 55cc9902263d..245a9595a93a 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
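The be_main.h hunk above replaces the old cid - iscsi_cid_start arithmetic with the BE_SET_CID_TO_CRI()/BE_GET_CRI_FROM_CID() lookup table and turns wrb_context, ep_array and conn_table into dynamically allocated, CRI-indexed tables sized by cxns_per_ctrl. Below is a small standalone model of that mapping; the sizes and helper names are illustrative, not the driver's.

/* Userspace model of the CID <-> CRI mapping: the firmware-assigned
 * connection ID (CID) is no longer assumed to be contiguous, so a table
 * filled at WRB-ring creation time translates it to the driver's internal
 * connection-resource index (CRI). */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_MAX_SESSION 2048

struct model_hba {
    unsigned short cid_to_cri_map[MODEL_MAX_SESSION];
    unsigned short *cid_array;     /* CIDs available for allocation    */
    unsigned int cxns_per_ctrl;    /* tables are sized by this, not *2 */
};

static void set_cid_to_cri(struct model_hba *phba,
                           unsigned short cri, unsigned short cid)
{
    phba->cid_to_cri_map[cid] = cri;    /* mirrors BE_SET_CID_TO_CRI */
}

static unsigned short get_cri_from_cid(const struct model_hba *phba,
                                       unsigned short cid)
{
    return phba->cid_to_cri_map[cid];   /* mirrors BE_GET_CRI_FROM_CID */
}

int main(void)
{
    struct model_hba hba = { .cxns_per_ctrl = 4 };
    /* pretend firmware handed out sparse, non-contiguous CIDs */
    unsigned short fw_cids[] = { 17, 19, 42, 44 };
    unsigned int i;

    hba.cid_array = calloc(hba.cxns_per_ctrl, sizeof(*hba.cid_array));
    if (!hba.cid_array)
        return 1;

    for (i = 0; i < hba.cxns_per_ctrl; i++) {
        hba.cid_array[i] = fw_cids[i];
        set_cid_to_cri(&hba, i, fw_cids[i]);
    }

    /* later, per-connection state is indexed by CRI, not by raw CID */
    printf("cid 42 -> cri %u\n", get_cri_from_cid(&hba, 42));
    free(hba.cid_array);
    return 0;
}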
* * This program is free software; you can redistribute it and/or @@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phba->fw_config.iscsi_features = %d\n", phba->fw_config.iscsi_features); + memcpy(phba->fw_ver_str, resp->params.hba_attribs. + firmware_version_string, BEISCSI_VER_STRLEN); } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_check_supported_fw\n"); @@ -1260,6 +1262,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, } /** + * beiscsi_fw_ver_disp()- Display Firmware Version + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Firmware version + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str); +} + +/** + * beiscsi_active_cid_disp()- Display Sessions Active + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Session Count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", + (phba->params.cxns_per_ctrl - phba->avlbl_cids)); +} + +/** * beiscsi_adap_family_disp()- Display adapter family. * @dev: ptr to device to get priv structure * @attr: device attribute, not used. diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 2e4968add799..04af7e74fe48 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
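The two sysfs show routines added in be_mgmt.c report the firmware version string saved by mgmt_check_supported_fw() and the active connection count computed as cxns_per_ctrl minus avlbl_cids. A minimal userspace sketch of that formatting follows, with stand-in names for PAGE_SIZE and the hba fields.

/* Sketch of what the new read-only attributes print; values are made up. */
#include <stdio.h>

#define PAGE_SIZE_MODEL 4096
#define VER_STRLEN 32

struct model_hba {
    char fw_ver_str[VER_STRLEN];
    unsigned short cxns_per_ctrl;
    unsigned short avlbl_cids;
};

static int fw_ver_show(const struct model_hba *phba, char *buf)
{
    return snprintf(buf, PAGE_SIZE_MODEL, "%s\n", phba->fw_ver_str);
}

static int active_cid_show(const struct model_hba *phba, char *buf)
{
    /* active sessions = total connections minus still-available CIDs */
    return snprintf(buf, PAGE_SIZE_MODEL, "%d\n",
                    phba->cxns_per_ctrl - phba->avlbl_cids);
}

int main(void)
{
    struct model_hba hba = { "10.0.467.0", 128, 100 };
    char buf[PAGE_SIZE_MODEL];

    fw_ver_show(&hba, buf);     printf("beiscsi_fw_ver: %s", buf);
    active_cid_show(&hba, buf); printf("beiscsi_active_cid_count: %s", buf);
    return 0;
}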
* * This program is free software; you can redistribute it and/or @@ -156,25 +156,25 @@ union invalidate_commands_params { } __packed; struct mgmt_hba_attributes { - u8 flashrom_version_string[32]; - u8 manufacturer_name[32]; + u8 flashrom_version_string[BEISCSI_VER_STRLEN]; + u8 manufacturer_name[BEISCSI_VER_STRLEN]; u32 supported_modes; u8 seeprom_version_lo; u8 seeprom_version_hi; u8 rsvd0[2]; u32 fw_cmd_data_struct_version; u32 ep_fw_data_struct_version; - u32 future_reserved[12]; + u8 ncsi_version_string[12]; u32 default_extended_timeout; - u8 controller_model_number[32]; + u8 controller_model_number[BEISCSI_VER_STRLEN]; u8 controller_description[64]; - u8 controller_serial_number[32]; - u8 ip_version_string[32]; - u8 firmware_version_string[32]; - u8 bios_version_string[32]; - u8 redboot_version_string[32]; - u8 driver_version_string[32]; - u8 fw_on_flash_version_string[32]; + u8 controller_serial_number[BEISCSI_VER_STRLEN]; + u8 ip_version_string[BEISCSI_VER_STRLEN]; + u8 firmware_version_string[BEISCSI_VER_STRLEN]; + u8 bios_version_string[BEISCSI_VER_STRLEN]; + u8 redboot_version_string[BEISCSI_VER_STRLEN]; + u8 driver_version_string[BEISCSI_VER_STRLEN]; + u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN]; u32 functionalities_supported; u16 max_cdblength; u8 asic_revision; @@ -190,7 +190,8 @@ struct mgmt_hba_attributes { u32 firmware_post_status; u32 hba_mtu[8]; u8 iscsi_features; - u8 future_u8[3]; + u8 asic_generation; + u8 future_u8[2]; u32 future_u32[3]; } __packed; @@ -207,7 +208,7 @@ struct mgmt_controller_attributes { u64 unique_identifier; u8 netfilters; u8 rsvd0[3]; - u8 future_u32[4]; + u32 future_u32[4]; } __packed; struct be_mgmt_controller_attributes { @@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); ssize_t beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, char *buf); +ssize_t beiscsi_fw_ver_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_active_cid_disp(struct device *dev, + struct device_attribute *attr, char *buf); + ssize_t beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 11596b2c4702..08b22a901c25 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -2,7 +2,7 @@ #define _BNX2FC_H_ /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -64,10 +64,12 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.13" +#define BNX2FC_VERSION "1.0.14" #define PFX "bnx2fc: " +#define BCM_CHIP_LEN 16 + #define BNX2X_DOORBELL_PCI_BAR 2 #define BNX2FC_MAX_BD_LEN 0xffff @@ -241,6 +243,8 @@ struct bnx2fc_hba { int wait_for_link_down; int num_ofld_sess; struct list_head vports; + + char chip_num[BCM_CHIP_LEN]; }; struct bnx2fc_interface { diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index bdbbb13b8534..b1c9a4f8caee 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -3,7 +3,7 @@ * This file contains helper routines that handle ELS requests * and responses. 
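The be_mgmt.h hunks above resize the version-string fields to BEISCSI_VER_STRLEN and correct two mis-typed members (future_reserved becomes ncsi_version_string, and future_u32[4] changes from u8 to u32). In a __packed structure copied straight out of a firmware response, such element-type mistakes shift every later field. A toy illustration of that effect, not the driver's real layout, is shown below.

/* Toy __packed structs showing how a u8-vs-u32 array element changes the
 * offset of everything that follows it. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct attrs_wrong {
    uint8_t  netfilters;
    uint8_t  rsvd0[3];
    uint8_t  future_u32[4];     /* 4 bytes: later fields land too early */
    uint32_t next_field;
} __attribute__((packed));

struct attrs_fixed {
    uint8_t  netfilters;
    uint8_t  rsvd0[3];
    uint32_t future_u32[4];     /* 16 bytes, matching the wire format */
    uint32_t next_field;
} __attribute__((packed));

int main(void)
{
    printf("wrong: sizeof=%zu, next_field at offset %zu\n",
           sizeof(struct attrs_wrong),
           offsetof(struct attrs_wrong, next_field));
    printf("fixed: sizeof=%zu, next_field at offset %zu\n",
           sizeof(struct attrs_fixed),
           offsetof(struct attrs_fixed, next_field));
    return 0;
}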
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 7dffec1e5715..69ac55495c1d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -3,7 +3,7 @@ * cnic modules to create FCoE instances, send/receive non-offloaded * FIP/FCoE packets, listen to link events etc. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Dec 21, 2012" +#define DRV_MODULE_RELDATE "Mar 08, 2013" static char version[] = @@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct Scsi_Host *shost = lport->host; int rc = 0; @@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) } if (!lport->vport) fc_host_max_npiv_vports(lport->host) = USHRT_MAX; - sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", - BNX2FC_NAME, BNX2FC_VERSION, + snprintf(fc_host_symbolic_name(lport->host), 256, + "%s (Broadcom %s) v%s over %s", + BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, interface->netdev->name); return 0; @@ -1656,23 +1658,60 @@ mem_err: static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) { struct cnic_dev *cnic; + struct pci_dev *pdev; if (!hba->cnic) { printk(KERN_ERR PFX "cnic is NULL\n"); return -ENODEV; } cnic = hba->cnic; - hba->pcidev = cnic->pcidev; - if (hba->pcidev) - pci_dev_get(hba->pcidev); + pdev = hba->pcidev = cnic->pcidev; + if (!hba->pcidev) + return -ENODEV; + switch (pdev->device) { + case PCI_DEVICE_ID_NX2_57710: + strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57711: + strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57712: + case PCI_DEVICE_ID_NX2_57712_MF: + case PCI_DEVICE_ID_NX2_57712_VF: + strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57800: + case PCI_DEVICE_ID_NX2_57800_MF: + case PCI_DEVICE_ID_NX2_57800_VF: + strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57810: + case PCI_DEVICE_ID_NX2_57810_MF: + case PCI_DEVICE_ID_NX2_57810_VF: + strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57840: + case PCI_DEVICE_ID_NX2_57840_MF: + case PCI_DEVICE_ID_NX2_57840_VF: + case PCI_DEVICE_ID_NX2_57840_2_20: + case PCI_DEVICE_ID_NX2_57840_4_10: + strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); + break; + default: + pr_err(PFX "Unknown device id 0x%x\n", pdev->device); + break; + } + pci_dev_get(hba->pcidev); return 0; } static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) { - if (hba->pcidev) + if (hba->pcidev) { + hba->chip_num[0] = '\0'; pci_dev_put(hba->pcidev); + } hba->pcidev = NULL; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 50510ffe1bf5..c0d035a8f8f9 100644 --- 
a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -2,7 +2,7 @@ * This file contains the code that low level functions that interact * with 57712 FCoE firmware. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init3.error_bit_map_lo = 0xffffffff; fcoe_init3.error_bit_map_hi = 0xffffffff; - fcoe_init3.perf_config = 1; + /* + * enable both cached connection and cached tasks + * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both + */ + fcoe_init3.perf_config = 3; kwqe_arr[0] = (struct kwqe *) &fcoe_init1; kwqe_arr[1] = (struct kwqe *) &fcoe_init2; diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 723a9a8ba5ee..575142e92d9c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1,7 +1,7 @@ /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. * IO manager and SCSI IO processing. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_comp = 0; - if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, - &io_req->req_flags))) { + if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); + rc = SUCCESS; + } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { /* Let the scsi-ml try to recover this command */ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", io_req->xid); diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index c57a3bb8a9fb..4d93177dfb53 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -2,7 +2,7 @@ * Handles operations such as session offload/upload etc, and manages * session resources such as connection id and qp resources. 
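The bnx2fc_eh_abort() change above first checks BNX2FC_FLAG_IO_COMPL, so an I/O that completed on another CPU while the abort path slept is reported as a successful abort instead of falling into the test_and_set_bit(ABTS_DONE) failure branch. A userspace model of that race check follows, using C11 atomics as stand-ins for the kernel's test_bit()/test_and_set_bit(); flag names mirror the driver's but the rest is illustrative.

/* Model of the completed-vs-aborting race: only report failure when the
 * request is still outstanding and an ABTS was already claimed. */
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_IO_COMPL  (1u << 0)
#define FLAG_ABTS_DONE (1u << 1)

struct model_io_req { atomic_uint req_flags; };

static int model_eh_abort(struct model_io_req *io)
{
    unsigned int flags = atomic_load(&io->req_flags);

    if (flags & FLAG_IO_COMPL)
        return 0;   /* completed in a different context: SUCCESS */

    /* test_and_set_bit(ABTS_DONE): only the first setter issues the abort */
    if (!(atomic_fetch_or(&io->req_flags, FLAG_ABTS_DONE) & FLAG_ABTS_DONE)) {
        printf("issuing ABTS for this request\n");
        return 0;
    }
    printf("ABTS already in flight, escalate to next recovery step\n");
    return -1;      /* FAILED: let SCSI EH try the next recovery level */
}

int main(void)
{
    struct model_io_req done, live;

    atomic_init(&done.req_flags, FLAG_IO_COMPL);
    atomic_init(&live.req_flags, 0);

    printf("completed io   -> %d\n", model_eh_abort(&done));
    printf("outstanding io -> %d\n", model_eh_abort(&live));
    return 0;
}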
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h index 0f9c04175b11..372a67d122d3 100644 --- a/drivers/scsi/csiostor/csio_lnode.h +++ b/drivers/scsi/csiostor/csio_lnode.h @@ -114,7 +114,7 @@ struct csio_lnode_stats { uint32_t n_rnode_match; /* matched rnode */ uint32_t n_dev_loss_tmo; /* Device loss timeout */ uint32_t n_fdmi_err; /* fdmi err */ - uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ uint32_t n_rnode_alloc; /* rnode allocated */ uint32_t n_rnode_free; /* rnode freed */ diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h index 65940096a80d..433434221222 100644 --- a/drivers/scsi/csiostor/csio_rnode.h +++ b/drivers/scsi/csiostor/csio_rnode.h @@ -63,7 +63,7 @@ struct csio_rnode_stats { uint32_t n_err_nomem; /* error nomem */ uint32_t n_evt_unexp; /* unexpected event */ uint32_t n_evt_drop; /* unexpected event */ - uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ uint32_t n_lun_rst; /* Number of resets of * of LUNs under this diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 98436c363035..b6d1f92ed33c 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -38,7 +38,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.2" +#define DRV_VERSION "1.5.0.22" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " @@ -192,6 +192,18 @@ enum fnic_state { struct mempool; +enum fnic_evt { + FNIC_EVT_START_VLAN_DISC = 1, + FNIC_EVT_START_FCF_DISC = 2, + FNIC_EVT_MAX, +}; + +struct fnic_event { + struct list_head list; + struct fnic *fnic; + enum fnic_evt event; +}; + /* Per-instance private data structure */ struct fnic { struct fc_lport *lport; @@ -254,6 +266,18 @@ struct fnic { struct sk_buff_head frame_queue; struct sk_buff_head tx_queue; + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); + struct work_struct fip_frame_work; + struct sk_buff_head fip_frame_queue; + struct timer_list fip_timer; + struct list_head vlans; + spinlock_t vlans_lock; + + struct work_struct event_work; + struct list_head evlist; + /*** FIP related data members -- end ***/ + /* copy work queue cache line section */ ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; /* completion queue cache line section */ @@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) } extern struct workqueue_struct *fnic_event_queue; +extern struct workqueue_struct *fnic_fip_queue; extern struct device_attribute *fnic_attrs[]; void fnic_clear_intr_mode(struct fnic *fnic); @@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *); void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); void fnic_handle_frame(struct work_struct *work); void fnic_handle_link(struct work_struct *work); +void fnic_handle_event(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct 
vnic_rq *rq, struct vnic_rq_buf *buf); @@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic); int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); +void fnic_handle_fip_frame(struct work_struct *work); +void fnic_handle_fip_event(struct fnic *fnic); +void fnic_fcoe_reset_vlans(struct fnic *fnic); +void fnic_fcoe_evlist_free(struct fnic *fnic); +extern void fnic_handle_fip_timer(struct fnic *fnic); + static inline int fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) { diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 483eb9dbe663..006fa92a02df 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -31,12 +31,20 @@ #include <scsi/libfc.h> #include "fnic_io.h" #include "fnic.h" +#include "fnic_fip.h" #include "cq_enet_desc.h" #include "cq_exch_desc.h" +static u8 fcoe_all_fcfs[ETH_ALEN]; +struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; static void fnic_set_eth_mode(struct fnic *); +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); void fnic_handle_link(struct work_struct *work) { @@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work) FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); fcoe_ctlr_link_down(&fnic->ctlr); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); @@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work) } else if (fnic->link_status) { /* DOWN -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); } else { @@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work) } } +void fnic_fcoe_evlist_free(struct fnic *fnic) +{ + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + list_del(&fevt->list); + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +void fnic_handle_event(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, event_work); + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + if (fnic->stop_rx_link_events) { + list_del(&fevt->list); + kfree(fevt); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. 
+ */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_del(&fevt->list); + switch (fevt->event) { + case FNIC_EVT_START_VLAN_DISC: + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + spin_lock_irqsave(&fnic->fnic_lock, flags); + break; + case FNIC_EVT_START_FCF_DISC: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start FCF Discovery\n"); + fnic_fcoe_start_fcf_disc(fnic); + break; + default: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unknown event 0x%x\n", fevt->event); + break; + } + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/** + * Check if the Received FIP FLOGI frame is rejected + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is rejected with unsupported cmd with + * insufficient resource els explanation. + */ +static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, + struct sk_buff *skb) +{ + struct fc_lport *lport = fip->lp; + struct fip_header *fiph; + struct fc_frame_header *fh = NULL; + struct fip_desc *desc; + struct fip_encaps *els; + enum fip_desc_type els_dtype = 0; + u16 op; + u8 els_op; + u8 sub; + + size_t els_len = 0; + size_t rlen; + size_t dlen = 0; + + if (skb_linearize(skb)) + return 0; + + if (skb->len < sizeof(*fiph)) + return 0; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op != FIP_OP_LS) + return 0; + + if (sub != FIP_SC_REP) + return 0; + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return 0; + + desc = (struct fip_desc *)(fiph + 1); + dlen = desc->fip_dlen * FIP_BPW; + + if (desc->fip_dtype == FIP_DT_FLOGI) { + + shost_printk(KERN_DEBUG, lport->host, + " FIP TYPE FLOGI: fab name:%llx " + "vfid:%d map:%x\n", + fip->sel_fcf->fabric_name, fip->sel_fcf->vfid, + fip->sel_fcf->fc_map); + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + return 0; + + els_len = dlen - sizeof(*els); + els = (struct fip_encaps *)desc; + fh = (struct fc_frame_header *)(els + 1); + els_dtype = desc->fip_dtype; + + if (!fh) + return 0; + + /* + * ELS command code, reason and explanation should be = Reject, + * unsupported command and insufficient resource + */ + els_op = *(u8 *)(fh + 1); + if (els_op == ELS_LS_RJT) { + shost_printk(KERN_INFO, lport->host, + "Flogi Request Rejected by Switch\n"); + return 1; + } + shost_printk(KERN_INFO, lport->host, + "Flogi Request Accepted by Switch\n"); + } + return 0; +} + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct sk_buff *skb; + char *eth_fr; + int fr_len; + struct fip_vlan *vlan; + u64 vlan_tov; + + fnic_fcoe_reset_vlans(fnic); + fnic->set_vlan(fnic, 0); + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Sending VLAN request...\n"); + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) + return; + + fr_len = sizeof(*vlan); + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + 
vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); + + /* set a timer so that we can retry if there no response */ + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); +} + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct fip_header *fiph; + struct fip_desc *desc; + u16 vid; + size_t rlen; + size_t dlen; + struct fcoe_vlan *vlan; + u64 sol_time; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response...\n"); + + fiph = (struct fip_header *) skb->data; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", + ntohs(fiph->fip_op), fiph->fip_subcode); + + rlen = ntohs(fiph->fip_dl_len) * 4; + fnic_fcoe_reset_vlans(fnic); + spin_lock_irqsave(&fnic->vlans_lock, flags); + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + shost_printk(KERN_INFO, fnic->lport->host, + "process_vlan_resp: FIP VLAN %d\n", vid); + vlan = kmalloc(sizeof(*vlan), + GFP_ATOMIC); + if (!vlan) { + /* retry from timer */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + goto out; + } + memset(vlan, 0, sizeof(struct fcoe_vlan)); + vlan->vid = vid & 0x0fff; + vlan->state = FIP_VLAN_AVAIL; + list_add_tail(&vlan->list, &fnic->vlans); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + /* any VLAN descriptors present ? 
*/ + if (list_empty(&fnic->vlans)) { + /* retry from timer */ + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "No VLAN descriptors in FIP VLAN response\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + goto out; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(fip); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +out: + return; +} + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count = 1; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(&fnic->ctlr); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +} + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) +{ + unsigned long flags; + struct fcoe_vlan *fvlan; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; + } + + fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + if (fvlan->state == FIP_VLAN_USED) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + + if (fvlan->state == FIP_VLAN_SENT) { + fvlan->state = FIP_VLAN_USED; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; +} + +static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) +{ + struct fnic_event *fevt; + unsigned long flags; + + fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); + if (!fevt) + return; + + fevt->fnic = fnic; + fevt->event = ev; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fevt->list, &fnic->evlist); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_work(&fnic->event_work); +} + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +{ + struct fip_header *fiph; + int ret = 1; + u16 op; + u8 sub; + + if (!skb || !(skb->data)) + return -1; + + if (skb_linearize(skb)) + goto drop; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + goto drop; + + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + goto drop; + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { + if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) + goto drop; + /* pass it on to fcoe */ + ret = 1; + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) { + /* set the vlan as used */ + fnic_fcoe_process_vlan_resp(fnic, skb); + ret = 0; + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + /* received CVL request, restart vlan disc */ + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + /* pass it on to fcoe */ + ret = 1; + } +drop: + return ret; +} + +void fnic_handle_fip_frame(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + unsigned long flags; + struct sk_buff *skb; + struct ethhdr *eh; + + while ((skb = 
skb_dequeue(&fnic->fip_frame_queue))) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(skb); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_head(&fnic->fip_frame_queue, skb); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + eh = (struct ethhdr *)skb->data; + if (eh->h_proto == htons(ETH_P_FIP)) { + skb_pull(skb, sizeof(*eh)); + if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { + dev_kfree_skb(skb); + continue; + } + /* + * If there's FLOGI rejects - clear all + * fcf's & restart from scratch + */ + if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { + shost_printk(KERN_INFO, fnic->lport->host, + "Trigger a Link down - VLAN Disc\n"); + fcoe_ctlr_link_down(&fnic->ctlr); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + dev_kfree_skb(skb); + continue; + } + fcoe_ctlr_recv(&fnic->ctlr, skb); + continue; + } + } +} + /** * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. * @fnic: fnic instance. @@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) skb_reset_mac_header(skb); } if (eh->h_proto == htons(ETH_P_FIP)) { - skb_pull(skb, sizeof(*eh)); - fcoe_ctlr_recv(&fnic->ctlr, skb); + if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { + printk(KERN_ERR "Dropped FIP frame, as firmware " + "uses non-FIP mode, Enable FIP " + "using UCSM\n"); + goto drop; + } + skb_queue_tail(&fnic->fip_frame_queue, skb); + queue_work(fnic_fip_queue, &fnic->fip_frame_work); return 1; /* let caller know packet was used */ } if (eh->h_proto != htons(ETH_P_FCOE)) @@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) dev_kfree_skb(fp_skb(fp)); buf->os_buf = NULL; } + +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + struct fcoe_vlan *next; + + /* + * indicate a link down to fcoe so that all fcf's are free'd + * might not be required since we did this before sending vlan + * discovery request + */ + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlans)) { + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); +} + +void fnic_handle_fip_timer(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->ctlr.mode == FIP_ST_NON_FIP) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + /* no vlans available, try again */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + shost_printk(KERN_DEBUG, fnic->lport->host, + "fip_timer: vlan %d state %d sol_count %d\n", + vlan->vid, vlan->state, vlan->sol_count); + switch (vlan->state) { + case FIP_VLAN_USED: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "FIP VLAN 
is selected for FC transaction\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + break; + case FIP_VLAN_FAILED: + /* if all vlans are in failed state, restart vlan disc */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + break; + case FIP_VLAN_SENT: + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. + * Try the next vlan + */ + shost_printk(KERN_INFO, fnic->lport->host, + "Dequeue this VLAN ID %d from list\n", + vlan->vid); + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlans)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + shost_printk(KERN_INFO, fnic->lport->host, + "fip_timer: vlan list empty, " + "trigger vlan disc\n"); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + /* check the next vlan */ + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + vlan->sol_count++; + sol_time = jiffies + msecs_to_jiffies + (FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + break; + } +} diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h new file mode 100644 index 000000000000..87e74c2ab971 --- /dev/null +++ b/drivers/scsi/fnic/fnic_fip.h @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _FNIC_FIP_H_ +#define _FNIC_FIP_H_ + + +#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */ +#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */ +#define FCOE_CTLR_MAX_SOL 8 + +#define FINC_MAX_FLOGI_REJECTS 8 + +/* + * FIP_DT_VLAN descriptor. + */ +struct fip_vlan_desc { + struct fip_desc fd_desc; + __be16 fd_vlan; +} __attribute__((packed)); + +struct vlan { + __be16 vid; + __be16 type; +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + u16 vid; /* vlan ID */ + u16 sol_count; /* no. 
of sols sent */ + u16 state; /* state */ +}; + +enum fip_vlan_state { + FIP_VLAN_AVAIL = 0, /* don't do anything */ + FIP_VLAN_SENT = 1, /* sent */ + FIP_VLAN_USED = 2, /* succeed */ + FIP_VLAN_FAILED = 3, /* failed to response */ +}; + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +#endif /* __FINC_FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index d601ac543c52..5f09d1814d26 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -39,6 +39,7 @@ #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" +#include "fnic_fip.h" #include "fnic.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 @@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data) round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } +static void fnic_fip_notify_timer(unsigned long data) +{ + struct fnic *fnic = (struct fnic *)data; + + fnic_handle_fip_timer(fnic); +} + static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { @@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport) return fnic->data_src_addr; } +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +{ + u16 old_vlan; + old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} + static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *host; @@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + fnic->set_vlan = fnic_set_vlan; fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); + setup_timer(&fnic->fip_timer, fnic_fip_notify_timer, + (unsigned long)fnic); + spin_lock_init(&fnic->vlans_lock); + INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); + INIT_WORK(&fnic->event_work, fnic_handle_event); + skb_queue_head_init(&fnic->fip_frame_queue); + spin_lock_irqsave(&fnic_list_lock, flags); + if (!fnic_fip_queue) { + fnic_fip_queue = + create_singlethread_workqueue("fnic_fip_q"); + if (!fnic_fip_queue) { + spin_unlock_irqrestore(&fnic_list_lock, flags); + printk(KERN_ERR PFX "fnic FIP work queue " + "create failed\n"); + err = -ENOMEM; + goto err_out_free_max_pool; + } + } + spin_unlock_irqrestore(&fnic_list_lock, flags); + INIT_LIST_HEAD(&fnic->evlist); + INIT_LIST_HEAD(&fnic->vlans); } else { shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); @@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev) skb_queue_purge(&fnic->frame_queue); skb_queue_purge(&fnic->tx_queue); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + del_timer_sync(&fnic->fip_timer); + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + fnic_fcoe_evlist_free(fnic); + } + /* * Log off the fabric. This stops all remote ports, dns port, * logs off the fabric. 
This flushes all rport, disc, lport work @@ -889,8 +932,8 @@ static int __init fnic_init_module(void) len = sizeof(struct fnic_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, - SLAB_HWCACHE_ALIGN, - NULL); + SLAB_HWCACHE_ALIGN, + NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); err = -ENOMEM; @@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void) { pci_unregister_driver(&fnic_driver); destroy_workqueue(fnic_event_queue); + if (fnic_fip_queue) { + flush_workqueue(fnic_fip_queue); + destroy_workqueue(fnic_fip_queue); + } kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_io_req_cache); diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index b576be734e2e..9795d6f3e197 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg) return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); } +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan) +{ + u64 a0 = new_default_vlan, a1 = 0; + int wait = 1000; + int old_vlan = 0; + + old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait); + return (u16)old_vlan; +} + int vnic_dev_link_status(struct vnic_dev *vdev) { if (vdev->linkstatus) diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h index f9935a8a5a09..40d4195f562b 100644 --- a/drivers/scsi/fnic/vnic_dev.h +++ b/drivers/scsi/fnic/vnic_dev.h @@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev); int vnic_dev_open(struct vnic_dev *vdev, int arg); int vnic_dev_open_done(struct vnic_dev *vdev, int *done); int vnic_dev_init(struct vnic_dev *vdev, int arg); +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, + u16 new_default_vlan); int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); void vnic_dev_set_intr_mode(struct vnic_dev *vdev, diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h index 7c9ccbd4134b..3e2fcbda6aed 100644 --- a/drivers/scsi/fnic/vnic_devcmd.h +++ b/drivers/scsi/fnic/vnic_devcmd.h @@ -196,6 +196,73 @@ enum vnic_devcmd_cmd { /* undo initialize of virtual link */ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* persistent binding info + * in: (u64)a0=paddr of arg + * (u32)a1=CMD_PERBI_XXX */ + CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37), + + /* Interrupt Assert Register functionality + * in: (u16)a0=interrupt number to assert + */ + CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (u32)a0=new vlan rewrite mode + * out: (u32)a0=old vlan rewrite mode */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (u16)a0=bdf of target vnic + * (u32)a1=cmd to proxy + * a2-a15=args to cmd in a1 + * out: (u32)a0=status of proxied cmd + 
* a1-a15=out args of proxied cmd */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), + + /* + * As for BY_BDF except a0 is index of hvnlink subordinate vnic + * or SR-IOV virtual vnic + */ + CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), + + /* + * For HPP toggle: + * adapter-info-get + * in: (u64)a0=physical address of buffer passed in from caller. + * (u16)a1=size of buffer specified in a0. + * out: (u64)a0=physical address of buffer passed in from caller. + * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or + * 0 if no VIF-CONFIG-INFO TLV was ever received. */ + CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + + /* + * INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* + * Set default vlan: + * in: (u16)a0=new default vlan + * (u16)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (u16)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46) }; /* flags for CMD_OPEN */ diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index cc82d0f322b6..4e31caa21ddf 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) return 0; } - if (vhost->state == IBMVFC_ACTIVE) { + if (vhost->logged_in) { evt = ibmvfc_get_event(vhost); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); @@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) tmf->common.length = sizeof(*tmf); tmf->scsi_id = rport->port_id; int_to_scsilun(sdev->lun, &tmf->lun); - tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS)) + type &= ~IBMVFC_TMF_SUPPRESS_ABTS; + if (vhost->state == IBMVFC_ACTIVE) + tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + else + tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID); tmf->cancel_key = (unsigned long)sdev->hostdata; tmf->my_cancel_key = (unsigned long)starget->hostdata; @@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) timeout = wait_for_completion_timeout(&evt->comp, timeout); if (!timeout) { - rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + rc = ibmvfc_cancel_all(sdev, 0); if (!rc) { rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); if (rc == SUCCESS) @@ -2383,24 +2388,30 @@ out: * @cmd: scsi command to abort * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, abort_rc; + int cancel_rc, block_rc; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - abort_rc = ibmvfc_abort_task_set(sdev); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + ibmvfc_abort_task_set(sdev); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); - if (!cancel_rc && !abort_rc) + if (!cancel_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2410,29 +2421,47 @@ static int 
ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, reset_rc; + int cancel_rc, block_rc, reset_rc = 0; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } /** + * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); +} + +/** * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function * @sdev: scsi device struct * @data: return code @@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); struct scsi_target *starget = scsi_target(sdev); - int reset_rc; + int block_rc; + int reset_rc = 0; int rc = FAILED; unsigned long cancel_rc = 0; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + if (block_rc != FAST_IO_FAIL) { + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + } else + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) **/ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) { - int rc; + int rc, block_rc; struct ibmvfc_host *vhost = shost_priv(cmd->device->host); - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); dev_err(vhost->dev, "Resetting connection due to error recovery\n"); rc = ibmvfc_issue_fc_host_lip(vhost->host); + + if (block_rc == FAST_IO_FAIL) + return FAST_IO_FAIL; + return rc ? 
FAILED : SUCCESS; } @@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) dev_rport = starget_to_rport(scsi_target(sdev)); if (dev_rport != rport) continue; - ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - ibmvfc_abort_task_set(sdev); + ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); } rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 3be8af624e6f..017a5290e8c1 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.10" -#define IBMVFC_DRIVER_DATE "(August 24, 2012)" +#define IBMVFC_DRIVER_VERSION "1.0.11" +#define IBMVFC_DRIVER_DATE "(April 12, 2013)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp { u16 error; u32 flags; #define IBMVFC_NATIVE_FC 0x01 -#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; u64 capabilities; #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 +#define IBMVFC_CAN_SUPPRESS_ABTS 0x10 u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -351,6 +351,7 @@ struct ibmvfc_tmf { #define IBMVFC_TMF_LUN_RESET 0x10 #define IBMVFC_TMF_TGT_RESET 0x20 #define IBMVFC_TMF_LUA_VALID 0x40 +#define IBMVFC_TMF_SUPPRESS_ABTS 0x80 u32 cancel_key; u32 my_cancel_key; u32 pad; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 2197b57fb225..82a3c1ec8706 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd) ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (!ioa_cfg->in_reset_reload) { + if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); dev_err(&ioa_cfg->pdev->dev, "Adapter being reset as a result of error recovery.\n"); @@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, { u32 ioadl_flags = 0; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; struct ipr_ioadl64_desc *last_ioadl64 = NULL; int len = qc->nbytes; struct scatterlist *sg; @@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); ioarcb->u.sis64_addr_data.data_ioadl_addr = - cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); for_each_sg(qc->sg, sg, qc->n_elem, si) { ioadl64->flags = cpu_to_be32(ioadl_flags); @@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int i; ENTER; if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { @@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) ioa_cfg->in_reset_reload = 0; ioa_cfg->reset_retries = 0; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].ioa_is_dead = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); wake_up_all(&ioa_cfg->reset_wait_q); LEAVE; @@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct 
pci_dev *pdev) spin_lock_irqsave(ioa_cfg->host->host_lock, flags); if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; - ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; + ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; ioa_cfg->in_ioa_bringdown = 1; for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 21a6ff1ed5c6..a1fb840596ef 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs { /* 22 bytes */ u8 hob_lbam; u8 hob_lbah; u8 ctl; -}__attribute__ ((packed, aligned(4))); +}__attribute__ ((packed, aligned(2))); struct ipr_ioadl_desc { __be32 flags_and_data_len; diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index c3aa6c5457b9..96a26f454673 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s struct isci_host *ihost = idev->owning_port->owning_controller; struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev)) { sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); @@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_not_ready(ihost, idev, diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 7674caae1d88..47a013fffae7 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte static inline bool dev_is_expander(struct domain_device *dev) { - return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; + return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE; } static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 9594ab62702b..e3e3bcbd5a9f 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) /* all unaccelerated request types (non ssp or ncq) handled with * substates */ - if (!task && dev->dev_type == SAS_END_DEV) { + if (!task && dev->dev_type == SAS_END_DEVICE) { state = SCI_REQ_TASK_WAIT_TC_COMP; } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { state = SCI_REQ_SMP_WAIT_RESP; @@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost, if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) /* pass */; else if (dev_is_sata(dev)) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); @@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost, /* Build the common part of the request */ 
sci_general_request_construct(ihost, idev, ireq); - if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { + if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { set_bit(IREQ_TMF, &ireq->flags); memset(ireq->tc, 0, sizeof(struct scu_task_context)); diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index b6f19a1db780..9bb020ac089c 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, } /* XXX convert to get this from task->tproto like other drivers */ - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { isci_tmf->proto = SAS_PROTOCOL_SSP; status = sci_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index bdb81cda8401..161c98efade9 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) if (phy->attached_tproto & SAS_PROTOCOL_STP) dev->tproto = phy->attached_tproto; if (phy->attached_sata_dev) - dev->tproto |= SATA_DEV; + dev->tproto |= SAS_SATA_DEV; - if (phy->attached_dev_type == SATA_PENDING) - dev->dev_type = SATA_PENDING; + if (phy->attached_dev_type == SAS_SATA_PENDING) + dev->dev_type = SAS_SATA_PENDING; else { int res; - dev->dev_type = SATA_DEV; + dev->dev_type = SAS_SATA_DEV; res = sas_get_report_phy_sata(dev->parent, phy->phy_id, &dev->sata_dev.rps_resp); if (res) { @@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) int res; /* we weren't pending, so successfully end the reset sequence now */ - if (dev->dev_type != SATA_PENDING) + if (dev->dev_type != SAS_SATA_PENDING) return 1; /* hmmm, if this succeeds do we need to repost the domain_device to the @@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link) return 0; switch (ex_phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: return 0; - case SAS_END_DEV: + case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); default: @@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev) struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; - if (dev->dev_type == SATA_PENDING) + if (dev->dev_type == SAS_SATA_PENDING) return; if ((fis->sector_count == 1 && /* ATA */ @@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev) { int res; - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) return -ENODEV; sas_get_ata_command_set(dev); diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index a0c3003e0c7d..62b58d38ce2e 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -39,11 +39,11 @@ void sas_init_dev(struct domain_device *dev) { switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: INIT_LIST_HEAD(&dev->ex_dev.children); mutex_init(&dev->ex_dev.cmd_mutex); break; @@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port) if (fis->interrupt_reason == 1 && fis->lbal == 1 && fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 && (fis->device & ~0x10) == 0) - dev->dev_type = SATA_PM; + dev->dev_type = SAS_SATA_PM; else - dev->dev_type = 
SATA_DEV; + dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; } else { struct sas_identify_frame *id = @@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->port = port; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: rc = sas_ata_init(dev); if (rc) { rphy = NULL; break; } /* fall through */ - case SAS_END_DEV: + case SAS_END_DEVICE: rphy = sas_end_device_alloc(port->port); break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->rphy = rphy; get_device(&dev->rphy->dev); - if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) + if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE) list_add_tail(&dev->disco_list_node, &port->disco_list); else { spin_lock_irq(&port->dev_list_lock); @@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref) dev->phy = NULL; /* remove the phys and ports, everything else should be gone */ - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) kfree(dev->ex_dev.ex_phy); if (dev_is_sata(dev) && dev->sata_dev.ap) { @@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d spin_unlock_irq(&port->dev_list_lock); spin_lock_irq(&ha->lock); - if (dev->dev_type == SAS_END_DEV && + if (dev->dev_type == SAS_END_DEVICE && !list_empty(&dev->ssp_dev.eh_list_node)) { list_del_init(&dev->ssp_dev.eh_list_node); ha->eh_active--; @@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work) task_pid_nr(current)); switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: error = sas_discover_end_dev(dev); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: error = sas_discover_root_expander(dev); break; - case SATA_DEV: - case SATA_PM: + case SAS_SATA_DEV: + case SAS_SATA_PM: #ifdef CONFIG_SCSI_SAS_ATA error = sas_discover_sata(dev); break; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f42b0e15410f..446b85110a1f 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) } } -static enum sas_dev_type to_dev_type(struct discover_resp *dr) +static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ - if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev && + if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) - return SATA_PENDING; + return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; @@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* Handle vacant phy - rest of dr data is not valid so skip it */ if (phy->phy_state == PHY_VACANT) { memset(phy->attached_sas_addr, 0, 
SAS_ADDR_SIZE); - phy->attached_dev_type = NO_DEVICE; + phy->attached_dev_type = SAS_PHY_UNUSED; if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { phy->phy_id = phy_id; goto skip; @@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* help some expanders that fail to zero sas_address in the 'no * device' case */ - if (phy->attached_dev_type == NO_DEVICE || + if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else @@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) out: switch (phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: type = "stp pending"; break; - case NO_DEVICE: + case SAS_PHY_UNUSED: type = "no device"; break; - case SAS_END_DEV: + case SAS_END_DEVICE: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; @@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) type = "ssp"; } break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: @@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev( } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { - child->dev_type = SAS_END_DEV; + child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) @@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander( switch (phy->attached_dev_type) { - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); - if (ex_phy->attached_dev_type == NO_DEVICE) { + if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); @@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; - if (ex_phy->attached_dev_type != SAS_END_DEV && - ex_phy->attached_dev_type != FANOUT_DEV && - ex_phy->attached_dev_type != EDGE_DEV && - ex_phy->attached_dev_type != SATA_PENDING) { + if (ex_phy->attached_dev_type != SAS_END_DEVICE && + ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), @@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } switch (ex_phy->attached_dev_type) { - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", @@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct 
domain_device *dev, int phy_id) memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: @@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == EDGE_DEV || - phy->attached_dev_type == FANOUT_DEV) && + if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); @@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev) u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { - if (child->dev_type != EDGE_DEV && - child->dev_type != FANOUT_DEV) + if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); @@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) int i; u8 *sub_sas_addr = NULL; - if (dev->dev_type != EDGE_DEV) + if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { @@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == FANOUT_DEV || - phy->attached_dev_type == EDGE_DEV) && + if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) @@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *child_phy) { static const char *ex_type[] = { - [EDGE_DEV] = "edge", - [FANOUT_DEV] = "fanout", + [SAS_EDGE_EXPANDER_DEVICE] = "edge", + [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; @@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child) if (!child->parent) return 0; - if (child->parent->dev_type != EDGE_DEV && - child->parent->dev_type != FANOUT_DEV) + if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; @@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child) child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { - case EDGE_DEV: - if (child->dev_type == FANOUT_DEV) { + case SAS_EDGE_EXPANDER_DEVICE: + if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child) } } break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { - if (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == 
SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); @@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev, } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, - u8 *sas_addr, enum sas_dev_type *type) + u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; @@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { - if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { + if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; @@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); @@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); @@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root, int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) { + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); @@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = sas_discover_bfs_by_root(child); break; } @@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) return res; } -static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old) +static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went - * SAS_END_DEV to SATA_PENDING the link needs recovery + * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ - if ((old == SATA_PENDING && new == SAS_END_DEV) || - (old == SAS_END_DEV && new == SATA_PENDING)) + if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || + (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return false; @@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; - enum sas_dev_type type = NO_DEVICE; + enum 
sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; @@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) sas_ex_phy_discover(dev, phy_id); - if (ata_dev && phy->attached_dev_type == SATA_PENDING) + if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 1de67964e5a1..7e7ba83f0a21 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev, rphy->identify.initiator_port_protocols = dev->iproto; rphy->identify.target_port_protocols = dev->tproto; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: /* FIXME: need sata device type */ - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: rphy->identify.device_type = SAS_END_DEVICE; break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; break; default: diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 1398b714c018..d3c5297c6c89 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) continue; } - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { dev->ex_dev.ex_change_count = -1; for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 7706c99ec8bb..bcc56cac4fd8 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -46,10 +46,15 @@ struct lpfc_sli2_slim; #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi cmnd for menlo needs nearly twice as for firmware downloads using bsg */ -#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ + +#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ +#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ +#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ + #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ -#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ @@ -66,8 +71,10 @@ struct lpfc_sli2_slim; * queue depths when there are driver resource error or Firmware * resource error. 
*/ -#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ -#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ +/* 1 Second */ +#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) +/* 5 minutes */ +#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300)) /* Number of exchanges reserved for discovery to complete */ #define LPFC_DISC_IOCB_BUFF_COUNT 20 @@ -671,6 +678,7 @@ struct lpfc_hba { uint32_t lmt; uint32_t fc_topology; /* link topology, from LINK INIT */ + uint32_t fc_topology_changed; /* link topology, from LINK INIT */ struct lpfc_stats fc_stat; @@ -701,9 +709,11 @@ struct lpfc_hba { uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; uint32_t cfg_fcp_imax; + uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_wq_count; uint32_t cfg_fcp_eq_count; uint32_t cfg_fcp_io_channel; + uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -804,8 +814,10 @@ struct lpfc_hba { uint64_t bg_reftag_err_cnt; /* fastpath list. */ - spinlock_t scsi_buf_list_lock; - struct list_head lpfc_scsi_buf_list; + spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */ + spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */ + struct list_head lpfc_scsi_buf_list_get; + struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 9290713af253..3c5625b8b1f4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) int i; int rc; + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + return 0; + init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); @@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba) int status = 0; int rc; - if (!phba->cfg_enable_hba_reset) + if ((!phba->cfg_enable_hba_reset) || + (phba->pport->fc_flag & FC_OFFLINE_MODE)) return -EACCES; status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); @@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) @@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3054 lpfc_topology changed from %d to %d\n", prev_val, val); + if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) + phba->fc_topology_changed = 1; err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_topology = prev_val; @@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, lpfc_fcp_imax_show, lpfc_fcp_imax_store); +/** + * lpfc_state_show - Display current driver CPU affinity + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains text describing the state of the link. + * + * Returns: size of formatted string. 
+ **/ +static ssize_t +lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vector_map_info *cpup; + int idx, len = 0; + + if ((phba->sli_rev != LPFC_SLI_REV4) || + (phba->intr_type != MSIX)) + return len; + + switch (phba->cfg_fcp_cpu_map) { + case 0: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: No mapping (%d)\n", + phba->cfg_fcp_cpu_map); + return len; + case 1: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: HBA centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + case 2: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: Driver centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + } + + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d\n", + idx, cpup->channel_id, cpup->phys_id, + cpup->core_id); + else + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d IRQ %d\n", + idx, cpup->channel_id, cpup->phys_id, + cpup->core_id, cpup->irq); + + cpup++; + } + return len; +} + +/** + * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: one or more lpfc_polling_flags values. + * @count: not used. + * + * Returns: + * -EINVAL - Not implemented yet. + **/ +static ssize_t +lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int status = -EINVAL; + return status; +} + +/* +# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors +# for the HBA. +# +# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2). +# 0 - Do not affinitize IRQ vectors +# 1 - Affinitize HBA vectors with respect to each HBA +# (start with CPU0 for each HBA) +# 2 - Affinitize HBA vectors with respect to the entire driver +# (round robin thru all CPUs across all HBAs) +*/ +static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; +module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_fcp_cpu_map, + "Defines how to map CPUs to IRQ vectors per HBA"); + +/** + * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode + * @phba: lpfc_hba pointer. + * @val: CPU mapping mode value. + * + * Description: + * If val is in a valid range [0-2], then affinitize the adapter's + * MSIX vectors. + * + * Returns: + * zero if val saved. + * -EINVAL val out of range + **/ +static int +lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) +{ + if (phba->sli_rev != LPFC_SLI_REV4) { + phba->cfg_fcp_cpu_map = 0; + return 0; + } + + if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) { + phba->cfg_fcp_cpu_map = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3326 fcp_cpu_map: %d out of range, using default\n", + val); + phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; + + return 0; +} + +static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR, + lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store); + /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3.
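For readers skimming the lpfc_fcp_cpu_map additions above: lpfc_fcp_cpu_map_init() is an instance of the common validate-or-fall-back-to-default idiom for range-checked module parameters, keeping the requested value when it lies in the documented range and otherwise logging it and reverting to the built-in default. A minimal stand-alone sketch of that idiom follows; the names are hypothetical rather than lpfc code, and the [0-2] range with default 2 is taken from the parameter description above.

#include <stdio.h>

#define CPU_MAP_MIN     0   /* no affinity */
#define CPU_MAP_MAX     2   /* driver-centric mapping */
#define CPU_MAP_DEFAULT 2   /* assumed default, mirroring LPFC_DRIVER_CPU_MAP (2) */

/* Return a validated mapping mode: keep val if in range, else use the default. */
static int cpu_map_init(int val)
{
	if (val >= CPU_MAP_MIN && val <= CPU_MAP_MAX)
		return val;

	fprintf(stderr, "cpu_map: %d out of range, using default %d\n",
		val, CPU_MAP_DEFAULT);
	return CPU_MAP_DEFAULT;
}

int main(void)
{
	printf("%d\n", cpu_map_init(1)); /* in range -> 1 */
	printf("%d\n", cpu_map_init(7)); /* out of range -> falls back to 2 */
	return 0;
}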
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); # 0 = disabled (default) # 1 = enabled # Value range is [0,1]. Default value is 0. +# +# This feature is under investigation and may be supported in the future. */ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; -module_param(lpfc_fcp_look_ahead, uint, S_IRUGO); -MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions"); - /* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the @@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery, /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count - * This value can be set to values between 64 and 256. The default value is + * This value can be set to values between 64 and 4096. The default value is * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). + * Because of the additional overhead involved in setting up T10-DIF, + * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 + * and will be limited to 512 if BlockGuard is enabled under SLI3. */ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); -LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, - LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, - "Max Protection Scatter Gather Segment Count"); +/* + * This parameter will be deprecated, the driver cannot limit the + * protection data s/g list. + */ +LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, + LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, + "Max Protection Scatter Gather Segment Count"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, @@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, + &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_wq_count, &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_fcp_io_channel, @@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 888666892004..094be2cad65b 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, unsigned int transfer_bytes, bytes_copied = 0; unsigned int sg_offset, dma_offset; unsigned char *dma_address, *sg_address; - struct scatterlist *sgel; LIST_HEAD(temp_list); - + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + bool sg_valid; list_splice_init(&dma_buffers->list, &temp_list); list_add(&dma_buffers->list, &temp_list); sg_offset = 0; - sgel = bsg_buffers->sg_list; + if (to_buffers) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, + sg_flags); + local_irq_save(flags); + sg_valid = sg_miter_next(&miter); list_for_each_entry(mp, &temp_list, list) { dma_offset = 0; - while (bytes_to_transfer && sgel && + while (bytes_to_transfer && sg_valid && (dma_offset < LPFC_BPL_SIZE)) { dma_address = mp->virt 
+ dma_offset; if (sg_offset) { /* Continue previous partial transfer of sg */ - sg_address = sg_virt(sgel) + sg_offset; - transfer_bytes = sgel->length - sg_offset; + sg_address = miter.addr + sg_offset; + transfer_bytes = miter.length - sg_offset; } else { - sg_address = sg_virt(sgel); - transfer_bytes = sgel->length; + sg_address = miter.addr; + transfer_bytes = miter.length; } if (bytes_to_transfer < transfer_bytes) transfer_bytes = bytes_to_transfer; @@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, sg_offset += transfer_bytes; bytes_to_transfer -= transfer_bytes; bytes_copied += transfer_bytes; - if (sg_offset >= sgel->length) { + if (sg_offset >= miter.length) { sg_offset = 0; - sgel = sg_next(sgel); + sg_valid = sg_miter_next(&miter); } } } + sg_miter_stop(&miter); + local_irq_restore(flags); list_del_init(&dma_buffers->list); list_splice(&temp_list, &dma_buffers->list); return bytes_copied; @@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) cmdiocbq->context1 = dd_data; cmdiocbq->context2 = cmp; cmdiocbq->context3 = bmp; + cmdiocbq->context_un.ndlp = ndlp; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; @@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, ctiocb->context1 = dd_data; ctiocb->context2 = cmp; ctiocb->context3 = bmp; + ctiocb->context_un.ndlp = ndlp; ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; dd_data->type = TYPE_IOCB; @@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { @@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? 
-EINTR : -ETIMEDOUT; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 7631893ae005..d41456e5f814 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); +void lpfc_sli4_offline_eratt(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 7bff3a19af56..ae1a07c57cae 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) if (init_utsname()->nodename[0] != '\0') lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, jiffies + + msecs_to_jiffies(1000 * 60)); } return; } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index bbed8471bf0b..3cae0a92e8bd 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -29,6 +29,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.remoteID = did; /* DID */ icmd->ulpCommand = CMD_ELS_REQUEST64_CR; - icmd->ulpTimeout = phba->fc_ratov * 2; + if (elscmd == ELS_CMD_FLOGI) + icmd->ulpTimeout = FF_DEF_RATOV * 2; + else + icmd->ulpTimeout = phba->fc_ratov * 2; } else { icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); @@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, /* Xmit ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0116 Xmit ELS command x%x to remote " - "NPORT x%x I/O tag: x%x, port state: x%x\n", + "NPORT x%x I/O tag: x%x, port state:x%x" + " fc_flag:x%x\n", elscmd, did, elsiocb->iotag, - vport->port_state); + vport->port_state, + vport->fc_flag); } else { /* Xmit ELS response <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0117 Xmit ELS response x%x to remote " - "NPORT x%x I/O tag: x%x, size: x%x\n", + "NPORT x%x I/O tag: x%x, size: x%x " + "port_state x%x fc_flag x%x\n", elscmd, ndlp->nlp_DID, elsiocb->iotag, - cmdSize); + cmdSize, vport->port_state, + vport->fc_flag); } return elsiocb; @@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + + /* The FC_VFI_REGISTERED flag will get clear in the cmpl + * handler for unreg_vfi, but if we don't force the + * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be + * built with the update bit set instead of just the vp bit to + * change the Nport ID. We need to have the vp set and the + * Upd cleared on topology changes. 
+ */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } /* Start discovery - this should just do CLEAR_LA */ lpfc_disc_start(vport); @@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi: vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; if ((phba->sli_rev == LPFC_SLI_REV4) && (!(vport->fc_flag & FC_VFI_REGISTERED) || - (vport->fc_prevDID != vport->fc_myDID))) { - if (vport->fc_flag & FC_VFI_REGISTERED) - lpfc_sli4_unreg_all_rpis(vport); + (vport->fc_prevDID != vport->fc_myDID) || + phba->fc_topology_changed)) { + if (vport->fc_flag & FC_VFI_REGISTERED) { + if (phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } else { + lpfc_sli4_unreg_all_rpis(vport); + } + } lpfc_issue_reg_vfi(vport); lpfc_nlp_put(ndlp); goto out; @@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi: /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0101 FLOGI completes successfully " - "Data: x%x x%x x%x x%x\n", + "0101 FLOGI completes successfully, I/O tag:x%x, " + "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, irsp->un.ulpWord[4], sp->cmn.e_d_tov, - sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); + sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, + vport->port_state, vport->fc_flag); if (vport->port_state == LPFC_FLOGI) { /* @@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; uint32_t cmd, did; int rc; + uint32_t fc_flag = 0; + uint32_t port_state = 0; cmd = *lp++; sp = (struct serv_parm *) lp; @@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * will be. */ vport->fc_myDID = PT2PT_LocalID; - } + } else + vport->fc_myDID = PT2PT_RemoteID; /* * The vport state should go to LPFC_FLOGI only * AFTER we issue a FLOGI, not receive one. 
*/ spin_lock_irq(shost->host_lock); + fc_flag = vport->fc_flag; + port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->port_state = LPFC_FLOGI; spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3311 Rcv Flogi PS x%x new PS x%x " + "fc_flag x%x new fc_flag x%x\n", + port_state, vport->port_state, + fc_flag, vport->fc_flag); /* * We temporarily set fc_myDID to make it look like we are @@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) } if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); } /** @@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0112 ELS command x%x received from NPORT x%x " - "Data: x%x\n", cmd, did, vport->port_state); + "Data: x%x x%x x%x x%x\n", + cmd, did, vport->port_state, vport->fc_flag, + vport->fc_myDID, vport->fc_prevDID); switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvPLOGI++; ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->pport->fc_flag & FC_PT2PT)) { + vport->fc_prevDID = vport->fc_myDID; + /* Our DID needs to be updated before registering + * the vfi. This is done in lpfc_rcv_plogi but + * that is called after the reg_vfi. + */ + vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3312 Remote port assigned DID x%x " + "%x\n", vport->fc_myDID, + vport->fc_prevDID); + } lpfc_send_els_event(vport, ndlp, payload); @@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; break; } + shost = lpfc_shost_from_vport(vport); if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * another NPort and the other side has initiated * the PLOGI before responding to our FLOGI. 
*/ + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->fc_topology_changed || + vport->fc_myDID != vport->fc_prevDID)) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } } - shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; spin_unlock_irq(shost->host_lock); @@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_DISC_DELAYED) { spin_unlock_irq(shost->host_lock); + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "3334 Delay fc port discovery for %d seconds\n", + phba->fc_ratov); mod_timer(&vport->delayed_disc_tmo, - jiffies + HZ * phba->fc_ratov); + jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); return; } spin_unlock_irq(shost->host_lock); @@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; shost = lpfc_shost_from_vport(phba->pport); - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); /* Start a timer to unblock fabric iocbs after 100ms */ if (!blocked) - mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); + mod_timer(&phba->fabric_block_timer, + jiffies + msecs_to_jiffies(100)); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 326e05a65a73..0f6e2548f35d 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (!list_empty(&evtp->evt_listp)) return; + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); @@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - (phba->sli_rev < LPFC_SLI_REV4)) - lpfc_issue_clear_la(phba, phba->pport); return 0; } @@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba) if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_TS_INPROG; - if (phba->pport->port_state != LPFC_FLOGI) { + if (phba->pport->port_state != LPFC_FLOGI && + phba->pport->fc_flag & FC_FABRIC) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(phba->pport); @@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 New FCF matches in-use " - "FCF (x%x)\n", - phba->fcf.current_rec.fcf_indx); + "FCF (x%x), port_state:x%x, " + "fc_flag:x%x\n", + phba->fcf.current_rec.fcf_indx, + phba->pport->port_state, + phba->pport->fc_flag); goto out; } else lpfc_printf_log(phba, KERN_ERR, LOG_FIP, @@ -2796,7 +2798,19 @@ void 
lpfc_issue_init_vpi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; - int rc; + int rc, vpi; + + if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { + vpi = lpfc_alloc_vpi(vport->phba); + if (!vpi) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_MBOX, + "3303 Failed to obtain vport vpi\n"); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + vport->vpi = vpi; + } mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { @@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } - /* If the VFI is already registered, there is nothing else to do */ + /* If the VFI is already registered, there is nothing else to do + * Unless this was a VFI update and we are in PT2PT mode, then + * we should drop through to set the port state to ready. + */ if (vport->fc_flag & FC_VFI_REGISTERED) - goto out_free_mem; + if (!(phba->sli_rev == LPFC_SLI_REV4 && + vport->fc_flag & FC_PT2PT)) + goto out_free_mem; /* The VPI is implicitly registered when the VFI is registered */ spin_lock_irq(shost->host_lock); @@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " + "alpacnt:%d LinkState:%x topology:%x\n", + vport->port_state, vport->fc_flag, vport->fc_myDID, + vport->phba->alpa_map[0], + phba->link_state, phba->fc_topology); + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { /* * For private loop or for NPort pt2pt, @@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ - lpfc_disc_start(vport); + if (vport->fc_flag & FC_PT2PT) + vport->port_state = LPFC_VPORT_READY; + else + lpfc_disc_start(vport); } else { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); @@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) break; } + if (phba->fc_topology && + phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3314 Toplogy changed was 0x%x is 0x%x\n", + phba->fc_topology, + bf_get(lpfc_mbx_read_top_topology, la)); + phba->fc_topology_changed = 1; + } + phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; @@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport) tmo, vport->port_state, vport->fc_flag); } - mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); + mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_TMO; spin_unlock_irq(shost->host_lock); @@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport) uint32_t clear_la_pending; int did_changed; - if (!lpfc_is_link_up(phba)) + if (!lpfc_is_link_up(phba)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3315 Link is not up %x\n", + phba->link_state); return; + } if (phba->link_state == LPFC_CLEAR_LA) clear_la_pending = 1; @@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport) if (num_sent) return; - /* Register the VPI for SLI3, NON-NPIV only. */ + /* Register the VPI for SLI3, NPIV only. 
*/ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } @@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (vport->cfg_fdmi_on == 1) lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, + jiffies + msecs_to_jiffies(1000 * 60)); /* decrement the node reference count held for this callback * function. @@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - int i, rc; + int i = 0, rc; /* Unregister RPIs */ if (lpfc_fcf_inuse(phba)) @@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); + lpfc_cleanup_pending_mbox(phba->pport); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(phba->pport); + lpfc_mbx_unreg_vpi(phba->pport); + shost = lpfc_shost_from_vport(phba->pport); + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index e8c476031703..83700c18f468 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type { #define BG_OP_IN_CSUM_OUT_CSUM 0x5 #define BG_OP_IN_CRC_OUT_CSUM 0x6 #define BG_OP_IN_CSUM_OUT_CRC 0x7 +#define BG_OP_RAW_MODE 0x8 struct lpfc_pde5 { uint32_t word0; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1dd2f6f0a127..713a4613ec3a 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -200,6 +200,11 @@ struct lpfc_sli_intf { #define LPFC_MAX_IMAX 5000000 #define LPFC_DEF_IMAX 50000 +#define LPFC_MIN_CPU_MAP 0 +#define LPFC_MAX_CPU_MAP 2 +#define LPFC_HBA_CPU_MAP 1 +#define LPFC_DRIVER_CPU_MAP 2 /* Default */ + /* PORT_CAPABILITIES constants. 
*/ #define LPFC_MAX_SUPPORTED_PAGES 8 @@ -621,7 +626,7 @@ struct lpfc_register { #define lpfc_sliport_status_rdy_SHIFT 23 #define lpfc_sliport_status_rdy_MASK 0x1 #define lpfc_sliport_status_rdy_WORD word0 -#define MAX_IF_TYPE_2_RESETS 1000 +#define MAX_IF_TYPE_2_RESETS 6 #define LPFC_CTL_PORT_CTL_OFFSET 0x408 #define lpfc_sliport_ctrl_end_SHIFT 30 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 90b8b0515e23..cb465b253910 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/firmware.h> #include <linux/miscdevice.h> +#include <linux/percpu.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -58,6 +59,9 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; +/* Used when mapping IRQ vectors in a driver centric manner */ +uint16_t lpfc_used_cpu[LPFC_MAX_CPU]; + static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); @@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ - mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + mod_timer(&phba->hb_tmofunc, + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, @@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; } - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); return 0; } @@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !(phba->link_state == LPFC_HBA_ERROR) && !(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); return; } @@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) spin_lock_irq(&phba->pport->work_port_lock); - if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, - jiffies)) { + if (time_after(phba->last_completion_time + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (!phba->hb_outstanding) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); else mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } spin_unlock_irq(&phba->pport->work_port_lock); @@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (!pmboxq) { mod_timer(&phba->hb_tmofunc, jiffies + - HZ * 
LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } @@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->mbox_mem_pool); mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } phba->skipped_hb = 0; @@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->skipped_hb = jiffies; mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } else { /* @@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) jiffies_to_msecs(jiffies - phba->last_completion_time)); mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); } } } @@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba) * This routine is called to bring a SLI4 HBA offline when HBA hardware error * other than Port Error 6 has been detected. **/ -static void +void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); @@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba) struct lpfc_vport *vport; struct lpfc_vport **vports; int i; + bool vpis_cleared = false; if (!phba) return 0; @@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba) lpfc_unblock_mgmt_io(phba); return 1; } + spin_lock_irq(&phba->hbalock); + if (!phba->sli4_hba.max_cfg_param.vpi_used) + vpis_cleared = true; + spin_unlock_irq(&phba->hbalock); } else { if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); @@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba) vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if ((vpis_cleared) && + (vports[i]->port_type != + LPFC_PHYSICAL_PORT)) + vports[i]->vpi = 0; + } spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); @@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba) struct lpfc_iocbq *io, *io_next; spin_lock_irq(&phba->hbalock); + /* Release all the lpfc_scsi_bufs maintained by this host. */ - spin_lock(&phba->scsi_buf_list_lock); - list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { + + spin_lock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, + list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } - spin_unlock(&phba->scsi_buf_list_lock); + spin_unlock(&phba->scsi_buf_list_put_lock); + + spin_lock(&phba->scsi_buf_list_get_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, + list) { + list_del(&sb->list); + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + sb->dma_handle); + kfree(sb); + phba->total_scsi_bufs--; + } + spin_unlock(&phba->scsi_buf_list_get_lock); /* Release all the lpfc_iocbq entries maintained by this host. 
*/ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { @@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) phba->sli4_hba.scsi_xri_cnt, phba->sli4_hba.scsi_xri_max); - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); + list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { /* max scsi xri shrinked below the allocated scsi buffers */ @@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->dma_handle); kfree(psb); } - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } /* update xris associated to remaining allocated scsi buffers */ @@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->cur_iocbq.sli4_lxritag = lxri; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); return 0; @@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= 30 * HZ) { + if (time >= msecs_to_jiffies(30 * 1000)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } - if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { + if (time >= msecs_to_jiffies(15 * 1000) && + phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " "seconds. Continuing initialization\n"); @@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) goto finished; if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; - if (vport->fc_map_cnt == 0 && time < 2 * HZ) + if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) return -ENOMEM; /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. 
- * 2 segments are added since the IOCB needs a command and response bde. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + /* Initialize the host templates the configured values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a BDE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough BDEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; + + /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a BDE for each, and a BDE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + + /* Total BDEs in BPL for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; } - /* Also reinitialize the host templates with new values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after config_port mbox */ @@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { + struct lpfc_vector_map_info *cpup; struct lpfc_sli *psli; LPFC_MBOXQ_t *mboxq; - int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; + int rc, i, hbq_count, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; - int longs, sli_family; - int sges_per_segment; + int longs; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; - /* With BlockGuard we can have multiple SGEs per Data Segemnt */ - sges_per_segment = 1; - if (phba->cfg_enable_bg) - sges_per_segment = 2; - /* * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 
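[Editor's note on the sizing change above: the old code used a single formula for every I/O and then enlarged it when BlockGuard was enabled; the new code sizes the DMA buffer pool separately for DIF and non-DIF I/O, because the SCSI midlayer may hand down an unpredictable number of protection segments. The following is a small illustrative sketch of that arithmetic only, not driver code; EX_FCP_CMND_SZ, EX_FCP_RSP_SZ, EX_BDE64_SZ and EX_MAX_SEG_CNT are assumed example values standing in for the driver's sizeof() terms and LPFC_MAX_SG_SEG_CNT.]

/*
 * Illustrative sketch of the buffer-size calculation discussed in the
 * hunks above.  The sizes below are stand-in example values, not the
 * driver's real structure sizes.
 */
#include <stdio.h>

#define EX_FCP_CMND_SZ   32   /* assumed size of the FCP command IU      */
#define EX_FCP_RSP_SZ    96   /* assumed size of the FCP response IU     */
#define EX_BDE64_SZ      12   /* assumed size of one 64-bit BDE          */
#define EX_MAX_SEG_CNT   512  /* stand-in for LPFC_MAX_SG_SEG_CNT        */

/*
 * Non-DIF case: one BDE per data segment plus two reserved BDEs
 * (FCP cmnd + FCP rsp) -- the "(cfg_sg_seg_cnt + 2)" term in the patch.
 */
static unsigned int ex_buf_size_regular(unsigned int sg_seg_cnt)
{
        return EX_FCP_CMND_SZ + EX_FCP_RSP_SZ +
               (sg_seg_cnt + 2) * EX_BDE64_SZ;
}

/*
 * DIF (BlockGuard) case: the number of protection segments is not under
 * the driver's control, so the pool is sized for the maximum BDE count
 * rather than for the configured data segment count.
 */
static unsigned int ex_buf_size_dif(void)
{
        return EX_FCP_CMND_SZ + EX_FCP_RSP_SZ +
               EX_MAX_SEG_CNT * EX_BDE64_SZ;
}

int main(void)
{
        printf("regular I/O, 64 segments : %u bytes\n", ex_buf_size_regular(64));
        printf("BlockGuard I/O           : %u bytes\n", ex_buf_size_dif());
        return 0;
}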
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (!phba->sli.ring) return -ENOMEM; + /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * It doesn't matter what family our adapter is in, we are + * limited to 2 Pages, 512 SGEs, for our SGL. + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp + */ + max_buf_size = (2 * SLI4_PAGE_SIZE); + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) + phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; + + /* + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. - * To insure that the scsi sgl does not cross a 4k page boundary only - * sgl sizes of must be a power of 2. */ - buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * - sizeof(struct sli4_sge))); - - sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); - max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; - switch (sli_family) { - case LPFC_SLI_INTF_FAMILY_BE2: - case LPFC_SLI_INTF_FAMILY_BE3: - /* There is a single hint for BE - 2 pages per BPL. */ - if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_SLI_HINT1_1) - max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; - break; - case LPFC_SLI_INTF_FAMILY_LNCR_A0: - case LPFC_SLI_INTF_FAMILY_LNCR_B0: - default: - break; + + if (phba->cfg_enable_bg) { + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a SGE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough SGEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + max_buf_size; + + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a SGE for each, and a SGE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + + /* Total SGEs for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + /* + * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need + * to post 1 page for the SGL. + */ } - for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; - dma_buf_size < max_buf_size && buf_size > dma_buf_size; - dma_buf_size = dma_buf_size << 1) - ; - if (dma_buf_size == max_buf_size) - phba->cfg_sg_seg_cnt = (dma_buf_size - - sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - - (2 * sizeof(struct sli4_sge))) / - sizeof(struct sli4_sge); - phba->cfg_sg_dma_buf_size = dma_buf_size; + /* Initialize the host templates with the updated values. 
*/ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; + else + phba->cfg_sg_dma_buf_size = + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); /* Initialize buffer queue management fields */ hbq_count = lpfc_sli_hbq_count(); @@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcp_eq_hdl; } + phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu), + GFP_KERNEL); + if (!phba->sli4_hba.cpu_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3327 Failed allocate memory for msi-x " + "interrupt vector mapping\n"); + rc = -ENOMEM; + goto out_free_msix; + } + /* Initialize io channels for round robin */ + cpup = phba->sli4_hba.cpu_map; + rc = 0; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = rc; + rc++; + if (rc >= phba->cfg_fcp_io_channel) + rc = 0; + } + /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. @@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; +out_free_msix: + kfree(phba->sli4_hba.msix_entries); out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); out_free_fcf_rr_bmask: @@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + /* Free memory allocated for msi-x interrupt vector to CPU mapping */ + kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_present_cpu = 0; + phba->sli4_hba.num_online_cpu = 0; + /* Free memory allocated for msi-x interrupt vector entries */ kfree(phba->sli4_hba.msix_entries); @@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) init_waitqueue_head(&phba->work_waitq); /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) int cfg_fcp_io_channel; uint32_t cpu; uint32_t i = 0; + uint32_t j = 0; /* @@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) /* Sanity check on HBA EQ parameters */ cfg_fcp_io_channel = phba->cfg_fcp_io_channel; - /* It doesn't make sense to have more io channels then CPUs */ - for_each_online_cpu(cpu) { - i++; + /* It doesn't make sense to have more io channels then online CPUs */ + for_each_present_cpu(cpu) { + if (cpu_online(cpu)) + i++; + j++; } + phba->sli4_hba.num_online_cpu = i; + phba->sli4_hba.num_present_cpu = j; + if (i < cfg_fcp_io_channel) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3188 Reducing IO channels to match number of " - "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); + "online CPUs: from %d to %d\n", + cfg_fcp_io_channel, i); cfg_fcp_io_channel = i; } @@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) out: /* Catch the not-ready port failure after a port reset. 
*/ - if (num_resets >= MAX_IF_TYPE_2_RESETS) + if (num_resets >= MAX_IF_TYPE_2_RESETS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3317 HBA not functional: IP Reset Failed " + "after (%d) retries, try: " + "echo fw_reset > board_mode\n", num_resets); rc = -ENODEV; + } return rc; } @@ -8209,6 +8349,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) } /** + * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * @phba: pointer to lpfc hba data structure. + * + * Find next available CPU to use for IRQ to CPU affinity. + */ +static int +lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + + /* + * If we get here, we have used ALL CPUs for the specific + * phys_id. Now we need to clear out lpfc_used_cpu and start + * reusing CPUs. + */ + + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + if (lpfc_used_cpu[cpu] == phys_id) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + } + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + return LPFC_VECTOR_MAP_EMPTY; +} + +/** + * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors + * @phba: pointer to lpfc hba data structure. + * @vectors: number of HBA vectors + * + * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector + * affinization across multple physical CPUs (numa nodes). + * In addition, this routine will assign an IO channel for each CPU + * to use when issuing I/Os. 
+ */ +static int +lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) +{ + int i, idx, saved_chann, used_chann, cpu, phys_id; + int max_phys_id, num_io_channel, first_cpu; + struct lpfc_vector_map_info *cpup; +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo; +#endif + struct cpumask *mask; + uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; + + /* If there is no mapping, just return */ + if (!phba->cfg_fcp_cpu_map) + return 1; + + /* Init cpu_map array */ + memset(phba->sli4_hba.cpu_map, 0xff, + (sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu)); + + max_phys_id = 0; + phys_id = 0; + num_io_channel = 0; + first_cpu = LPFC_VECTOR_MAP_EMPTY; + + /* Update CPU map with physical id and core id of each CPU */ + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +#ifdef CONFIG_X86 + cpuinfo = &cpu_data(cpu); + cpup->phys_id = cpuinfo->phys_proc_id; + cpup->core_id = cpuinfo->cpu_core_id; +#else + /* No distinction between CPUs for other platforms */ + cpup->phys_id = 0; + cpup->core_id = 0; +#endif + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3328 CPU physid %d coreid %d\n", + cpup->phys_id, cpup->core_id); + + if (cpup->phys_id > max_phys_id) + max_phys_id = cpup->phys_id; + cpup++; + } + + /* Now associate the HBA vectors with specific CPUs */ + for (idx = 0; idx < vectors; idx++) { + cpup = phba->sli4_hba.cpu_map; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) { + + /* Try for all phys_id's */ + for (i = 1; i < max_phys_id; i++) { + phys_id++; + if (phys_id > max_phys_id) + phys_id = 0; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) + continue; + goto found; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3329 Cannot set affinity:" + "Error mapping vector %d (%d)\n", + idx, vectors); + return 0; + } +found: + cpup += cpu; + if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) + lpfc_used_cpu[cpu] = phys_id; + + /* Associate vector with selected CPU */ + cpup->irq = phba->sli4_hba.msix_entries[idx].vector; + + /* Associate IO channel with selected CPU */ + cpup->channel_id = idx; + num_io_channel++; + + if (first_cpu == LPFC_VECTOR_MAP_EMPTY) + first_cpu = cpu; + + /* Now affinitize to the selected CPU */ + mask = &cpup->maskbits; + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. + vector, mask); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3330 Set Affinity: CPU %d channel %d " + "irq %d (%x)\n", + cpu, cpup->channel_id, + phba->sli4_hba.msix_entries[idx].vector, i); + + /* Spread vector mapping across multple physical CPU nodes */ + phys_id++; + if (phys_id > max_phys_id) + phys_id = 0; + } + + /* + * Finally fill in the IO channel for any remaining CPUs. + * At this point, all IO channels have been assigned to a specific + * MSIx vector, mapped to a specific CPU. + * Base the remaining IO channel assigned, to IO channels already + * assigned to other CPUs on the same phys_id. + */ + for (i = 0; i <= max_phys_id; i++) { + /* + * If there are no io channels already mapped to + * this phys_id, just round robin thru the io_channels. + * Setup chann[] for round robin. + */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + chann[idx] = idx; + + saved_chann = 0; + used_chann = 0; + + /* + * First build a list of IO channels already assigned + * to this phys_id before reassigning the same IO + * channels to the remaining CPUs. 
+ */ + cpup = phba->sli4_hba.cpu_map; + cpu = first_cpu; + cpup += cpu; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; + idx++) { + if (cpup->phys_id == i) { + /* + * Save any IO channels that are + * already mapped to this phys_id. + */ + if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { + chann[saved_chann] = + cpup->channel_id; + saved_chann++; + goto out; + } + + /* See if we are using round-robin */ + if (saved_chann == 0) + saved_chann = + phba->cfg_fcp_io_channel; + + /* Associate next IO channel with CPU */ + cpup->channel_id = chann[used_chann]; + num_io_channel++; + used_chann++; + if (used_chann == saved_chann) + used_chann = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3331 Set IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } +out: + cpu++; + if (cpu >= phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpu = 0; + } else { + cpup++; + } + } + } + + if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { + cpup->channel_id = 0; + num_io_channel++; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3332 Assign IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } + cpup++; + } + } + + /* Sanity check */ + if (num_io_channel != phba->sli4_hba.num_present_cpu) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3333 Set affinity mismatch:" + "%d chann != %d cpus: %d vactors\n", + num_io_channel, phba->sli4_hba.num_present_cpu, + vectors); + + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; + return 1; +} + + +/** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * @@ -8259,9 +8662,7 @@ enable_msix_vectors: phba->sli4_hba.msix_entries[index].vector, phba->sli4_hba.msix_entries[index].entry); - /* - * Assign MSI-X vectors to interrupt handlers - */ + /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { memset(&phba->sli4_hba.handler_name[index], 0, 16); sprintf((char *)&phba->sli4_hba.handler_name[index], @@ -8289,6 +8690,8 @@ enable_msix_vectors: phba->cfg_fcp_io_channel, vectors); phba->cfg_fcp_io_channel = vectors; } + + lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: @@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) lpfc_sli4_disable_intr(phba); lpfc_sli4_queue_destroy(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = { static int __init lpfc_init(void) { + int cpu; 
int error = 0; printk(LPFC_MODULE_DESC "\n"); @@ -10561,6 +10965,11 @@ lpfc_init(void) return -ENOMEM; } } + + /* Initialize in case vector mapping is needed */ + for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index baf53e6c2bd1..2a4e5d21eab2 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -37,6 +37,7 @@ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ #define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ +#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index a7a9fa468308..41363db7d426 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) /* Only FC supports upd bit */ if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && - (vport->fc_flag & FC_VFI_REGISTERED)) { + (vport->fc_flag & FC_VFI_REGISTERED) && + (!phba->fc_topology_changed)) { bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); } lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, "3134 Register VFI, mydid:x%x, fcfi:%d, " - " vfi:%d, vpi:%d, fc_pname:%x%x\n", + " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" + " port_state:x%x topology chg:%d\n", vport->fc_myDID, phba->fcf.fcfi, phba->sli4_hba.vfi_ids[vport->vfi], phba->vpi_ids[vport->vpi], - reg_vfi->wwn[0], reg_vfi->wwn[1]); + reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, + vport->port_state, phba->fc_topology_changed); } /** diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index cd86069a0ba8..812d0cd7c86d 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { + /* Calculate alignment */ + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) + i = phba->cfg_sg_dma_buf_size; + else + i = SLI4_PAGE_SIZE; + phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, - phba->cfg_sg_dma_buf_size, + i, 0); - else + } else { phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, align, 0); + } + if (!phba->lpfc_scsi_dma_buf_pool) goto fail; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 82f4d3542289..31e9b92f5a9b 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, - ndlp->nlp_rpi); + ndlp->nlp_rpi, vport->port_state, + vport->fc_flag); if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) ndlp->nlp_fcp_info |= CLASS2; @@ -574,7 +576,7 @@ out: lpfc_els_rsp_reject(vport, 
stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, if ((irsp->ulpStatus) || (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 74b8710e1e90..8523b278ec9d 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -24,6 +24,8 @@ #include <linux/export.h> #include <linux/delay.h> #include <asm/unaligned.h> +#include <linux/crc-t10dif.h> +#include <net/checksum.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -48,7 +50,7 @@ #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 -int _dump_buf_done; +int _dump_buf_done = 1; static char *dif_op_str[] = { "PROT_NORMAL", @@ -66,6 +68,10 @@ struct scsi_dif_tuple { __be32 ref_tag; /* Target LBA or indirect LBA */ }; +#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK) +#define scsi_prot_flagged(sc, flg) sc +#endif + static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static 
void @@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; - int bcnt; + int bcnt, bpl_size; + + bpl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp), bpl_size); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); @@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, struct list_head *post_sblist, int sb_count) { struct lpfc_scsi_buf *psb, *psb_next; - int status; + int status, sgl_size; int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; dma_addr_t pdma_phys_bpl1; int last_xritag = NO_XRI; @@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, if (sb_count <= 0) return -EINVAL; + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + list_for_each_entry_safe(psb, psb_next, post_sblist, list) { list_del_init(&psb->list); block_cnt++; @@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, post_cnt = block_cnt; } else if (block_cnt == 1) { /* last single sgl with non-contiguous xri */ - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + if (sgl_size > SGL_PAGE_SIZE) pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; else @@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) int num_posted, rc = 0; /* get all SCSI buffers need to repost to a local list */ - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); + list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); /* post the list of scsi buffer sgls to port if available */ if (!list_empty(&post_sblist)) { @@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) IOCB_t *iocb; dma_addr_t pdma_phys_fcp_cmd; dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; + dma_addr_t pdma_phys_bpl; uint16_t iotag, lxri = 0; - int bcnt, num_posted; + int bcnt, num_posted, sgl_size; LIST_HEAD(prep_sblist); LIST_HEAD(post_sblist); LIST_HEAD(scsi_sblist); + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp)); + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); if (!psb) @@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) } memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + /* Page alignment is CRITICAL, double check to be sure */ + if (((unsigned long)(psb->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + /* Allocate iotag for psb->cur_iocbq. 
*/ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { @@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; psb->fcp_bpl = psb->data; - psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_cmnd = (psb->data + sgl_size); psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + sizeof(struct fcp_cmnd)); /* Initialize local short-hand pointers. */ sgl = (struct sli4_sge *)psb->fcp_bpl; pdma_phys_bpl = psb->dma_handle; - pdma_phys_fcp_cmd = - (psb->dma_handle + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size); pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); /* @@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpLe = 1; iocb->ulpClass = CLASS3; psb->cur_iocbq.context1 = psb; - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; psb->dma_phys_bpl = pdma_phys_bpl; /* add the scsi buffer to a post list */ list_add_tail(&psb->list, &post_sblist); - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt++; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } lpfc_printf_log(phba, KERN_INFO, LOG_BG, "3021 Allocate %d out of %d requested new SCSI " @@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf * lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; - unsigned long iflag = 0; - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - if (lpfc_cmd) { - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; + struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; + unsigned long gflag = 0; + unsigned long pflag = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, + list); + if (!lpfc_cmd) { + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + list_remove_head(scsi_buf_list_get, lpfc_cmd, + struct lpfc_scsi_buf, list); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); return lpfc_cmd; } /** @@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf *lpfc_cmd ; - unsigned long iflag = 0; + unsigned long gflag = 0; + unsigned long pflag = 0; int found = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, - list) { + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; list_del(&lpfc_cmd->list); found = 1; - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - 
lpfc_cmd->prot_seg_cnt = 0; break; } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); + if (!found) { + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, + list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_cmd->list); + found = 1; + break; + } + } + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); if (!found) return NULL; - else - return lpfc_cmd; + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } /** @@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + if (psb->exch_busy) { spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); @@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); } else { - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } } @@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ if (datadir == DMA_FROM_DEVICE) { - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); } bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); @@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_bde >= (phba->cfg_total_seg_cnt - 2)) + return num_bde + 3; + /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); @@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); + bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); bf_set(pde6_apptagval, pde6, 0); @@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_bde >= phba->cfg_total_seg_cnt) + return num_bde + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9065 BLKGRD:%s Invalid data segment\n", @@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; int i = 0, num_sge = 0, status; - int datadir = sc->sc_data_direction; uint32_t reftag; unsigned blksize; uint8_t txop, rxop; @@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + } + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - if (datadir == DMA_FROM_DEVICE) { - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); - } + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_sge >= (phba->cfg_total_seg_cnt - 2)) + return num_sge + 3; + /* setup DISEED with what we have */ diseed = (struct sli4_sge_diseed *) sgl; memset(diseed, 0, sizeof(struct sli4_sge_diseed)); @@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) { + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + + } else { + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + /* + * When in this mode, the hardware will replace + * the guard tag from the host with a + * newly generated good CRC for the wire. + * Switch to raw mode here to avoid this + * behavior. What the host sends gets put on the wire. + */ + if (txop == BG_OP_IN_CRC_OUT_CRC) { + txop = BG_OP_RAW_MODE; + rxop = BG_OP_RAW_MODE; + } + } + + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_sge >= phba->cfg_total_seg_cnt) + return num_sge + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9086 BLKGRD:%s Invalid data segment\n", @@ -2670,6 +2801,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) } /** + * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be adjusted. + * + * Adjust the data length to account for how much data + * is actually on the wire. 
+ * + * returns the adjusted data length + **/ +static int +lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *sc = lpfc_cmd->pCmd; + int fcpdl; + + fcpdl = scsi_bufflen(sc); + + /* Check if there is protection data on the wire */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + /* Read */ + if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) + return fcpdl; + + } else { + /* Write */ + if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) + return fcpdl; + } + + /* + * If we are in DIF Type 1 mode every data block has a 8 byte + * DIF (trailer) attached to it. Must ajust FCP data length. + */ + if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI)) + fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; + + return fcpdl; +} + +/** * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be prep'ed. @@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, uint32_t num_bde = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; - int diflen, fcpdl; - unsigned blksize; + int fcpdl; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd @@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9067 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: + + /* Here we need to add a PDE5 and PDE6 to the count */ + if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) + goto err; + num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); /* we should have 2 or more entries in buffer list */ if (num_bde < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9068 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* + * There is a minimun of 4 BPLs used for every + * protection data segment. 
+ */ + if ((lpfc_cmd->prot_seg_cnt * 4) > + (phba->cfg_total_seg_cnt - 2)) + goto err; num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_bde < 3) || + (num_bde > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9022 Unexpected protection group %i\n", prot_group_type); @@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; - fcpdl = scsi_bufflen(scsi_cmnd); - - if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) { - /* - * We are in DIF Type 1 mode - * Every data block has a 8 byte DIF (trailer) - * attached to it. Must ajust FCP data length - */ - blksize = lpfc_cmd_blksize(scsi_cmnd); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - } + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -2812,14 +2969,234 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9023 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", + "9023 Cannot setup S/G List for HBA" + "IO segs %d/%d BPL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, prot_group_type, num_bde); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } /* + * This function calcuates the T10 DIF guard tag + * on the specified data using a CRC algorithmn + * using crc_t10dif. + */ +uint16_t +lpfc_bg_crc(uint8_t *data, int count) +{ + uint16_t crc = 0; + uint16_t x; + + crc = crc_t10dif(data, count); + x = cpu_to_be16(crc); + return x; +} + +/* + * This function calcuates the T10 DIF guard tag + * on the specified data using a CSUM algorithmn + * using ip_compute_csum. + */ +uint16_t +lpfc_bg_csum(uint8_t *data, int count) +{ + uint16_t ret; + + ret = ip_compute_csum(data, count); + return ret; +} + +/* + * This function examines the protection data to try to determine + * what type of T10-DIF error occurred. 
+ */ +void +lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scatterlist *sgpe; /* s/g prot entry */ + struct scatterlist *sgde; /* s/g data entry */ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct scsi_dif_tuple *src = NULL; + uint8_t *data_src = NULL; + uint16_t guard_tag, guard_type; + uint16_t start_app_tag, app_tag; + uint32_t start_ref_tag, ref_tag; + int prot, protsegcnt; + int err_type, len, data_len; + int chk_ref, chk_app, chk_guard; + uint16_t sum; + unsigned blksize; + + err_type = BGS_GUARD_ERR_MASK; + sum = 0; + guard_tag = 0; + + /* First check to see if there is protection data to examine */ + prot = scsi_get_prot_op(cmd); + if ((prot == SCSI_PROT_READ_STRIP) || + (prot == SCSI_PROT_WRITE_INSERT) || + (prot == SCSI_PROT_NORMAL)) + goto out; + + /* Currently the driver just supports ref_tag and guard_tag checking */ + chk_ref = 1; + chk_app = 0; + chk_guard = 0; + + /* Setup a ptr to the protection data provided by the SCSI host */ + sgpe = scsi_prot_sglist(cmd); + protsegcnt = lpfc_cmd->prot_seg_cnt; + + if (sgpe && protsegcnt) { + + /* + * We will only try to verify guard tag if the segment + * data length is a multiple of the blksize. + */ + sgde = scsi_sglist(cmd); + blksize = lpfc_cmd_blksize(cmd); + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + guard_type = scsi_host_get_guard(cmd->device->host); + + start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */ + start_app_tag = src->app_tag; + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + while (src && protsegcnt) { + while (len) { + + /* + * First check to see if a protection data + * check is valid + */ + if ((src->ref_tag == 0xffffffff) || + (src->app_tag == 0xffff)) { + start_ref_tag++; + goto skipit; + } + + /* App Tag checking */ + app_tag = src->app_tag; + if (chk_app && (app_tag != start_app_tag)) { + err_type = BGS_APPTAG_ERR_MASK; + goto out; + } + + /* Reference Tag checking */ + ref_tag = be32_to_cpu(src->ref_tag); + if (chk_ref && (ref_tag != start_ref_tag)) { + err_type = BGS_REFTAG_ERR_MASK; + goto out; + } + start_ref_tag++; + + /* Guard Tag checking */ + if (chk_guard) { + guard_tag = src->guard_tag; + if (guard_type == SHOST_DIX_GUARD_IP) + sum = lpfc_bg_csum(data_src, + blksize); + else + sum = lpfc_bg_crc(data_src, + blksize); + if ((guard_tag != sum)) { + err_type = BGS_GUARD_ERR_MASK; + goto out; + } + } +skipit: + len -= sizeof(struct scsi_dif_tuple); + if (len < 0) + len = 0; + src++; + + data_src += blksize; + data_len -= blksize; + + /* + * Are we at the end of the Data segment? + * The data segment is only used for Guard + * tag checking. 
+ */ + if (chk_guard && (data_len == 0)) { + chk_guard = 0; + sgde = sg_next(sgde); + if (!sgde) + goto out; + + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + } + } + + /* Goto the next Protection data segment */ + sgpe = sg_next(sgpe); + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + } else { + src = NULL; + } + protsegcnt--; + } + } +out: + if (err_type == BGS_GUARD_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x1); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + phba->bg_guard_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + sum, guard_tag); + + } else if (err_type == BGS_REFTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x3); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_reftag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + ref_tag, start_ref_tag); + + } else if (err_type == BGS_APPTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x2); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_apptag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9041 BLKGRD: LBA %lx app_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + app_tag, start_app_tag); + } +} + + +/* * This function checks for BlockGuard errors detected by * the HBA. In case of errors, the ASC/ASCQ fields in the * sense buffer will be set accordingly, paired with @@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, uint32_t bgstat = bgf->bgstat; uint64_t failing_sector = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" - " 0x%x lba 0x%llx blk cnt 0x%x " - "bgstat=0x%x bghm=0x%x\n", - cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); - spin_lock(&_dump_buf_lock); if (!_dump_buf_done) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" @@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" - " BlockGuard profile. bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9072 BLKGRD: Invalid BG Profile in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } if (lpfc_bgs_get_uninit_dif_block(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " - "Invalid BlockGuard DIF Block. 
bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9073 BLKGRD: Invalid BG PDIF Block in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } @@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, cmd->result = DRIVER_SENSE << 24 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_guard_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9055 BLKGRD: guard_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9055 BLKGRD: Guard Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_reftag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9056 BLKGRD: ref_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9056 BLKGRD: Ref Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_apptag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9061 BLKGRD: app_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9061 BLKGRD: App Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (!ret) { /* No error was reported - problem in FW? */ - cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9057 BLKGRD: Unknown error reported!\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9057 BLKGRD: Unknown error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); + + /* Calcuate what type of error it was */ + lpfc_calc_bg_err(phba, lpfc_cmd); } - out: return ret; } @@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -3094,45 +3489,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } /** - * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard - * @phba: The Hba for which this call is being executed. - * @lpfc_cmd: The scsi buffer which is going to be adjusted. - * - * Adjust the data length to account for how much data - * is actually on the wire. 
- * - * returns the adjusted data length - **/ -static int -lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) -{ - struct scsi_cmnd *sc = lpfc_cmd->pCmd; - int diflen, fcpdl; - unsigned blksize; - - fcpdl = scsi_bufflen(sc); - - /* Check if there is protection data on the wire */ - if (sc->sc_data_direction == DMA_FROM_DEVICE) { - /* Read */ - if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) - return fcpdl; - - } else { - /* Write */ - if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) - return fcpdl; - } - - /* If protection data on the wire, adjust the count accordingly */ - blksize = lpfc_cmd_blksize(sc); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - return fcpdl; -} - -/** * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. @@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; - uint32_t num_bde = 0; + uint32_t num_sge = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; /* * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd - * fcp_rsp regions to the first data bde entry + * fcp_rsp regions to the first data sge entry */ if (scsi_sg_count(scsi_cmnd)) { /* @@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, sgl += 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9087 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: - num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, + /* Here we need to add a DISEED to the count */ + if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) + goto err; + + num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, datasegcnt); + /* we should have 2 or more entries in buffer list */ - if (num_bde < 2) + if (num_sge < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9088 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + /* + * There is a minimun of 3 SGEs used for every + * protection data segment. 
+ */ + if ((lpfc_cmd->prot_seg_cnt * 3) > + (phba->cfg_total_seg_cnt - 2)) + goto err; - num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, + num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, datasegcnt, protsegcnt); + /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_sge < 3) || + (num_sge > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9083 Unexpected protection group %i\n", prot_group_type); @@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9084 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", - prot_group_type, num_bde); + "9084 Cannot setup S/G List for HBA" + "IO segs %d/%d SGL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, + prot_group_type, num_sge); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } @@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9033 BLKGRD: rcvd %s cmd:x%x " "sector x%llx cnt %u pt %x\n", dif_op_str[scsi_get_prot_op(cmnd)], @@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9038 BLKGRD: rcvd PROT_NORMAL cmd: " "x%x sector x%llx cnt %u pt %x\n", cmnd->cmnd[0], @@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) /* Wait for abort to complete */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), - (2*vport->cfg_devloss_tmo*HZ)); + msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); lpfc_cmd->waitq = NULL; if (lpfc_cmd->pCmd == cmnd) { @@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; int rc, ret = SUCCESS; + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3172 SCSI layer issued Host Reset Data:\n"); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); rc = lpfc_sli_brdrestart(phba); if (rc) ret = FAILED; - lpfc_online(phba); + rc = lpfc_online(phba); + if (rc) + ret = FAILED; lpfc_unblock_mgmt_io(phba); - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3172 SCSI layer issued Host Reset Data: x%x\n", ret); + if (ret == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3323 Failed host reset, bring it offline\n"); + lpfc_sli4_offline_eratt(phba); + } return ret; } @@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev) } num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); if (num_to_alloc != num_allocated) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0708 Allocation request of %d " - "command buffers did not succeed. 
" - "Allocated %d buffers.\n", - num_to_alloc, num_allocated); + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); } if (num_allocated > 0) phba->total_scsi_bufs += num_allocated; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 35dd17eb0f27..572579f87de4 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov + 1); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (time_after(jiffies, rrq->rrq_stop_time)) @@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov * 2); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); list_splice_init(&phba->active_rrq_list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, else rrq->send_rrq = 0; rrq->xritag = xritag; - rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); + rrq->rrq_stop_time = jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); rrq->ndlp = ndlp; rrq->nlp_DID = ndlp->nlp_DID; rrq->vport = ndlp->vport; @@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) ndlp = piocbq->context_un.ndlp; - else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && - (piocbq->iocb_flag & LPFC_IO_LIBDFC)) + else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) ndlp = piocbq->context_un.ndlp; else ndlp = piocbq->context1; @@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, BUG(); else mod_timer(&piocb->vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov << 1)); + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); } @@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Mailbox cmd <cmd> Cmpl <cmpl> */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x\n", pmb->vport ? 
pmb->vport->vpi : 0, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) pmbox->un.varWords[4], pmbox->un.varWords[5], pmbox->un.varWords[6], - pmbox->un.varWords[7]); + pmbox->un.varWords[7], + pmbox->un.varWords[8], + pmbox->un.varWords[9], + pmbox->un.varWords[10]); if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba,pmb); @@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr) lpfc_worker_wake_up(phba); else /* Restart the timer for next eratt poll */ - mod_timer(&phba->eratt_poll, jiffies + - HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); return; } @@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } + phba->sli4_hba.max_cfg_param.vpi_used = 0; break; case LPFC_RSC_TYPE_FCOE_XRI: kfree(phba->sli4_hba.xri_bmask); @@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); } else { kfree(phba->vpi_bmask); + phba->sli4_hba.max_cfg_param.vpi_used = 0; kfree(phba->vpi_ids); bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); kfree(phba->sli4_hba.xri_bmask); @@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) struct lpfc_sglq *sglq_entry = NULL; struct lpfc_sglq *sglq_entry_next = NULL; struct lpfc_sglq *sglq_entry_first = NULL; - int status, post_cnt = 0, num_posted = 0, block_cnt = 0; + int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); @@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); spin_unlock_irq(&phba->hbalock); + total_cnt = phba->sli4_hba.els_xri_cnt; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &allc_sgl_list, list) { list_del_init(&sglq_entry->list); @@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) sglq_entry->sli4_xritag); list_add_tail(&sglq_entry->list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt--; - spin_unlock_irq(&phba->hbalock); + total_cnt--; } } } @@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) (sglq_entry_first->sli4_xritag + post_cnt - 1)); list_splice_init(&blck_sgl_list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt -= post_cnt; - spin_unlock_irq(&phba->hbalock); + total_cnt -= post_cnt; } /* don't reset xirtag due to hole in xri block */ @@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* reset els sgl post count for next round of posting */ post_cnt = 0; } + /* update the number of XRIs posted for ELS */ + phba->sli4_hba.els_xri_cnt = total_cnt; /* free the els sgls failed to post */ lpfc_free_sgl_list(phba, &free_sgl_list); @@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov * 2)); + jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Start error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * 
LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); /* Enable PCIe device Advanced Error Reporting (AER) if configured */ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { @@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000); + mod_timer(&psli->mbox_tmo, jiffies + timeout); } /* Mailbox cmd <cmd> issue */ @@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) /* Start timer for the mbox_tmo and log some mailbox post messages */ mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); + msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " @@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, static inline uint32_t lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) { - int i; - - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) - i = smp_processor_id(); - else - i = atomic_add_return(1, &phba->fcp_qidx); + struct lpfc_vector_map_info *cpup; + int chann, cpu; - i = (i % phba->cfg_fcp_io_channel); - return i; + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) { + cpu = smp_processor_id(); + if (cpu < phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpup += cpu; + return cpup->channel_id; + } + chann = cpu; + } + chann = atomic_add_return(1, &phba->fcp_qidx); + chann = (chann % phba->cfg_fcp_io_channel); + return chann; } /** @@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if ((piocb->iocb_flag & LPFC_IO_FCP) || (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (unlikely(!phba->sli4_hba.fcp_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], &wqe)) return IOCB_ERROR; } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) return IOCB_ERROR; } @@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { - timeout_req = timeout * HZ; + timeout_req = msecs_to_jiffies(timeout * 1000); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); @@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, if (retval == MBX_BUSY || retval == MBX_SUCCESS) { wait_event_interruptible_timeout(done_q, pmboxq->mbox_flag & LPFC_MBX_WAKE, - timeout * HZ); + msecs_to_jiffies(timeout * 1000)); spin_lock_irqsave(&phba->hbalock, flag); pmboxq->context1 = NULL; @@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, } wq->db_regaddr = bar_memmap_p + db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3264 WQ[%d]: barset:x%x, offset:x%x\n", - wq->queue_id, pci_barset, db_offset); + "3264 WQ[%d]: barset:x%x, offset:x%x, " + "format:x%x\n", wq->queue_id, pci_barset, + db_offset, wq->db_format); } else { wq->db_format = LPFC_DB_LIST_FORMAT; wq->db_regaddr = phba->sli4_hba.WQDBregaddr; @@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } hrq->db_regaddr = bar_memmap_p + 
db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n", - hrq->queue_id, pci_barset, db_offset); + "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " + "format:x%x\n", hrq->queue_id, pci_barset, + db_offset, hrq->db_format); } else { hrq->db_format = LPFC_DB_RING_FORMAT; hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; @@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s " - "Frame Data:%08x %08x %08x %08x %08x %08x\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type], + "2538 Received frame rctl:%s (x%x), type:%s (x%x), " + "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", + rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, + type_names[fc_hdr->fh_type], fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), - be32_to_cpu(header[4]), be32_to_cpu(header[5])); + be32_to_cpu(header[4]), be32_to_cpu(header[5]), + be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index be02b59ea279..67af460184ba 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -346,11 +346,6 @@ struct lpfc_bmbx { #define SLI4_CT_VFI 2 #define SLI4_CT_FCFI 3 -#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000 -#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000 -#define LPFC_SLI4_MIN_BUF_SIZE 0x400 -#define LPFC_SLI4_MAX_BUF_SIZE 0x20000 - /* * SLI4 specific data structures */ @@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info { #define LPFC_SLI4_HANDLER_NAME_SZ 16 +/* Used for IRQ vector to CPU mapping */ +struct lpfc_vector_map_info { + uint16_t phys_id; + uint16_t core_id; + uint16_t irq; + uint16_t channel_id; + struct cpumask maskbits; +}; +#define LPFC_VECTOR_MAP_EMPTY 0xffff +#define LPFC_MAX_CPU 256 + /* SLI4 HBA data structure entries */ struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for @@ -573,6 +579,11 @@ struct lpfc_sli4_hba { struct lpfc_iov iov; spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ + + /* CPU to vector mapping information */ + struct lpfc_vector_map_info *cpu_map; + uint16_t num_online_cpu; + uint16_t num_present_cpu; }; enum lpfc_sge_type { diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 664cd04f7cd8..a38dc3b16969 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.38" +#define LPFC_DRIVER_VERSION "8.3.39" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 0fe188e66000..e28e431564b0 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport, } } -static int +int lpfc_alloc_vpi(struct lpfc_hba *phba) { unsigned long vpi; @@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; long timeout; + bool ns_ndlp_referenced = false; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, @@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_debugfs_terminate(vport); + /* + * The call to fc_remove_host might release the NameServer ndlp. Since + * we might need to use the ndlp to send the DA_ID CT command, + * increment the reference for the NameServer ndlp to prevent it from + * being released. + */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + lpfc_nlp_get(ndlp); + ns_ndlp_referenced = true; + } + /* Remove FC host and then SCSI host with the vport */ fc_remove_host(lpfc_shost_from_vport(vport)); scsi_remove_host(lpfc_shost_from_vport(vport)); @@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_discovery_wait(vport); skip_logo: + + /* + * If the NameServer ndlp has been incremented to allow the DA_ID CT + * command to be sent, decrement the ndlp now. + */ + if (ns_ndlp_referenced) { + ndlp = lpfc_findnode_did(vport, NameServer_DID); + lpfc_nlp_put(ndlp); + } + lpfc_cleanup(vport); lpfc_sli_host_down(vport); diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 90828340acea..6b2c94eb8134 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); +int lpfc_alloc_vpi(struct lpfc_hba *phba); /* * queuecommand VPORT-specific return codes. Specified in the host byte code. 
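/*
 * Editor's note -- illustration, not part of the patch above. The lpfc_scsi.c
 * hunks in this series split the old single scsi_buf_list (guarded by one
 * scsi_buf_list_lock) into a "get" list and a "put" list with separate locks:
 * allocation pops from the get list and only touches the put lock when the
 * get list runs dry, while release only ever takes the put lock. The sketch
 * below is a minimal, generic form of that double-list pool under the same
 * locking discipline; struct buf_pool, pool_get() and pool_put() are
 * hypothetical names, not lpfc symbols, and pool initialisation
 * (INIT_LIST_HEAD/spin_lock_init) is omitted.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct pool_buf {
	struct list_head list;
	/* payload omitted */
};

struct buf_pool {
	struct list_head get_list;	/* consumers pop from here */
	struct list_head put_list;	/* producers push here */
	spinlock_t get_lock;
	spinlock_t put_lock;
};

/* Allocate: serve from the get list, refilling it from the put list if empty. */
static struct pool_buf *pool_get(struct buf_pool *pool)
{
	struct pool_buf *buf = NULL;
	unsigned long gflag, pflag;

	spin_lock_irqsave(&pool->get_lock, gflag);
	if (list_empty(&pool->get_list)) {
		/* Same lock order as the lpfc hunks: get lock outer, put lock inner. */
		spin_lock_irqsave(&pool->put_lock, pflag);
		list_splice_init(&pool->put_list, &pool->get_list);
		spin_unlock_irqrestore(&pool->put_lock, pflag);
	}
	if (!list_empty(&pool->get_list)) {
		buf = list_first_entry(&pool->get_list, struct pool_buf, list);
		list_del_init(&buf->list);
	}
	spin_unlock_irqrestore(&pool->get_lock, gflag);
	return buf;
}

/*
 * Release: takes only the put lock, so it contends with allocation only
 * during the occasional refill splice above.
 */
static void pool_put(struct buf_pool *pool, struct pool_buf *buf)
{
	unsigned long pflag;

	spin_lock_irqsave(&pool->put_lock, pflag);
	list_add_tail(&buf->list, &pool->put_list);
	spin_unlock_irqrestore(&pool->put_lock, pflag);
}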
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7c90d57b867e..3a9ddae86f1f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) printk(KERN_ERR "megaraid_sas: timed out while" "waiting for HBA to recover\n"); error = -ENODEV; - goto out_kfree_ioc; + goto out_up; } spin_unlock_irqrestore(&instance->hba_lock, flags); error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); + out_up: up(&instance->ioctl_sem); out_kfree_ioc: diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 74550922ad55..7b7381d7671f 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) } for (i = 0; i < MVS_MAX_DEVICES; i++) { mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->devices[i].dev_type = NO_DEVICE; + mvi->devices[i].dev_type = SAS_PHY_UNUSED; mvi->devices[i].device_id = i; mvi->devices[i].dev_status = MVS_DEV_NORMAL; init_timer(&mvi->devices[i].timer); diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 532110f4562a..c9e244984e30 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, return 0; } -#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED))) static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, struct mvs_tmf_task *tmf, int *pass) { @@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf * libsas will use dev->port, should * not call task_done for sata */ - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) task->task_done(task); return rc; } @@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) phy->identify.device_type = phy->att_dev_info & PORT_DEV_TYPE_MASK; - if (phy->identify.device_type == SAS_END_DEV) + if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) + else if (phy->identify.device_type != SAS_PHY_UNUSED) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; if (oob_done) @@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) { u32 dev; for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { - if (mvi->devices[dev].dev_type == NO_DEVICE) { + if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) { mvi->devices[dev].device_id = dev; return &mvi->devices[dev]; } @@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev) u32 id = mvi_dev->device_id; memset(mvi_dev, 0, sizeof(*mvi_dev)); mvi_dev->device_id = id; - mvi_dev->dev_type = NO_DEVICE; + mvi_dev->dev_type = SAS_PHY_UNUSED; mvi_dev->dev_status = MVS_DEV_NORMAL; mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; } @@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) { int rc; struct sas_phy *phy = sas_get_local_phy(dev); - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 
0 : 1; rc = sas_phy_reset(phy, reset_type); sas_put_local_phy(phy); @@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task) } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { - if (SATA_DEV == dev->dev_type) { + if (SAS_SATA_DEV == dev->dev_type) { struct mvs_slot_info *slot = task->lldd_task; u32 slot_idx = (u32)(slot - mvi->slot_info); mv_dprintk("mvs_abort_task() mvi=%p task=%p " diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 9f3cc13a5ce7..60e2fb7f2dca 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch; extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ - ((type == EDGE_DEV) || (type == FANOUT_DEV)) + ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define bit(n) ((u64)1 << n) @@ -241,7 +241,7 @@ struct mvs_phy { struct mvs_device { struct list_head dev_entry; - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct mvs_info *mvi_info; struct domain_device *sas_device; struct timer_list timer; diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile index 52f04296171c..ce4cd87c7c66 100644 --- a/drivers/scsi/pm8001/Makefile +++ b/drivers/scsi/pm8001/Makefile @@ -4,9 +4,10 @@ # Copyright (C) 2008-2009 USI Co., Ltd. -obj-$(CONFIG_SCSI_PM8001) += pm8001.o -pm8001-y += pm8001_init.o \ +obj-$(CONFIG_SCSI_PM8001) += pm80xx.o +pm80xx-y += pm8001_init.o \ pm8001_sas.o \ pm8001_ctl.o \ - pm8001_hwi.o + pm8001_hwi.o \ + pm80xx_hwi.o diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 45bc197bc22f..d99f41c2ca13 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. 
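/*
 * Editor's note -- illustration, not part of the patch. Each pm8001_ctl.c
 * sysfs show routine patched in the hunks that follow repeats the same
 * branch: if chip_id is chip_8001, read the field from
 * main_cfg_tbl.pm8001_tbl, otherwise from main_cfg_tbl.pm80xx_tbl. A
 * hypothetical helper such as the one sketched here captures that selection
 * once; pm8001_iface_rev() is not a symbol introduced by this patch, and it
 * assumes the pm8001_hba_info and chip_8001 definitions from the driver
 * headers.
 */
static u32 pm8001_iface_rev(struct pm8001_hba_info *pm8001_ha)
{
	if (pm8001_ha->chip_id == chip_8001)
		return pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev;
	return pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev;
}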
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.interface_rev); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev); + } } static DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); @@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev)); + } else { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev)); + } } static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); /** @@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.max_out_io); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io); + } } static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); /** @@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16) + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16) + ); + } } static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); /** @@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 
0x0000FFFF + ); + } } static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); @@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; + /* fe000000 means supports SAS2.1 */ + if (pm8001_ha->chip_id == chip_8001) + mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + else + /* fe000000 means supports SAS2.1 */ + mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag & + 0xfe000000)>>25; return show_sas_spec_support_status(mode, buf); } static DEVICE_ATTR(sas_spec_support, S_IRUGO, @@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) goto out; } payload = (struct pm8001_ioctl_payload *)ioctlbuffer; - memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, + memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, pm8001_ha->fw_image->size); payload->length = pm8001_ha->fw_image->size; payload->id = 0; + payload->minor_function = 0x1; pm8001_ha->nvmd_completion = &completion; ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); wait_for_completion(&completion); @@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) payload->length = 1024*16; payload->id = 0; fwControl = - (struct fw_control_info *)payload->func_specific; + (struct fw_control_info *)&payload->func_specific; fwControl->len = IOCTL_BUF_SIZE; /* IN */ fwControl->size = partitionSize + HEADER_LEN;/* IN */ fwControl->retcode = 0;/* OUT */ diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index c3d20c8d4abe..479c5a7a863a 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -43,9 +43,12 @@ enum chip_flavors { chip_8001, + chip_8008, + chip_8009, + chip_8018, + chip_8019 }; -#define USI_MAX_MEMCNT 9 -#define PM8001_MAX_DMA_SG SG_ALL + enum phy_speed { PHY_SPEED_15 = 0x01, PHY_SPEED_30 = 0x02, @@ -69,23 +72,34 @@ enum port_type { #define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ #define PM8001_MAX_INB_NUM 1 #define PM8001_MAX_OUTB_NUM 1 +#define PM8001_MAX_SPCV_INB_NUM 1 +#define PM8001_MAX_SPCV_OUTB_NUM 4 #define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ +/* Inbound/Outbound queue size */ +#define IOMB_SIZE_SPC 64 +#define IOMB_SIZE_SPCV 128 + /* unchangeable hardware details */ -#define PM8001_MAX_PHYS 8 /* max. possible phys */ -#define PM8001_MAX_PORTS 8 /* max. possible ports */ -#define PM8001_MAX_DEVICES 1024 /* max supported device */ +#define PM8001_MAX_PHYS 16 /* max. possible phys */ +#define PM8001_MAX_PORTS 16 /* max. 
possible ports */ +#define PM8001_MAX_DEVICES 2048 /* max supported device */ +#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ +#define USI_MAX_MEMCNT_BASE 5 +#define IB (USI_MAX_MEMCNT_BASE + 1) +#define CI (IB + PM8001_MAX_SPCV_INB_NUM) +#define OB (CI + PM8001_MAX_SPCV_INB_NUM) +#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM) +#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM) +#define PM8001_MAX_DMA_SG SG_ALL enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ IOP, /* IO processor */ - CI, /* consumer index */ - PI, /* producer index */ - IB, /* inbound queue */ - OB, /* outbound queue */ NVMD, /* NVM device */ DEV_MEM, /* memory for devices */ CCB_MEM, /* memory for command control block */ + FW_FLASH /* memory for fw flash update */ }; #define PM8001_EVENT_LOG_SIZE (128 * 1024) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index b8dd05074abb..69dd49c05f1e 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -50,32 +50,39 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; - pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); - pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); - pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); - pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); - pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); - pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); - pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); - pm8001_ha->main_cfg_tbl.inbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.signature = + pm8001_mr32(address, 0x00); + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev = + pm8001_mr32(address, 0x04); + pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev = + pm8001_mr32(address, 0x08); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io = + pm8001_mr32(address, 0x0C); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl = + pm8001_mr32(address, 0x10); + pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag = + pm8001_mr32(address, 0x14); + pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset = + pm8001_mr32(address, 0x18); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset = pm8001_mr32(address, MAIN_IBQ_OFFSET); - pm8001_ha->main_cfg_tbl.outbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset = pm8001_mr32(address, MAIN_OBQ_OFFSET); - pm8001_ha->main_cfg_tbl.hda_mode_flag = + pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag = pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); /* read analog Setting offset from the configuration table */ - pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset = pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); /* read Error Dump Offset and Length */ - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 = 
pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); } @@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->general_stat_tbl_addr; - pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); - pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); - pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); - pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); - pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); - pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); - pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); - pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); - pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); - pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); - pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); - pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); - pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); - pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); - pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); - pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); - pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); - pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); - pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); - pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); - pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); - pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); - pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); - pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); - pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); + pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate = + pm8001_mr32(address, 0x00); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 = + pm8001_mr32(address, 0x04); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 = + pm8001_mr32(address, 0x08); + pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt = + pm8001_mr32(address, 0x0C); + pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt = + pm8001_mr32(address, 0x10); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd = + pm8001_mr32(address, 0x14); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] = + pm8001_mr32(address, 0x18); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] = + pm8001_mr32(address, 0x1C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] = + pm8001_mr32(address, 0x20); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] = + pm8001_mr32(address, 0x24); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] = + pm8001_mr32(address, 0x28); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] = + pm8001_mr32(address, 0x2C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] = + pm8001_mr32(address, 0x30); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] = + pm8001_mr32(address, 0x34); + pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val = + pm8001_mr32(address, 0x38); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] = + pm8001_mr32(address, 0x3C); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] = + pm8001_mr32(address, 0x40); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] = + pm8001_mr32(address, 0x44); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] = + pm8001_mr32(address, 0x48); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] = + pm8001_mr32(address, 0x4C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] = + pm8001_mr32(address, 0x50); + 
pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] = + pm8001_mr32(address, 0x54); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] = + pm8001_mr32(address, 0x58); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] = + pm8001_mr32(address, 0x5C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] = + pm8001_mr32(address, 0x60); } /** @@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) */ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int inbQ_num = 1; int i; void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; - for (i = 0; i < inbQ_num; i++) { + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { u32 offset = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int outbQ_num = 1; int i; void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; - for (i = 0; i < outbQ_num; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { u32 offset = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) { - int qn = 1; int i; u32 offsetib, offsetob; void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; - - pm8001_ha->main_cfg_tbl.upper_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; - pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size = + 
PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_lo; - pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; - for (i = 0; i < qn; i++) { + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_hi; + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_lo; + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; pm8001_ha->inbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[IB].total_len; + pm8001_ha->memoryMap.region[IB + i].total_len; pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_hi; + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_lo; + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].ci_virt = - pm8001_ha->memoryMap.region[CI].virt_ptr; + pm8001_ha->memoryMap.region[CI + i].virt_ptr; offsetib = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(addressib, @@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; } - for (i = 0; i < qn; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_hi; + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_lo; + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; pm8001_ha->outbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[OB].total_len; + pm8001_ha->memoryMap.region[OB + i].total_len; pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_hi; + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_lo; + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = - 0 | (10 << 16) | (0 << 24); + 0 | (10 << 16) | (i << 24); pm8001_ha->outbnd_q_tbl[i].pi_virt = - pm8001_ha->memoryMap.region[PI].virt_ptr; + pm8001_ha->memoryMap.region[PI + i].virt_ptr; offsetob = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = 
get_pci_bar_index(pm8001_mr32(addressob, @@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; pm8001_mw32(address, 0x24, - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd); pm8001_mw32(address, 0x28, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3); pm8001_mw32(address, 0x2C, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7); pm8001_mw32(address, 0x30, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3); pm8001_mw32(address, 0x34, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7); pm8001_mw32(address, 0x38, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid0_3); pm8001_mw32(address, 0x3C, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid4_7); pm8001_mw32(address, 0x40, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid0_3); pm8001_mw32(address, 0x44, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid4_7); pm8001_mw32(address, 0x48, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid0_3); pm8001_mw32(address, 0x4C, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. 
+ outbound_tgt_smp_event_pid4_7); pm8001_mw32(address, 0x50, - pm8001_ha->main_cfg_tbl.upper_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr); pm8001_mw32(address, 0x54, - pm8001_ha->main_cfg_tbl.lower_event_log_addr); - pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); - pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr); + pm8001_mw32(address, 0x58, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size); + pm8001_mw32(address, 0x5C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option); pm8001_mw32(address, 0x60, - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr); pm8001_mw32(address, 0x64, - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); - pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr); + pm8001_mw32(address, 0x68, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size); pm8001_mw32(address, 0x6C, - pm8001_ha->main_cfg_tbl.iop_event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option); pm8001_mw32(address, 0x70, - pm8001_ha->main_cfg_tbl.fatal_err_interrupt); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt); } /** @@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) */ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) { + u8 i = 0; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + /* 8081 controllers need BAR shift to access MPI space + * as this is shared with BIOS data */ + if (deviceid == 0x8081) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } /* check the firmware status */ if (-1 == check_fw_ready(pm8001_ha)) { PM8001_FAIL_DBG(pm8001_ha, @@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) read_outbnd_queue_table(pm8001_ha); /* update main config table ,inbound table and outbound table */ update_main_config_table(pm8001_ha); - update_inbnd_queue_table(pm8001_ha, 0); - update_outbnd_queue_table(pm8001_ha, 0); - mpi_set_phys_g3_with_ssc(pm8001_ha, 0); - /* 7->130ms, 34->500ms, 119->1.5s */ - mpi_set_open_retry_interval_reg(pm8001_ha, 119); + for (i = 0; i < PM8001_MAX_INB_NUM; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) + update_outbnd_queue_table(pm8001_ha, i); + /* 8081 controller donot require these operations */ + if (deviceid != 0x8081) { + mpi_set_phys_g3_with_ssc(pm8001_ha, 0); + /* 7->130ms, 34->500ms, 119->1.5s */ + mpi_set_open_retry_interval_reg(pm8001_ha, 119); + } /* notify firmware update finished and check initialization status */ if (0 == mpi_init_check(pm8001_ha)) { PM8001_INIT_DBG(pm8001_ha, @@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) u32 max_wait_count; u32 value; u32 gst_len_mpistate; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + if (deviceid == 0x8081) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } init_pci_device_addresses(pm8001_ha); /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the table is stop */ @@ -740,14 +810,14 @@ static u32 
soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all * the FW register status to the originated status. * @pm8001_ha: our hba card information - * @signature: signature in host scratch pad0 register. */ static int -pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature) +pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) { u32 regVal, toggleVal; u32 max_wait_count; u32 regVal1, regVal2, regVal3; + u32 signature = 0x252acbcd; /* for host scratch pad0 */ unsigned long flags; /* step1: Check FW is ready for soft reset */ @@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_iounmap - which maped when initialized. * @pm8001_ha: our hba card information */ -static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) { s8 bar, logical = 0; for (bar = 0; bar < 6; bar++) { @@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); @@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); @@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) } /** - * mpi_msg_free_get- get the free message buffer for transfer inbound queue. + * pm8001_mpi_msg_free_get - get the free message buffer for transfer + * inbound queue. * @circularQ: the inbound queue we want to transfer to HBA. * @messageSize: the message size of this transfer, normally it is 64 bytes * @messagePtr: the pointer to message. */ -static int mpi_msg_free_get(struct inbound_queue_table *circularQ, +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, u16 messageSize, void **messagePtr) { u32 offset, consumer_index; @@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, u8 bcCount = 1; /* only support single buffer */ /* Checks is the requested message size can be allocated in this queue*/ - if (messageSize > 64) { + if (messageSize > IOMB_SIZE_SPCV) { *messagePtr = NULL; return -1; } @@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, return -1; } /* get memory IOMB buffer address */ - offset = circularQ->producer_idx * 64; + offset = circularQ->producer_idx * messageSize; /* increment to next bcCount element */ circularQ->producer_idx = (circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE; @@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, } /** - * mpi_build_cmd- build the message queue for transfer, update the PI to FW - * to tell the fw to get this message from IOMB. + * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to + * FW to tell the fw to get this message from IOMB. * @pm8001_ha: our hba card information * @circularQ: the inbound queue we want to transfer to HBA. * @opCode: the operation code represents commands which LLDD and fw recognized. 
* @payload: the command payload of each operation command. */ -static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, struct inbound_queue_table *circularQ, - u32 opCode, void *payload) + u32 opCode, void *payload, u32 responseQueue) { u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; - u32 responseQueue = 0; void *pMessage; - if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { + if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, + &pMessage) < 0) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("No free mpi buffer\n")); return -1; } BUG_ON(!payload); /*Copy to the payload*/ - memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); + memcpy(pMessage, payload, (pm8001_ha->iomb_size - + sizeof(struct mpi_msg_hdr))); /*Build the header*/ Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) @@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, circularQ->pi_offset, circularQ->producer_idx); PM8001_IO_DBG(pm8001_ha, - pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx, - circularQ->consumer_index)); + pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n", + responseQueue, opCode, circularQ->producer_idx, + circularQ->consumer_index)); return 0; } -static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, struct outbound_queue_table *circularQ, u8 bc) { u32 producer_index; @@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); if (pOutBoundMsgHeader != msgHeader) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("consumer_idx = %d msgHeader = %p\n", @@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, } /** - * mpi_msg_consume- get the MPI message from outbound queue message table. + * pm8001_mpi_msg_consume- get the MPI message from outbound queue + * message table. * @pm8001_ha: our hba card information * @circularQ: the outbound queue table. * @messagePtr1: the message contents of this outbound message. * @pBC: the message size. 
*/ -static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, struct outbound_queue_table *circularQ, void **messagePtr1, u8 *pBC) { @@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, /*Get the pointer to the circular queue buffer element*/ msgHeader = (struct mpi_msg_hdr *) (circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); /* read header */ header_tmp = pm8001_read_32(msgHeader); msgHeader_tmp = cpu_to_le32(header_tmp); @@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, return MPI_IO_STATUS_BUSY; } -static void pm8001_work_fn(struct work_struct *work) +void pm8001_work_fn(struct work_struct *work) { struct pm8001_work *pw = container_of(work, struct pm8001_work, work); struct pm8001_device *pm8001_dev; @@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work) pm8001_dev = pw->data; /* Most stash device structure */ if ((pm8001_dev == NULL) || ((pw->handler != IO_XFER_ERROR_BREAK) - && (pm8001_dev->dev_type == NO_DEVICE))) { + && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) { kfree(pw); return; } @@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work) } break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: dev = pm8001_dev->sas_device; - pm8001_I_T_nexus_reset(dev); + pm8001_I_T_nexus_event_handler(dev); break; case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: dev = pm8001_dev->sas_device; @@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work) kfree(pw); } -static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, int handler) { struct pm8001_work *pw; @@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, return ret; } +static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + 
return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + /** * mpi_ssp_completion- process the event that FW response to the SSP request. * @pm8001_ha: our hba card information @@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) break; } PM8001_IO_DBG(pm8001_ha, - pm8001_printk("scsi_status = %x \n ", + pm8001_printk("scsi_status = %x\n ", psspPayload->ssp_resp_iu.status)); spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; @@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) status = le32_to_cpu(psataPayload->status); tag = le32_to_cpu(psataPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } ccb = &pm8001_ha->ccb_info[tag]; param = le32_to_cpu(psataPayload->param); - t = ccb->task; + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + ts = &t->task_status; - pm8001_dev = ccb->device; - if (status) + if (!ts) { PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("sata IO status 0x%x\n", status)); - if (unlikely(!t || !t->lldd_task || !t->dev)) + pm8001_printk("ts null\n")); return; + } switch (status) { case IO_SUCCESS: @@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm8001_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } } else { u8 len; ts->resp = 
SAS_TASK_COMPLETE; @@ -2424,6 +2656,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) unsigned long flags; ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!. returning\n")); + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm8001_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; pm8001_dev = ccb->device; if (event) @@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk("port_id = %x,device_id = %x\n", - port_id, dev_id)); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, dev_id, tag, event)); switch (event) { case IO_OVERFLOW: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); @@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static void -mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { struct set_dev_state_resp *pPayload = (struct set_dev_state_resp *)(piomb + 4); @@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct get_nvm_data_resp *pPayload = (struct get_nvm_data_resp *)(piomb + 4); @@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void +pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct fw_control_ex *fw_control_context; struct get_nvm_data_resp *pPayload = @@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct local_phy_ctl_resp *pPayload = (struct local_phy_ctl_resp *)(piomb + 4); @@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) * while receive a broadcast(change) primitive just tell the sas * layer to discover the changed domain rather than the whole domain. 
*/ -static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) { struct pm8001_phy *phy = &pm8001_ha->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; @@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) } /* Get the link rate speed */ -static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) { struct sas_phy *sas_phy = phy->sas_phy.phy; @@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame * buffer. */ -static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr) { if (phy->sas_phy.frame_rcvd[0] == 0x34 @@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, ((phyId & 0x0F) << 4) | (port_id & 0x0F)); payload.param0 = cpu_to_le32(param0); payload.param1 = cpu_to_le32(param1); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); } static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, @@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, PHY_NOTIFY_ENABLE_SPINUP); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_EDGE_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_FANOUT_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("fanout expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; default: PM8001_MSG_DBG(pm8001_ha, @@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) " phy id = %d\n", port_id, phy_id)); port->port_state = portstate; port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; phy->phy_attached = 1; phy->sas_phy.oob_mode = SATA_OOB_MODE; @@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) sizeof(struct dev_to_host_fis)); phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; - phy->identify.device_type = SATA_DEV; + phy->identify.device_type = SAS_SATA_DEV; pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); pm8001_bytes_dmaed(pm8001_ha, phy_id); @@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_reg_resp -process register device ID response. + * pm8001_mpi_reg_resp -process register device ID response. * @pm8001_ha: our hba card information * @piomb: IO message buffer * @@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) * has assigned, from now,inter-communication with FW is no longer using the * SAS address, use device ID which FW assigned. 
*/ -static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +/** + * fw_flash_update_resp - Response from FW for flash update command. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { u32 status; struct fw_control_ex fw_control_context; @@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) break; } ccb->fw_control_context->fw_control->retcode = status; - pci_free_consistent(pm8001_ha->pdev, - fw_control_context.len, - fw_control_context.virtAddr, - fw_control_context.phys_addr); complete(pm8001_ha->nvmd_completion); ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; @@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { u32 status; int i; @@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) return 0; } -static int -mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct sas_task *t; struct pm8001_ccb_info *ccb; @@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 status ; u32 tag, scp; struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; struct task_abort_resp *pPayload = (struct task_abort_resp *)(piomb + 4); status = le32_to_cpu(pPayload->status); tag = le32_to_cpu(pPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TAG NULL. RETURNING !!!")); + return -1; + } + scp = le32_to_cpu(pPayload->scp); ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk(" status = 0x%x\n", status)); - if (t == NULL) + pm8001_dev = ccb->device; /* retrieve device */ + + if (!t) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TASK NULL. 
RETURNING !!!")); return -1; + } ts = &t->task_status; if (status != 0) PM8001_FAIL_DBG(pm8001_ha, @@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb(); - t->task_done(t); + + if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) { + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + /* clear the flag */ + pm8001_dev->id &= 0xBFFFFFFF; + } else + t->task_done(t); + return 0; } @@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_LOCAL_PHY_CNTRL: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); - mpi_local_phy_ctl(pm8001_ha, piomb); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); break; case OPC_OUB_DEV_REGIST: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEV_REGIST\n")); - mpi_reg_resp(pm8001_ha, piomb); + pm8001_mpi_reg_resp(pm8001_ha, piomb); break; case OPC_OUB_DEREG_DEV: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister the device\n")); - mpi_dereg_resp(pm8001_ha, piomb); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEV_HANDLE: PM8001_MSG_DBG(pm8001_ha, @@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_FW_FLASH_UPDATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); - mpi_fw_flash_update_resp(pm8001_ha, piomb); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); break; case OPC_OUB_GPIO_RESPONSE: PM8001_MSG_DBG(pm8001_ha, @@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_GENERAL_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); - mpi_general_event(pm8001_ha, piomb); + pm8001_mpi_general_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SATA_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SAS_DIAG_MODE_START_END: PM8001_MSG_DBG(pm8001_ha, @@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SMP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); - mpi_get_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_SET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); - mpi_set_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_DEVICE_HANDLE_REMOVAL: PM8001_MSG_DBG(pm8001_ha, @@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); - mpi_set_dev_state_resp(pm8001_ha, piomb); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, @@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static int process_oq(struct 
pm8001_hba_info *pm8001_ha) +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; @@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha) unsigned long flags; spin_lock_irqsave(&pm8001_ha->lock, flags); - circularQ = &pm8001_ha->outbnd_q_tbl[0]; + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; do { - ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); if (MPI_IO_STATUS_SUCCESS == ret) { /* process the outbound message */ process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); /* free the message from the outbound circular buffer */ - mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc); + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); } if (MPI_IO_STATUS_BUSY == ret) { /* Update the producer index from SPC */ @@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = { [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ }; -static void +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) { int i; @@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, smp_cmd.long_smp_req.long_resp_size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); - mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); return 0; err_out_2: @@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0); return ret; } @@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 ATAP = 0x0; u32 dir; struct inbound_queue_table *circularQ; + unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } } - if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); ncg_tag = hdr_tag; + } dir = data_dir_flags[task->data_dir] << 8; sata_cmd.tag = cpu_to_le32(tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); @@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + 
SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + } else if (task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } else if (!task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); return ret; } @@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); - payload.sas_identify.dev_type = SAS_END_DEV; + payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } @@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) * @num: the inbound queue number * @phy_id: the phy id which we wanted to start up. */ -static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_stop_req payload; @@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } /** - * see comments on mpi_reg_resp. + * see comments on pm8001_mpi_reg_resp. */ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 flag) @@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, if (flag == 1) stp_sspsmp_sata = 0x02; /*direct attached sata */ else { - if (pm8001_dev->dev_type == SATA_DEV) + if (pm8001_dev->dev_type == SAS_SATA_DEV) stp_sspsmp_sata = 0x00; /* stp*/ - else if (pm8001_dev->dev_type == SAS_END_DEV || - pm8001_dev->dev_type == EDGE_DEV || - pm8001_dev->dev_type == FANOUT_DEV) + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) stp_sspsmp_sata = 0x01; /*ssp or smp*/ } if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) @@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } /** - * see comments on mpi_reg_resp. 
+ * see comments on pm8001_mpi_reg_resp. */ -static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id) { struct dereg_dev_req payload; @@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, payload.device_id = cpu_to_le32(device_id); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister device device_id = %d\n", device_id)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(1); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) * @stat: stat. */ static irqreturn_t -pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) { - pm8001_chip_interrupt_disable(pm8001_ha); - process_oq(pm8001_ha); - pm8001_chip_interrupt_enable(pm8001_ha); + pm8001_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm8001_chip_interrupt_enable(pm8001_ha, vec); return IRQ_HANDLED; } @@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag = cpu_to_le32(cmd_tag); } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); return ret; } @@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, * @task: the task we wanted to aborted. * @flag: the abort flag. */ -static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) { u32 opc, device_id; int rc = TMF_RESP_FUNC_FAILED; - PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" - " = %x", cmd_tag, task_tag)); - if (pm8001_dev->dev_type == SAS_END_DEV) + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("cmd_tag = %x, abort task tag = 0x%x", + cmd_tag, task_tag)); + if (pm8001_dev->dev_type == SAS_END_DEVICE) opc = OPC_INB_SSP_ABORT; - else if (pm8001_dev->dev_type == SATA_DEV) + else if (pm8001_dev->dev_type == SAS_SATA_DEV) opc = OPC_INB_SATA_ABORT; else opc = OPC_INB_SMP_ABORT;/* SMP */ @@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, * @ccb: the ccb information. * @tmf: task management function. 
*/ -static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) { struct sas_task *task = ccb->task; @@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); circularQ = &pm8001_ha->inbnd_q_tbl[0]; - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); return ret; } -static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_GET_NVMD_DATA; @@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; + fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific; fw_control_context->len = ioctl_payload->length; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&nvmd_req, 0, sizeof(nvmd_req)); @@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } -static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_SET_NVMD_DATA; @@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, return -ENOMEM; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, - ioctl_payload->func_specific, + &ioctl_payload->func_specific, ioctl_payload->length); memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); @@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } @@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information. 
* @fw_flash_updata_info: firmware flash update param */ -static int +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, void *fw_flash_updata_info, u32 tag) { @@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); payload.sgl_addr_hi = cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } -static int +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, void *payload) { @@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, int rc; u32 tag; struct pm8001_ccb_info *ccb; - void *buffer = NULL; - dma_addr_t phys_addr; - u32 phys_addr_hi; - u32 phys_addr_lo; + void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr; + dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr; struct pm8001_ioctl_payload *ioctl_payload = payload; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; - if (fw_control->len != 0) { - if (pm8001_mem_alloc(pm8001_ha->pdev, - (void **)&buffer, - &phys_addr, - &phys_addr_hi, - &phys_addr_lo, - fw_control->len, 0) != 0) { - PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("Mem alloc failure\n")); - kfree(fw_control_context); - return -ENOMEM; - } - } + fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; memcpy(buffer, fw_control->buffer, fw_control->len); flash_update_info.sgl.addr = cpu_to_le64(phys_addr); flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); @@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, flash_update_info.total_image_len = fw_control->size; fw_control_context->fw_control = fw_control; fw_control_context->virtAddr = buffer; + fw_control_context->phys_addr = phys_addr; fw_control_context->len = fw_control->len; rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { @@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, return rc; } -static int +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 state) { @@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.device_id = cpu_to_le32(pm8001_dev->device_id); payload.nds = cpu_to_le32(state); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) payload.SSAHOLT = cpu_to_le32(0xd << 25); payload.sata_hol_tmo = cpu_to_le32(80); payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = { .set_dev_state_req = pm8001_chip_set_dev_state_req, .sas_re_init_req = pm8001_chip_sas_re_initialization, }; - diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h index d437309cb1e1..d7c1e2034226 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.h +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -131,6 +131,8 @@ #define LINKRATE_30 
(0x02 << 8) #define LINKRATE_60 (0x04 << 8) +/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ +#define GSM_SM_BASE 0x4F0000 struct mpi_msg_hdr{ __le32 header; /* Bits [11:0] - Message operation code */ /* Bits [15:12] - Message Category */ @@ -298,7 +300,7 @@ struct local_phy_ctl_resp { #define OP_BITS 0x0000FF00 -#define ID_BITS 0x0000000F +#define ID_BITS 0x000000FF /* * brief the data structure of PORT Control Command diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 3d5e522e00fc..e4b9bc7f5410 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -44,8 +44,16 @@ static struct scsi_transport_template *pm8001_stt; +/** + * chip info structure to identify chip key functionality as + * encryption available/not, no of ports, hw specific function ref + */ static const struct pm8001_chip_info pm8001_chips[] = { - [chip_8001] = { 8, &pm8001_8001_dispatch,}, + [chip_8001] = {0, 8, &pm8001_8001_dispatch,}, + [chip_8008] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, + [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, }; static int pm8001_id; @@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } #ifdef PM8001_USE_TASKLET + +/** + * tasklet for 64 msi-x interrupt handler + * @opaque: the passed general host adapter struct + * Note: pm8001_tasklet is common for pm8001 & pm80xx + */ static void pm8001_tasklet(unsigned long opaque) { struct pm8001_hba_info *pm8001_ha; + u32 vec; pm8001_ha = (struct pm8001_hba_info *)opaque; if (unlikely(!pm8001_ha)) BUG_ON(1); - PM8001_CHIP_DISP->isr(pm8001_ha); + vec = pm8001_ha->int_vector; + PM8001_CHIP_DISP->isr(pm8001_ha, vec); +} +#endif + +static struct pm8001_hba_info *outq_to_hba(u8 *outq) +{ + return container_of((outq - *outq), struct pm8001_hba_info, outq[0]); } + +/** + * pm8001_interrupt_handler_msix - main MSIX interrupt handler. + * It obtains the vector number and calls the equivalent bottom + * half or services directly. + * @opaque: the passed outbound queue/vector. Host structure is + * retrieved from the same. + */ +static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) +{ + struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque); + u8 outq = *(u8 *)opaque; + irqreturn_t ret = IRQ_HANDLED; + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) + return IRQ_NONE; + pm8001_ha->int_vector = outq; +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq); #endif + return ret; +} +/** + * pm8001_interrupt_handler_intx - main INTx interrupt handler. + * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure. + */ - /** - * pm8001_interrupt - when HBA originate a interrupt,we should invoke this - * dispatcher to handle each case. - * @irq: irq number. 
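Editorial note: the new MSI-X path above registers one handler per vector and passes &pm8001_ha->outq[i] as the cookie; outq_to_hba() then uses container_of() to walk back from that byte to the owning HBA, while *outq gives the vector number. The following user-space sketch demonstrates only that pointer arithmetic trick, with fabricated structure names.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define MAX_VEC 4

struct fake_hba {
	int id;
	unsigned char outq[MAX_VEC];
};

static struct fake_hba *outq_to_hba(unsigned char *outq)
{
	/* outq points at outq[i] and *outq == i, so outq - *outq == &outq[0] */
	return container_of(outq - *outq, struct fake_hba, outq);
}

int main(void)
{
	struct fake_hba hba = { .id = 42 };
	for (int i = 0; i < MAX_VEC; i++)
		hba.outq[i] = (unsigned char)i;

	unsigned char *cookie = &hba.outq[2];   /* what request_irq() passed */
	printf("hba id=%d vector=%u\n", outq_to_hba(cookie)->id,
	       (unsigned)*cookie);
	return 0;
}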
- * @opaque: the passed general host adapter struct - */ -static irqreturn_t pm8001_interrupt(int irq, void *opaque) +static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) { struct pm8001_hba_info *pm8001_ha; irqreturn_t ret = IRQ_HANDLED; - struct sas_ha_struct *sha = opaque; + struct sas_ha_struct *sha = dev_id; pm8001_ha = sha->lldd_ha; if (unlikely(!pm8001_ha)) return IRQ_NONE; if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; + + pm8001_ha->int_vector = 0; #ifdef PM8001_USE_TASKLET tasklet_schedule(&pm8001_ha->tasklet); #else - ret = PM8001_CHIP_DISP->isr(pm8001_ha); + ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); #endif return ret; } @@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque) * @pm8001_ha:our hba structure. * */ -static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) +static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, + const struct pci_device_id *ent) { int i; spin_lock_init(&pm8001_ha->lock); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("pm8001_alloc: PHY:%x\n", + pm8001_ha->chip->n_phy)); for (i = 0; i < pm8001_ha->chip->n_phy; i++) { pm8001_phy_init(pm8001_ha, i); pm8001_ha->port[i].wide_port_phymap = 0; @@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; pm8001_ha->memoryMap.region[IOP].alignment = 32; - /* MPI Memory region 3 for consumer Index of inbound queues */ - pm8001_ha->memoryMap.region[CI].num_elements = 1; - pm8001_ha->memoryMap.region[CI].element_size = 4; - pm8001_ha->memoryMap.region[CI].total_len = 4; - pm8001_ha->memoryMap.region[CI].alignment = 4; - - /* MPI Memory region 4 for producer Index of outbound queues */ - pm8001_ha->memoryMap.region[PI].num_elements = 1; - pm8001_ha->memoryMap.region[PI].element_size = 4; - pm8001_ha->memoryMap.region[PI].total_len = 4; - pm8001_ha->memoryMap.region[PI].alignment = 4; - - /* MPI Memory region 5 inbound queues */ - pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[IB].element_size = 64; - pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[IB].alignment = 64; - - /* MPI Memory region 6 outbound queues */ - pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[OB].element_size = 64; - pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[OB].alignment = 64; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + /* MPI Memory region 3 for consumer Index of inbound queues */ + pm8001_ha->memoryMap.region[CI+i].num_elements = 1; + pm8001_ha->memoryMap.region[CI+i].element_size = 4; + pm8001_ha->memoryMap.region[CI+i].total_len = 4; + pm8001_ha->memoryMap.region[CI+i].alignment = 4; + + if ((ent->driver_data) != chip_8001) { + /* MPI Memory region 5 inbound queues */ + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 128; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[IB+i].alignment = 128; + } else { + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 64; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[IB+i].alignment = 64; + } + } + + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + /* MPI Memory region 4 for producer Index of 
outbound queues */ + pm8001_ha->memoryMap.region[PI+i].num_elements = 1; + pm8001_ha->memoryMap.region[PI+i].element_size = 4; + pm8001_ha->memoryMap.region[PI+i].total_len = 4; + pm8001_ha->memoryMap.region[PI+i].alignment = 4; + + if (ent->driver_data != chip_8001) { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 128; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[OB+i].alignment = 128; + } else { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 64; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[OB+i].alignment = 64; + } + } /* Memory region write DMA*/ pm8001_ha->memoryMap.region[NVMD].num_elements = 1; pm8001_ha->memoryMap.region[NVMD].element_size = 4096; @@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * sizeof(struct pm8001_ccb_info); + /* Memory region for fw flash */ + pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; + for (i = 0; i < USI_MAX_MEMCNT; i++) { if (pm8001_mem_alloc(pm8001_ha->pdev, &pm8001_ha->memoryMap.region[i].virt_ptr, @@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; for (i = 0; i < PM8001_MAX_DEVICES; i++) { - pm8001_ha->devices[i].dev_type = NO_DEVICE; + pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; pm8001_ha->devices[i].id = i; pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; pm8001_ha->devices[i].running_req = 0; @@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) ioremap(pm8001_ha->io_mem[logicalBar].membase, pm8001_ha->io_mem[logicalBar].memsize); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("PCI: bar %d, logicalBar %d " - "virt_addr=%lx,len=%d\n", bar, logicalBar, - (unsigned long) - pm8001_ha->io_mem[logicalBar].memvirtaddr, + pm8001_printk("PCI: bar %d, logicalBar %d ", + bar, logicalBar)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "base addr %llx virt_addr=%llx len=%d\n", + (u64)pm8001_ha->io_mem[logicalBar].membase, + (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr, pm8001_ha->io_mem[logicalBar].memsize)); } else { pm8001_ha->io_mem[logicalBar].membase = 0; @@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) * @shost: scsi host struct which has been initialized before. 
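Editorial note: the allocation hunks above size every inbound/outbound ring from the per-chip IOMB width: 64-byte elements on the original SPC (chip_8001) versus 128-byte elements on the SPCv family. A tiny sketch of that sizing rule follows; the queue depth constant is an assumed stand-in for PM8001_MPI_QUEUE.

#include <stdio.h>

#define MPI_QUEUE_DEPTH 1024      /* assumed stand-in for PM8001_MPI_QUEUE */

enum chip_family { CHIP_SPC, CHIP_SPCV };

static unsigned int iomb_size(enum chip_family chip)
{
	return chip == CHIP_SPC ? 64 : 128;
}

static unsigned int ring_bytes(enum chip_family chip)
{
	return MPI_QUEUE_DEPTH * iomb_size(chip);
}

int main(void)
{
	printf("SPC ring: %u bytes, SPCv ring: %u bytes\n",
	       ring_bytes(CHIP_SPC), ring_bytes(CHIP_SPCV));
	return 0;
}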
*/ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, - u32 chip_id, - struct Scsi_Host *shost) + const struct pci_device_id *ent, + struct Scsi_Host *shost) + { struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); @@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->pdev = pdev; pm8001_ha->dev = &pdev->dev; - pm8001_ha->chip_id = chip_id; + pm8001_ha->chip_id = ent->driver_data; pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; pm8001_ha->irq = pdev->irq; pm8001_ha->sas = sha; @@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->id = pm8001_id++; pm8001_ha->logging_level = 0x01; sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); + /* IOMB size is 128 for 8088/89 controllers */ + if (pm8001_ha->chip_id != chip_8001) + pm8001_ha->iomb_size = IOMB_SIZE_SPCV; + else + pm8001_ha->iomb_size = IOMB_SIZE_SPC; + #ifdef PM8001_USE_TASKLET + /** + * default tasklet for non msi-x interrupt handler/first msi-x + * interrupt handler + **/ tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + (unsigned long)pm8001_ha); #endif pm8001_ioremap(pm8001_ha); - if (!pm8001_alloc(pm8001_ha)) + if (!pm8001_alloc(pm8001_ha, ent)) return pm8001_ha; pm8001_free(pm8001_ha); return NULL; @@ -512,21 +605,50 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) { - u8 i; + u8 i, j; #ifdef PM8001_READ_VPD + /* For new SPC controllers WWN is stored in flash vpd + * For SPC/SPCve controllers WWN is stored in EEPROM + * For Older SPC WWN is stored in NVMD + */ DECLARE_COMPLETION_ONSTACK(completion); struct pm8001_ioctl_payload payload; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); pm8001_ha->nvmd_completion = &completion; - payload.minor_function = 0; - payload.length = 128; - payload.func_specific = kzalloc(128, GFP_KERNEL); + + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) { + payload.minor_function = 4; + payload.length = 4096; + } else { + payload.minor_function = 0; + payload.length = 128; + } + } else { + payload.minor_function = 1; + payload.length = 4096; + } + payload.offset = 0; + payload.func_specific = kzalloc(payload.length, GFP_KERNEL); PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); + + for (i = 0, j = 0; i <= 7; i++, j++) { + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x704 + i]; + } else + pm8001_ha->sas_addr[j] = + payload.func_specific[0x804 + i]; + } + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { - memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, - SAS_ADDR_SIZE); + memcpy(&pm8001_ha->phy[i].dev_sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("phy %d sas_addr = %016llx \n", i, + pm8001_printk("phy %d sas_addr = %016llx\n", i, pm8001_ha->phy[i].dev_sas_addr)); } #else @@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) * @chip_info: our ha struct. 
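Editorial note: pm8001_init_sas_add() above now reads a larger NVMD block and copies the 8-byte SAS address from a chip-specific offset (0x704 for the 0x8081 SPC boards, 0x804 for the SPCv parts). The sketch below models only that extraction step; the buffer contents and the sample address are fabricated.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SAS_ADDR_SIZE 8

static void extract_sas_addr(const uint8_t *nvmd, size_t nvmd_len,
			     int is_spc_8081, uint8_t out[SAS_ADDR_SIZE])
{
	size_t off = is_spc_8081 ? 0x704 : 0x804;

	if (off + SAS_ADDR_SIZE <= nvmd_len)
		memcpy(out, nvmd + off, SAS_ADDR_SIZE);
	else
		memset(out, 0, SAS_ADDR_SIZE);
}

int main(void)
{
	uint8_t nvmd[4096] = { 0 };
	uint8_t addr[SAS_ADDR_SIZE];

	/* pretend the flash VPD holds this world-wide name */
	memcpy(nvmd + 0x804, "\x50\x01\x04\xf0\x00\x00\x00\x01", 8);
	extract_sas_addr(nvmd, sizeof(nvmd), 0, addr);

	for (int i = 0; i < SAS_ADDR_SIZE; i++)
		printf("%02x", addr[i]);
	printf("\n");
	return 0;
}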
* @irq_handler: irq_handler */ -static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, - irq_handler_t irq_handler) +static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) { u32 i = 0, j = 0; - u32 number_of_intr = 1; + u32 number_of_intr; int flag = 0; u32 max_entry; int rc; + static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3]; + + /* SPCv controllers supports 64 msi-x */ + if (pm8001_ha->chip_id == chip_8001) { + number_of_intr = 1; + flag |= IRQF_DISABLED; + } else { + number_of_intr = PM8001_MAX_MSIX_VEC; + flag &= ~IRQF_SHARED; + flag |= IRQF_DISABLED; + } + max_entry = sizeof(pm8001_ha->msix_entries) / sizeof(pm8001_ha->msix_entries[0]); - flag |= IRQF_DISABLED; for (i = 0; i < max_entry ; i++) pm8001_ha->msix_entries[i].entry = i; rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, number_of_intr); pm8001_ha->number_of_intr = number_of_intr; if (!rc) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "pci_enable_msix request ret:%d no of intr %d\n", + rc, pm8001_ha->number_of_intr)); + + for (i = 0; i < number_of_intr; i++) + pm8001_ha->outq[i] = i; + for (i = 0; i < number_of_intr; i++) { + snprintf(intr_drvname[i], sizeof(intr_drvname[0]), + DRV_NAME"%d", i); if (request_irq(pm8001_ha->msix_entries[i].vector, - irq_handler, flag, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost))) { + pm8001_interrupt_handler_msix, flag, + intr_drvname[i], &pm8001_ha->outq[i])) { for (j = 0; j < i; j++) free_irq( pm8001_ha->msix_entries[j].vector, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + &pm8001_ha->outq[j]); pci_disable_msix(pm8001_ha->pdev); break; } @@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { struct pci_dev *pdev; - irq_handler_t irq_handler = pm8001_interrupt; int rc; pdev = pm8001_ha->pdev; #ifdef PM8001_USE_MSIX if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) - return pm8001_setup_msix(pm8001_ha, irq_handler); - else + return pm8001_setup_msix(pm8001_ha); + else { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MSIX not supported!!!\n")); goto intx; + } #endif intx: /* initialize the INT-X interrupt */ - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED, + DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost)); return rc; } @@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev, { unsigned int rc; u32 pci_reg; + u8 i = 0; struct pm8001_hba_info *pm8001_ha; struct Scsi_Host *shost = NULL; const struct pm8001_chip_info *chip; dev_printk(KERN_INFO, &pdev->dev, - "pm8001: driver version %s\n", DRV_VERSION); + "pm80xx: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) goto err_out_enable; @@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev, goto err_out_free; } pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); - pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); + /* ent->driver variable is used to differentiate between controllers */ + pm8001_ha = pm8001_pci_alloc(pdev, ent, shost); if (!pm8001_ha) { rc = -ENOMEM; goto err_out_free; } list_add_tail(&pm8001_ha->list, &hba_list); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "chip_init failed [ret: %d]\n", rc)); goto err_out_ha_free; + } rc = scsi_add_host(shost, 
&pdev->dev); if (rc) goto err_out_ha_free; rc = pm8001_request_irq(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "pm8001_request_irq failed [ret: %d]\n", rc)); goto err_out_shost; + } + + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + /* setup thermal configuration. */ + pm80xx_set_thermal_config(pm8001_ha); + } - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); pm8001_init_sas_add(pm8001_ha); pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); @@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev) sas_remove_host(pm8001_ha->shost); list_del(&pm8001_ha->list); scsi_remove_host(pm8001_ha->shost); - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &pm8001_ha->outq[i]); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); @@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) printk(KERN_ERR " PCI PM not supported\n"); return -ENODEV; } - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &pm8001_ha->outq[i]); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); @@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int rc; + u8 i = 0; u32 device_state; pm8001_ha = sha->lldd_ha; device_state = pdev->current_state; @@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev) if (rc) goto err_out_disable; - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + /* chip soft rst only for spc */ + if (pm8001_ha->chip_id == chip_8001) { + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip soft reset successful\n")); + } rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); if (rc) goto err_out_disable; - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); + + /* disable all the interrupt bits */ + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + rc = pm8001_request_irq(pm8001_ha); if (rc) goto err_out_disable; - #ifdef PM8001_USE_TASKLET +#ifdef PM8001_USE_TASKLET + /* default tasklet for non msi-x interrupt handler/first msi-x + * interrupt handler */ tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); - #endif - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); + (unsigned long)pm8001_ha); +#endif + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + } 
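Editorial note: the probe and resume hunks above always enable vector 0 and, on the SPCv parts only, enable vectors 1..N-1 as well; the remove/suspend paths pass 0xFF to interrupt_disable() to quiesce everything before freeing each MSI-X vector. A minimal model of that bring-up/teardown shape is shown below with stub callbacks; it is not driver code.

#include <stdio.h>

static void interrupt_enable(int vec)   { printf("enable vec %d\n", vec); }
static void interrupt_disable_all(void) { printf("disable all vectors\n"); }

static void bring_up_irqs(int is_spcv, int nvec)
{
	interrupt_enable(0);
	if (is_spcv)
		for (int i = 1; i < nvec; i++)
			interrupt_enable(i);
}

int main(void)
{
	bring_up_irqs(1, 4);        /* SPCv: enable all four vectors */
	interrupt_disable_all();    /* mirrors interrupt_disable(ha, 0xFF) */
	return 0;
}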
scsi_unblock_requests(pm8001_ha->shost); return 0; @@ -843,14 +1018,45 @@ err_out_enable: return rc; } +/* update of pci device, vendor id and driver data with + * unique value for each of the controller + */ static struct pci_device_id pm8001_pci_table[] = { - { - PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 - }, + { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, { PCI_DEVICE(0x117c, 0x0042), .driver_data = chip_8001 }, + /* Support for SPC/SPCv/SPCve controllers */ + { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, + { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 }, + { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 }, + { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 }, + { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 }, + { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, + { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, + { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, {} /* terminate list */ }; @@ -870,7 +1076,7 @@ static int __init pm8001_init(void) { int rc = -ENOMEM; - pm8001_wq = alloc_workqueue("pm8001", 0, 0); + pm8001_wq = alloc_workqueue("pm80xx", 0, 0); if (!pm8001_wq) goto err; @@ -902,7 +1108,8 @@ module_init(pm8001_init); module_exit(pm8001_exit); MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); -MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); +MODULE_DESCRIPTION( + "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pm8001_pci_table); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index b961112395d5..a85d73de7c80 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) clear_bit(tag, bitmap); } -static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { pm8001_tag_clear(pm8001_ha, tag); } @@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_GET_EVENTS: spin_lock_irqsave(&pm8001_ha->lock, flags); - if (-1 == pm8001_bar4_shift(pm8001_ha, + if (pm8001_ha->chip_id == chip_8001) { + if (-1 == pm8001_bar4_shift(pm8001_ha, (phy_id < 4) ? 
0x30000 : 0x40000)) { - spin_unlock_irqrestore(&pm8001_ha->lock, flags); - return -EINVAL; + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -EINVAL; + } } { struct sas_phy *phy = sas_phy->phy; @@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, phy->loss_of_dword_sync_count = qp[3]; phy->phy_reset_problem_count = qp[4]; } - pm8001_bar4_shift(pm8001_ha, 0); + if (pm8001_ha->chip_id == chip_8001) + pm8001_bar4_shift(pm8001_ha, 0); spin_unlock_irqrestore(&pm8001_ha->lock, flags); return 0; default: @@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost) struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; - PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); + /* SAS_RE_INITIALIZATION not available in SPCv/ve */ + if (pm8001_ha->chip_id == chip_8001) + PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); for (i = 0; i < pm8001_ha->chip->n_phy; ++i) PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); } @@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev) * @tmf: the task management IU */ #define DEV_IS_GONE(pm8001_dev) \ - ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) + ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) static int pm8001_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) { @@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num, struct task_status_struct *tsm = &t->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) t->task_done(t); return 0; } @@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { - if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { + if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { pm8001_ha->devices[dev].id = dev; return &pm8001_ha->devices[dev]; } @@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) } return NULL; } +/** + * pm8001_find_dev - find a matching pm8001_device + * @pm8001_ha: our hba card information + */ +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].device_id == device_id) + return &pm8001_ha->devices[dev]; + } + if (dev == PM8001_MAX_DEVICES) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING " + "DEVICE FOUND !!!\n")); + } + return NULL; +} static void pm8001_free_dev(struct pm8001_device *pm8001_dev) { u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); pm8001_dev->id = id; - pm8001_dev->dev_type = NO_DEVICE; + pm8001_dev->dev_type = SAS_PHY_UNUSED; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; } @@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) res = -1; } } else { - if (dev->dev_type == SATA_DEV) { + if (dev->dev_type == SAS_SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; flag = 1; /* directly sata*/ @@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); spin_unlock_irqrestore(&pm8001_ha->lock, flags); wait_for_completion(&completion); - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) msleep(50); 
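Editorial note: pm8001_find_dev(), added above, is a plain linear scan of the fixed device table keyed on the firmware device_id, with libsas' SAS_PHY_UNUSED now marking free slots. The user-space sketch below mirrors the lookup only; the table size and ids are examples.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MAX_DEVICES 8

struct dev_entry {
	uint32_t device_id;
};

static struct dev_entry *find_dev(struct dev_entry *tbl, size_t n,
				  uint32_t device_id)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].device_id == device_id)
			return &tbl[i];
	return NULL;   /* caller logs "no matching device found" */
}

int main(void)
{
	struct dev_entry tbl[MAX_DEVICES] = { { 0x201 }, { 0x202 } };
	struct dev_entry *d = find_dev(tbl, MAX_DEVICES, 0x202);

	printf("found: %s\n", d ? "yes" : "no");
	return 0;
}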
pm8001_ha->flags = PM8001F_RUN_TIME; return 0; @@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev) return pm8001_dev_found_notify(dev); } -static void pm8001_task_done(struct sas_task *task) +void pm8001_task_done(struct sas_task *task) { if (!del_timer(&task->slow_task->timer)) return; @@ -904,7 +927,7 @@ void pm8001_open_reject_retry( struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; pm8001_dev = ccb->device; - if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)) + if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)) continue; if (!device_to_close) { uintptr_t d = (uintptr_t)pm8001_dev @@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev) return rc; } +/* +* This function handle the IT_NEXUS_XXX event or completion +* status code for SSP/SATA/SMP I/O request. +*/ +int pm8001_I_T_nexus_event_handler(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + u32 device_id = 0; + + if (!dev || !dev->lldd_dev) + return -1; + + pm8001_dev = dev->lldd_dev; + device_id = pm8001_dev->device_id; + pm8001_ha = pm8001_find_ha_by_dev(dev); + + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("I_T_Nexus handler invoked !!")); + + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + DECLARE_COMPLETION_ONSTACK(completion_setstate); + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + pm8001_dev->setds_completion = &completion_setstate; + + wait_for_completion(&completion_setstate); + } else { + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc)); +out: + sas_put_local_phy(phy); + + return rc; +} /* mandatory SAM-3, the task reset the specified LUN*/ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) { diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 11008205aeb3..570819464d90 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -57,8 +57,8 @@ #include <linux/atomic.h> #include "pm8001_defs.h" -#define DRV_NAME "pm8001" -#define DRV_VERSION "0.1.36" +#define DRV_NAME "pm80xx" +#define DRV_VERSION "0.1.37" #define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ #define PM8001_INIT_LOGGING 0x02 /* driver init logging */ #define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ @@ -66,8 +66,8 @@ #define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ #define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ #define PM8001_MSG_LOGGING 0x40 /* misc message logging */ -#define pm8001_printk(format, arg...) 
printk(KERN_INFO "%s %d:" format,\ - __func__, __LINE__, ## arg) +#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \ + format, __func__, __LINE__, ## arg) #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ do { \ if (unlikely(HBA->logging_level & LEVEL)) \ @@ -103,11 +103,12 @@ do { \ #define PM8001_READ_VPD -#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) +#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define PM8001_NAME_LENGTH 32/* generic length of strings */ extern struct list_head hba_list; extern const struct pm8001_dispatch pm8001_8001_dispatch; +extern const struct pm8001_dispatch pm8001_80xx_dispatch; struct pm8001_hba_info; struct pm8001_ccb_info; @@ -131,15 +132,15 @@ struct pm8001_ioctl_payload { struct pm8001_dispatch { char *name; int (*chip_init)(struct pm8001_hba_info *pm8001_ha); - int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature); + int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha); void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); - irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha); + irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec); u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); - int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha); + int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec); void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); int (*smp_req)(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb); @@ -173,6 +174,7 @@ struct pm8001_dispatch { }; struct pm8001_chip_info { + u32 encrypt; u32 n_phy; const struct pm8001_dispatch *dispatch; }; @@ -204,7 +206,7 @@ struct pm8001_phy { }; struct pm8001_device { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct domain_device *sas_device; u32 attached_phy; u32 id; @@ -256,7 +258,20 @@ struct mpi_mem_req { struct mpi_mem region[USI_MAX_MEMCNT]; }; -struct main_cfg_table { +struct encrypt { + u32 cipher_mode; + u32 sec_mode; + u32 status; + u32 flag; +}; + +struct sas_phy_attribute_table { + u32 phystart1_16[16]; + u32 outbound_hw_event_pid1_16[16]; +}; + +union main_cfg_table { + struct { u32 signature; u32 interface_rev; u32 firmware_rev; @@ -292,19 +307,69 @@ struct main_cfg_table { u32 fatal_err_dump_length1; u32 hda_mode_flag; u32 anolog_setup_table_offset; + u32 rsvd[4]; + } pm8001_tbl; + + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 rsvd[8]; + u32 crc_core_dump; + u32 rsvd1; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_severity; + u32 upper_pcs_event_log_addr; + u32 lower_pcs_event_log_addr; + u32 pcs_event_log_size; + u32 pcs_event_log_severity; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 gpio_led_mapping; + u32 analog_setup_table_offset; + u32 int_vec_table_offset; + u32 
phy_attr_table_offset; + u32 port_recovery_timer; + u32 interrupt_reassertion_delay; + } pm80xx_tbl; }; -struct general_status_table { + +union general_status_table { + struct { u32 gst_len_mpistate; u32 iq_freeze_state0; u32 iq_freeze_state1; u32 msgu_tcnt; u32 iop_tcnt; - u32 reserved; + u32 rsvd; u32 phy_state[8]; - u32 reserved1; - u32 reserved2; - u32 reserved3; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm8001_tbl; + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd[9]; + u32 gpio_input_val; + u32 rsvd1[2]; u32 recover_err_info[8]; + } pm80xx_tbl; }; struct inbound_queue_table { u32 element_pri_size_cnt; @@ -351,15 +416,21 @@ struct pm8001_hba_info { struct device *dev; struct pm8001_hba_memspace io_mem[6]; struct mpi_mem_req memoryMap; + struct encrypt encrypt_info; /* support encryption */ void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ - struct main_cfg_table main_cfg_tbl; - struct general_status_table gs_tbl; - struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; - struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; + void __iomem *pspa_q_tbl_addr; + /*MPI SAS PHY attributes Queue Config Table Addr*/ + void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ + union main_cfg_table main_cfg_tbl; + union general_status_table gs_tbl; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM]; + struct sas_phy_attribute_table phy_attr_table; + /* MPI SAS PHY attributes */ u8 sas_addr[SAS_ADDR_SIZE]; struct sas_ha_struct *sas;/* SCSI/SAS glue */ struct Scsi_Host *shost; @@ -372,10 +443,12 @@ struct pm8001_hba_info { struct pm8001_port port[PM8001_MAX_PHYS]; u32 id; u32 irq; + u32 iomb_size; /* SPC and SPCV IOMB size */ struct pm8001_device *devices; struct pm8001_ccb_info *ccb_info; #ifdef PM8001_USE_MSIX - struct msix_entry msix_entries[16];/*for msi-x interrupt*/ + struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC]; + /*for msi-x interrupt*/ int number_of_intr;/*will be used in remove()*/ #endif #ifdef PM8001_USE_TASKLET @@ -383,7 +456,10 @@ struct pm8001_hba_info { #endif u32 logging_level; u32 fw_status; + u32 smp_exp_mode; + u32 int_vector; const struct firmware *fw_image; + u8 outq[PM8001_MAX_MSIX_VEC]; }; struct pm8001_work { @@ -419,6 +495,9 @@ struct pm8001_fw_image_header { #define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 #define FLASH_UPDATE_DISABLED 0x11 +#define NCQ_READ_LOG_FLAG 0x80000000 +#define NCQ_ABORT_ALL_FLAG 0x40000000 +#define NCQ_2ND_RLE_FLAG 0x20000000 /** * brief param structure for firmware flash update. 
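Editorial note: main_cfg_table and general_status_table become unions above, with a pm8001_tbl view for the SPC layout and a pm80xx_tbl view for SPCv, so the same storage in pm8001_hba_info serves both generations and each access site selects the member by chip id. The sketch below shows only the access pattern with two made-up fields per layout.

#include <stdio.h>
#include <stdint.h>

enum chip { CHIP_8001 /* SPC */, CHIP_80XX /* SPCv/ve */ };

union main_cfg {
	struct { uint32_t signature; uint32_t anolog_setup_table_offset; } spc;
	struct { uint32_t signature; uint32_t int_vec_table_offset; } spcv;
};

static uint32_t cfg_signature(enum chip chip, const union main_cfg *t)
{
	return chip == CHIP_8001 ? t->spc.signature : t->spcv.signature;
}

int main(void)
{
	union main_cfg t = { .spcv = { .signature = 0x53434D50,
				       .int_vec_table_offset = 0x8C } };

	printf("signature 0x%08x\n", (unsigned)cfg_signature(CHIP_80XX, &t));
	return 0;
}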
*/ @@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev); void pm8001_dev_gone(struct domain_device *dev); int pm8001_lu_reset(struct domain_device *dev, u8 *lun); int pm8001_I_T_nexus_reset(struct domain_device *dev); +int pm8001_I_T_nexus_event_handler(struct domain_device *dev); int pm8001_query_task(struct sas_task *task); void pm8001_open_reject_retry( struct pm8001_hba_info *pm8001_ha, @@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align); +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha); +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + struct inbound_queue_table *circularQ, + u32 opCode, void *payload, u32 responseQueue); +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr); +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc); +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC); +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload); +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag); +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, + struct pm8001_tmf_task *tmf); +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, + u8 flag, u32 task_tag, u32 cmd_tag); +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id); +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd); +void pm8001_work_fn(struct work_struct *work); +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, + void *data, int handler); +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate); +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr); +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i); +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb); +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +struct sas_task *pm8001_alloc_task(void); +void pm8001_task_done(struct sas_task *task); +void pm8001_free_task(struct sas_task *task); +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id); +int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); + int pm8001_bar4_shift(struct pm8001_hba_info 
*pm8001_ha, u32 shiftValue); /* ctl shared API */ diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c new file mode 100644 index 000000000000..302514d8157b --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -0,0 +1,4130 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 PMC-Sierra, Inc., + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include <linux/slab.h> + #include "pm8001_sas.h" + #include "pm80xx_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" + +#define SMP_DIRECT 1 +#define SMP_INDIRECT 2 +/** + * read_main_config_table - read the configure table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature = + pm8001_mr32(address, MAIN_SIGNATURE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev = + pm8001_mr32(address, MAIN_INTERFACE_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev = + pm8001_mr32(address, MAIN_FW_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io = + pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl = + pm8001_mr32(address, MAIN_MAX_SGL_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag = + pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset = + pm8001_mr32(address, MAIN_GST_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); + + /* read GPIO LED settings from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping = + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset = + pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset = + pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET); +} + +/** + * read_general_status_table - read the general status table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate = + pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt = + pm8001_mr32(address, GST_MSGUTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt = + pm8001_mr32(address, GST_IOPTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val = + pm8001_mr32(address, GST_GPIO_INPUT_VAL); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] = + pm8001_mr32(address, GST_RERRINFO_OFFSET0); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] = + pm8001_mr32(address, GST_RERRINFO_OFFSET1); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] = + pm8001_mr32(address, GST_RERRINFO_OFFSET2); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] = + pm8001_mr32(address, GST_RERRINFO_OFFSET3); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] = + pm8001_mr32(address, GST_RERRINFO_OFFSET4); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] = + pm8001_mr32(address, GST_RERRINFO_OFFSET5); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] = + pm8001_mr32(address, GST_RERRINFO_OFFSET6); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] = + pm8001_mr32(address, GST_RERRINFO_OFFSET7); +} +/** + * read_phy_attr_table - read the phy attribute table and save it. + * @pm8001_ha: our hba card information + */ +static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->pspa_q_tbl_addr; + pm8001_ha->phy_attr_table.phystart1_16[0] = + pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[1] = + pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[2] = + pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[3] = + pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[4] = + pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[5] = + pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[6] = + pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[7] = + pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[8] = + pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[9] = + pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[10] = + pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[11] = + pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[12] = + pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[13] = + pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[14] = + pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[15] = + pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET); + + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET); + 
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET); + +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + IB_PIPCI_BAR))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + OB_CIPCI_BAR))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET)); + } +} + +/** + * init_default_table_values - init the default table. 
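Editorial note: read_phy_attr_table() above performs thirty-two explicit 32-bit reads. Assuming the PSPA_PHYSTATE* and PSPA_OB_HW_EVENT_PID* registers are consecutive words (which the explicit offsets suggest but this sketch does not prove), the same walk can be written as a loop; the base offsets here are placeholders and memcpy stands in for pm8001_mr32()/readl().

#include <stdint.h>
#include <string.h>

#define PSPA_PHYSTATE_BASE      0x00   /* assumed first PHYSTATE offset */
#define PSPA_OB_EVENT_PID_BASE  0x40   /* assumed first PID offset */

static uint32_t mmio_read32(const uint8_t *base, uint32_t off)
{
	uint32_t v;

	memcpy(&v, base + off, sizeof(v));   /* real code uses pm8001_mr32() */
	return v;
}

static void read_phy_attr_table(const uint8_t *pspa,
				uint32_t phystate[16], uint32_t ob_pid[16])
{
	for (int i = 0; i < 16; i++) {
		phystate[i] = mmio_read32(pspa, PSPA_PHYSTATE_BASE + 4 * i);
		ob_pid[i]   = mmio_read32(pspa, PSPA_OB_EVENT_PID_BASE + 4 * i);
	}
}

int main(void)
{
	static uint8_t fake_bar[0x100];
	uint32_t phystate[16], ob_pid[16];

	read_phy_attr_table(fake_bar, phystate, ob_pid);
	return 0;
}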
+ * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + + /* Disable end to end CRC checking */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); + + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[IB + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[CI + i].virt_ptr; + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + } + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[OB + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; + /* interrupt vector based on oq */ + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[PI + i].virt_ptr; + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; 
+ } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); + pm8001_mw32(address, MAIN_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); + pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump); + + /* SPCv specific */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF; + /* Set GPIOLED to 0x2 for LED indicator */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000; + pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping); + + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); + pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + IB_PROPERITY_OFFSET, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. 
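Editorial note: the update_* helpers above address each queue's configuration entry at table_base + number * stride, with a 0x20-byte stride for inbound entries and 0x24 for outbound entries (the extra dword carries the interrupt coalescing/vector field). The small helper below just computes those offsets as a worked example.

#include <stdio.h>
#include <stdint.h>

#define IB_ENTRY_STRIDE 0x20
#define OB_ENTRY_STRIDE 0x24

static uint32_t ib_field_off(int queue, uint32_t field_off)
{
	return queue * IB_ENTRY_STRIDE + field_off;
}

static uint32_t ob_field_off(int queue, uint32_t field_off)
{
	return queue * OB_ENTRY_STRIDE + field_off;
}

int main(void)
{
	/* e.g. the PI/CI PCI BAR field at +0x14 of entry 3 */
	printf("IB3 +0x14 -> 0x%x, OB3 +0x14 -> 0x%x\n",
	       (unsigned)ib_field_off(3, 0x14),
	       (unsigned)ob_field_off(3, 0x14));
	return 0;
}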
+ * @pm8001_ha: our hba card information + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + OB_PROPERITY_OFFSET, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); +} + +/** + * mpi_init_check - check firmware initialization status. + * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) + return -1; + /* check the MPI-State for initialization upto 100ms*/ + max_wait_count = 100 * 1000;/* 100 msec */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + } while ((GST_MPI_STATE_INIT != + (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count)); + if (!max_wait_count) + return -1; + + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -1; + + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. 
+ * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; + u32 max_wait_count; + u32 max_wait_time; + int ret = 0; + + /* reset / PCIe ready */ + max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while ((value == 0xFFFFFFFF) && (--max_wait_count)); + + /* check ila status */ + max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_ILA_READY) != + SCRATCH_PAD_ILA_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" ila ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check RAAE status */ + max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_RAAE_READY) != + SCRATCH_PAD_RAAE_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" raae ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop0 status */ + max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) && + (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" iop0 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop1 status only for 16 port controllers */ + if ((pm8001_ha->chip_id != chip_8008) && + (pm8001_ha->chip_id != chip_8009)) { + /* 200 milli sec */ + max_wait_time = max_wait_count = 200 * 1000; + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP1_READY) != + SCRATCH_PAD_IOP1_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "iop1 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + } + + return ret; +} + +static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */ + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n", + offset, value)); + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar)); + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + pm8001_ha->general_stat_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) & + 0xFFFFFF); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) & + 0xFFFFFF); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) & + 0xFFFFFF); + pm8001_ha->ivt_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) & + 0xFFFFFF); + pm8001_ha->pspa_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & + 0xFFFFFF); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GST 
OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("INBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("OBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("IVT OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PSPA OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x90))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - main cfg %p general status %p\n", + pm8001_ha->main_cfg_tbl_addr, + pm8001_ha->general_stat_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - inbnd %p obnd %p\n", + pm8001_ha->inbnd_q_tbl_addr, + pm8001_ha->outbnd_q_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - pspa %p ivt %p\n", + pm8001_ha->pspa_q_tbl_addr, + pm8001_ha->ivt_tbl_addr)); +} + +/** + * pm80xx_set_thermal_config - support the thermal configuration + * @pm8001_ha: our hba card information. + */ +int +pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | THERMAL_OP_CODE; + payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return rc; + +} + +/** +* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol +* Timer configuration page +* @pm8001_ha: our hba card information. 
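+*
+* A minimal descriptive note (assumed from the code below): builds an
+* OPC_INB_SET_CONTROLLER_CONFIG request for the SAS protocol timer
+* configuration page and posts it on inbound queue 0.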
+*/ +static int +pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + SASProtocolTimerConfig_t SASConfigPage; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + + SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE; + SASConfigPage.MST_MSI = 3 << 15; + SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO; + SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER; + SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME; + + if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF) + SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF; + + + SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) | + SAS_OPNRJT_RTRY_INTVL; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16) + | SAS_COPNRJT_RTRY_TMO; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16) + | SAS_COPNRJT_RTRY_THR; + SASConfigPage.MAX_AIP = SAS_MAX_AIP; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.pageCode " + "0x%08x\n", SASConfigPage.pageCode)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.MST_MSI " + " 0x%08x\n", SASConfigPage.MST_MSI)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO " + " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_FRM_TMO " + " 0x%08x\n", SASConfigPage.STP_FRM_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_IDLE_TMO " + " 0x%08x\n", SASConfigPage.STP_IDLE_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL " + " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP " + " 0x%08x\n", SASConfigPage.MAX_AIP)); + + memcpy(&payload.cfg_pg, &SASConfigPage, + sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_get_encrypt_info - Check for encryption + * @pm8001_ha: our hba card information. 
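+ *
+ * Reads the encryption state from the MSGU_SCRATCH_PAD_3 register and records
+ * the cipher mode and security mode in pm8001_ha->encrypt_info.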
+ */ +static int +pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) +{ + u32 scratch3_value; + int ret; + + /* Read encryption status from SCRATCH PAD 3 */ + scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_READY) { + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_ha->encrypt_info.status = 0; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X." + "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) == + SCRATCH_PAD3_ENC_DISABLED) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n", + scratch3_value)); + pm8001_ha->encrypt_info.status = 0xFFFFFFFF; + pm8001_ha->encrypt_info.cipher_mode = 0; + pm8001_ha->encrypt_info.sec_mode = 0; + return 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_DIS_ERR) { + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_ENA_ERR) { + + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } + return ret; +} + +/** + * pm80xx_encrypt_update - update flash with encryption informtion + * @pm8001_ha: our hba card information. 
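+ *
+ * Issues an OPC_INB_KEK_MANAGEMENT request asking the firmware to store the
+ * current KEK to NVRAM (see the new_curidx_ksop encoding below).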
+ */
+static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
+{
+ struct kek_mgmt_req payload;
+ struct inbound_queue_table *circularQ;
+ int rc;
+ u32 tag;
+ u32 opc = OPC_INB_KEK_MANAGEMENT;
+
+ memset(&payload, 0, sizeof(struct kek_mgmt_req));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return -1;
+
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ payload.tag = cpu_to_le32(tag);
+ /* Currently only one key is used. New KEK index is 1.
+ * Current KEK index is 1. Store KEK to NVRAM is 1.
+ */
+ payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
+ KEK_MGMT_SUBOP_KEYCARDUPDATE);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+ return rc;
+}
+
+/**
+ * pm80xx_chip_init - the main init function that initializes the whole PM80xx chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+ int ret;
+ u8 i = 0;
+
+ /* check the firmware status */
+ if (-1 == check_fw_ready(pm8001_ha)) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Firmware is not ready!\n"));
+ return -EBUSY;
+ }
+
+ /* Initialize PCI space addresses, e.g. the MPI offset */
+ init_pci_device_addresses(pm8001_ha);
+ init_default_table_values(pm8001_ha);
+ read_main_config_table(pm8001_ha);
+ read_general_status_table(pm8001_ha);
+ read_inbnd_queue_table(pm8001_ha);
+ read_outbnd_queue_table(pm8001_ha);
+ read_phy_attr_table(pm8001_ha);
+
+ /* update main config table, inbound table and outbound table */
+ update_main_config_table(pm8001_ha);
+ for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+ update_inbnd_queue_table(pm8001_ha, i);
+ for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
+ update_outbnd_queue_table(pm8001_ha, i);
+
+ /* notify firmware update finished and check initialization status */
+ if (0 == mpi_init_check(pm8001_ha)) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("MPI initialize successful!\n"));
+ } else
+ return -EBUSY;
+
+ /* send SAS protocol timer configuration page to FW */
+ ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+
+ /* Check for encryption */
+ if (pm8001_ha->chip->encrypt) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Checking for encryption\n"));
+ ret = pm80xx_get_encrypt_info(pm8001_ha);
+ if (ret == -1) {
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("Encryption error !!\n"));
+ if (pm8001_ha->encrypt_info.status == 0x81) {
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "Encryption enabled with error."
+ "Saving encryption key to flash\n")); + pm80xx_encrypt_update(pm8001_ha); + } + } + } + return 0; +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + init_pci_device_addresses(pm8001_ha); + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:IBDB value/=%x\n", value)); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK)); + return -1; + } + + return 0; +} + +/** + * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all + * the FW register status to the originated status. + * @pm8001_ha: our hba card information + */ + +static int +pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regval; + u32 bootloader_state; + + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MPI state is not ready\n")); + return -1; + } + + /* checked for reset register normal state; 0x0 */ + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register before write : 0x%x\n", regval)); + + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); + mdelay(500); + + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register after write 0x%x\n", regval)); + + if ((regval & SPCv_SOFT_RESET_READ_MASK) == + SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset successful [regval: 0x%x]\n", + regval)); + } else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset failed [regval: 0x%x]\n", + regval)); + + /* check bootloader is successfully executed or in HDA mode */ + bootloader_state = + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_BOOTSTATE_MASK; + + if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode SEEPROM\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode Bootstrap Pin\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode soft reset\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state-HDA mode critical error\n")); + } + return -EBUSY; + } + + /* check the firmware status after reset */ + if (-1 == 
check_fw_ready(pm8001_ha)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Firmware is not ready!\n")); + return -EBUSY; + } + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPCv soft reset Complete\n")); + return 0; +} + +static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset start\n")); + + /* do SPCv chip reset. */ + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPC soft reset Complete\n")); + + /* Check this ..whether delay is required or no */ + /* delay 10 usec */ + udelay(10); + + /* wait for 20 msec until the firmware gets reloaded */ + i = 20; + do { + mdelay(1); + } while ((--i) != 0); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset finished\n")); +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); +} + +/** + * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL); +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + u32 mask; + mask = (u32)(1 << vec); + + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); + return; +#endif + pm80xx_chip_intx_interrupt_enable(pm8001_ha); + +} + +/** + * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + u32 mask; + if (vec == 0xFF) + mask = 0xFFFFFFFF; + else + mask = (u32)(1 << vec); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); + return; +#endif + pm80xx_chip_intx_interrupt_disable(pm8001_ha); +} + +static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 
ccb_tag;
+ struct pm8001_ccb_info *ccb;
+ struct sas_task *task = NULL;
+ struct host_to_dev_fis fis;
+ struct domain_device *dev;
+ struct inbound_queue_table *circularQ;
+ u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+ task = sas_alloc_slow_task(GFP_ATOMIC);
+
+ if (!task) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("cannot allocate task !!!\n"));
+ return;
+ }
+ task->task_done = pm8001_task_done;
+
+ res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+ if (res) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("cannot allocate tag !!!\n"));
+ sas_free_task(task);
+ return;
+ }
+
+ /* allocate the domain device ourselves, as libsas
+ * is not going to provide one
+ */
+ dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+ if (!dev) {
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Domain device cannot be allocated\n"));
+ sas_free_task(task);
+ return;
+ } else {
+ task->dev = dev;
+ task->dev->lldd_dev = pm8001_ha_dev;
+ }
+
+ ccb = &pm8001_ha->ccb_info[ccb_tag];
+ ccb->device = pm8001_ha_dev;
+ ccb->ccb_tag = ccb_tag;
+ ccb->task = task;
+ pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+ pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+ memset(&sata_cmd, 0, sizeof(sata_cmd));
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+ /* construct read log FIS */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.fis_type = 0x27;
+ fis.flags = 0x80;
+ fis.command = ATA_CMD_READ_LOG_EXT;
+ fis.lbal = 0x10;
+ fis.sector_count = 0x1;
+
+ sata_cmd.tag = cpu_to_le32(ccb_tag);
+ sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+ sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+ memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
+/**
+ * mpi_ssp_completion - process the firmware's response to an SSP request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When the FW has completed an SSP request, for example an IO request, and has
+ * filled the SG buffers with data, it triggers this event to signal that it
+ * has finished the job; the corresponding buffer should then be checked.
+ * We complete the task here so that a caller waiting for the result can tell
+ * the upper layer that the task has finished.
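+ *
+ * The ccb and its tag are freed here; unless the task was already aborted by
+ * the upper layer, its ->task_done() callback is then invoked.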
+ */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SUCCESS ,param = 0x%x\n", + param)); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n", + param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + 
pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_TM_TAG_NOT_FOUND: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_TM_TAG_NOT_FOUND\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + } + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("scsi_status = 0x%x\n ", + psspPayload->ssp_resp_iu.status)); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with io_status 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", event)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");) + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); + return; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + 
ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + return; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with event 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + tag = le32_to_cpu(psataPayload->tag); + + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psataPayload->param); + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + if (!ts) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ts null\n")); + return; + } + + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm80xx_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SAS_PROTO_RESPONSE len = %d\n", + param)); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == PCI_DMA_FROMDEVICE) { + len = sizeof(struct pio_setup_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("PIO read len = %d\n", len)); + } else if (t->ata_task.use_ncq) { + len = sizeof(struct set_dev_bits_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("FPDMA len = %d\n", len)); + } else { + len = sizeof(struct dev_to_host_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("other len = %d\n", len)); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + 
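/* copy the returned FIS (D2H, PIO setup or set device bits)
+ * into the libsas response buffer only when it fits */
+ 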
resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("response to large\n")); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + /* following cases are to do cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW param = %d\n", param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*in order to force CPU ordering*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + 
"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else if (t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } else if (!t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + unsigned long flags; + + ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!. 
returning\n")); + return; + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm80xx_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + if (unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = 
SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + break; + case IO_XFER_PIO_SETUP_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_DMA_ACTIVATE_TIMEOUT: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else if (t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } else if (!t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 param, i; + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + char *pdma_respaddr = NULL; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psmpPayload->param); + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("smp IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + + switch (status) { + + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + if (pm8001_dev) + pm8001_dev->running_req--; + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("DIRECT RESPONSE Length:%d\n", + param)); + pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64 + ((u64)sg_dma_address + (&t->smp_task.smp_resp)))); + for (i = 0; i < param; i++) { + *(pdma_respaddr+i) = psmpPayload->_r_a[i]; + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "SMP Byte%d DMA data 0x%x psmp 0x%x\n", + i, *(pdma_respaddr+i), + psmpPayload->_r_a[i])); + } + } + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_HW_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case 
IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk(\ + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_RX_FRAME\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with io_status 0x%x resp 0x%x" + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/** + * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW. + * @pm8001_ha: our hba card information + * @Qnum: the outbound queue message number. + * @SEA: source of event to ack + * @port_id: port id. + * @phyId: phy id. + * @param0: parameter 0. + * @param1: parameter 1. + */ +static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + struct inbound_queue_table *circularQ; + + memset((u8 *)&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[Qnum]; + payload.tag = cpu_to_le32(1); + payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0xFF) << 24) | (port_id & 0xFF)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); +} + +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +/** + * hw_event_sas_phy_up -FW tells me a SAS phy up event. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + port->port_state = portstate; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "portid:%d; phyid:%d; linkrate:%d; " + "portstate:%x; devicetype:%x\n", + port_id, phy_id, link_rate, portstate, deviceType)); + + switch (deviceType) { + case SAS_PHY_UNUSED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("device type no device.\n")); + break; + case SAS_END_DEVICE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); + pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("expander device.\n")); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("fanout expander device.\n")); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unknown device type(%x)\n", deviceType)); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200);/*delay a moment to wait disk to spinup*/ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up -FW tells me a SATA phy up event. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "port id %d, phy id %d link_rate %d portstate 0x%x\n", + port_id, phy_id, link_rate, portstate)); + + port->port_state = portstate; + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + phy->phy_type |= PORT_TYPE_SATA; + phy->phy_attached = 1; + phy->sas_phy.oob_mode = SATA_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4), + sizeof(struct dev_to_host_fis)); + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + phy->identify.device_type = SAS_SATA_DEV; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_phy_down -we should notify the libsas the phy is down. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + port->port_state = portstate; + phy->phy_type = 0; + phy->identify.device_type = 0; + phy->phy_attached = 0; + memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE); + switch (portstate) { + case PORT_VALID: + break; + case PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" PortInvalid portID %d\n", port_id)); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + case PORT_IN_RESET: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Port In Reset portID %d\n", port_id)); + break; + case PORT_NOT_ESTABLISHED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); + port->port_attached = 0; + break; + case PORT_LOSTCOMM: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + default: + port->port_attached = 0; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and(default) = 0x%x\n", + portstate)); + break; + + } +} + +static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_start_resp *pPayload = + (struct phy_start_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phy_id = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n", + status, phy_id)); + if (status == 0) { + phy->phy_state = 1; + if (pm8001_ha->flags == PM8001F_RUN_TIME) + complete(phy->enable_completion); + } + return 0; + +} + +/** + * mpi_thermal_hw_event -The hw event has come. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct thermal_hw_event *pPayload = + (struct thermal_hw_event *)(piomb + 4); + + u32 thermal_event = le32_to_cpu(pPayload->thermal_event); + u32 rht_lht = le32_to_cpu(pPayload->rht_lht); + + if (thermal_event & 0x40) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Local high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured local high temperature %d\n", + ((rht_lht & 0xFF00) >> 8))); + } + if (thermal_event & 0x10) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Remote high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured remote high temperature %d\n", + ((rht_lht & 0xFF000000) >> 24))); + } + return 0; +} + +/** + * mpi_hw_event -The hw event has come. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u16 eventType = + (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_status_evt_portid & 0x0F000000) >> 24); + + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status)); + + switch (eventType) { + + case HW_EVENT_SAS_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_START_STATUS\n")); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_PHY_UP\n")); + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_SPINUP_HOLD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); + break; + case HW_EVENT_PHY_DOWN: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_DOWN\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); + phy->phy_attached = 0; + phy->phy_state = 0; + hw_event_phy_down(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_INVALID\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_PHY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_ERROR\n")); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); + break; + case HW_EVENT_BROADCAST_EXP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_EXP\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 
0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_MALFUNCTION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_MALFUNCTION\n")); + break; + case HW_EVENT_BROADCAST_SES: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_SES\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n")); + sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_PORT_RECOVERY_TIMER_TMO, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVER: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVER\n")); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n")); + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Unknown event type 0x%x\n", eventType)); + break; + } + return 0; +} + +/** + * mpi_phy_stop_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message 
buffer + */ +static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_stop_resp *pPayload = + (struct phy_stop_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phyid = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("phy:0x%x status:0x%x\n", + phyid, status)); + if (status == 0) + phy->phy_state = 0; + return 0; +} + +/** + * mpi_set_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_ctrl_cfg_resp *pPayload = + (struct set_ctrl_cfg_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", + status, err_qlfr_pgcd)); + + return 0; +} + +/** + * mpi_get_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_get_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_flash_op_ext_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_set_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_kek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4); + + u32 status = le32_to_cpu(pPayload->status); + u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop); + u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "KEK MGMT RESP. 
Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n", + status, kidx_new_curr_ksop, err_qlfr)); + + return 0; +} + +/** + * mpi_dek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * ssp_coalesced_comp_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); + + switch (opc) { + case OPC_OUB_ECHO: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n")); + break; + case OPC_OUB_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_HW_EVENT\n")); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_THERM_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_THERMAL_EVENT\n")); + mpi_thermal_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_COMP\n")); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_COMP\n")); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_REGIST\n")); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unresgister the deviece\n")); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n")); + break; + case OPC_OUB_SATA_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_COMP\n")); + mpi_sata_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_EVENT\n")); + mpi_sata_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_EVENT\n")); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n")); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n")); + /*This is for target*/ + break; + case OPC_OUB_FW_FLASH_UPDATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_RESPONSE\n")); + break; + case OPC_OUB_GPIO_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_EVENT\n")); + break; + case OPC_OUB_GENERAL_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); + pm8001_mpi_general_event(pm8001_ha, piomb); 
+ break; + case OPC_OUB_SSP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n")); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n")); + break; + case OPC_OUB_GET_TIME_STAMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_TIME_STAMP\n")); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n")); + break; + case OPC_OUB_PORT_CONTROL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_PORT_CONTROL\n")); + break; + case OPC_OUB_SMP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n")); + break; + case OPC_OUB_SET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n")); + break; + case OPC_OUB_SET_DEV_INFO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEV_INFO\n")); + break; + /* spcv specifc commands */ + case OPC_OUB_PHY_START_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_START_RESP opcode:%x\n", opc)); + mpi_phy_start_resp(pm8001_ha, piomb); + break; + case OPC_OUB_PHY_STOP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc)); + mpi_phy_stop_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_set_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_get_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc)); + mpi_get_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_FLASH_OP_EXT: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc)); + mpi_flash_op_ext_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc)); + mpi_set_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_KEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc)); + mpi_kek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc)); + 
mpi_dek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COALESCED_COMP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc)); + ssp_coalesced_comp_resp(pm8001_ha, piomb); + break; + default: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Unknown outbound Queue IOMB OPC = 0x%x\n", opc)); + break; + } +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 uninitialized_var(bc); + u32 ret = MPI_IO_STATUS_FAIL; + unsigned long flags; + + spin_lock_irqsave(&pm8001_ha->lock, flags); + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; + do { + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); + } + if (MPI_IO_STATUS_BUSY == ret) { + /* Update the producer index from SPC */ + circularQ->producer_index = + cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); + if (le32_to_cpu(circularQ->producer_index) == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return ret; +} + +/* PCI_DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */ + [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */ + [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ + [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ +}; + +static void build_smp_cmd(u32 deviceID, __le32 hTag, + struct smp_req *psmp_cmd, int mode, int length) +{ + psmp_cmd->tag = hTag; + psmp_cmd->device_id = cpu_to_le32(deviceID); + if (mode == SMP_DIRECT) { + length = length - 4; /* subtract crc */ + psmp_cmd->len_ip_ir = cpu_to_le32(length << 16); + } else { + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); + } +} + +/** + * pm8001_chip_smp_req - send a SMP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
+ */ +static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + struct inbound_queue_table *circularQ; + char *preq_dma_addr = NULL; + __le64 tmp_addr; + u32 i, length; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + + length = sg_req->length; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP Frame Length %d\n", sg_req->length)); + if (!(length - 8)) + pm8001_ha->smp_exp_mode = SMP_DIRECT; + else + pm8001_ha->smp_exp_mode = SMP_INDIRECT; + + /* DIRECT MODE support only in spcv/ve */ + pm8001_ha->smp_exp_mode = SMP_DIRECT; + + tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + preq_dma_addr = (char *)phys_to_virt(tmp_addr); + + /* INDIRECT MODE command settings. Use DMA */ + if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST INDIRECT MODE\n")); + /* for SPCv indirect mode. Place the top 4 bytes of + * SMP Request header here. 
*/ + for (i = 0; i < 4; i++) + smp_cmd.smp_req16[i] = *(preq_dma_addr + i); + /* exclude top 4 bytes for SMP req header */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req) - 4); + /* exclude 4 bytes for SMP req header and CRC */ + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len + (&task->smp_task.smp_resp)-4); + } else { /* DIRECT MODE */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32 + ((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + } + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST DIRECT MODE\n")); + for (i = 0; i < length; i++) + if (i < 16) { + smp_cmd.smp_req16[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req16[i], + *(preq_dma_addr))); + } else { + smp_cmd.smp_req[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req[i], + *(preq_dma_addr))); + } + } + + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, + &smp_cmd, pm8001_ha->smp_exp_mode, length); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +static int check_enc_sas_cmd(struct sas_task *task) +{ + if ((task->ssp_task.cdb[0] == READ_10) + || (task->ssp_task.cdb[0] == WRITE_10) + || (task->ssp_task.cdb[0] == WRITE_VERIFY)) + return 1; + else + return 0; +} + +static int check_enc_sat_cmd(struct sas_task *task) +{ + int ret = 0; + switch (task->ata_task.fis.command) { + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + ret = 1; + break; + default: + ret = 0; + break; + } + return ret; +} + +/** + * pm80xx_chip_ssp_io_req - send a SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
+ */ +static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + int ret; + u64 phys_addr; + struct inbound_queue_table *circularQ; + static u32 inb; + static u32 outb; + u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + /* data address domain added for spcv; set to 0 by host, + * used internally by controller + * 0 for SAS 1.1 and SAS 2.0 compatible TLR + */ + ssp_cmd.dad_dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0); + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + if (task->ssp_task.enable_first_burst) + ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SAS command 0x%x\n", + task->ssp_task.cdb[0])); + opc = OPC_INB_SSP_INI_DIF_ENC_IO; + /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/ + ssp_cmd.dad_dir_m_tlr = cpu_to_le32 + ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.enc_addr_low = 0; + ssp_cmd.enc_addr_high = 0; + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + ssp_cmd.key_cmode = 0x6 << 4; + /* set tweak values. 
Should be the start lba */ + ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) | + (task->ssp_task.cdb[3] << 16) | + (task->ssp_task.cdb[4] << 8) | + (task->ssp_task.cdb[5])); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SAS command 0x%x inb q %x\n", + task->ssp_task.cdb[0], inb)); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + } + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++); + + /* rotate the outb queue */ + outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + + return ret; +} + +static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + u32 tag = ccb->ccb_tag; + int ret; + static u32 inb; + static u32 outb; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr; + u32 ATAP = 0x0; + u32 dir; + struct inbound_queue_table *circularQ; + unsigned long flags; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + if (task->data_dir == PCI_DMA_NONE) { + ATAP = 0x04; /* no data*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n")); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n")); + } else { + ATAP = 0x05; /* PIO*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); + } + if (task->ata_task.use_ncq && + dev->sata_dev.command_set != ATAPI_COMMAND_SET) { + ATAP = 0x07; /* FPDMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + + sata_cmd.sata_fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n", + sata_cmd.sata_fis.command)); + opc = OPC_INB_SATA_DIF_ENC_IO; + + /* set encryption bit */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16)| + ((ATAP & 0x3f) << 10) | 0x20 | dir); + /* dad (bit 0-1) is 0 */ + /* 
fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.enc_addr_low = lower_32_bits(phys_addr); + sata_cmd.enc_addr_high = upper_32_bits(phys_addr); + sata_cmd.enc_esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.enc_addr_low = lower_32_bits(dma_addr); + sata_cmd.enc_addr_high = upper_32_bits(dma_addr); + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.enc_addr_low = 0; + sata_cmd.enc_addr_high = 0; + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + sata_cmd.key_index_mode = 0x6 << 4; + /* set tweak values. Should be the start lba */ + sata_cmd.twk_val0 = + cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | + (sata_cmd.sata_fis.lbah << 16) | + (sata_cmd.sata_fis.lbam << 8) | + (sata_cmd.sata_fis.lbal)); + sata_cmd.twk_val1 = + cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) | + (sata_cmd.sata_fis.lbam_exp)); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SATA command 0x%x inb %x\n", + sata_cmd.sata_fis.command, inb)); + /* dad (bit 0-1) is 0 */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16) | + ((ATAP & 0x3f) << 10) | dir); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + /* scsi cdb */ + sata_cmd.atapi_scsi_cdb[0] = + cpu_to_le32(((task->ata_task.atapi_packet[0]) | + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); + sata_cmd.atapi_scsi_cdb[1] = + cpu_to_le32(((task->ata_task.atapi_packet[4]) | + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); + sata_cmd.atapi_scsi_cdb[2] = + cpu_to_le32(((task->ata_task.atapi_packet[8]) | + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); + sata_cmd.atapi_scsi_cdb[3] = + cpu_to_le32(((task->ata_task.atapi_packet[12]) | + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); + } + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + 
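+			/*
+			 * The NCQ error path in mpi_sata_event() already issues
+			 * its own READ LOG EXT through pm80xx_send_read_log(), so
+			 * when the device is flagged (NCQ_READ_LOG_FLAG and
+			 * friends) this 0x2f request is completed locally with
+			 * GOOD status (or quietly freed if already aborted)
+			 * rather than being sent to the firmware.
+			 */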
spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + return 0; + } else if (task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } else if (!task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, + &sata_cmd, outb++); + + /* rotate the outb queue */ + outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + return ret; +} + +/** + * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to start up. + */ +static int +pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) +{ + struct phy_start_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTART; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PHY START REQ for phy_id %d\n", phy_id)); + /* + ** [0:7] PHY Identifier + ** [8:11] link rate 1.5G, 3G, 6G + ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode + ** [14] 0b disable spin up hold; 1b enable spin up hold + ** [15] ob no change in current PHY analig setup 1b enable using SPAST + */ + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | + LINKRATE_30 | LINKRATE_60 | phy_id); + /* SSC Disable and SAS Analog ST configuration */ + /** + payload.ase_sh_lm_slr_phyid = + cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | + phy_id); + Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need + **/ + + payload.sas_identify.dev_type = SAS_END_DEVICE; + payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; + memcpy(payload.sas_identify.sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); + payload.sas_identify.phy_id = phy_id; + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + return ret; +} + +/** + * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to start up. 
+ */ +static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, + u8 phy_id) +{ + struct phy_stop_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTOP; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + payload.phy_id = cpu_to_le32(phy_id); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + return ret; +} + +/** + * see comments on pm8001_mpi_reg_resp. + */ +static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + struct inbound_queue_table *circularQ; + u32 linkrate, phy_id; + int rc, tag = 0xdeadbeef; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->device = pm8001_dev; + ccb->ccb_tag = tag; + payload.tag = cpu_to_le32(tag); + + if (flag == 1) { + stp_sspsmp_sata = 0x02; /*direct attached sata */ + } else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + + opc = OPC_INB_REG_DEV; + + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? + pm8001_dev->sas_device->linkrate : dev->port->linkrate; + + payload.phyid_portid = + cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) | + ((phy_id & 0xFF) << 8)); + + payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) << 24) | + ((stp_sspsmp_sata & 0x03) << 28)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to operate + * @phy_op: + */ +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + struct local_phy_ctl_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + memset(&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(1); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return ret; +} + +static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; +#ifdef PM8001_USE_MSIX + return 1; +#endif + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; + +} + +/** + * pm8001_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. + * @irq: irq number. + * @stat: stat. 
+ */ +static irqreturn_t +pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm80xx_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm80xx_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +const struct pm8001_dispatch pm8001_80xx_dispatch = { + .name = "pmc80xx", + .chip_init = pm80xx_chip_init, + .chip_soft_rst = pm80xx_chip_soft_rst, + .chip_rst = pm80xx_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm80xx_chip_isr, + .is_our_interupt = pm80xx_chip_is_our_interupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm80xx_chip_interrupt_enable, + .interrupt_disable = pm80xx_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm80xx_chip_smp_req, + .ssp_io_req = pm80xx_chip_ssp_io_req, + .sata_req = pm80xx_chip_sata_req, + .phy_start_req = pm80xx_chip_phy_start_req, + .phy_stop_req = pm80xx_chip_phy_stop_req, + .reg_dev_req = pm80xx_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm80xx_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, +}; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h new file mode 100644 index 000000000000..2b760ba75d7b --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -0,0 +1,1523 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include <linux/types.h> +#include <scsi/libsas.h> + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +/* 0x8 RESV IN SPCv */ +#define OPC_INB_RSVD 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +/* 0xC, 0xD, 0xE removed in SPCv */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* 0x13 SMP_RESPONSE is removed in SPCv */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +/* 0x16 RESV IN SPCv */ +#define OPC_INB_RSVD1 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +/* 0x1A RESV IN SPCv */ +#define OPC_INB_RSVD2 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +/* 0x25 RESV IN SPCv */ +#define OPC_INB_RSVD3 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +/* 0x2D RESV IN SPCv */ +#define OPC_INB_RSVD4 45 /* 0x02D */ +#define OPC_INB_SGPIO_REGISTER 46 /* 0x02E */ +#define OPC_INB_PCIE_DIAG_EXEC 47 /* 0x02F */ +#define OPC_INB_SET_CONTROLLER_CONFIG 48 /* 0x030 */ +#define OPC_INB_GET_CONTROLLER_CONFIG 49 /* 0x031 */ +#define OPC_INB_REG_DEV 50 /* 0x032 */ +#define OPC_INB_SAS_HW_EVENT_ACK 51 /* 0x033 */ +#define OPC_INB_GET_DEVICE_INFO 52 /* 0x034 */ +#define OPC_INB_GET_PHY_PROFILE 53 /* 0x035 */ +#define OPC_INB_FLASH_OP_EXT 54 /* 0x036 */ +#define OPC_INB_SET_PHY_PROFILE 55 /* 0x037 */ +#define OPC_INB_KEK_MANAGEMENT 256 /* 0x100 */ +#define OPC_INB_DEK_MANAGEMENT 257 /* 0x101 */ +#define OPC_INB_SSP_INI_DIF_ENC_IO 258 /* 0x102 */ +#define OPC_INB_SATA_DIF_ENC_IO 259 /* 0x103 */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_RSVD 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_RSVD1 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_RSVD2 16 /* 0x010 */ +/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_RSVD3 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ 
+#define OPC_OUB_RSVD4 31 /* 0x01F */ +#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_RSVD5 41 /* 0x029 */ +#define OPC_OUB_HW_EVENT 1792 /* 0x700 */ +#define OPC_OUB_DEV_HANDLE_ARRIV 1824 /* 0x720 */ +#define OPC_OUB_THERM_HW_EVENT 1840 /* 0x730 */ +#define OPC_OUB_SGPIO_RESP 2094 /* 0x82E */ +#define OPC_OUB_PCIE_DIAG_EXECUTE 2095 /* 0x82F */ +#define OPC_OUB_DEV_REGIST 2098 /* 0x832 */ +#define OPC_OUB_SAS_HW_EVENT_ACK 2099 /* 0x833 */ +#define OPC_OUB_GET_DEVICE_INFO 2100 /* 0x834 */ +/* spcv specific commands */ +#define OPC_OUB_PHY_START_RESP 2052 /* 0x804 */ +#define OPC_OUB_PHY_STOP_RESP 2053 /* 0x805 */ +#define OPC_OUB_SET_CONTROLLER_CONFIG 2096 /* 0x830 */ +#define OPC_OUB_GET_CONTROLLER_CONFIG 2097 /* 0x831 */ +#define OPC_OUB_GET_PHY_PROFILE 2101 /* 0x835 */ +#define OPC_OUB_FLASH_OP_EXT 2102 /* 0x836 */ +#define OPC_OUB_SET_PHY_PROFILE 2103 /* 0x837 */ +#define OPC_OUB_KEK_MANAGEMENT_RESP 2304 /* 0x900 */ +#define OPC_OUB_DEK_MANAGEMENT_RESP 2305 /* 0x901 */ +#define OPC_OUB_SSP_COALESCED_COMP_RESP 2306 /* 0x902 */ + +/* for phy start*/ +#define SSC_DISABLE_15 (0x01 << 16) +#define SSC_DISABLE_30 (0x02 << 16) +#define SSC_DISABLE_60 (0x04 << 16) +#define SAS_ASE (0x01 << 15) +#define SPINHOLD_DISABLE (0x00 << 14) +#define SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x06 << 8) + +/* Thermal related */ +#define THERMAL_ENABLE 0x1 +#define THERMAL_LOG_ENABLE 0x1 +#define THERMAL_OP_CODE 0x6 +#define LTEMPHIL 70 +#define RTEMPHIL 100 + +/* Encryption info */ +#define SCRATCH_PAD3_ENC_DISABLED 0x00000000 +#define SCRATCH_PAD3_ENC_DIS_ERR 0x00000001 +#define SCRATCH_PAD3_ENC_ENA_ERR 0x00000002 +#define SCRATCH_PAD3_ENC_READY 0x00000003 +#define SCRATCH_PAD3_ENC_MASK SCRATCH_PAD3_ENC_READY + +#define SCRATCH_PAD3_XTS_ENABLED (1 << 14) +#define SCRATCH_PAD3_SMA_ENABLED (1 << 4) +#define SCRATCH_PAD3_SMB_ENABLED (1 << 5) +#define SCRATCH_PAD3_SMF_ENABLED 0 +#define SCRATCH_PAD3_SM_MASK 0x000000F0 +#define SCRATCH_PAD3_ERR_CODE 0x00FF0000 + +#define SEC_MODE_SMF 0x0 +#define SEC_MODE_SMA 0x100 +#define SEC_MODE_SMB 0x200 +#define CIPHER_MODE_ECB 0x00000001 +#define CIPHER_MODE_XTS 0x00000002 +#define KEK_MGMT_SUBOP_KEYCARDUPDATE 0x4 + +/* SAS protocol timer configuration page */ +#define SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x04 +#define STP_MCT_TMO 32 +#define SSP_MCT_TMO 32 +#define SAS_MAX_OPEN_TIME 5 +#define SMP_MAX_CONN_TIMER 0xFF +#define STP_FRM_TIMER 0 +#define STP_IDLE_TIME 5 /* 5 us; controller default */ +#define SAS_MFD 0 +#define SAS_OPNRJT_RTRY_INTVL 2 +#define SAS_DOPNRJT_RTRY_TMO 128 +#define SAS_COPNRJT_RTRY_TMO 128 + +/* + Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second. 
+ Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 + is DOPNRJT_RTRY_TMO +*/ +#define SAS_DOPNRJT_RTRY_THR 23438 +#define SAS_COPNRJT_RTRY_THR 23438 +#define SAS_MAX_AIP 0x200000 +#define IT_NEXUS_TIMEOUT 0x7D0 +#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30) + +struct mpi_msg_hdr { + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outboundqueue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffer are allocated for the massage */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to describe enable the phy (128 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame sas_identify; /* 28 Bytes */ + __le32 spasti; + u32 reserved[21]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to disable the phy (128 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction. 
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +/* updated outbound struct for spcv */ + +struct hw_event_resp { + __le32 lr_status_evt_portid; + __le32 evt_param; + __le32 phyid_npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure for thermal event notification + */ + +struct thermal_hw_event { + __le32 thermal_event; + __le32 rht_lht; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_mcn_ir_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ + struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ +struct hw_event_ack_req { + __le32 tag; + __le32 phyid_sea_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY_START Response Command + * indicates the completion of PHY_START command (64 bytes) + */ +struct phy_start_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of 
PHY_STOP Response Command + * indicates the completion of PHY_STOP command (64 bytes) + */ +struct phy_stop_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP Completion Response + * use to indicate a SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVNET response + * use to indicate a SATA Completion (64 bytes) + */ +struct sata_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved; + __le32 event_param0; + __le32 event_param1; + __le32 sata_addr_h32; + __le32 sata_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVNET esponse + * use to indicate a SSP Completion (64 bytes) + */ +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + __le32 ssp_tag; + __le32 event_param0; + __le32 event_param1; + __le32 sas_addr_h32; + __le32 sas_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; + __le32 rsvd[16]; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u8 _r_a[252]; +} __attribute__((packed, aligned(4))); + +/* + *brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + __le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[27]; +} __attribute__((packed, aligned(4))); + +/* These flags used for SSP SMP & SATA Abort */ +#define ABORT_MASK 0x3 +#define ABORT_SINGLE 0x0 +#define ABORT_ALL 0x1 + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response ( 64 bytes) + */ 
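/*
 * Editorial sketch (not part of the patch): a minimal illustration of how the
 * task_abort_req defined above could be filled in, assuming the surrounding
 * pm80xx_hwi.h context.  The helper and its parameter names are hypothetical;
 * in this driver the abort IOMB is actually built by pm8001_chip_abort_task(),
 * which the SPCv dispatch table above reuses.  Shown only to illustrate the
 * ABORT_SINGLE/ABORT_ALL convention.
 */
static inline void example_fill_task_abort(struct task_abort_req *req,
					   u32 iomb_tag, u32 device_id,
					   u32 tag_to_abort, bool all)
{
	memset(req, 0, sizeof(*req));
	req->tag = cpu_to_le32(iomb_tag);	/* tag of the abort IOMB itself */
	req->device_id = cpu_to_le32(device_id);
	if (all)
		/* abort every outstanding I/O on the device */
		req->abort_all = cpu_to_le32(ABORT_ALL);
	else
		/* ABORT_SINGLE is 0, so abort_all stays clear and only
		 * the tag of the command to be aborted is supplied */
		req->tag_to_abort = cpu_to_le32(tag_to_abort);
}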
+struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 ncqtag_atap_dir_m_dad; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; /* dword 11. rsvd for normal I/O. */ + /* EPLE Descl for enc I/O */ + u32 addr_low; /* dword 12. rsvd for enc I/O */ + u32 addr_high; /* dword 13. reserved for enc I/O */ + __le32 len; /* dword 14: length for normal I/O. */ + /* EPLE Desch for enc I/O */ + __le32 esgl; /* dword 15. rsvd for enc I/O */ + __le32 atapi_scsi_cdb[4]; /* dword 16-19. rsvd for enc I/O */ + /* The below fields are reserved for normal I/O */ + __le32 key_index_mode; /* dword 20 */ + __le32 sector_cnt_enss;/* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28. Encryption SGL address high */ + __le32 enc_addr_high; /* dword 29. Encryption SGL address low */ + __le32 enc_len; /* dword 30. Encryption length */ + __le32 enc_esgl; /* dword 31. Encryption esgl bit */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dad_dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; /* dword 12: sgl low for normal I/O. 
*/ + /* epl_descl for encryption I/O */ + __le32 addr_high; /* dword 13: sgl hi for normal I/O */ + /* dpl_descl for encryption I/O */ + __le32 len; /* dword 14: len for normal I/O. */ + /* edpl_desch for encryption I/O */ + __le32 esgl; /* dword 15: ESGL bit for normal I/O. */ + /* user defined tag mask for enc I/O */ + /* The below fields are reserved for normal I/O */ + u8 udt[12]; /* dword 16-18 */ + __le32 sectcnt_ios; /* dword 19 */ + __le32 key_cmode; /* dword 20 */ + __le32 ks_enss; /* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28: Encryption sgl addr low */ + __le32 enc_addr_high; /* dword 29: Encryption sgl addr hi */ + __le32 enc_len; /* dword 30: Encryption length */ + __le32 enc_esgl; /* dword 31: ESGL bit for encryption */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND + * use to initiate SSP I/O operation with optional DIF/ENC + */ +struct ssp_dif_enc_io_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dirMTlr; + __le32 sspiu0; + __le32 sspiu1; + __le32 sspiu2; + __le32 sspiu3; + __le32 sspiu4; + __le32 sspiu5; + __le32 sspiu6; + __le32 epl_des; + __le32 dpl_desl_ndplr; + __le32 dpl_desh; + __le32 uum_uuv_bss_difbits; + u8 udt[12]; + __le32 sectcnt_ios; + __le32 key_cmode; + __le32 ks_enss; + __le32 keytagl; + __le32 keytagh; + __le32 twk_val0; + __le32 twk_val1; + __le32 twk_val2; + __le32 twk_val3; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ + struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_req { + __le32 tag; + __le32 cfg_pg[14]; + u32 reserved[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET CONTROLLER CONFIG COMMAND + * use to get controller configuration page + */ +struct get_ctrl_cfg_req { + __le32 tag; + __le32 pgcd; + __le32 int_vec; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for KEK_MANAGEMENT COMMAND + * 
use for KEK management + */ +struct kek_mgmt_req { + __le32 tag; + __le32 new_curidx_ksop; + u32 reserved; + __le32 kblob[12]; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for DEK_MANAGEMENT COMMAND + * use for DEK management + */ +struct dek_mgmt_req { + __le32 tag; + __le32 kidx_dsop; + __le32 dekidx; + __le32 addr_l; + __le32 addr_h; + __le32 nent; + __le32 dbf_tblsize; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET PHY PROFILE COMMAND + * use to retrive phy specific information + */ +struct set_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET PHY PROFILE COMMAND + * use to retrive phy specific information + */ +struct get_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + __le32 profile[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for EXT FLASH PARTITION + * use to manage ext flash partition + */ +struct ext_flash_partition_req { + __le32 tag; + __le32 cmd; + __le32 offset; + __le32 len; + u32 reserved[7]; + __le32 addr_low; + __le32 addr_high; + __le32 len1; + __le32 ext; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - begins */ +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr_pgcd; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +struct get_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr; + __le32 confg_page[12]; +} __attribute__((packed, aligned(4))); + +struct kek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kidx_new_curr_ksop; + __le32 err_qlfr; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +struct dek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kekidx_tbls_dsop; + __le32 dekidx; + __le32 err_qlfr; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct 
get_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct flash_op_ext_resp { + __le32 tag; + __le32 cmd; + __le32 status; + __le32 epart_size; + __le32 epart_sect_size; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct set_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct ssp_coalesced_comp_resp { + __le32 coal_cnt; + __le32 tag0; + __le32 ssp_tag0; + __le32 tag1; + __le32 ssp_tag1; + __le32 add_tag_ssp_tag[10]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - ends */ + +/* brief data structure for SAS protocol timer configuration page. + * + */ +struct SASProtocolTimerConfig { + __le32 pageCode; /* 0 */ + __le32 MST_MSI; /* 1 */ + __le32 STP_SSP_MCT_TMO; /* 2 */ + __le32 STP_FRM_TMO; /* 3 */ + __le32 STP_IDLE_TMO; /* 4 */ + __le32 OPNRJT_RTRY_INTVL; /* 5 */ + __le32 Data_Cmd_OPNRJT_RTRY_TMO; /* 6 */ + __le32 Data_Cmd_OPNRJT_RTRY_THR; /* 7 */ + __le32 MAX_AIP; /* 8 */ +} __attribute__((packed, aligned(4))); + +typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_3RD_PARTY_RESET 0x07 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A + +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +/* This 
error code 0x18 is not used on SPCv */ +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 + +/* This error code 0x22 is not used on SPCv */ +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 +#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +/* This error code 0x25 is not used on SPCv */ +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 + +/* The following error code 0x31 and 0x32 are not using (obsolete) */ +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 + +/********** additional response event values *****************/ + +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43 +#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48 +#define IO_DS_INVALID 0x49 +/* WARNING: the value is not contiguous from here */ +#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52 +#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53 +#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54 +#define MPI_IO_RQE_BUSY_FULL 0x55 +#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56 +#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57 +#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58 + +#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004 +#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024 + +#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040 +/* + * An encryption IO request failed due to DEK Key Tag mismatch. + * The key tag supplied in the encryption IOMB does not match with + * the Key Tag in the referenced DEK Entry. + */ +#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041 +#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042 +/* + * An encryption I/O request failed because the initial value (IV) + * in the unwrapped DEK blob didn't match the IV used to unwrap it. + */ +#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. */ +#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. 
*/ +#define IO_XFR_ERROR_INTERNAL_RAM 0x2045 +/* + * An encryption I/O request failed + * because the DEK index specified in the I/O was outside the bounds of + * the total number of entries in the host DEK table. + */ +#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS0x2046 + +/* define DIF IO response error status code */ +#define IO_XFR_ERROR_DIF_MISMATCH 0x3000 +#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001 +#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002 +#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003 + +/* define operator management response status and error qualifier code */ +#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060 +#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063 +#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064 +#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022 +#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023 +/***************** additional response event values ***************/ + +/* WARNING: This error code must always be the last number. + * If you add error code, modify this code also + * It is used as an index + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x2023 + +/* MSGU CONFIGURATION TABLE*/ + +#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01 +#define SPCv_MSGU_CFG_TABLE_RESET 0x02 +#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08 +#define MSGU_IBDB_SET 0x00 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20 + +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x20 +#define MSGU_ODCR 0x28 + +#define MSGU_ODMR 0x30 +#define MSGU_ODMR_U 0x34 +#define MSGU_ODMR_CLR 0x38 +#define MSGU_ODMR_CLR_U 0x3C +#define MSGU_OD_RSVD 0x40 + +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_HOST_SCRATCH_PAD_6 0x6C +#define MSGU_HOST_SCRATCH_PAD_7 0x70 + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0 /* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all + interrupt vector*/ +/* MSIX Interupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \ + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + +/* state definition for Scratch Pad1 register */ +#define SCRATCH_PAD_RAAE_READY 0x3 +#define SCRATCH_PAD_ILA_READY 0xC +#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 +#define SCRATCH_PAD_IOP0_READY 0xC00 +#define SCRATCH_PAD_IOP1_READY 0x3000 + +/* boot loader state */ +#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ +#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */ +#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */ +#define 
SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */ + + /* state definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC/* Scratch Pad1 + Reserved bit 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */ + +/* 0x28 - 0x4C - RSVD */ +#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */ +#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */ +#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */ +#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */ +#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */ +#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */ + +#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */ +#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */ +#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */ +#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */ + +/* Gereral Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +/* 0x14 - 0x34 - RSVD */ +#define GST_GPIO_INPUT_VAL 0x38 +/* 0x3c - 0x40 - RSVD */ +#define GST_RERRINFO_OFFSET0 0x44 +#define GST_RERRINFO_OFFSET1 0x48 +#define GST_RERRINFO_OFFSET2 0x4c +#define GST_RERRINFO_OFFSET3 0x50 +#define GST_RERRINFO_OFFSET4 0x54 +#define GST_RERRINFO_OFFSET5 0x58 +#define GST_RERRINFO_OFFSET6 0x5c +#define GST_RERRINFO_OFFSET7 0x60 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +/* Per SAS PHY Attributes */ + +#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */ +#define PSPA_OB_HW_EVENT_PID0_OFFSET 
0x04 /* DWORD V+1 */ +#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */ +#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */ +#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */ +#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */ +#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */ +#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */ +#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */ +#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */ +#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */ +#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */ +#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */ +#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */ +#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */ +#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */ +#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */ +#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */ +#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */ +#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */ +#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */ +#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */ +#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */ +#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */ +#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */ +#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */ +#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */ +#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */ +#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */ +#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */ +#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */ +#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */ +/* end PSPA */ + +/* inbound queue configuration offset - byte offset */ +#define IB_PROPERITY_OFFSET 0x00 +#define IB_BASE_ADDR_HI_OFFSET 0x04 +#define IB_BASE_ADDR_LO_OFFSET 0x08 +#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C +#define IB_CI_BASE_ADDR_LO_OFFSET 0x10 +#define IB_PIPCI_BAR 0x14 +#define IB_PIPCI_BAR_OFFSET 0x18 +#define IB_RESERVED_OFFSET 0x1C + +/* outbound queue configuration offset - byte offset */ +#define OB_PROPERITY_OFFSET 0x00 +#define OB_BASE_ADDR_HI_OFFSET 0x04 +#define OB_BASE_ADDR_LO_OFFSET 0x08 +#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C +#define OB_PI_BASE_ADDR_LO_OFFSET 0x10 +#define OB_CIPCI_BAR 0x14 +#define OB_CIPCI_BAR_OFFSET 0x18 +#define OB_INTERRUPT_COALES_OFFSET 0x1C +#define OB_DYNAMIC_COALES_OFFSET 0x20 +#define OB_PROPERTY_INT_ENABLE 0x40000000 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C + +/* SPCV soft reset */ +#define SPC_REG_SOFT_RESET 0x00001000 +#define SPCv_NORMAL_RESET_VALUE 0x1 + +#define SPCv_SOFT_RESET_READ_MASK 0xC0 +#define SPCv_SOFT_RESET_NO_RESET 0x0 +#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40 +#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80 +#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0 + +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit definition for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define 
SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define HDA_GSM_CMD_OFFSET_BITS 0x42C0 +#define HDA_GSM_RSP_OFFSET_BITS 0x42E0 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + +#endif diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 317a7fdc3b82..23d607218ae8 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -24,7 +24,9 @@ config SCSI_QLA_FC Firmware images can be retrieved from: - ftp://ftp.qlogic.com/outgoing/linux/firmware/ + http://ldriver.qlogic.com/firmware/ + + They are also included in the linux-firmware tree as well. 
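Editorial aside (not part of either patch): the DEVREG_* values that close pm80xx_hwi.h above are the device-registration status codes reported back in dev_reg_resp.status. A minimal, purely hypothetical decoder is sketched below; the function name is illustrative and it assumes the header context above.

static const char *example_devreg_status_str(u32 status)
{
	switch (status) {
	case DEVREG_SUCCESS:				return "success";
	case DEVREG_FAILURE_OUT_OF_RESOURCE:		return "out of resources";
	case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:	return "device already registered";
	case DEVREG_FAILURE_INVALID_PHY_ID:		return "invalid phy id";
	case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:	return "phy id already registered";
	case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:	return "port id out of range";
	case DEVREG_FAILURE_PORT_NOT_VALID_STATE:	return "port not in valid state";
	case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:	return "device type not valid";
	default:					return "unknown";
	}
}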
config TCM_QLA2XXX tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 729b74389f83..937fed8cb038 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { - lcmd_pkt->cntrl_flags = - __constant_cpu_to_le16(TMF_WRITE_DATA); + lcmd_pkt->cntrl_flags = TMF_WRITE_DATA; vha->qla_stats.output_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { - lcmd_pkt->cntrl_flags = - __constant_cpu_to_le16(TMF_READ_DATA); + lcmd_pkt->cntrl_flags = TMF_READ_DATA; vha->qla_stats.input_bytes += scsi_bufflen(cmd); } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5307bf86d5e0..ad72c1d85111 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) qla2x00_rel_sp(sp->fcport->vha, sp); } -void +static void qla2x00_sp_compl(void *data, void *ptr, int res) { struct qla_hw_data *ha = (struct qla_hw_data *)data; diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 14fec976f634..fad71ed067ec 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb, mrb->mbox_cmd = in_mbox[0]; wmb(); + ha->iocb_cnt += mrb->iocb_cnt; ha->isp_ops->queue_iocb(ha); exit_mbox_iocb: spin_unlock_irqrestore(&ha->hardware_lock, flags); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index a47f99957ba8..4d231c12463e 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); - fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf); - fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf); + fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); + fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); fw_ddb_entry->ipv4_tos = conn->ipv4_tos; fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); - fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn); - fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn); + fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); + fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type); fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); @@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) * If this is invoked as a result of a userspace call then the entry is marked * as nonpersistent using flash_state field. 
**/ -int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, - struct dev_db_entry *fw_ddb_entry, - uint16_t *idx, int user) +static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t *idx, int user) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct iscsi_bus_flash_conn *fnode_conn = NULL; @@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, ql4_printk(KERN_ERR, ha, "%s: A non-persistent entry %s found\n", __func__, dev->kobj.name); + put_device(dev); goto exit_ddb_add; } @@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, int parent_type, parent_index = 0xffff; int rc = 0; - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) return -EIO; @@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: - if ((fnode_sess->discovery_parent_idx) >= 0 && - (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)) + if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES) parent_index = fnode_sess->discovery_parent_idx; rc = sprintf(buf, "%u\n", parent_index); @@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, parent_type = ISCSI_DISC_PARENT_ISNS; else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) parent_type = ISCSI_DISC_PARENT_UNKNOWN; - else if (fnode_sess->discovery_parent_type >= 0 && - fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) + else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) parent_type = ISCSI_DISC_PARENT_SENDTGT; else parent_type = ISCSI_DISC_PARENT_UNKNOWN; @@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = -ENOSYS; break; } + + put_device(dev); return rc; } @@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); - struct dev_db_entry *fw_ddb_entry = NULL; struct iscsi_flashnode_param_info *fnode_param; struct nlattr *attr; int rc = QLA_ERROR; uint32_t rem = len; - fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL); - if (!fw_ddb_entry) { - DEBUG2(ql4_printk(KERN_ERR, ha, - "%s: Unable to allocate ddb buffer\n", - __func__)); - return -ENOMEM; - } - nla_for_each_attr(attr, data, len, rem) { fnode_param = nla_data(attr); @@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint16_t *ddb_cookie = NULL; - size_t ddb_size; + size_t ddb_size = 0; void *pddb = NULL; int target_id; int rc = 0; - if (!fnode_sess) { - rc = -EINVAL; - goto exit_ddb_del; - } - if (fnode_sess->is_boot_target) { rc = -EPERM; DEBUG2(ql4_printk(KERN_ERR, ha, @@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) dev_db_start_offset += (fnode_sess->target_id * sizeof(*fw_ddb_entry)); - dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) - - (void *)fw_ddb_entry; + dev_db_start_offset += offsetof(struct dev_db_entry, cookie); ddb_size = sizeof(*ddb_cookie); } diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 83e0fec35d56..fe873cf7570d 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ 
b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.03.00-k8" +#define QLA4XXX_DRIVER_VERSION "5.03.00-k9" diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 5add6f4e7928..0a537a0515ca 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1997,24 +1997,39 @@ out: return ret; } -static unsigned int map_state(sector_t lba, unsigned int *num) +static unsigned long lba_to_map_index(sector_t lba) +{ + if (scsi_debug_unmap_alignment) { + lba += scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; + } + do_div(lba, scsi_debug_unmap_granularity); + + return lba; +} + +static sector_t map_index_to_lba(unsigned long index) { - unsigned int granularity, alignment, mapped; - sector_t block, next, end; + return index * scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; +} - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - block = lba + alignment; - do_div(block, granularity); +static unsigned int map_state(sector_t lba, unsigned int *num) +{ + sector_t end; + unsigned int mapped; + unsigned long index; + unsigned long next; - mapped = test_bit(block, map_storep); + index = lba_to_map_index(lba); + mapped = test_bit(index, map_storep); if (mapped) - next = find_next_zero_bit(map_storep, map_size, block); + next = find_next_zero_bit(map_storep, map_size, index); else - next = find_next_bit(map_storep, map_size, block); + next = find_next_bit(map_storep, map_size, index); - end = next * granularity - scsi_debug_unmap_alignment; + end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); *num = end - lba; return mapped; @@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num) static void map_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (block < map_size) - set_bit(block, map_storep); + if (index < map_size) + set_bit(index, map_storep); - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } static void unmap_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (rem == 0 && lba + granularity < end && block < map_size) { - clear_bit(block, map_storep); - if (scsi_debug_lbprz) + if (lba == map_index_to_lba(index) && + lba + scsi_debug_unmap_granularity <= end && + index < map_size) { + clear_bit(index, map_storep); + if (scsi_debug_lbprz) { memset(fake_storep + - block * scsi_debug_sector_size, 0, - scsi_debug_sector_size); + lba * scsi_debug_sector_size, 0, + scsi_debug_sector_size * + scsi_debug_unmap_granularity); + } } - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } @@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); ret = do_device_access(SCpnt, devip, lba, num, 1); - if (scsi_debug_unmap_granularity) + if 
(scsi_debug_lbp()) map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); if (-1 == ret) @@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); - if (unmap && scsi_debug_unmap_granularity) { + if (unmap && scsi_debug_lbp()) { unmap_region(lba, num); goto out; } @@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, fake_storep + (lba * scsi_debug_sector_size), scsi_debug_sector_size); - if (scsi_debug_unmap_granularity) + if (scsi_debug_lbp()) map_region(lba, num); out: write_unlock_irqrestore(&atomic_rw, iflags); @@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void) /* Logical Block Provisioning */ if (scsi_debug_lbp()) { - unsigned int map_bytes; - scsi_debug_unmap_max_blocks = clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); @@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void) clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); if (scsi_debug_unmap_alignment && - scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { + scsi_debug_unmap_granularity <= + scsi_debug_unmap_alignment) { printk(KERN_ERR - "%s: ERR: unmap_granularity < unmap_alignment\n", + "%s: ERR: unmap_granularity <= unmap_alignment\n", __func__); return -EINVAL; } - map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); - map_bytes = map_size >> 3; - map_storep = vmalloc(map_bytes); + map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; + map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", map_size); @@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void) goto free_vm; } - memset(map_storep, 0x0, map_bytes); + bitmap_zero(map_storep, map_size); /* Map first 1KB for partition table */ if (scsi_debug_num_parts) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index c1b05a83d403..f43de1e56420 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/jiffies.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; DECLARE_COMPLETION_ONSTACK(done); - unsigned long timeleft; + unsigned long timeleft = timeout; struct scsi_eh_save ses; + const unsigned long stall_for = msecs_to_jiffies(100); int rtn; +retry: scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); shost->eh_action = &done; scsi_log_send(scmd); scmd->scsi_done = scsi_eh_done; - shost->hostt->queuecommand(shost, scmd); - - timeleft = wait_for_completion_timeout(&done, timeout); + rtn = shost->hostt->queuecommand(shost, scmd); + if (rtn) { + if (timeleft > stall_for) { + scsi_eh_restore_cmnd(scmd, &ses); + timeleft -= stall_for; + msleep(jiffies_to_msecs(stall_for)); + goto retry; + } + /* signal not to enter either branch of the if () below */ + timeleft = 0; + rtn = NEEDS_RETRY; + } else { + timeleft = wait_for_completion_timeout(&done, timeout); + } shost->eh_action = NULL; - scsi_log_completion(scmd, SUCCESS); + scsi_log_completion(scmd, rtn); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, timeleft: %ld\n", __func__, scmd, timeleft)); /* - * If there is time left scsi_eh_done got called, and we will - * examine the actual status codes to see whether the command - * actually did 
complete normally, else tell the host to forget - * about this command. + * If there is time left scsi_eh_done got called, and we will examine + * the actual status codes to see whether the command actually did + * complete normally, else if we have a zero return and no time left, + * the command must still be pending, so abort it and return FAILED. + * If we never actually managed to issue the command, because + * ->queuecommand() kept returning non zero, use the rtn = FAILED + * value above (so don't execute either branch of the if) */ if (timeleft) { rtn = scsi_eh_completed_normally(scmd); @@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, rtn = FAILED; break; } - } else { + } else if (!rtn) { scsi_abort_eh_cmnd(scmd); rtn = FAILED; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c31187d79343..86d522004a20 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, } EXPORT_SYMBOL(scsi_execute); - -int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, +int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid) + int *resid, int flags) { char *sense = NULL; int result; @@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, return DRIVER_ERROR << 24; } result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, 0, resid); + sense, timeout, retries, flags, resid); if (sshdr) scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); kfree(sense); return result; } -EXPORT_SYMBOL(scsi_execute_req); +EXPORT_SYMBOL(scsi_execute_req_flags); /* * Function: scsi_init_cmd_errh() diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 8f6b12cbd224..42539ee2cb11 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev) #ifdef CONFIG_PM_RUNTIME +static int sdev_blk_runtime_suspend(struct scsi_device *sdev, + int (*cb)(struct device *)) +{ + int err; + + err = blk_pre_runtime_suspend(sdev->request_queue); + if (err) + return err; + if (cb) + err = cb(&sdev->sdev_gendev); + blk_post_runtime_suspend(sdev->request_queue, err); + + return err; +} + +static int sdev_runtime_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL; + struct scsi_device *sdev = to_scsi_device(dev); + int err; + + if (sdev->request_queue->dev) + return sdev_blk_runtime_suspend(sdev, cb); + + err = scsi_dev_type_suspend(dev, cb); + if (err == -EAGAIN) + pm_schedule_suspend(dev, jiffies_to_msecs( + round_jiffies_up_relative(HZ/10))); + return err; +} + static int scsi_runtime_suspend(struct device *dev) { int err = 0; - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; dev_dbg(dev, "scsi_runtime_suspend\n"); - if (scsi_is_sdev_device(dev)) { - err = scsi_dev_type_suspend(dev, - pm ? 
pm->runtime_suspend : NULL); - if (err == -EAGAIN) - pm_schedule_suspend(dev, jiffies_to_msecs( - round_jiffies_up_relative(HZ/10))); - } + if (scsi_is_sdev_device(dev)) + err = sdev_runtime_suspend(dev); /* Insert hooks here for targets, hosts, and transport classes */ return err; } -static int scsi_runtime_resume(struct device *dev) +static int sdev_blk_runtime_resume(struct scsi_device *sdev, + int (*cb)(struct device *)) { int err = 0; + + blk_pre_runtime_resume(sdev->request_queue); + if (cb) + err = cb(&sdev->sdev_gendev); + blk_post_runtime_resume(sdev->request_queue, err); + + return err; +} + +static int sdev_runtime_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL; + + if (sdev->request_queue->dev) + return sdev_blk_runtime_resume(sdev, cb); + else + return scsi_dev_type_resume(dev, cb); +} + +static int scsi_runtime_resume(struct device *dev) +{ + int err = 0; dev_dbg(dev, "scsi_runtime_resume\n"); if (scsi_is_sdev_device(dev)) - err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL); + err = sdev_runtime_resume(dev); /* Insert hooks here for targets, hosts, and transport classes */ @@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev) /* Insert hooks here for targets, hosts, and transport classes */ - if (scsi_is_sdev_device(dev)) - err = pm_schedule_suspend(dev, 100); - else + if (scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->request_queue->dev) { + pm_runtime_mark_last_busy(dev); + err = pm_runtime_autosuspend(dev); + } else { + err = pm_runtime_suspend(dev); + } + } else { err = pm_runtime_suspend(dev); + } return err; } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 47799a33d6ca..133926b1bb78 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1019,8 +1019,7 @@ exit_match_index: /** * iscsi_get_flashnode_by_index -finds flashnode session entry by index * @shost: pointer to host data - * @data: pointer to data containing value to use for comparison - * @fn: function pointer that does actual comparison + * @idx: index to match * * Finds the flashnode session object for the passed index * @@ -1029,13 +1028,13 @@ exit_match_index: * %NULL on failure */ static struct iscsi_bus_flash_session * -iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct device *dev; - dev = device_find_child(&shost->shost_gendev, data, fn); + dev = device_find_child(&shost->shost_gendev, &idx, + flashnode_match_index); if (dev) fnode_sess = iscsi_dev_to_flash_session(dev); @@ -1059,18 +1058,13 @@ struct device * iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, int (*fn)(struct device *dev, void *data)) { - struct device *dev; - - dev = device_find_child(&shost->shost_gendev, data, fn); - return dev; + return device_find_child(&shost->shost_gendev, data, fn); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); /** * iscsi_find_flashnode_conn - finds flashnode connection entry * @fnode_sess: pointer to parent flashnode session entry - * @data: pointer to data containing value to use for comparison - * @fn: function pointer that does actual comparison * * Finds the flashnode connection object 
comparing the data passed using logic * defined in passed function pointer @@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); * %NULL on failure */ struct device * -iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, - void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess) { - struct device *dev; - - dev = device_find_child(&fnode_sess->dev, data, fn); - return dev; + return device_find_child(&fnode_sess->dev, NULL, + iscsi_is_flashnode_conn_dev); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); @@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->set_flashnode_param) { @@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.set_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.set_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.set_flashnode.host_no); + __func__, idx, ev->u.set_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport, { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->del_flashnode) { @@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.del_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.del_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.del_flashnode.host_no); + __func__, idx, ev->u.del_flashnode.host_no); err = -ENODEV; goto put_host; } err = transport->del_flashnode(fnode_sess); + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->login_flashnode) { @@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.login_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.login_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.login_flashnode.host_no); + __func__, idx, ev->u.login_flashnode.host_no); err = -ENODEV; goto put_host; } - dev 
= iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->login_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->logout_flashnode) { @@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.logout_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.logout_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.logout_flashnode.host_no); + __func__, idx, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->logout_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void) } iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); - if (!iscsi_eh_timer_workq) + if (!iscsi_eh_timer_workq) { + err = -ENOMEM; goto release_nls; + } return 0; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e6689776b4f6..c1c555242d0d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, char *buffer_data; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + const char *temp = "temporary "; int len; if (sdp->type != TYPE_DISK) @@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, * it's not worth the risk */ return -EINVAL; + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { + buf += sizeof(temp) - 1; + sdkp->cache_override = 1; + } else { + sdkp->cache_override = 0; + } + for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { len = strlen(sd_cache_types[i]); if (strncmp(sd_cache_types[i], buf, len) == 0 && @@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, return -EINVAL; rcd = ct & 0x01 ? 1 : 0; wce = ct & 0x02 ? 1 : 0; + + if (sdkp->cache_override) { + sdkp->WCE = wce; + sdkp->RCD = rcd; + return count; + } + if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, SD_MAX_RETRIES, &data, NULL)) return -EINVAL; @@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) sdev = sdkp->device; - retval = scsi_autopm_get_device(sdev); - if (retval) - goto error_autopm; - /* * If the device is in error recovery, wait until it is done. * If the device is offline, then disallow any access to it. 
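As an aside on the sd_store_cache_type() hunk above (an illustrative sketch, not part of the patch): because temp is declared as a pointer, sizeof(temp) evaluates to the size of the pointer rather than the length of the string "temporary ", so sizeof(temp) - 1 compares and skips fewer characters than the prefix actually contains. Declaring the prefix as an array keeps the sizeof idiom consistent with the intent; parse_temporary_prefix() below is a hypothetical helper used only to show the pattern in isolation.

#include <string.h>

/* Hypothetical helper, not from the patch: with a char array,
 * sizeof(temp) - 1 really is strlen("temporary "), so the prefix test
 * and the buffer advance stay in sync. */
static const char temp[] = "temporary ";

static int parse_temporary_prefix(const char **buf)
{
	if (strncmp(*buf, temp, sizeof(temp) - 1) != 0)
		return 0;		/* no prefix: persistent cache change */

	*buf += sizeof(temp) - 1;	/* skip "temporary " */
	return 1;			/* caller would set sdkp->cache_override */
}
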
@@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) return 0; error_out: - scsi_autopm_put_device(sdev); -error_autopm: scsi_disk_put(sdkp); return retval; } @@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode) * XXX is followed by a "rmmod sd_mod"? */ - scsi_autopm_put_device(sdev); scsi_disk_put(sdkp); } @@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) retval = -ENODEV; if (scsi_block_when_processing_errors(sdp)) { - retval = scsi_autopm_get_device(sdp); - if (retval) - goto out; - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, sshdr); - scsi_autopm_put_device(sdp); } /* failed to execute TUR, assume media not present */ @@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * Leave the rest of the command zero to indicate * flush everything. */ - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, + &sshdr, SD_FLUSH_TIMEOUT, + SD_MAX_RETRIES, NULL, REQ_PM); if (res == 0) break; } @@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; + + if (sdkp->cache_override) + return; + first_len = 4; if (sdp->skip_ms_page_8) { if (sdp->type == TYPE_RBC) @@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sdkp->capacity = 0; sdkp->media_present = 1; sdkp->write_prot = 0; + sdkp->cache_override = 0; sdkp->WCE = 0; sdkp->RCD = 0; sdkp->ATO = 0; @@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? 
"removable " : ""); + blk_pm_runtime_init(sdp->request_queue, dev); scsi_autopm_put_device(sdp); put_device(&sdkp->dev); } @@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) if (!scsi_device_online(sdp)) return -ENODEV; - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, + SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); if (res) { sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); sd_print_result(sdkp, res); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 74a1e4ca5401..2386aeb41fe8 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -73,6 +73,7 @@ struct scsi_disk { u8 protection_type;/* Data Integrity Field */ u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 04998f36e507..6174ca4ea275 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn) if (sdt->app_tag == 0xffff) return 0; - /* Bad ref tag received from disk */ - if (sdt->ref_tag == 0xffffffff) { - printk(KERN_ERR - "%s: bad phys ref tag on sector %lu\n", - bix->disk_name, (unsigned long)sector); - return -EIO; - } - if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { printk(KERN_ERR "%s: ref tag error on sector %lu (rcvd %u)\n", diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 0371047c5922..35faf24c6044 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI If you have a controller with this interface, say Y or M here. If unsure, say N. + +config SCSI_UFSHCD_PLATFORM + tristate "Platform bus based UFS Controller support" + depends on SCSI_UFSHCD + ---help--- + This selects the UFS host controller support. Select this if + you have an UFS controller on Platform bus. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 9eda0dfbd6df..1e5bd48457d6 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -1,3 +1,4 @@ # UFSHCD makefile obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o +obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c new file mode 100644 index 000000000000..03319acd9c72 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -0,0 +1,217 @@ +/* + * Universal Flash Storage Host controller Platform bus based glue driver + * + * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c + * Copyright (C) 2011-2013 Samsung India Software Operations + * + * Authors: + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * See the COPYING file in the top-level directory or visit + * <http://www.gnu.org/licenses/gpl-2.0.html> + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This program is provided "AS IS" and "WITH ALL FAULTS" and + * without warranty of any kind. You are solely responsible for + * determining the appropriateness of using and distributing + * the program and assume all risks associated with your exercise + * of rights with respect to the program, including but not limited + * to infringement of third party rights, the risks and costs of + * program errors, damage to or loss of data, programs or equipment, + * and unavailability or interruption of operations. Under no + * circumstances will the contributor of this Program be liable for + * any damages of any kind arising from your use or distribution of + * this program. + */ + +#include "ufshcd.h" +#include <linux/platform_device.h> + +#ifdef CONFIG_PM +/** + * ufshcd_pltfrm_suspend - suspend power management function + * @dev: pointer to device handle + * + * + * Returns 0 + */ +static int ufshcd_pltfrm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_suspend + * 2. Do bus specific power management + */ + + disable_irq(hba->irq); + + return 0; +} + +/** + * ufshcd_pltfrm_resume - resume power management function + * @dev: pointer to device handle + * + * Returns 0 + */ +static int ufshcd_pltfrm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_resume. + * 2. 
Do bus specific wake up + */ + + enable_irq(hba->irq); + + return 0; +} +#else +#define ufshcd_pltfrm_suspend NULL +#define ufshcd_pltfrm_resume NULL +#endif + +/** + * ufshcd_pltfrm_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_probe(struct platform_device *pdev) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + struct resource *mem_res; + struct resource *irq_res; + resource_size_t mem_size; + int err; + struct device *dev = &pdev->dev; + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) { + dev_err(&pdev->dev, + "Memory resource not available\n"); + err = -ENODEV; + goto out_error; + } + + mem_size = resource_size(mem_res); + if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) { + dev_err(&pdev->dev, + "Cannot reserve the memory resource\n"); + err = -EBUSY; + goto out_error; + } + + mmio_base = ioremap_nocache(mem_res->start, mem_size); + if (!mmio_base) { + dev_err(&pdev->dev, "memory map failed\n"); + err = -ENOMEM; + goto out_release_regions; + } + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + dev_err(&pdev->dev, "IRQ resource not available\n"); + err = -ENODEV; + goto out_iounmap; + } + + err = dma_set_coherent_mask(dev, dev->coherent_dma_mask); + if (err) { + dev_err(&pdev->dev, "set dma mask failed\n"); + goto out_iounmap; + } + + err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start); + if (err) { + dev_err(&pdev->dev, "Intialization failed\n"); + goto out_iounmap; + } + + platform_set_drvdata(pdev, hba); + + return 0; + +out_iounmap: + iounmap(mmio_base); +out_release_regions: + release_mem_region(mem_res->start, mem_size); +out_error: + return err; +} + +/** + * ufshcd_pltfrm_remove - remove platform driver routine + * @pdev: pointer to platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_remove(struct platform_device *pdev) +{ + struct resource *mem_res; + resource_size_t mem_size; + struct ufs_hba *hba = platform_get_drvdata(pdev); + + disable_irq(hba->irq); + + /* Some buggy controllers raise interrupt after + * the resources are removed. 
So first we unregister the + * irq handler and then the resources used by driver + */ + + free_irq(hba->irq, hba); + ufshcd_remove(hba); + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) + dev_err(&pdev->dev, "ufshcd: Memory resource not available\n"); + else { + mem_size = resource_size(mem_res); + release_mem_region(mem_res->start, mem_size); + } + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id ufs_of_match[] = { + { .compatible = "jedec,ufs-1.1"}, +}; + +static const struct dev_pm_ops ufshcd_dev_pm_ops = { + .suspend = ufshcd_pltfrm_suspend, + .resume = ufshcd_pltfrm_resume, +}; + +static struct platform_driver ufshcd_pltfrm_driver = { + .probe = ufshcd_pltfrm_probe, + .remove = ufshcd_pltfrm_remove, + .driver = { + .name = "ufshcd", + .owner = THIS_MODULE, + .pm = &ufshcd_dev_pm_ops, + .of_match_table = ufs_of_match, + }, +}; + +module_platform_driver(ufshcd_pltfrm_driver); + +MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); +MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); +MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(UFSHCD_DRIVER_VERSION); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 60fd40c4e4c2..c32a478df81b 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp) ucd_cmd_ptr->header.dword_2 = 0; ucd_cmd_ptr->exp_data_transfer_len = - cpu_to_be32(lrbp->cmd->transfersize); + cpu_to_be32(lrbp->cmd->sdb.length); memcpy(ucd_cmd_ptr->cdb, lrbp->cmd->cmnd, diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c index ef2e08e9b590..5dc9c4bfa66e 100644 --- a/drivers/tty/serial/68328serial.c +++ b/drivers/tty/serial/68328serial.c @@ -14,7 +14,6 @@ * 2.4/2.5 port David McCullough */ -#include <asm/dbg.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/serial.h> diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 52a3ecd40421..6fa2ae77fffd 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c @@ -30,7 +30,6 @@ #include <linux/serial.h> #include <linux/serial_core.h> -#include <bcm63xx_clk.h> #include <bcm63xx_irq.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c index 6953dc82850c..a4fdce74f883 100644 --- a/drivers/tty/tty_audit.c +++ b/drivers/tty/tty_audit.c @@ -60,24 +60,22 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf) tty_audit_buf_free(buf); } -static void tty_audit_log(const char *description, struct task_struct *tsk, - kuid_t loginuid, unsigned sessionid, int major, - int minor, unsigned char *data, size_t size) +static void tty_audit_log(const char *description, int major, int minor, + unsigned char *data, size_t size) { struct audit_buffer *ab; + struct task_struct *tsk = current; + uid_t uid = from_kuid(&init_user_ns, task_uid(tsk)); + uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk)); + u32 sessionid = audit_get_sessionid(tsk); ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); if (ab) { char name[sizeof(tsk->comm)]; - kuid_t uid = task_uid(tsk); - - audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u " - "major=%d minor=%d comm=", description, - tsk->pid, - from_kuid(&init_user_ns, uid), - from_kuid(&init_user_ns, loginuid), - sessionid, - major, minor); + + audit_log_format(ab, "%s pid=%u uid=%u 
auid=%u ses=%u major=%d" + " minor=%d comm=", description, tsk->pid, uid, + loginuid, sessionid, major, minor); get_task_comm(name, tsk); audit_log_untrustedstring(ab, name); audit_log_format(ab, " data="); @@ -90,11 +88,9 @@ static void tty_audit_log(const char *description, struct task_struct *tsk, * tty_audit_buf_push - Push buffered data out * * Generate an audit message from the contents of @buf, which is owned by - * @tsk with @loginuid. @buf->mutex must be locked. + * the current task. @buf->mutex must be locked. */ -static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, - unsigned int sessionid, - struct tty_audit_buf *buf) +static void tty_audit_buf_push(struct tty_audit_buf *buf) { if (buf->valid == 0) return; @@ -102,25 +98,11 @@ static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, buf->valid = 0; return; } - tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor, - buf->data, buf->valid); + tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid); buf->valid = 0; } /** - * tty_audit_buf_push_current - Push buffered data out - * - * Generate an audit message from the contents of @buf, which is owned by - * the current task. @buf->mutex must be locked. - */ -static void tty_audit_buf_push_current(struct tty_audit_buf *buf) -{ - kuid_t auid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - tty_audit_buf_push(current, auid, sessionid, buf); -} - -/** * tty_audit_exit - Handle a task exit * * Make sure all buffered data is written out and deallocate the buffer. @@ -130,15 +112,13 @@ void tty_audit_exit(void) { struct tty_audit_buf *buf; - spin_lock_irq(&current->sighand->siglock); buf = current->signal->tty_audit_buf; current->signal->tty_audit_buf = NULL; - spin_unlock_irq(&current->sighand->siglock); if (!buf) return; mutex_lock(&buf->mutex); - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -151,9 +131,8 @@ void tty_audit_exit(void) */ void tty_audit_fork(struct signal_struct *sig) { - spin_lock_irq(&current->sighand->siglock); sig->audit_tty = current->signal->audit_tty; - spin_unlock_irq(&current->sighand->siglock); + sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd; } /** @@ -163,20 +142,21 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) { struct tty_audit_buf *buf; int major, minor, should_audit; + unsigned long flags; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); should_audit = current->signal->audit_tty; buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); major = tty->driver->major; minor = tty->driver->minor_start + tty->index; if (buf) { mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } @@ -187,24 +167,20 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) auid = audit_get_loginuid(current); sessionid = audit_get_sessionid(current); - tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major, - minor, &ch, 1); + tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1); } } /** - * tty_audit_push_task - Flush task's pending audit data - * @tsk: task pointer - * @loginuid: sender login uid - * @sessionid: sender session id + * tty_audit_push_current - Flush current's pending audit data * - * Called with
a ref on @tsk held. Try to lock sighand and get a - * reference to the tty audit buffer if available. + * Try to lock sighand and get a reference to the tty audit buffer if available. * Flush the buffer or return an appropriate error code. */ -int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) +int tty_audit_push_current(void) { struct tty_audit_buf *buf = ERR_PTR(-EPERM); + struct task_struct *tsk = current; unsigned long flags; if (!lock_task_sighand(tsk, &flags)) @@ -225,7 +201,7 @@ int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) return PTR_ERR(buf); mutex_lock(&buf->mutex); - tty_audit_buf_push(tsk, loginuid, sessionid, buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -243,10 +219,11 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, unsigned icanon) { struct tty_audit_buf *buf, *buf2; + unsigned long flags; buf = NULL; buf2 = NULL; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (likely(!current->signal->audit_tty)) goto out; buf = current->signal->tty_audit_buf; @@ -254,7 +231,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, atomic_inc(&buf->count); goto out; } - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); buf2 = tty_audit_buf_alloc(tty->driver->major, tty->driver->minor_start + tty->index, @@ -264,7 +241,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, return NULL; } - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (!current->signal->audit_tty) goto out; buf = current->signal->tty_audit_buf; @@ -276,7 +253,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, atomic_inc(&buf->count); /* Fall through */ out: - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); if (buf2) tty_audit_buf_free(buf2); return buf; @@ -292,10 +269,18 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, { struct tty_audit_buf *buf; int major, minor; + int audit_log_tty_passwd; + unsigned long flags; if (unlikely(size == 0)) return; + spin_lock_irqsave(&current->sighand->siglock, flags); + audit_log_tty_passwd = current->signal->audit_tty_log_passwd; + spin_unlock_irqrestore(&current->sighand->siglock, flags); + if (!audit_log_tty_passwd && icanon && !L_ECHO(tty)) + return; + if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) return; @@ -309,7 +294,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, minor = tty->driver->minor_start + tty->index; if (buf->major != major || buf->minor != minor || buf->icanon != icanon) { - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); buf->major = major; buf->minor = minor; buf->icanon = icanon; @@ -325,7 +310,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, data += run; size -= run; if (buf->valid == N_TTY_BUF_SIZE) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); } while (size != 0); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -339,16 +324,17 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, void tty_audit_push(struct tty_struct *tty) { struct tty_audit_buf *buf; + unsigned long flags; - spin_lock_irq(&current->sighand->siglock); + spin_lock_irqsave(&current->sighand->siglock, flags); if (likely(!current->signal->audit_tty)) { - spin_unlock_irq(&current->sighand->siglock);
+ spin_unlock_irqrestore(&current->sighand->siglock, flags); return; } buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); - spin_unlock_irq(&current->sighand->siglock); + spin_unlock_irqrestore(&current->sighand->siglock, flags); if (buf) { int major, minor; @@ -357,7 +343,7 @@ void tty_audit_push(struct tty_struct *tty) minor = tty->driver->minor_start + tty->index; mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c index ddabaa867b0d..700cac067b46 100644 --- a/drivers/video/au1100fb.c +++ b/drivers/video/au1100fb.c @@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) switch (blank_mode) { case VESA_NO_BLANKING: - /* Turn on panel */ - fbdev->regs->lcd_control |= LCD_CONTROL_GO; -#ifdef CONFIG_MIPS_PB1100 - if (fbdev->panel_idx == 1) { - au_writew(au_readw(PB1100_G_CONTROL) - | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), - PB1100_G_CONTROL); - } -#endif + /* Turn on panel */ + fbdev->regs->lcd_control |= LCD_CONTROL_GO; au_sync(); break; case VESA_VSYNC_SUSPEND: case VESA_HSYNC_SUSPEND: case VESA_POWERDOWN: - /* Turn off panel */ - fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; -#ifdef CONFIG_MIPS_PB1100 - if (fbdev->panel_idx == 1) { - au_writew(au_readw(PB1100_G_CONTROL) - & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), - PB1100_G_CONTROL); - } -#endif + /* Turn off panel */ + fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; au_sync(); break; default: diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index dd4d9cb86243..f03bf501527f 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -141,7 +141,7 @@ config XEN_GRANT_DEV_ALLOC config SWIOTLB_XEN def_bool y - depends on PCI + depends on PCI && X86 select SWIOTLB config XEN_TMEM diff --git a/drivers/xen/events.c b/drivers/xen/events.c index d8cc8127f19c..6a6bbe4ede92 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -167,6 +167,8 @@ static void xen_irq_info_common_init(struct irq_info *info, info->cpu = cpu; evtchn_to_irq[evtchn] = irq; + + irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); } static void xen_irq_info_evtchn_init(unsigned irq, @@ -874,7 +876,6 @@ int bind_evtchn_to_irq(unsigned int evtchn) struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_EVTCHN); } - irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); out: mutex_unlock(&irq_mapping_update_lock); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index d5c25db4398f..f71ec125290d 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -243,7 +243,7 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) struct ecryptfs_key_sig *key_sig, *key_sig_tmp; if (crypt_stat->tfm) - crypto_free_blkcipher(crypt_stat->tfm); + crypto_free_ablkcipher(crypt_stat->tfm); if (crypt_stat->hash_tfm) crypto_free_hash(crypt_stat->hash_tfm); list_for_each_entry_safe(key_sig, key_sig_tmp, @@ -319,6 +319,22 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg, return i; } +struct extent_crypt_result { + struct completion completion; + int rc; +}; + +static void extent_crypt_complete(struct crypto_async_request *req, int rc) +{ + struct extent_crypt_result *ecr = req->data; + + if (rc == -EINPROGRESS) + return; + + ecr->rc = rc; + complete(&ecr->completion); +} + /** * encrypt_scatterlist * @crypt_stat: Pointer to the crypt_stat
struct to initialize. @@ -334,11 +350,8 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, struct scatterlist *src_sg, int size, unsigned char *iv) { - struct blkcipher_desc desc = { - .tfm = crypt_stat->tfm, - .info = iv, - .flags = CRYPTO_TFM_REQ_MAY_SLEEP - }; + struct ablkcipher_request *req = NULL; + struct extent_crypt_result ecr; int rc = 0; BUG_ON(!crypt_stat || !crypt_stat->tfm @@ -349,24 +362,47 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, ecryptfs_dump_hex(crypt_stat->key, crypt_stat->key_size); } - /* Consider doing this once, when the file is opened */ + + init_completion(&ecr.completion); + mutex_lock(&crypt_stat->cs_tfm_mutex); - if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) { - rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key, - crypt_stat->key_size); - crypt_stat->flags |= ECRYPTFS_KEY_SET; - } - if (rc) { - ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n", - rc); + req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS); + if (!req) { mutex_unlock(&crypt_stat->cs_tfm_mutex); - rc = -EINVAL; + rc = -ENOMEM; goto out; } - ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size); - crypto_blkcipher_encrypt_iv(&desc, dest_sg, src_sg, size); + + ablkcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + extent_crypt_complete, &ecr); + /* Consider doing this once, when the file is opened */ + if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) { + rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key, + crypt_stat->key_size); + if (rc) { + ecryptfs_printk(KERN_ERR, + "Error setting key; rc = [%d]\n", + rc); + mutex_unlock(&crypt_stat->cs_tfm_mutex); + rc = -EINVAL; + goto out; + } + crypt_stat->flags |= ECRYPTFS_KEY_SET; + } mutex_unlock(&crypt_stat->cs_tfm_mutex); + ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size); + ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv); + rc = crypto_ablkcipher_encrypt(req); + if (rc == -EINPROGRESS || rc == -EBUSY) { + struct extent_crypt_result *ecr = req->base.data; + + wait_for_completion(&ecr->completion); + rc = ecr->rc; + INIT_COMPLETION(ecr->completion); + } out: + ablkcipher_request_free(req); return rc; } @@ -624,35 +660,61 @@ static int decrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, struct scatterlist *src_sg, int size, unsigned char *iv) { - struct blkcipher_desc desc = { - .tfm = crypt_stat->tfm, - .info = iv, - .flags = CRYPTO_TFM_REQ_MAY_SLEEP - }; + struct ablkcipher_request *req = NULL; + struct extent_crypt_result ecr; int rc = 0; - /* Consider doing this once, when the file is opened */ + BUG_ON(!crypt_stat || !crypt_stat->tfm + || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)); + if (unlikely(ecryptfs_verbosity > 0)) { + ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n", + crypt_stat->key_size); + ecryptfs_dump_hex(crypt_stat->key, + crypt_stat->key_size); + } + + init_completion(&ecr.completion); + mutex_lock(&crypt_stat->cs_tfm_mutex); - rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key, - crypt_stat->key_size); - if (rc) { - ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n", - rc); + req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS); + if (!req) { mutex_unlock(&crypt_stat->cs_tfm_mutex); - rc = -EINVAL; + rc = -ENOMEM; goto out; } - ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size); - rc = crypto_blkcipher_decrypt_iv(&desc, dest_sg, src_sg, size); + + ablkcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | 
CRYPTO_TFM_REQ_MAY_SLEEP, + extent_crypt_complete, &ecr); + /* Consider doing this once, when the file is opened */ + if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) { + rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key, + crypt_stat->key_size); + if (rc) { + ecryptfs_printk(KERN_ERR, + "Error setting key; rc = [%d]\n", + rc); + mutex_unlock(&crypt_stat->cs_tfm_mutex); + rc = -EINVAL; + goto out; + } + crypt_stat->flags |= ECRYPTFS_KEY_SET; + } mutex_unlock(&crypt_stat->cs_tfm_mutex); - if (rc) { - ecryptfs_printk(KERN_ERR, "Error decrypting; rc = [%d]\n", - rc); - goto out; + ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size); + ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv); + rc = crypto_ablkcipher_decrypt(req); + if (rc == -EINPROGRESS || rc == -EBUSY) { + struct extent_crypt_result *ecr = req->base.data; + + wait_for_completion(&ecr->completion); + rc = ecr->rc; + INIT_COMPLETION(ecr->completion); } - rc = size; out: + ablkcipher_request_free(req); return rc; + } /** @@ -746,8 +808,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat) crypt_stat->cipher, "cbc"); if (rc) goto out_unlock; - crypt_stat->tfm = crypto_alloc_blkcipher(full_alg_name, 0, - CRYPTO_ALG_ASYNC); + crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0); kfree(full_alg_name); if (IS_ERR(crypt_stat->tfm)) { rc = PTR_ERR(crypt_stat->tfm); @@ -757,7 +818,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat) crypt_stat->cipher); goto out_unlock; } - crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY); + crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY); rc = 0; out_unlock: mutex_unlock(&crypt_stat->cs_tfm_mutex); diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index dd299b389d4e..f622a733f7ad 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -38,6 +38,7 @@ #include <linux/nsproxy.h> #include <linux/backing-dev.h> #include <linux/ecryptfs.h> +#include <linux/crypto.h> #define ECRYPTFS_DEFAULT_IV_BYTES 16 #define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096 @@ -233,7 +234,7 @@ struct ecryptfs_crypt_stat { size_t extent_shift; unsigned int extent_mask; struct ecryptfs_mount_crypt_stat *mount_crypt_stat; - struct crypto_blkcipher *tfm; + struct crypto_ablkcipher *tfm; struct crypto_hash *hash_tfm; /* Crypto context for generating * the initialization vectors */ unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE]; diff --git a/fs/namei.c b/fs/namei.c index 57ae9c8c66bf..85e40d1c0a8f 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2740,7 +2740,7 @@ static int do_last(struct nameidata *nd, struct path *path, if (error) return error; - audit_inode(name, dir, 0); + audit_inode(name, dir, LOOKUP_PARENT); error = -EISDIR; /* trailing slashes? 
*/ if (nd->last.name[nd->last.len]) diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 8ae5abfe6ba2..27d74a294515 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -279,6 +279,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str { struct svc_fh *current_fh = &cstate->current_fh; __be32 status; + int accmode = 0; /* We don't know the target directory, and therefore can not * set the change info @@ -290,9 +291,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) && (open->op_iattr.ia_size == 0); + /* + * In the delegation case, the client is telling us about an + * open that it *already* performed locally, some time ago. We + * should let it succeed now if possible. + * + * In the case of a CLAIM_FH open, on the other hand, the client + * may be counting on us to enforce permissions (the Linux 4.1 + * client uses this for normal opens, for example). + */ + if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH) + accmode = NFSD_MAY_OWNER_OVERRIDE; - status = do_open_permission(rqstp, current_fh, open, - NFSD_MAY_OWNER_OVERRIDE); + status = do_open_permission(rqstp, current_fh, open, accmode); return status; } diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 899ca26dd194..4e9a21db867a 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -146,7 +146,7 @@ out_no_tfm: * then disable recovery tracking. */ static void -legacy_recdir_name_error(int error) +legacy_recdir_name_error(struct nfs4_client *clp, int error) { printk(KERN_ERR "NFSD: unable to generate recoverydir " "name (%d).\n", error); @@ -159,9 +159,7 @@ legacy_recdir_name_error(int error) if (error == -ENOENT) { printk(KERN_ERR "NFSD: disabling legacy clientid tracking. 
" "Reboot recovery will not function correctly!\n"); - - /* the argument is ignored by the legacy exit function */ - nfsd4_client_tracking_exit(NULL); + nfsd4_client_tracking_exit(clp->net); } } @@ -184,7 +182,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) status = nfs4_make_rec_clidname(dname, &clp->cl_name); if (status) - return legacy_recdir_name_error(status); + return legacy_recdir_name_error(clp, status); status = nfs4_save_creds(&original_cred); if (status < 0) @@ -341,7 +339,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp) status = nfs4_make_rec_clidname(dname, &clp->cl_name); if (status) - return legacy_recdir_name_error(status); + return legacy_recdir_name_error(clp, status); status = mnt_want_write_file(nn->rec_file); if (status) @@ -601,7 +599,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp) status = nfs4_make_rec_clidname(dname, &clp->cl_name); if (status) { - legacy_recdir_name_error(status); + legacy_recdir_name_error(clp, status); return status; } diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index d0be29fa94cf..6c80083a984f 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> +#include <linux/compat.h> #include <asm/ioctls.h> @@ -857,6 +858,22 @@ fput_and_out: return ret; } +#ifdef CONFIG_COMPAT +COMPAT_SYSCALL_DEFINE6(fanotify_mark, + int, fanotify_fd, unsigned int, flags, + __u32, mask0, __u32, mask1, int, dfd, + const char __user *, pathname) +{ + return sys_fanotify_mark(fanotify_fd, flags, +#ifdef __BIG_ENDIAN + ((__u64)mask1 << 32) | mask0, +#else + ((__u64)mask0 << 32) | mask1, +#endif + dfd, pathname); +} +#endif + /* * fanotify_user_setup - Our initialization function. Note that we cannot return * error because we have compiled-in VFS hooks. 
So an (unlikely) failure here diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c index e1a7779dd3cb..f373bde8f545 100644 --- a/fs/romfs/mmap-nommu.c +++ b/fs/romfs/mmap-nommu.c @@ -49,8 +49,11 @@ static unsigned long romfs_get_unmapped_area(struct file *file, return (unsigned long) -EINVAL; offset += ROMFS_I(inode)->i_dataoffset; - if (offset > mtd->size - len) + if (offset >= mtd->size) return (unsigned long) -EINVAL; + /* the mapping mustn't extend beyond the EOF */ + if ((offset + len) > mtd->size) + len = mtd->size - offset; ret = mtd_get_unmapped_area(mtd, len, offset, flags); if (ret == -EOPNOTSUPP) diff --git a/include/linux/audit.h b/include/linux/audit.h index 5a6d718adf34..b20b03852f21 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -84,8 +84,13 @@ extern int audit_classify_arch(int arch); #define AUDIT_TYPE_CHILD_DELETE 3 /* a child being deleted */ #define AUDIT_TYPE_CHILD_CREATE 4 /* a child being created */ +/* maximized args number that audit_socketcall can process */ +#define AUDITSC_ARGS 6 + struct filename; +extern void audit_log_session_info(struct audit_buffer *ab); + #ifdef CONFIG_AUDITSYSCALL /* These are defined in auditsc.c */ /* Public API */ @@ -120,7 +125,7 @@ static inline void audit_syscall_entry(int arch, int major, unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3) { - if (unlikely(!audit_dummy_context())) + if (unlikely(current->audit_context)) __audit_syscall_entry(arch, major, a0, a1, a2, a3); } static inline void audit_syscall_exit(void *pt_regs) @@ -185,12 +190,10 @@ static inline int audit_get_sessionid(struct task_struct *tsk) return tsk->sessionid; } -extern void audit_log_task_context(struct audit_buffer *ab); -extern void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk); extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); extern int __audit_bprm(struct linux_binprm *bprm); -extern void __audit_socketcall(int nargs, unsigned long *args); +extern int __audit_socketcall(int nargs, unsigned long *args); extern int __audit_sockaddr(int len, void *addr); extern void __audit_fd_pair(int fd1, int fd2); extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); @@ -224,10 +227,11 @@ static inline int audit_bprm(struct linux_binprm *bprm) return __audit_bprm(bprm); return 0; } -static inline void audit_socketcall(int nargs, unsigned long *args) +static inline int audit_socketcall(int nargs, unsigned long *args) { if (unlikely(!audit_dummy_context())) - __audit_socketcall(nargs, args); + return __audit_socketcall(nargs, args); + return 0; } static inline int audit_sockaddr(int len, void *addr) { @@ -340,11 +344,6 @@ static inline int audit_get_sessionid(struct task_struct *tsk) { return -1; } -static inline void audit_log_task_context(struct audit_buffer *ab) -{ } -static inline void audit_log_task_info(struct audit_buffer *ab, - struct task_struct *tsk) -{ } static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, @@ -354,8 +353,10 @@ static inline int audit_bprm(struct linux_binprm *bprm) { return 0; } -static inline void audit_socketcall(int nargs, unsigned long *args) -{ } +static inline int audit_socketcall(int nargs, unsigned long *args) +{ + return 0; +} static inline void audit_fd_pair(int fd1, int fd2) { } static inline int audit_sockaddr(int len, void *addr) @@ -390,6 +391,11 @@ static inline 
void audit_ptrace(struct task_struct *t) #define audit_signals 0 #endif /* CONFIG_AUDITSYSCALL */ +static inline bool audit_loginuid_set(struct task_struct *tsk) +{ + return uid_valid(audit_get_loginuid(tsk)); +} + #ifdef CONFIG_AUDIT /* These are defined in audit.c */ /* Public API */ @@ -429,14 +435,17 @@ static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid) { } #endif +extern int audit_log_task_context(struct audit_buffer *ab); +extern void audit_log_task_info(struct audit_buffer *ab, + struct task_struct *tsk); + extern int audit_update_lsm_rules(void); /* Private API (for audit.c only) */ -extern int audit_filter_user(void); +extern int audit_filter_user(int type); extern int audit_filter_type(int type); extern int audit_receive_filter(int type, int pid, int seq, - void *data, size_t datasz, kuid_t loginuid, - u32 sessionid, u32 sid); + void *data, size_t datasz); extern int audit_enabled; #else /* CONFIG_AUDIT */ static inline __printf(4, 5) @@ -476,6 +485,13 @@ static inline void audit_log_link_denied(const char *string, { } static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid) { } +static inline int audit_log_task_context(struct audit_buffer *ab) +{ + return 0; +} +static inline void audit_log_task_info(struct audit_buffer *ab, + struct task_struct *tsk) +{ } #define audit_enabled 0 #endif /* CONFIG_AUDIT */ static inline void audit_log_string(struct audit_buffer *ab, const char *buf) diff --git a/include/linux/compat.h b/include/linux/compat.h index d53c35352ea9..7f0c1dd09079 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -673,6 +673,8 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval); +asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, + int, const char __user *); #else #define is_compat_task() (0) diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3c86faa59798..8f0406230a0a 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -17,7 +17,7 @@ #include <linux/completion.h> #include <linux/hrtimer.h> -#define CPUIDLE_STATE_MAX 8 +#define CPUIDLE_STATE_MAX 10 #define CPUIDLE_NAME_LEN 16 #define CPUIDLE_DESC_LEN 32 diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 1e483fa7afb4..3cd32478f2fd 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -79,11 +79,26 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, struct bio_vec *biovec, int max_size); +/* + * These iteration functions are typically used to check (and combine) + * properties of underlying devices. + * E.g. Does at least one underlying device support flush? + * Does any underlying device not support WRITE_SAME? + * + * The callout function is called once for each contiguous section of + * an underlying device. State can be maintained in *data. + * Return non-zero to stop iterating through any further devices. + */ typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data); +/* + * This function must iterate through each section of device used by the + * target until it encounters a non-zero return code, which it then returns. + * Returns zero if no callout returned non-zero. 
+ */ typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, iterate_devices_callout_fn fn, void *data); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f83e17a40e8b..99d0fbcbaf79 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, * not set this, then the ftrace infrastructure will add recursion * protection for the caller. * STUB - The ftrace_ops is just a place holder. + * INITIALIZED - The ftrace_ops has already been initialized (the first time + * register_ftrace_function() is called it will initialize the ops) */ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -100,6 +102,7 @@ enum { FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, FTRACE_OPS_FL_STUB = 1 << 7, + FTRACE_OPS_FL_INITIALIZED = 1 << 8, }; struct ftrace_ops { @@ -110,6 +113,7 @@ struct ftrace_ops { #ifdef CONFIG_DYNAMIC_FTRACE struct ftrace_hash *notrace_hash; struct ftrace_hash *filter_hash; + struct mutex regex_lock; #endif }; diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 34e00fb49bec..4372658c73ae 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -293,6 +293,7 @@ struct ftrace_event_file { * caching and such. Which is mostly OK ;-) */ unsigned long flags; + atomic_t sm_ref; /* soft-mode reference counter */ }; #define __TRACE_EVENT_FLAGS(name, value) \ diff --git a/include/linux/hid.h b/include/linux/hid.h index af1b86d46f6e..0c48991b0402 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -515,7 +515,7 @@ struct hid_device { /* device report descriptor */ struct dentry *debug_rdesc; struct dentry *debug_events; struct list_head debug_list; - struct mutex debug_list_lock; + spinlock_t debug_list_lock; wait_queue_head_t debug_wait; }; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 06b0ed0154a4..b0dc87a2a376 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -146,6 +146,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_MAX_RTS_THRESHOLD 2353 #define IEEE80211_MAX_AID 2007 #define IEEE80211_MAX_TIM_LEN 251 +#define IEEE80211_MAX_MESH_PEERINGS 63 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section 6.2.1.1.2.
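The device-mapper hunk above spells out the iterate_devices contract: the callout runs once per contiguous section of each underlying device, can keep state in *data, and a non-zero return stops the walk, with the target's iterate_devices returning that first non-zero value (zero means every callout passed). As a rough sketch of how a target-wide property check composes out of that contract, assuming hypothetical helper names device_supports_flush() and dm_target_all_devs_flush_capable() that are not symbols added by this patch:

#include <linux/device-mapper.h>

/* Placeholder predicate; a real target would test the underlying queue. */
static bool device_supports_flush(struct block_device *bdev)
{
	return true;
}

/* Callout: non-zero marks a device section failing the check and stops the walk. */
static int device_not_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				    sector_t start, sector_t len, void *data)
{
	return !device_supports_flush(dev->bdev);
}

/* Zero from iterate_devices means every contiguous section passed the callout. */
static bool dm_target_all_devs_flush_capable(struct dm_target *ti)
{
	return ti->type->iterate_devices &&
	       !ti->type->iterate_devices(ti, device_not_flush_capable, NULL);
}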
@@ -1829,6 +1830,15 @@ enum ieee80211_key_len { WLAN_KEY_LEN_AES_CMAC = 16, }; +#define IEEE80211_WEP_IV_LEN 4 +#define IEEE80211_WEP_ICV_LEN 4 +#define IEEE80211_CCMP_HDR_LEN 8 +#define IEEE80211_CCMP_MIC_LEN 8 +#define IEEE80211_CCMP_PN_LEN 6 +#define IEEE80211_TKIP_IV_LEN 8 +#define IEEE80211_TKIP_ICV_LEN 4 +#define IEEE80211_CMAC_PN_LEN 6 + /* Public action codes */ enum ieee80211_pub_actioncode { WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2b85c521f737..c12916248469 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2147,11 +2147,13 @@ #define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e #define PCI_DEVICE_ID_NX2_57712 0x1662 #define PCI_DEVICE_ID_NX2_57712E 0x1663 +#define PCI_DEVICE_ID_NX2_57712_MF 0x1663 #define PCI_DEVICE_ID_TIGON3_5714 0x1668 #define PCI_DEVICE_ID_TIGON3_5714S 0x1669 #define PCI_DEVICE_ID_TIGON3_5780 0x166a #define PCI_DEVICE_ID_TIGON3_5780S 0x166b #define PCI_DEVICE_ID_TIGON3_5705F 0x166e +#define PCI_DEVICE_ID_NX2_57712_VF 0x166f #define PCI_DEVICE_ID_TIGON3_5754M 0x1672 #define PCI_DEVICE_ID_TIGON3_5755M 0x1673 #define PCI_DEVICE_ID_TIGON3_5756 0x1674 @@ -2177,13 +2179,15 @@ #define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5788 0x169c #define PCI_DEVICE_ID_TIGON3_5789 0x169d +#define PCI_DEVICE_ID_NX2_57840_4_10 0x16a1 +#define PCI_DEVICE_ID_NX2_57840_2_20 0x16a2 +#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 #define PCI_DEVICE_ID_NX2_57800_MF 0x16a5 #define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 #define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 #define PCI_DEVICE_ID_NX2_5706S 0x16aa -#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 #define PCI_DEVICE_ID_NX2_5708S 0x16ac #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h new file mode 100644 index 000000000000..c6fbc3ce4ab0 --- /dev/null +++ b/include/linux/platform_data/net-cw1200.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com> + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CW1200_PLAT_H_INCLUDED +#define CW1200_PLAT_H_INCLUDED + +struct cw1200_platform_data_spi { + u8 spi_bits_per_word; /* REQUIRED */ + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + +struct cw1200_platform_data_sdio { + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int irq; /* IRQ line or 0 to use SDIO IRQ */ + int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control 
CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + + +/* An example of SPI support in your board setup file: + + static struct cw1200_platform_data_spi cw1200_platform_data = { + .ref_clk = 38400, + .spi_bits_per_word = 16, + .reset = GPIO_RF_RESET, + .powerup = GPIO_RF_POWERUP, + .macaddr = wifi_mac_addr, + .sdd_file = "sdd_sagrad_1091_1098.bin", + }; + static struct spi_board_info myboard_spi_devices[] __initdata = { + { + .modalias = "cw1200_wlan_spi", + .max_speed_hz = 52000000, + .bus_num = 0, + .irq = WIFI_IRQ, + .platform_data = &cw1200_platform_data, + .chip_select = 0, + }, + }; + + */ + +/* An example of SDIO support in your board setup file: + + static struct cw1200_platform_data_sdio my_cw1200_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, + .sdd_file = "sdd_myplatform.bin", + }; + cw1200_sdio_set_platform_data(&my_cw1200_platform_data); + + */ + +void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata); + +#endif /* CW1200_PLAT_H_INCLUDED */ diff --git a/include/linux/sched.h b/include/linux/sched.h index caa8f4d0186b..178a8d909f14 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -593,6 +593,7 @@ struct signal_struct { #endif #ifdef CONFIG_AUDIT unsigned audit_tty; + unsigned audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; #endif #ifdef CONFIG_CGROUPS diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 3a7256955b10..f9f931c89e3e 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -172,6 +172,7 @@ #define SSB_SPROMSIZE_WORDS_R4 220 #define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) #define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) +#define SSB_SPROMSIZE_WORDS_R10 230 #define SSB_SPROM_BASE1 0x1000 #define SSB_SPROM_BASE31 0x0800 #define SSB_SPROM_REVISION 0x007E diff --git a/include/linux/tty.h b/include/linux/tty.h index 7e92bd86a808..8780bd2a272a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -575,8 +575,7 @@ extern void tty_audit_exit(void); extern void tty_audit_fork(struct signal_struct *sig); extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); extern void tty_audit_push(struct tty_struct *tty); -extern int tty_audit_push_task(struct task_struct *tsk, - kuid_t loginuid, u32 sessionid); +extern int tty_audit_push_current(void); #else static inline void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, size_t size, unsigned icanon) @@ -594,8 +593,7 @@ static inline void tty_audit_fork(struct signal_struct *sig) static inline void tty_audit_push(struct tty_struct *tty) { } -static inline int tty_audit_push_task(struct task_struct *tsk, - kuid_t loginuid, u32 sessionid) +static inline int tty_audit_push_current(void) { return 0; } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 26b5b692c22b..e3a39fc9a296 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -188,6 +188,8 @@ struct ieee80211_channel { * when used with 802.11g (on the 2.4 GHz band); filled by the * core code when registering the wiphy. * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode. 
+ * @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode + * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode */ enum ieee80211_rate_flags { IEEE80211_RATE_SHORT_PREAMBLE = 1<<0, @@ -195,6 +197,8 @@ enum ieee80211_rate_flags { IEEE80211_RATE_MANDATORY_B = 1<<2, IEEE80211_RATE_MANDATORY_G = 1<<3, IEEE80211_RATE_ERP_G = 1<<4, + IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5, + IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6, }; /** @@ -433,6 +437,30 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, u32 prohibited_flags); /** + * ieee80211_chandef_rate_flags - returns rate flags for a channel + * + * In some channel types, not all rates may be used - for example CCK + * rates may not be used in 5/10 MHz channels. + * + * @chandef: channel definition for the channel + * + * Returns: rate flags which apply for this channel + */ +static inline enum ieee80211_rate_flags +ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef) +{ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + return IEEE80211_RATE_SUPPORTS_5MHZ; + case NL80211_CHAN_WIDTH_10: + return IEEE80211_RATE_SUPPORTS_10MHZ; + default: + break; + } + return 0; +} + +/** * enum survey_info_flags - survey information flags * * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in @@ -753,6 +781,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @STATION_INFO_LOCAL_PM: @local_pm filled * @STATION_INFO_PEER_PM: @peer_pm filled * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled + * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled + * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled */ enum station_info_flags { STATION_INFO_INACTIVE_TIME = 1<<0, @@ -781,6 +811,8 @@ enum station_info_flags { STATION_INFO_NONPEER_PM = 1<<23, STATION_INFO_RX_BYTES64 = 1<<24, STATION_INFO_TX_BYTES64 = 1<<25, + STATION_INFO_CHAIN_SIGNAL = 1<<26, + STATION_INFO_CHAIN_SIGNAL_AVG = 1<<27, }; /** @@ -857,6 +889,8 @@ struct sta_bss_parameters { u16 beacon_interval; }; +#define IEEE80211_MAX_CHAINS 4 + /** * struct station_info - station information * @@ -874,6 +908,9 @@ struct sta_bss_parameters { * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @signal_avg: Average signal strength, type depends on the wiphy's signal_type. * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. 
+ * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg + * @chain_signal: per-chain signal strength of last received packet in dBm + * @chain_signal_avg: per-chain signal strength average in dBm * @txrate: current unicast bitrate from this station * @rxrate: current unicast bitrate to this station * @rx_packets: packets received from this station @@ -909,6 +946,11 @@ struct station_info { u8 plink_state; s8 signal; s8 signal_avg; + + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; + s8 chain_signal_avg[IEEE80211_MAX_CHAINS]; + struct rate_info txrate; struct rate_info rxrate; u32 rx_packets; @@ -947,6 +989,7 @@ struct station_info { * @MONITOR_FLAG_CONTROL: pass control frames * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering * @MONITOR_FLAG_COOK_FRAMES: report frames after processing + * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address */ enum monitor_flags { MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL, @@ -954,6 +997,7 @@ enum monitor_flags { MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL, MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS, MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES, + MONITOR_FLAG_ACTIVE = 1<<NL80211_MNTR_FLAG_ACTIVE, }; /** @@ -1108,6 +1152,9 @@ struct bss_parameters { * setting for new peer links. * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake * after transmitting its beacon. + * @plink_timeout: If no tx activity is seen from a STA we've established + * peering with for longer than this time (in seconds), then remove it + * from the STA's list of peers. Default is 30 minutes. */ struct mesh_config { u16 dot11MeshRetryTimeout; @@ -1137,6 +1184,7 @@ struct mesh_config { u16 dot11MeshHWMPconfirmationInterval; enum nl80211_mesh_power_mode power_mode; u16 dot11MeshAwakeWindowDuration; + u32 plink_timeout; }; /** @@ -1147,6 +1195,7 @@ struct mesh_config { * @sync_method: which synchronization method to use * @path_sel_proto: which path selection protocol to use * @path_metric: which metric to use + * @auth_id: which authentication method this mesh is using * @ie: vendor information elements (optional) * @ie_len: length of vendor information elements * @is_authenticated: this mesh requires authentication @@ -1155,6 +1204,7 @@ * @dtim_period: DTIM period to use * @beacon_interval: beacon interval to use * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a] + * @basic_rates: basic rates to use when creating the mesh * * These parameters are fixed when the mesh is created.
*/ @@ -1165,6 +1215,7 @@ struct mesh_setup { u8 sync_method; u8 path_sel_proto; u8 path_metric; + u8 auth_id; const u8 *ie; u8 ie_len; bool is_authenticated; @@ -1173,6 +1224,7 @@ struct mesh_setup { u8 dtim_period; u16 beacon_interval; int mcast_rate[IEEE80211_NUM_BANDS]; + u32 basic_rates; }; /** @@ -1241,6 +1293,7 @@ struct cfg80211_ssid { * @scan_start: time (in jiffies) when the scan started * @wdev: the wireless device to scan for * @aborted: (internal) scan request was notified as aborted + * @notified: (internal) scan request was notified as done or aborted * @no_cck: used to send probe requests at non CCK rate in 2GHz band */ struct cfg80211_scan_request { @@ -1258,7 +1311,7 @@ struct cfg80211_scan_request { /* internal */ struct wiphy *wiphy; unsigned long scan_start; - bool aborted; + bool aborted, notified; bool no_cck; /* keep last */ @@ -1850,7 +1903,9 @@ struct cfg80211_update_ft_ies_params { * @get_mpath: get a mesh path for the given parameters * @dump_mpath: dump mesh path callback -- resume dump at index @idx * @join_mesh: join the mesh network with the specified parameters + * (invoked with the wireless_dev mutex held) * @leave_mesh: leave the current mesh network + * (invoked with the wireless_dev mutex held) * * @get_mesh_config: Get the current mesh configuration * @@ -1877,20 +1932,28 @@ struct cfg80211_update_ft_ies_params { * the scan/scan_done bracket too. * * @auth: Request to authenticate with the specified peer + * (invoked with the wireless_dev mutex held) * @assoc: Request to (re)associate with the specified peer + * (invoked with the wireless_dev mutex held) * @deauth: Request to deauthenticate from the specified peer + * (invoked with the wireless_dev mutex held) * @disassoc: Request to disassociate from the specified peer + * (invoked with the wireless_dev mutex held) * * @connect: Connect to the ESS with the specified parameters. When connected, * call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS. * If the connection fails for some reason, call cfg80211_connect_result() * with the status from the AP. + * (invoked with the wireless_dev mutex held) * @disconnect: Disconnect from the BSS/ESS. + * (invoked with the wireless_dev mutex held) * * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call * cfg80211_ibss_joined(), also call that function when changing BSSID due * to a merge. + * (invoked with the wireless_dev mutex held) * @leave_ibss: Leave the IBSS. + * (invoked with the wireless_dev mutex held) * * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or * MESH mode) @@ -2307,6 +2370,7 @@ struct cfg80211_ops { * responds to probe-requests in hardware. * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX. * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call. + * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. */ enum wiphy_flags { WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), @@ -2330,6 +2394,7 @@ enum wiphy_flags { WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19), WIPHY_FLAG_OFFCHAN_TX = BIT(20), WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), + WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), }; /** @@ -2556,6 +2621,9 @@ struct wiphy_wowlan_support { * may request, if implemented. * * @wowlan: WoWLAN support information + * @wowlan_config: current WoWLAN configuration; this should usually not be + * used since access to it is necessarily racy, use the parameter passed + * to the suspend() operation instead. 
* * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden. @@ -2622,7 +2690,8 @@ struct wiphy { u32 hw_version; #ifdef CONFIG_PM - struct wiphy_wowlan_support wowlan; + const struct wiphy_wowlan_support *wowlan; + struct cfg80211_wowlan *wowlan_config; #endif u16 max_remain_on_channel_duration; @@ -2820,7 +2889,7 @@ struct cfg80211_cached_keys; * @current_bss: (private) Used by the internal configuration code * @channel: (private) Used by the internal configuration code to track * the user-set AP, monitor and WDS channel - * @preset_chan: (private) Used by the internal configuration code to + * @preset_chandef: (private) Used by the internal configuration code to * track the channel to be used for AP later * @bssid: (private) Used by the internal configuration code * @ssid: (private) Used by the internal configuration code @@ -2834,14 +2903,23 @@ struct cfg80211_cached_keys; * by cfg80211 on change_interface * @mgmt_registrations: list of registrations for management frames * @mgmt_registrations_lock: lock for the list - * @mtx: mutex used to lock data in this struct - * @cleanup_work: work struct used for cleanup that can't be done directly + * @mtx: mutex used to lock data in this struct, may be used by drivers + * and some API functions require it held * @beacon_interval: beacon interval used on this device for transmitting * beacons, 0 when not valid * @address: The address for this device, valid only if @netdev is %NULL * @p2p_started: true if this is a P2P Device that has been started * @cac_started: true if DFS channel availability check has been started * @cac_start_time: timestamp (jiffies) when the dfs state was entered. + * @ps: powersave mode is enabled + * @ps_timeout: dynamic powersave timeout + * @ap_unexpected_nlportid: (private) netlink port ID of application + * registered for unexpected class 3 frames (AP mode) + * @conn: (private) cfg80211 software SME connection state machine data + * @connect_keys: (private) keys to set after connection is established + * @ibss_fixed: (private) IBSS is using fixed BSSID + * @event_list: (private) list for internal event processing + * @event_lock: (private) lock for event list */ struct wireless_dev { struct wiphy *wiphy; @@ -2858,8 +2936,6 @@ struct wireless_dev { struct mutex mtx; - struct work_struct cleanup_work; - bool use_4addr, p2p_started; u8 address[ETH_ALEN] __aligned(sizeof(u16)); @@ -2867,11 +2943,6 @@ struct wireless_dev { /* currently used for IBSS and SME - might be rearranged later */ u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len, mesh_id_len, mesh_id_up_len; - enum { - CFG80211_SME_IDLE, - CFG80211_SME_CONNECTING, - CFG80211_SME_CONNECTED, - } sme_state; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; @@ -2989,6 +3060,15 @@ struct ieee80211_rate * ieee80211_get_response_rate(struct ieee80211_supported_band *sband, u32 basic_rates, int bitrate); +/** + * ieee80211_mandatory_rates - get mandatory rates for a given band + * @sband: the band to look for rates in + * + * This function returns a bitmap of the mandatory rates for the given + * band, bits are set according to the rate position in the bitrates array. 
+ */ +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband); + /* * Radiotap parsing functions -- for controlled injection support * @@ -3392,122 +3472,87 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); /** - * cfg80211_send_rx_auth - notification of processed authentication + * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame * @dev: network device * @buf: authentication frame (header + body) * @len: length of the frame data * - * This function is called whenever an authentication has been processed in - * station mode. The driver is required to call either this function or - * cfg80211_send_auth_timeout() to indicate the result of cfg80211_ops::auth() - * call. This function may sleep. + * This function is called whenever an authentication, disassociation or + * deauthentication frame has been received and processed in station mode. + * After being asked to authenticate via cfg80211_ops::auth() the driver must + * call either this function or cfg80211_auth_timeout(). + * After being asked to associate via cfg80211_ops::assoc() the driver must + * call either this function or cfg80211_auth_timeout(). + * While connected, the driver must call this for received and processed + * disassociation and deauthentication frames. If the frame couldn't be used + * because it was unprotected, the driver must call the function + * cfg80211_rx_unprot_mlme_mgmt() instead. + * + * This function may sleep. The caller must hold the corresponding wdev's mutex. */ -void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len); +void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** - * cfg80211_send_auth_timeout - notification of timed out authentication + * cfg80211_auth_timeout - notification of timed out authentication * @dev: network device * @addr: The MAC address of the device with which the authentication timed out * - * This function may sleep. + * This function may sleep. The caller must hold the corresponding wdev's + * mutex. */ -void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr); +void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); /** - * cfg80211_send_rx_assoc - notification of processed association + * cfg80211_rx_assoc_resp - notification of processed association response * @dev: network device - * @bss: the BSS struct association was requested for, the struct reference - * is owned by cfg80211 after this call - * @buf: (re)association response frame (header + body) + * @bss: the BSS that association was requested with, ownership of the pointer + * moves to cfg80211 in this call + * @buf: (re)association response frame (header + body) * @len: length of the frame data * - * This function is called whenever a (re)association response has been - * processed in station mode. The driver is required to call either this - * function or cfg80211_send_assoc_timeout() to indicate the result of - * cfg80211_ops::assoc() call. This function may sleep. + * After being asked to associate via cfg80211_ops::assoc() the driver must + * call either this function or cfg80211_assoc_timeout(). + * + * This function may sleep. The caller must hold the corresponding wdev's mutex.
*/ -void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, +void cfg80211_rx_assoc_resp(struct net_device *dev, + struct cfg80211_bss *bss, const u8 *buf, size_t len); /** - * cfg80211_send_assoc_timeout - notification of timed out association + * cfg80211_assoc_timeout - notification of timed out association * @dev: network device * @addr: The MAC address of the device with which the association timed out * - * This function may sleep. + * This function may sleep. The caller must hold the corresponding wdev's mutex. */ -void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr); +void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr); /** - * cfg80211_send_deauth - notification of processed deauthentication + * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame * @dev: network device - * @buf: deauthentication frame (header + body) + * @buf: 802.11 frame (header + body) * @len: length of the frame data * * This function is called whenever deauthentication has been processed in * station mode. This includes both received deauthentication frames and - * locally generated ones. This function may sleep. - */ -void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); - -/** - * __cfg80211_send_deauth - notification of processed deauthentication - * @dev: network device - * @buf: deauthentication frame (header + body) - * @len: length of the frame data - * - * Like cfg80211_send_deauth(), but doesn't take the wdev lock. - */ -void __cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len); - -/** - * cfg80211_send_disassoc - notification of processed disassociation - * @dev: network device - * @buf: disassociation response frame (header + body) - * @len: length of the frame data - * - * This function is called whenever disassociation has been processed in - * station mode. This includes both received disassociation frames and locally - * generated ones. This function may sleep. - */ -void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len); - -/** - * __cfg80211_send_disassoc - notification of processed disassociation - * @dev: network device - * @buf: disassociation response frame (header + body) - * @len: length of the frame data - * - * Like cfg80211_send_disassoc(), but doesn't take the wdev lock. + * locally generated ones. This function may sleep. The caller must hold the + * corresponding wdev's mutex. */ -void __cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, - size_t len); +void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** - * cfg80211_send_unprot_deauth - notification of unprotected deauthentication + * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame * @dev: network device * @buf: deauthentication frame (header + body) * @len: length of the frame data * - * This function is called whenever a received Deauthentication frame has been - * dropped in station mode because of MFP being used but the Deauthentication + * This function is called whenever a received deauthentication or disassociation + * frame has been dropped in station mode because of MFP being used but the * frame was not protected. This function may sleep.
*/ -void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf, - size_t len); - -/** - * cfg80211_send_unprot_disassoc - notification of unprotected disassociation - * @dev: network device - * @buf: disassociation frame (header + body) - * @len: length of the frame data - * - * This function is called whenever a received Disassociation frame has been - * dropped in station mode because of MFP being used but the Disassociation - * frame was not protected. This function may sleep. - */ -void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf, - size_t len); +void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, + const u8 *buf, size_t len); /** * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP) @@ -4153,6 +4198,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, * cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver. * * @wdev: the wireless device for which critical protocol is stopped. + * @gfp: allocation flags * * This function can be called by the driver to indicate it has reverted * operation back to normal. One reason could be that the duration given diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index c3999632e616..c6d07cb074bc 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -269,6 +269,7 @@ enum ieee80211_radiotap_type { #define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04 #define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08 #define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10 +#define IEEE80211_RADIOTAP_MCS_HAVE_STBC 0x20 #define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03 #define IEEE80211_RADIOTAP_MCS_BW_20 0 @@ -278,6 +279,12 @@ enum ieee80211_radiotap_type { #define IEEE80211_RADIOTAP_MCS_SGI 0x04 #define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10 +#define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60 +#define IEEE80211_RADIOTAP_MCS_STBC_1 1 +#define IEEE80211_RADIOTAP_MCS_STBC_2 2 +#define IEEE80211_RADIOTAP_MCS_STBC_3 3 + +#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5 /* For IEEE80211_RADIOTAP_AMPDU_STATUS */ #define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN 0x0001 diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 885898a40d13..5b7a3dadadde 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -217,8 +217,8 @@ struct ieee80211_chanctx_conf { * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS) * changed (currently only in P2P client mode, GO mode will be later) - * @BSS_CHANGED_DTIM_PERIOD: the DTIM period value was changed (set when - * it becomes valid, managed mode only) + * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available: + * currently dtim_period only is under consideration. * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, * note that this is only called when it changes after the channel * context had been assigned. 
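The mac80211 hunk above folds the old BSS_CHANGED_DTIM_PERIOD notification into the broader BSS_CHANGED_BEACON_INFO flag, so a driver should only trust bss_conf->dtim_period once that flag has been signalled. A minimal sketch of the driver side, where struct my_dev and my_dev_set_dtim() are hypothetical placeholders rather than anything defined by this patch:

#include <net/mac80211.h>

struct my_dev {			/* hypothetical driver state */
	u8 dtim_period;
};

static void my_dev_set_dtim(struct my_dev *dev, u8 dtim_period)
{
	dev->dtim_period = dtim_period;	/* stand-in for real device programming */
}

static void my_bss_info_changed(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *info,
				u32 changed)
{
	struct my_dev *dev = hw->priv;

	/* dtim_period is only meaningful after BSS_CHANGED_BEACON_INFO */
	if (changed & BSS_CHANGED_BEACON_INFO)
		my_dev_set_dtim(dev, info->dtim_period);
}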
@@ -244,7 +244,7 @@ enum ieee80211_bss_change { BSS_CHANGED_PS = 1<<17, BSS_CHANGED_TXPOWER = 1<<18, BSS_CHANGED_P2P_PS = 1<<19, - BSS_CHANGED_DTIM_PERIOD = 1<<20, + BSS_CHANGED_BEACON_INFO = 1<<20, BSS_CHANGED_BANDWIDTH = 1<<21, /* when adding here, make sure to change ieee80211_reconfig */ @@ -288,7 +288,7 @@ enum ieee80211_rssi_event { * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag * @dtim_period: num of beacons before the next DTIM, for beaconing, * valid in station mode only if after the driver was notified - * with the %BSS_CHANGED_DTIM_PERIOD flag, will be non-zero then. + * with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then. * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old * as it may have been received during scanning long ago). If the * HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can @@ -305,6 +305,7 @@ enum ieee80211_rssi_event { * @basic_rates: bitmap of basic rates, each bit stands for an * index into the rate table configured by the driver in * the current band. + * @beacon_rate: associated AP's beacon TX rate * @mcast_rate: per-band multicast rate index + 1 (0: disabled) * @bssid: The BSSID for this BSS * @enable_beacon: whether beaconing should be enabled or not @@ -352,6 +353,7 @@ struct ieee80211_bss_conf { u32 sync_device_ts; u8 sync_dtim_count; u32 basic_rates; + struct ieee80211_rate *beacon_rate; int mcast_rate[IEEE80211_NUM_BANDS]; u16 ht_operation_mode; s32 cqm_rssi_thold; @@ -460,6 +462,8 @@ struct ieee80211_bss_conf { * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it * would be fragmented by size (this is optional, only used for * monitor injection). + * @IEEE80211_TX_CTL_PS_RESPONSE: This frame is a response to a poll + * frame (PS-Poll or uAPSD). * * Note: If you have to add new flags to the enumeration, then don't * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. @@ -495,6 +499,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_STATUS_EOSP = BIT(28), IEEE80211_TX_CTL_USE_MINRATE = BIT(29), IEEE80211_TX_CTL_DONTFRAG = BIT(30), + IEEE80211_TX_CTL_PS_RESPONSE = BIT(31), }; #define IEEE80211_TX_CTL_STBC_SHIFT 23 @@ -805,6 +810,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * on this subframe * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC * is stored in the @ampdu_delimiter_crc field) + * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3 */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -832,8 +838,11 @@ enum mac80211_rx_flags { RX_FLAG_80MHZ = BIT(23), RX_FLAG_80P80MHZ = BIT(24), RX_FLAG_160MHZ = BIT(25), + RX_FLAG_STBC_MASK = BIT(26) | BIT(27), }; +#define RX_FLAG_STBC_SHIFT 26 + /** * struct ieee80211_rx_status - receive status * @@ -850,6 +859,10 @@ enum mac80211_rx_flags { * @signal: signal strength when receiving this frame, either in dBm, in dB or * unspecified depending on the hardware capabilities flags * @IEEE80211_HW_SIGNAL_* + * @chains: bitmask of receive chains for which separate signal strength + * values were filled. 
+ * @chain_signal: per-chain signal strength, in dBm (unlike @signal, doesn't + * support dB or unspecified units) * @antenna: antenna used * @rate_idx: index of data rate into band's supported rates or MCS index if * HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT) @@ -881,6 +894,8 @@ struct ieee80211_rx_status { u8 band; u8 antenna; s8 signal; + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 ampdu_delimiter_crc; u8 vendor_radiotap_align; u8 vendor_radiotap_oui[3]; @@ -1235,7 +1250,7 @@ enum ieee80211_sta_rx_bandwidth { * struct ieee80211_sta_rates - station rate selection table * * @rcu_head: RCU head used for freeing the table on update - * @rates: transmit rates/flags to be used by default. + * @rate: transmit rates/flags to be used by default. * Overriding entries per-packet is possible by using cb tx control. */ struct ieee80211_sta_rates { @@ -1276,7 +1291,7 @@ struct ieee80211_sta_rates { * notifications and capabilities. The value is only valid after * the station moves to associated state. * @smps_mode: current SMPS mode (off, static or dynamic) - * @tx_rates: rate control selection table + * @rates: rate control selection table */ struct ieee80211_sta { u32 supp_rates[IEEE80211_NUM_BANDS]; diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index ef937b56f9b5..e2c1e66d58ae 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -118,7 +118,7 @@ struct ex_phy { enum ex_phy_state phy_state; - enum sas_dev_type attached_dev_type; + enum sas_device_type attached_dev_type; enum sas_linkrate linkrate; u8 attached_sata_host:1; @@ -195,7 +195,7 @@ enum { struct domain_device { spinlock_t done_lock; - enum sas_dev_type dev_type; + enum sas_device_type dev_type; enum sas_linkrate linkrate; enum sas_linkrate min_linkrate; diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h index a6026da25f3e..25ac6283b9c7 100644 --- a/include/scsi/osd_protocol.h +++ b/include/scsi/osd_protocol.h @@ -107,7 +107,7 @@ enum osd_attributes_mode { * int exponent: 04; * } */ -typedef __be32 __bitwise osd_cdb_offset; +typedef __be32 osd_cdb_offset; enum { OSD_OFFSET_UNUSED = 0xFFFFFFFF, diff --git a/include/scsi/sas.h b/include/scsi/sas.h index be3eb0bf1ac0..0d2607d12387 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -90,16 +90,18 @@ enum sas_oob_mode { }; /* See sas_discover.c if you plan on changing these */ -enum sas_dev_type { - NO_DEVICE = 0, /* protocol */ - SAS_END_DEV = 1, /* protocol */ - EDGE_DEV = 2, /* protocol */ - FANOUT_DEV = 3, /* protocol */ - SAS_HA = 4, - SATA_DEV = 5, - SATA_PM = 7, - SATA_PM_PORT= 8, - SATA_PENDING = 9, +enum sas_device_type { + /* these are SAS protocol defined (attached device type field) */ + SAS_PHY_UNUSED = 0, + SAS_END_DEVICE = 1, + SAS_EDGE_EXPANDER_DEVICE = 2, + SAS_FANOUT_EXPANDER_DEVICE = 3, + /* these are internal to libsas */ + SAS_HA = 4, + SAS_SATA_DEV = 5, + SAS_SATA_PM = 7, + SAS_SATA_PM_PORT = 8, + SAS_SATA_PENDING = 9, }; enum sas_protocol { diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index ff71a5654684..00f41aeeecf5 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h @@ -32,8 +32,8 @@ static inline int dev_is_sata(struct domain_device *dev) { - return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || - dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING; + return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || + dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING; } int sas_get_ata_info(struct domain_device *dev, struct 
ex_phy *phy); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index a7f9cba275e9..cc645876d147 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -394,10 +394,18 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, unsigned char *sense, int timeout, int retries, int flag, int *resid); -extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, - int data_direction, void *buffer, unsigned bufflen, - struct scsi_sense_hdr *, int timeout, int retries, - int *resid); +extern int scsi_execute_req_flags(struct scsi_device *sdev, + const unsigned char *cmd, int data_direction, void *buffer, + unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, + int retries, int *resid, int flags); +static inline int scsi_execute_req(struct scsi_device *sdev, + const unsigned char *cmd, int data_direction, void *buffer, + unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, + int retries, int *resid) +{ + return scsi_execute_req_flags(sdev, cmd, data_direction, buffer, + bufflen, sshdr, timeout, retries, resid, 0); +} extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 4a58cca2ecc1..d0f1602985e7 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -471,14 +471,10 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess); extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost); extern int iscsi_flashnode_bus_match(struct device *dev, struct device_driver *drv); -extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data); - extern struct device * iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, int (*fn)(struct device *dev, void *data)); - extern struct device * -iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, - void *data, - int (*fn)(struct device *dev, void *data)); +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess); + #endif diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index 9b8e08879cfc..0bd71e2702e3 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -10,13 +10,6 @@ struct scsi_transport_template; struct sas_rphy; struct request; -enum sas_device_type { - SAS_PHY_UNUSED = 0, - SAS_END_DEVICE = 1, - SAS_EDGE_EXPANDER_DEVICE = 2, - SAS_FANOUT_EXPANDER_DEVICE = 3, -}; - static inline int sas_protocol_ata(enum sas_protocol proto) { return ((proto & SAS_PROTOCOL_SATA) || diff --git a/include/sound/tlv.h b/include/sound/tlv.h index 28c65e1ada21..e11e179420a1 100644 --- a/include/sound/tlv.h +++ b/include/sound/tlv.h @@ -74,7 +74,11 @@ #define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB) \ unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) } -/* dB range container */ +/* dB range container: + * Items in dB range container must be ordered by their values and by their + * dB values. This implies that larger values must correspond with larger + * dB values (which is also required for all other mixer controls). + */ /* Each item is: <min> <max> <TLV> */ #define TLV_DB_RANGE_ITEM(...) 
\ TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__) diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 9f096f1c0907..75cef3fd97ad 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -246,6 +246,7 @@ #define AUDIT_OBJ_TYPE 21 #define AUDIT_OBJ_LEV_LOW 22 #define AUDIT_OBJ_LEV_HIGH 23 +#define AUDIT_LOGINUID_SET 24 /* These are ONLY useful when checking * at syscall exit time (AUDIT_AT_EXIT). */ @@ -369,7 +370,8 @@ struct audit_status { }; struct audit_tty_status { - __u32 enabled; /* 1 = enabled, 0 = disabled */ + __u32 enabled; /* 1 = enabled, 0 = disabled */ + __u32 log_passwd; /* 1 = enabled, 0 = disabled */ }; /* audit_rule_data supports filter rules with both integer and string diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d1e48b5e348f..861e5eba3953 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -27,6 +27,8 @@ #include <linux/types.h> +#define NL80211_GENL_NAME "nl80211" + /** * DOC: Station handling * @@ -1429,6 +1431,11 @@ enum nl80211_commands { * @NL80211_ATTR_MAX_CRIT_PROT_DURATION: duration in milliseconds in which * the connection should have increased reliability (u16). * + * @NL80211_ATTR_PEER_AID: Association ID for the peer TDLS station (u16). + * This is similar to @NL80211_ATTR_STA_AID but with a difference of being + * allowed to be used with the first @NL80211_CMD_SET_STATION command to + * update a TDLS peer STA entry. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1727,6 +1734,8 @@ enum nl80211_attrs { NL80211_ATTR_CRIT_PROT_ID, NL80211_ATTR_MAX_CRIT_PROT_DURATION, + NL80211_ATTR_PEER_AID, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -1991,6 +2000,10 @@ enum nl80211_sta_bss_param { * @NL80211_STA_INFO_PEER_PM: peer mesh STA link-specific power mode * @NL80211_STA_INFO_NONPEER_PM: neighbor mesh STA power save mode towards * non-peer STA + * @NL80211_STA_INFO_CHAIN_SIGNAL: per-chain signal strength of last PPDU + * Contains a nested array of signal strength attributes (u8, dBm) + * @NL80211_STA_INFO_CHAIN_SIGNAL_AVG: per-chain signal strength average + * Same format as NL80211_STA_INFO_CHAIN_SIGNAL. * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -2020,6 +2033,8 @@ enum nl80211_sta_info { NL80211_STA_INFO_NONPEER_PM, NL80211_STA_INFO_RX_BYTES64, NL80211_STA_INFO_TX_BYTES64, + NL80211_STA_INFO_CHAIN_SIGNAL, + NL80211_STA_INFO_CHAIN_SIGNAL_AVG, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -2413,6 +2428,8 @@ enum nl80211_survey_info { * @NL80211_MNTR_FLAG_OTHER_BSS: disable BSSID filtering * @NL80211_MNTR_FLAG_COOK_FRAMES: report frames after processing. * overrides all other flags. + * @NL80211_MNTR_FLAG_ACTIVE: use the configured MAC address + * and ACK incoming unicast packets. 
* * @__NL80211_MNTR_FLAG_AFTER_LAST: internal use * @NL80211_MNTR_FLAG_MAX: highest possible monitor flag @@ -2424,6 +2441,7 @@ enum nl80211_mntr_flags { NL80211_MNTR_FLAG_CONTROL, NL80211_MNTR_FLAG_OTHER_BSS, NL80211_MNTR_FLAG_COOK_FRAMES, + NL80211_MNTR_FLAG_ACTIVE, /* keep last */ __NL80211_MNTR_FLAG_AFTER_LAST, @@ -2559,6 +2577,10 @@ enum nl80211_mesh_power_mode { * * @NL80211_MESHCONF_AWAKE_WINDOW: awake window duration (in TUs) * + * @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've + * established peering with for longer than this time (in seconds), then + * remove it from the STA's list of peers. Default is 30 minutes. + * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { @@ -2590,6 +2612,7 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, NL80211_MESHCONF_POWER_MODE, NL80211_MESHCONF_AWAKE_WINDOW, + NL80211_MESHCONF_PLINK_TIMEOUT, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, @@ -2637,6 +2660,10 @@ enum nl80211_meshconf_params { * @NL80211_MESH_SETUP_USERSPACE_MPM: Enable this option if userspace will * implement an MPM which handles peer allocation and state. * + * @NL80211_MESH_SETUP_AUTH_PROTOCOL: Inform the kernel of the authentication + * method (u8, as defined in IEEE 8.4.2.100.6, e.g. 0x1 for SAE). + * Default is no authentication method required. + * * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number * * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use @@ -2650,6 +2677,7 @@ enum nl80211_mesh_setup_params { NL80211_MESH_SETUP_USERSPACE_AMPE, NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, NL80211_MESH_SETUP_USERSPACE_MPM, + NL80211_MESH_SETUP_AUTH_PROTOCOL, /* keep last */ __NL80211_MESH_SETUP_ATTR_AFTER_LAST, @@ -2730,6 +2758,8 @@ enum nl80211_channel_type { * and %NL80211_ATTR_CENTER_FREQ2 attributes must be provided as well * @NL80211_CHAN_WIDTH_160: 160 MHz channel, the %NL80211_ATTR_CENTER_FREQ1 * attribute must be provided as well + * @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel + * @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel */ enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT, @@ -2738,6 +2768,8 @@ enum nl80211_chan_width { NL80211_CHAN_WIDTH_80, NL80211_CHAN_WIDTH_80P80, NL80211_CHAN_WIDTH_160, + NL80211_CHAN_WIDTH_5, + NL80211_CHAN_WIDTH_10, }; /** @@ -3556,6 +3588,10 @@ enum nl80211_ap_sme_features { * Peering Management entity which may be implemented by registering for * beacons or NL80211_CMD_NEW_PEER_CANDIDATE events. The mesh beacon is * still generated by the driver. + * @NL80211_FEATURE_ACTIVE_MONITOR: This driver supports an active monitor + * interface. An active monitor interface behaves like a normal monitor + * interface, but gets added to the driver. It ensures that incoming + * unicast packets directed at the configured interface address get ACKed. 
*/ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, @@ -3575,6 +3611,7 @@ enum nl80211_feature_flags { NL80211_FEATURE_ADVERTISE_CHAN_LIMITS = 1 << 14, NL80211_FEATURE_FULL_AP_CLIENT_STATE = 1 << 15, NL80211_FEATURE_USERSPACE_MPM = 1 << 16, + NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17, }; /** diff --git a/kernel/audit.c b/kernel/audit.c index 0b084fa44b1f..21c7fa615bd3 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -49,6 +49,8 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/kthread.h> +#include <linux/kernel.h> +#include <linux/syscalls.h> #include <linux/audit.h> @@ -265,7 +267,6 @@ void audit_log_lost(const char *message) } static int audit_log_config_change(char *function_name, int new, int old, - kuid_t loginuid, u32 sessionid, u32 sid, int allow_changes) { struct audit_buffer *ab; @@ -274,29 +275,17 @@ static int audit_log_config_change(char *function_name, int new, int old, ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); if (unlikely(!ab)) return rc; - audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new, - old, from_kuid(&init_user_ns, loginuid), sessionid); - if (sid) { - char *ctx = NULL; - u32 len; - - rc = security_secid_to_secctx(sid, &ctx, &len); - if (rc) { - audit_log_format(ab, " sid=%u", sid); - allow_changes = 0; /* Something weird, deny request */ - } else { - audit_log_format(ab, " subj=%s", ctx); - security_release_secctx(ctx, len); - } - } + audit_log_format(ab, "%s=%d old=%d", function_name, new, old); + audit_log_session_info(ab); + rc = audit_log_task_context(ab); + if (rc) + allow_changes = 0; /* Something weird, deny request */ audit_log_format(ab, " res=%d", allow_changes); audit_log_end(ab); return rc; } -static int audit_do_config_change(char *function_name, int *to_change, - int new, kuid_t loginuid, u32 sessionid, - u32 sid) +static int audit_do_config_change(char *function_name, int *to_change, int new) { int allow_changes, rc = 0, old = *to_change; @@ -307,8 +296,7 @@ static int audit_do_config_change(char *function_name, int *to_change, allow_changes = 1; if (audit_enabled != AUDIT_OFF) { - rc = audit_log_config_change(function_name, new, old, loginuid, - sessionid, sid, allow_changes); + rc = audit_log_config_change(function_name, new, old, allow_changes); if (rc) allow_changes = 0; } @@ -322,44 +310,37 @@ static int audit_do_config_change(char *function_name, int *to_change, return rc; } -static int audit_set_rate_limit(int limit, kuid_t loginuid, u32 sessionid, - u32 sid) +static int audit_set_rate_limit(int limit) { - return audit_do_config_change("audit_rate_limit", &audit_rate_limit, - limit, loginuid, sessionid, sid); + return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit); } -static int audit_set_backlog_limit(int limit, kuid_t loginuid, u32 sessionid, - u32 sid) +static int audit_set_backlog_limit(int limit) { - return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, - limit, loginuid, sessionid, sid); + return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit); } -static int audit_set_enabled(int state, kuid_t loginuid, u32 sessionid, u32 sid) +static int audit_set_enabled(int state) { int rc; if (state < AUDIT_OFF || state > AUDIT_LOCKED) return -EINVAL; - rc = audit_do_config_change("audit_enabled", &audit_enabled, state, - loginuid, sessionid, sid); - + rc = audit_do_config_change("audit_enabled", &audit_enabled, state); if (!rc) audit_ever_enabled |= !!state; return rc; } -static int audit_set_failure(int 
state, kuid_t loginuid, u32 sessionid, u32 sid) +static int audit_set_failure(int state) { if (state != AUDIT_FAIL_SILENT && state != AUDIT_FAIL_PRINTK && state != AUDIT_FAIL_PANIC) return -EINVAL; - return audit_do_config_change("audit_failure", &audit_failure, state, - loginuid, sessionid, sid); + return audit_do_config_change("audit_failure", &audit_failure, state); } /* @@ -417,34 +398,53 @@ static void kauditd_send_skb(struct sk_buff *skb) consume_skb(skb); } -static int kauditd_thread(void *dummy) +/* + * flush_hold_queue - empty the hold queue if auditd appears + * + * If auditd just started, drain the queue of messages already + * sent to syslog/printk. Remember loss here is ok. We already + * called audit_log_lost() if it didn't go out normally. so the + * race between the skb_dequeue and the next check for audit_pid + * doesn't matter. + * + * If you ever find kauditd to be too slow we can get a perf win + * by doing our own locking and keeping better track if there + * are messages in this queue. I don't see the need now, but + * in 5 years when I want to play with this again I'll see this + * note and still have no friggin idea what i'm thinking today. + */ +static void flush_hold_queue(void) { struct sk_buff *skb; + if (!audit_default || !audit_pid) + return; + + skb = skb_dequeue(&audit_skb_hold_queue); + if (likely(!skb)) + return; + + while (skb && audit_pid) { + kauditd_send_skb(skb); + skb = skb_dequeue(&audit_skb_hold_queue); + } + + /* + * if auditd just disappeared but we + * dequeued an skb we need to drop ref + */ + if (skb) + consume_skb(skb); +} + +static int kauditd_thread(void *dummy) +{ set_freezable(); while (!kthread_should_stop()) { - /* - * if auditd just started drain the queue of messages already - * sent to syslog/printk. remember loss here is ok. we already - * called audit_log_lost() if it didn't go out normally. so the - * race between the skb_dequeue and the next check for audit_pid - * doesn't matter. - * - * if you ever find kauditd to be too slow we can get a perf win - * by doing our own locking and keeping better track if there - * are messages in this queue. I don't see the need now, but - * in 5 years when I want to play with this again I'll see this - * note and still have no friggin idea what i'm thinking today. 
- */ - if (audit_default && audit_pid) { - skb = skb_dequeue(&audit_skb_hold_queue); - if (unlikely(skb)) { - while (skb && audit_pid) { - kauditd_send_skb(skb); - skb = skb_dequeue(&audit_skb_hold_queue); - } - } - } + struct sk_buff *skb; + DECLARE_WAITQUEUE(wait, current); + + flush_hold_queue(); skb = skb_dequeue(&audit_skb_queue); wake_up(&audit_backlog_wait); @@ -453,19 +453,18 @@ static int kauditd_thread(void *dummy) kauditd_send_skb(skb); else audit_printk_skb(skb); - } else { - DECLARE_WAITQUEUE(wait, current); - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&kauditd_wait, &wait); - - if (!skb_queue_len(&audit_skb_queue)) { - try_to_freeze(); - schedule(); - } + continue; + } + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&kauditd_wait, &wait); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&kauditd_wait, &wait); + if (!skb_queue_len(&audit_skb_queue)) { + try_to_freeze(); + schedule(); } + + __set_current_state(TASK_RUNNING); + remove_wait_queue(&kauditd_wait, &wait); } return 0; } @@ -579,13 +578,14 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) return -EPERM; switch (msg_type) { - case AUDIT_GET: case AUDIT_LIST: - case AUDIT_LIST_RULES: - case AUDIT_SET: case AUDIT_ADD: - case AUDIT_ADD_RULE: case AUDIT_DEL: + return -EOPNOTSUPP; + case AUDIT_GET: + case AUDIT_SET: + case AUDIT_LIST_RULES: + case AUDIT_ADD_RULE: case AUDIT_DEL_RULE: case AUDIT_SIGNAL_INFO: case AUDIT_TTY_GET: @@ -608,12 +608,10 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) return err; } -static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type, - kuid_t auid, u32 ses, u32 sid) +static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type) { int rc = 0; - char *ctx = NULL; - u32 len; + uid_t uid = from_kuid(&init_user_ns, current_uid()); if (!audit_enabled) { *ab = NULL; @@ -623,33 +621,21 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type, *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); if (unlikely(!*ab)) return rc; - audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u", - task_tgid_vnr(current), - from_kuid(&init_user_ns, current_uid()), - from_kuid(&init_user_ns, auid), ses); - if (sid) { - rc = security_secid_to_secctx(sid, &ctx, &len); - if (rc) - audit_log_format(*ab, " ssid=%u", sid); - else { - audit_log_format(*ab, " subj=%s", ctx); - security_release_secctx(ctx, len); - } - } + audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid); + audit_log_session_info(*ab); + audit_log_task_context(*ab); return rc; } static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { - u32 seq, sid; + u32 seq; void *data; struct audit_status *status_get, status_set; int err; struct audit_buffer *ab; u16 msg_type = nlh->nlmsg_type; - kuid_t loginuid; /* loginuid of sender */ - u32 sessionid; struct audit_sig_info *sig_data; char *ctx = NULL; u32 len; @@ -668,9 +654,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return err; } } - loginuid = audit_get_loginuid(current); - sessionid = audit_get_sessionid(current); - security_task_getsecid(current, &sid); seq = nlh->nlmsg_seq; data = nlmsg_data(nlh); @@ -691,14 +674,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; status_get = (struct audit_status *)data; if (status_get->mask & AUDIT_STATUS_ENABLED) { - err = audit_set_enabled(status_get->enabled, - loginuid, sessionid, sid); + err = audit_set_enabled(status_get->enabled); if (err < 0) 
return err; } if (status_get->mask & AUDIT_STATUS_FAILURE) { - err = audit_set_failure(status_get->failure, - loginuid, sessionid, sid); + err = audit_set_failure(status_get->failure); if (err < 0) return err; } @@ -706,22 +687,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) int new_pid = status_get->pid; if (audit_enabled != AUDIT_OFF) - audit_log_config_change("audit_pid", new_pid, - audit_pid, loginuid, - sessionid, sid, 1); - + audit_log_config_change("audit_pid", new_pid, audit_pid, 1); audit_pid = new_pid; audit_nlk_portid = NETLINK_CB(skb).portid; } if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) { - err = audit_set_rate_limit(status_get->rate_limit, - loginuid, sessionid, sid); + err = audit_set_rate_limit(status_get->rate_limit); if (err < 0) return err; } if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) - err = audit_set_backlog_limit(status_get->backlog_limit, - loginuid, sessionid, sid); + err = audit_set_backlog_limit(status_get->backlog_limit); break; case AUDIT_USER: case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: @@ -729,25 +705,22 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (!audit_enabled && msg_type != AUDIT_USER_AVC) return 0; - err = audit_filter_user(); + err = audit_filter_user(msg_type); if (err == 1) { err = 0; if (msg_type == AUDIT_USER_TTY) { - err = tty_audit_push_task(current, loginuid, - sessionid); + err = tty_audit_push_current(); if (err) break; } - audit_log_common_recv_msg(&ab, msg_type, - loginuid, sessionid, sid); - + audit_log_common_recv_msg(&ab, msg_type); if (msg_type != AUDIT_USER_TTY) audit_log_format(ab, " msg='%.1024s'", (char *)data); else { int size; - audit_log_format(ab, " msg="); + audit_log_format(ab, " data="); size = nlmsg_len(nlh); if (size > 0 && ((unsigned char *)data)[size - 1] == '\0') @@ -758,50 +731,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) audit_log_end(ab); } break; - case AUDIT_ADD: - case AUDIT_DEL: - if (nlmsg_len(nlh) < sizeof(struct audit_rule)) - return -EINVAL; - if (audit_enabled == AUDIT_LOCKED) { - audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, - loginuid, sessionid, sid); - - audit_log_format(ab, " audit_enabled=%d res=0", - audit_enabled); - audit_log_end(ab); - return -EPERM; - } - /* fallthrough */ - case AUDIT_LIST: - err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid, - seq, data, nlmsg_len(nlh), - loginuid, sessionid, sid); - break; case AUDIT_ADD_RULE: case AUDIT_DEL_RULE: if (nlmsg_len(nlh) < sizeof(struct audit_rule_data)) return -EINVAL; if (audit_enabled == AUDIT_LOCKED) { - audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, - loginuid, sessionid, sid); - - audit_log_format(ab, " audit_enabled=%d res=0", - audit_enabled); + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE); + audit_log_format(ab, " audit_enabled=%d res=0", audit_enabled); audit_log_end(ab); return -EPERM; } /* fallthrough */ case AUDIT_LIST_RULES: err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid, - seq, data, nlmsg_len(nlh), - loginuid, sessionid, sid); + seq, data, nlmsg_len(nlh)); break; case AUDIT_TRIM: audit_trim_trees(); - - audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, - loginuid, sessionid, sid); - + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE); audit_log_format(ab, " op=trim res=1"); audit_log_end(ab); break; @@ -831,8 +778,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) /* OK, here comes... 
*/ err = audit_tag_tree(old, new); - audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, - loginuid, sessionid, sid); + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE); audit_log_format(ab, " op=make_equiv old="); audit_log_untrustedstring(ab, old); @@ -871,27 +817,30 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct audit_tty_status s; struct task_struct *tsk = current; - spin_lock_irq(&tsk->sighand->siglock); + spin_lock(&tsk->sighand->siglock); s.enabled = tsk->signal->audit_tty != 0; - spin_unlock_irq(&tsk->sighand->siglock); + s.log_passwd = tsk->signal->audit_tty_log_passwd; + spin_unlock(&tsk->sighand->siglock); audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_TTY_SET: { - struct audit_tty_status *s; + struct audit_tty_status s; struct task_struct *tsk = current; - if (nlh->nlmsg_len < sizeof(struct audit_tty_status)) - return -EINVAL; - s = data; - if (s->enabled != 0 && s->enabled != 1) + memset(&s, 0, sizeof(s)); + /* guard against past and future API changes */ + memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len)); + if ((s.enabled != 0 && s.enabled != 1) || + (s.log_passwd != 0 && s.log_passwd != 1)) return -EINVAL; - spin_lock_irq(&tsk->sighand->siglock); - tsk->signal->audit_tty = s->enabled != 0; - spin_unlock_irq(&tsk->sighand->siglock); + spin_lock(&tsk->sighand->siglock); + tsk->signal->audit_tty = s.enabled; + tsk->signal->audit_tty_log_passwd = s.log_passwd; + spin_unlock(&tsk->sighand->siglock); break; } default: @@ -1434,6 +1383,14 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix, kfree(pathname); } +void audit_log_session_info(struct audit_buffer *ab) +{ + u32 sessionid = audit_get_sessionid(current); + uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current)); + + audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid); +} + void audit_log_key(struct audit_buffer *ab, char *key) { audit_log_format(ab, " key="); @@ -1443,6 +1400,224 @@ void audit_log_key(struct audit_buffer *ab, char *key) audit_log_format(ab, "(null)"); } +void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap) +{ + int i; + + audit_log_format(ab, " %s=", prefix); + CAP_FOR_EACH_U32(i) { + audit_log_format(ab, "%08x", + cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]); + } +} + +void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name) +{ + kernel_cap_t *perm = &name->fcap.permitted; + kernel_cap_t *inh = &name->fcap.inheritable; + int log = 0; + + if (!cap_isclear(*perm)) { + audit_log_cap(ab, "cap_fp", perm); + log = 1; + } + if (!cap_isclear(*inh)) { + audit_log_cap(ab, "cap_fi", inh); + log = 1; + } + + if (log) + audit_log_format(ab, " cap_fe=%d cap_fver=%x", + name->fcap.fE, name->fcap_ver); +} + +static inline int audit_copy_fcaps(struct audit_names *name, + const struct dentry *dentry) +{ + struct cpu_vfs_cap_data caps; + int rc; + + if (!dentry) + return 0; + + rc = get_vfs_caps_from_disk(dentry, &caps); + if (rc) + return rc; + + name->fcap.permitted = caps.permitted; + name->fcap.inheritable = caps.inheritable; + name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); + name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> + VFS_CAP_REVISION_SHIFT; + + return 0; +} + +/* Copy inode data into an audit_names. 
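The AUDIT_TTY_SET hunk above swaps a strict length check for a defensive copy: zero the local struct, then copy at most min(sizeof(local), payload length) bytes, so both older (shorter) and newer (longer) userspace layouts are accepted and any missing trailing fields read as zero. A standalone C sketch of the same idiom, with a made-up two-field status struct:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical status struct; imagine 'log_passwd' was added in a later ABI */
struct tty_status {
    uint32_t enabled;
    uint32_t log_passwd;
};

/* Copy a caller-supplied blob of 'len' bytes into 's', tolerating both
 * shorter (older ABI) and longer (newer ABI) payloads. */
static int copy_status(struct tty_status *s, const void *data, size_t len)
{
    memset(s, 0, sizeof(*s));
    memcpy(s, data, len < sizeof(*s) ? len : sizeof(*s));

    /* validate only the fields we understand */
    if ((s->enabled != 0 && s->enabled != 1) ||
        (s->log_passwd != 0 && s->log_passwd != 1))
        return -1;
    return 0;
}

int main(void)
{
    uint32_t old_abi[1] = { 1 };                 /* only 'enabled' */
    uint32_t new_abi[3] = { 1, 0, 42 };          /* extra trailing field */
    struct tty_status s;

    printf("old: %d\n", copy_status(&s, old_abi, sizeof(old_abi)));
    printf("new: %d\n", copy_status(&s, new_abi, sizeof(new_abi)));
    return 0;
}

The zero-fill before the bounded memcpy is what makes a short, old-ABI payload safe: fields the old caller never knew about simply validate as zero.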
*/ +void audit_copy_inode(struct audit_names *name, const struct dentry *dentry, + const struct inode *inode) +{ + name->ino = inode->i_ino; + name->dev = inode->i_sb->s_dev; + name->mode = inode->i_mode; + name->uid = inode->i_uid; + name->gid = inode->i_gid; + name->rdev = inode->i_rdev; + security_inode_getsecid(inode, &name->osid); + audit_copy_fcaps(name, dentry); +} + +/** + * audit_log_name - produce AUDIT_PATH record from struct audit_names + * @context: audit_context for the task + * @n: audit_names structure with reportable details + * @path: optional path to report instead of audit_names->name + * @record_num: record number to report when handling a list of names + * @call_panic: optional pointer to int that will be updated if secid fails + */ +void audit_log_name(struct audit_context *context, struct audit_names *n, + struct path *path, int record_num, int *call_panic) +{ + struct audit_buffer *ab; + ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH); + if (!ab) + return; + + audit_log_format(ab, "item=%d", record_num); + + if (path) + audit_log_d_path(ab, " name=", path); + else if (n->name) { + switch (n->name_len) { + case AUDIT_NAME_FULL: + /* log the full path */ + audit_log_format(ab, " name="); + audit_log_untrustedstring(ab, n->name->name); + break; + case 0: + /* name was specified as a relative path and the + * directory component is the cwd */ + audit_log_d_path(ab, " name=", &context->pwd); + break; + default: + /* log the name's directory component */ + audit_log_format(ab, " name="); + audit_log_n_untrustedstring(ab, n->name->name, + n->name_len); + } + } else + audit_log_format(ab, " name=(null)"); + + if (n->ino != (unsigned long)-1) { + audit_log_format(ab, " inode=%lu" + " dev=%02x:%02x mode=%#ho" + " ouid=%u ogid=%u rdev=%02x:%02x", + n->ino, + MAJOR(n->dev), + MINOR(n->dev), + n->mode, + from_kuid(&init_user_ns, n->uid), + from_kgid(&init_user_ns, n->gid), + MAJOR(n->rdev), + MINOR(n->rdev)); + } + if (n->osid != 0) { + char *ctx = NULL; + u32 len; + if (security_secid_to_secctx( + n->osid, &ctx, &len)) { + audit_log_format(ab, " osid=%u", n->osid); + if (call_panic) + *call_panic = 2; + } else { + audit_log_format(ab, " obj=%s", ctx); + security_release_secctx(ctx, len); + } + } + + audit_log_fcaps(ab, n); + audit_log_end(ab); +} + +int audit_log_task_context(struct audit_buffer *ab) +{ + char *ctx = NULL; + unsigned len; + int error; + u32 sid; + + security_task_getsecid(current, &sid); + if (!sid) + return 0; + + error = security_secid_to_secctx(sid, &ctx, &len); + if (error) { + if (error != -EINVAL) + goto error_path; + return 0; + } + + audit_log_format(ab, " subj=%s", ctx); + security_release_secctx(ctx, len); + return 0; + +error_path: + audit_panic("error in audit_log_task_context"); + return error; +} +EXPORT_SYMBOL(audit_log_task_context); + +void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) +{ + const struct cred *cred; + char name[sizeof(tsk->comm)]; + struct mm_struct *mm = tsk->mm; + char *tty; + + if (!ab) + return; + + /* tsk == current */ + cred = current_cred(); + + spin_lock_irq(&tsk->sighand->siglock); + if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name) + tty = tsk->signal->tty->name; + else + tty = "(none)"; + spin_unlock_irq(&tsk->sighand->siglock); + + audit_log_format(ab, + " ppid=%ld pid=%d auid=%u uid=%u gid=%u" + " euid=%u suid=%u fsuid=%u" + " egid=%u sgid=%u fsgid=%u ses=%u tty=%s", + sys_getppid(), + tsk->pid, + from_kuid(&init_user_ns, audit_get_loginuid(tsk)), + 
from_kuid(&init_user_ns, cred->uid), + from_kgid(&init_user_ns, cred->gid), + from_kuid(&init_user_ns, cred->euid), + from_kuid(&init_user_ns, cred->suid), + from_kuid(&init_user_ns, cred->fsuid), + from_kgid(&init_user_ns, cred->egid), + from_kgid(&init_user_ns, cred->sgid), + from_kgid(&init_user_ns, cred->fsgid), + audit_get_sessionid(tsk), tty); + + get_task_comm(name, tsk); + audit_log_format(ab, " comm="); + audit_log_untrustedstring(ab, name); + + if (mm) { + down_read(&mm->mmap_sem); + if (mm->exe_file) + audit_log_d_path(ab, " exe=", &mm->exe_file->f_path); + up_read(&mm->mmap_sem); + } + audit_log_task_context(ab); +} +EXPORT_SYMBOL(audit_log_task_info); + /** * audit_log_link_denied - report a link restriction denial * @operation: specific link opreation @@ -1451,19 +1626,28 @@ void audit_log_key(struct audit_buffer *ab, char *key) void audit_log_link_denied(const char *operation, struct path *link) { struct audit_buffer *ab; + struct audit_names *name; + + name = kzalloc(sizeof(*name), GFP_NOFS); + if (!name) + return; + /* Generate AUDIT_ANOM_LINK with subject, operation, outcome. */ ab = audit_log_start(current->audit_context, GFP_KERNEL, AUDIT_ANOM_LINK); if (!ab) - return; - audit_log_format(ab, "op=%s action=denied", operation); - audit_log_format(ab, " pid=%d comm=", current->pid); - audit_log_untrustedstring(ab, current->comm); - audit_log_d_path(ab, " path=", link); - audit_log_format(ab, " dev="); - audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id); - audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino); + goto out; + audit_log_format(ab, "op=%s", operation); + audit_log_task_info(ab, current); + audit_log_format(ab, " res=0"); audit_log_end(ab); + + /* Generate AUDIT_PATH record with object. */ + name->type = AUDIT_TYPE_NORMAL; + audit_copy_inode(name, link->dentry, link->dentry->d_inode); + audit_log_name(current->audit_context, name, link, 0, NULL); +out: + kfree(name); } /** diff --git a/kernel/audit.h b/kernel/audit.h index 11468d99dad0..1c95131ef760 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -22,6 +22,7 @@ #include <linux/fs.h> #include <linux/audit.h> #include <linux/skbuff.h> +#include <uapi/linux/mqueue.h> /* 0 = no checking 1 = put_count checking @@ -29,6 +30,11 @@ */ #define AUDIT_DEBUG 0 +/* AUDIT_NAMES is the number of slots we reserve in the audit_context + * for saving names from getname(). If we get more names we will allocate + * a name dynamically and also add those to the list anchored by names_list. */ +#define AUDIT_NAMES 5 + /* At task start time, the audit_state is set in the audit_context using a per-task filter. At syscall entry, the audit_state is augmented by the syscall filter. */ @@ -59,8 +65,158 @@ struct audit_entry { struct audit_krule rule; }; +struct audit_cap_data { + kernel_cap_t permitted; + kernel_cap_t inheritable; + union { + unsigned int fE; /* effective bit of file cap */ + kernel_cap_t effective; /* effective set of process */ + }; +}; + +/* When fs/namei.c:getname() is called, we store the pointer in name and + * we don't let putname() free it (instead we free all of the saved + * pointers at syscall exit time). + * + * Further, in fs/namei.c:path_lookup() we store the inode and device. + */ +struct audit_names { + struct list_head list; /* audit_context->names_list */ + + struct filename *name; + int name_len; /* number of chars to log */ + bool name_put; /* call __putname()? 
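audit_log_link_denied() now snapshots the object into a struct audit_names via audit_copy_inode() and emits the AUDIT_PATH record separately, i.e. it captures the inode's identifying fields first and formats them later. A userspace analogue of that capture step, filling a hypothetical trimmed-down record from stat(2); this is an illustration, not the kernel code path.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* hypothetical, trimmed-down analogue of struct audit_names */
struct name_record {
    ino_t  ino;
    dev_t  dev;
    mode_t mode;
    uid_t  uid;
    gid_t  gid;
    dev_t  rdev;
};

static int copy_inode_info(struct name_record *n, const char *path)
{
    struct stat st;

    if (stat(path, &st) != 0)
        return -1;

    n->ino  = st.st_ino;
    n->dev  = st.st_dev;
    n->mode = st.st_mode;
    n->uid  = st.st_uid;
    n->gid  = st.st_gid;
    n->rdev = st.st_rdev;
    return 0;
}

int main(void)
{
    struct name_record n;

    if (copy_inode_info(&n, "/etc/hostname") == 0)
        printf("inode=%lu dev=%lu mode=%#o ouid=%u ogid=%u\n",
               (unsigned long)n.ino, (unsigned long)n.dev,
               (unsigned)n.mode, (unsigned)n.uid, (unsigned)n.gid);
    return 0;
}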
*/ + + unsigned long ino; + dev_t dev; + umode_t mode; + kuid_t uid; + kgid_t gid; + dev_t rdev; + u32 osid; + struct audit_cap_data fcap; + unsigned int fcap_ver; + unsigned char type; /* record type */ + /* + * This was an allocated audit_names and not from the array of + * names allocated in the task audit context. Thus this name + * should be freed on syscall exit. + */ + bool should_free; +}; + +/* The per-task audit context. */ +struct audit_context { + int dummy; /* must be the first element */ + int in_syscall; /* 1 if task is in a syscall */ + enum audit_state state, current_state; + unsigned int serial; /* serial number for record */ + int major; /* syscall number */ + struct timespec ctime; /* time of syscall entry */ + unsigned long argv[4]; /* syscall arguments */ + long return_code;/* syscall return code */ + u64 prio; + int return_valid; /* return code is valid */ + /* + * The names_list is the list of all audit_names collected during this + * syscall. The first AUDIT_NAMES entries in the names_list will + * actually be from the preallocated_names array for performance + * reasons. Except during allocation they should never be referenced + * through the preallocated_names array and should only be found/used + * by running the names_list. + */ + struct audit_names preallocated_names[AUDIT_NAMES]; + int name_count; /* total records in names_list */ + struct list_head names_list; /* struct audit_names->list anchor */ + char *filterkey; /* key for rule that triggered record */ + struct path pwd; + struct audit_aux_data *aux; + struct audit_aux_data *aux_pids; + struct sockaddr_storage *sockaddr; + size_t sockaddr_len; + /* Save things to print about task_struct */ + pid_t pid, ppid; + kuid_t uid, euid, suid, fsuid; + kgid_t gid, egid, sgid, fsgid; + unsigned long personality; + int arch; + + pid_t target_pid; + kuid_t target_auid; + kuid_t target_uid; + unsigned int target_sessionid; + u32 target_sid; + char target_comm[TASK_COMM_LEN]; + + struct audit_tree_refs *trees, *first_trees; + struct list_head killed_trees; + int tree_count; + + int type; + union { + struct { + int nargs; + long args[6]; + } socketcall; + struct { + kuid_t uid; + kgid_t gid; + umode_t mode; + u32 osid; + int has_perm; + uid_t perm_uid; + gid_t perm_gid; + umode_t perm_mode; + unsigned long qbytes; + } ipc; + struct { + mqd_t mqdes; + struct mq_attr mqstat; + } mq_getsetattr; + struct { + mqd_t mqdes; + int sigev_signo; + } mq_notify; + struct { + mqd_t mqdes; + size_t msg_len; + unsigned int msg_prio; + struct timespec abs_timeout; + } mq_sendrecv; + struct { + int oflag; + umode_t mode; + struct mq_attr attr; + } mq_open; + struct { + pid_t pid; + struct audit_cap_data cap; + } capset; + struct { + int fd; + int flags; + } mmap; + }; + int fds[2]; + +#if AUDIT_DEBUG + int put_count; + int ino_count; +#endif +}; + extern int audit_ever_enabled; +extern void audit_copy_inode(struct audit_names *name, + const struct dentry *dentry, + const struct inode *inode); +extern void audit_log_cap(struct audit_buffer *ab, char *prefix, + kernel_cap_t *cap); +extern void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name); +extern void audit_log_name(struct audit_context *context, + struct audit_names *n, struct path *path, + int record_num, int *call_panic); + extern int audit_pid; #define AUDIT_INODE_BUCKETS 32 diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 267436826c3b..83a2970295d1 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -310,121 +310,83 @@ static u32 
audit_to_op(u32 op) return n; } - -/* Translate struct audit_rule to kernel's rule respresentation. - * Exists for backward compatibility with userspace. */ -static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule) +/* check if an audit field is valid */ +static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) { - struct audit_entry *entry; - int err = 0; - int i; - - entry = audit_to_entry_common(rule); - if (IS_ERR(entry)) - goto exit_nofree; - - for (i = 0; i < rule->field_count; i++) { - struct audit_field *f = &entry->rule.fields[i]; - u32 n; - - n = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS); - - /* Support for legacy operators where - * AUDIT_NEGATE bit signifies != and otherwise assumes == */ - if (n & AUDIT_NEGATE) - f->op = Audit_not_equal; - else if (!n) - f->op = Audit_equal; - else - f->op = audit_to_op(n); - - entry->rule.vers_ops = (n & AUDIT_OPERATORS) ? 2 : 1; - - f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS); - f->val = rule->values[i]; - f->uid = INVALID_UID; - f->gid = INVALID_GID; - - err = -EINVAL; - if (f->op == Audit_bad) - goto exit_free; - - switch(f->type) { - default: - goto exit_free; - case AUDIT_UID: - case AUDIT_EUID: - case AUDIT_SUID: - case AUDIT_FSUID: - case AUDIT_LOGINUID: - /* bit ops not implemented for uid comparisons */ - if (f->op == Audit_bitmask || f->op == Audit_bittest) - goto exit_free; - - f->uid = make_kuid(current_user_ns(), f->val); - if (!uid_valid(f->uid)) - goto exit_free; - break; - case AUDIT_GID: - case AUDIT_EGID: - case AUDIT_SGID: - case AUDIT_FSGID: - /* bit ops not implemented for gid comparisons */ - if (f->op == Audit_bitmask || f->op == Audit_bittest) - goto exit_free; - - f->gid = make_kgid(current_user_ns(), f->val); - if (!gid_valid(f->gid)) - goto exit_free; - break; - case AUDIT_PID: - case AUDIT_PERS: - case AUDIT_MSGTYPE: - case AUDIT_PPID: - case AUDIT_DEVMAJOR: - case AUDIT_DEVMINOR: - case AUDIT_EXIT: - case AUDIT_SUCCESS: - /* bit ops are only useful on syscall args */ - if (f->op == Audit_bitmask || f->op == Audit_bittest) - goto exit_free; - break; - case AUDIT_ARG0: - case AUDIT_ARG1: - case AUDIT_ARG2: - case AUDIT_ARG3: - break; - /* arch is only allowed to be = or != */ - case AUDIT_ARCH: - if (f->op != Audit_not_equal && f->op != Audit_equal) - goto exit_free; - entry->rule.arch_f = f; - break; - case AUDIT_PERM: - if (f->val & ~15) - goto exit_free; - break; - case AUDIT_FILETYPE: - if (f->val & ~S_IFMT) - goto exit_free; - break; - case AUDIT_INODE: - err = audit_to_inode(&entry->rule, f); - if (err) - goto exit_free; - break; - } - } - - if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal) - entry->rule.inode_f = NULL; - -exit_nofree: - return entry; + switch(f->type) { + case AUDIT_MSGTYPE: + if (entry->rule.listnr != AUDIT_FILTER_TYPE && + entry->rule.listnr != AUDIT_FILTER_USER) + return -EINVAL; + break; + }; -exit_free: - audit_free_rule(entry); - return ERR_PTR(err); + switch(f->type) { + default: + return -EINVAL; + case AUDIT_UID: + case AUDIT_EUID: + case AUDIT_SUID: + case AUDIT_FSUID: + case AUDIT_LOGINUID: + case AUDIT_OBJ_UID: + case AUDIT_GID: + case AUDIT_EGID: + case AUDIT_SGID: + case AUDIT_FSGID: + case AUDIT_OBJ_GID: + case AUDIT_PID: + case AUDIT_PERS: + case AUDIT_MSGTYPE: + case AUDIT_PPID: + case AUDIT_DEVMAJOR: + case AUDIT_DEVMINOR: + case AUDIT_EXIT: + case AUDIT_SUCCESS: + /* bit ops are only useful on syscall args */ + if (f->op == Audit_bitmask || f->op == Audit_bittest) + return -EINVAL; + break; + 
case AUDIT_ARG0: + case AUDIT_ARG1: + case AUDIT_ARG2: + case AUDIT_ARG3: + case AUDIT_SUBJ_USER: + case AUDIT_SUBJ_ROLE: + case AUDIT_SUBJ_TYPE: + case AUDIT_SUBJ_SEN: + case AUDIT_SUBJ_CLR: + case AUDIT_OBJ_USER: + case AUDIT_OBJ_ROLE: + case AUDIT_OBJ_TYPE: + case AUDIT_OBJ_LEV_LOW: + case AUDIT_OBJ_LEV_HIGH: + case AUDIT_WATCH: + case AUDIT_DIR: + case AUDIT_FILTERKEY: + break; + case AUDIT_LOGINUID_SET: + if ((f->val != 0) && (f->val != 1)) + return -EINVAL; + /* FALL THROUGH */ + case AUDIT_ARCH: + if (f->op != Audit_not_equal && f->op != Audit_equal) + return -EINVAL; + break; + case AUDIT_PERM: + if (f->val & ~15) + return -EINVAL; + break; + case AUDIT_FILETYPE: + if (f->val & ~S_IFMT) + return -EINVAL; + break; + case AUDIT_FIELD_COMPARE: + if (f->val > AUDIT_MAX_FIELD_COMPARE) + return -EINVAL; + break; + }; + return 0; } /* Translate struct audit_rule_data to kernel's rule respresentation. */ @@ -459,17 +421,25 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, f->gid = INVALID_GID; f->lsm_str = NULL; f->lsm_rule = NULL; - switch(f->type) { + + /* Support legacy tests for a valid loginuid */ + if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) { + f->type = AUDIT_LOGINUID_SET; + f->val = 0; + } + + err = audit_field_valid(entry, f); + if (err) + goto exit_free; + + err = -EINVAL; + switch (f->type) { + case AUDIT_LOGINUID: case AUDIT_UID: case AUDIT_EUID: case AUDIT_SUID: case AUDIT_FSUID: - case AUDIT_LOGINUID: case AUDIT_OBJ_UID: - /* bit ops not implemented for uid comparisons */ - if (f->op == Audit_bitmask || f->op == Audit_bittest) - goto exit_free; - f->uid = make_kuid(current_user_ns(), f->val); if (!uid_valid(f->uid)) goto exit_free; @@ -479,27 +449,10 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, case AUDIT_SGID: case AUDIT_FSGID: case AUDIT_OBJ_GID: - /* bit ops not implemented for gid comparisons */ - if (f->op == Audit_bitmask || f->op == Audit_bittest) - goto exit_free; - f->gid = make_kgid(current_user_ns(), f->val); if (!gid_valid(f->gid)) goto exit_free; break; - case AUDIT_PID: - case AUDIT_PERS: - case AUDIT_MSGTYPE: - case AUDIT_PPID: - case AUDIT_DEVMAJOR: - case AUDIT_DEVMINOR: - case AUDIT_EXIT: - case AUDIT_SUCCESS: - case AUDIT_ARG0: - case AUDIT_ARG1: - case AUDIT_ARG2: - case AUDIT_ARG3: - break; case AUDIT_ARCH: entry->rule.arch_f = f; break; @@ -570,20 +523,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, entry->rule.buflen += f->val; entry->rule.filterkey = str; break; - case AUDIT_PERM: - if (f->val & ~15) - goto exit_free; - break; - case AUDIT_FILETYPE: - if (f->val & ~S_IFMT) - goto exit_free; - break; - case AUDIT_FIELD_COMPARE: - if (f->val > AUDIT_MAX_FIELD_COMPARE) - goto exit_free; - break; - default: - goto exit_free; } } @@ -613,36 +552,6 @@ static inline size_t audit_pack_string(void **bufp, const char *str) return len; } -/* Translate kernel rule respresentation to struct audit_rule. - * Exists for backward compatibility with userspace. 
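The audit_data_to_entry() change above quietly rewrites the legacy idiom "loginuid == 4294967295" (userspace's old way of asking whether a loginuid was ever set) into the explicit AUDIT_LOGINUID_SET test before audit_field_valid() checks the field. A small sketch of that translation; the numeric field identifiers below are illustrative placeholders, not the real AUDIT_* values.

#include <stdint.h>
#include <stdio.h>

/* hypothetical field identifiers and operators for illustration only */
enum { FIELD_LOGINUID = 9, FIELD_LOGINUID_SET = 24 };
enum { OP_EQUAL, OP_NOT_EQUAL };

struct filter_field {
    int      type;
    int      op;
    uint32_t val;
};

/* Old userspace expressed "no loginuid set" as loginuid == (uid_t)-1.
 * Rewrite that into the explicit LOGINUID_SET == 0 test, keeping the
 * operator so "!= -1" becomes "LOGINUID_SET != 0". */
static void fixup_legacy_loginuid(struct filter_field *f)
{
    if (f->type == FIELD_LOGINUID && f->val == 4294967295u) {
        f->type = FIELD_LOGINUID_SET;
        f->val = 0;
    }
}

int main(void)
{
    struct filter_field f = { FIELD_LOGINUID, OP_EQUAL, 4294967295u };

    fixup_legacy_loginuid(&f);
    printf("type=%d op=%d val=%u\n", f.type, f.op, f.val);
    return 0;
}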
*/ -static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule) -{ - struct audit_rule *rule; - int i; - - rule = kzalloc(sizeof(*rule), GFP_KERNEL); - if (unlikely(!rule)) - return NULL; - - rule->flags = krule->flags | krule->listnr; - rule->action = krule->action; - rule->field_count = krule->field_count; - for (i = 0; i < rule->field_count; i++) { - rule->values[i] = krule->fields[i].val; - rule->fields[i] = krule->fields[i].type; - - if (krule->vers_ops == 1) { - if (krule->fields[i].op == Audit_not_equal) - rule->fields[i] |= AUDIT_NEGATE; - } else { - rule->fields[i] |= audit_ops[krule->fields[i].op]; - } - } - for (i = 0; i < AUDIT_BITMASK_SIZE; i++) rule->mask[i] = krule->mask[i]; - - return rule; -} - /* Translate kernel rule respresentation to struct audit_rule_data. */ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) { @@ -1055,35 +964,6 @@ out: return ret; } -/* List rules using struct audit_rule. Exists for backward - * compatibility with userspace. */ -static void audit_list(int pid, int seq, struct sk_buff_head *q) -{ - struct sk_buff *skb; - struct audit_krule *r; - int i; - - /* This is a blocking read, so use audit_filter_mutex instead of rcu - * iterator to sync with list writers. */ - for (i=0; i<AUDIT_NR_FILTERS; i++) { - list_for_each_entry(r, &audit_rules_list[i], list) { - struct audit_rule *rule; - - rule = audit_krule_to_rule(r); - if (unlikely(!rule)) - break; - skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1, - rule, sizeof(*rule)); - if (skb) - skb_queue_tail(q, skb); - kfree(rule); - } - } - skb = audit_make_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0); - if (skb) - skb_queue_tail(q, skb); -} - /* List rules using struct audit_rule_data. */ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q) { @@ -1113,11 +993,11 @@ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q) } /* Log rule additions and removals */ -static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid, - char *action, struct audit_krule *rule, - int res) +static void audit_log_rule_change(char *action, struct audit_krule *rule, int res) { struct audit_buffer *ab; + uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current)); + u32 sessionid = audit_get_sessionid(current); if (!audit_enabled) return; @@ -1125,18 +1005,8 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid, ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); if (!ab) return; - audit_log_format(ab, "auid=%u ses=%u", - from_kuid(&init_user_ns, loginuid), sessionid); - if (sid) { - char *ctx = NULL; - u32 len; - if (security_secid_to_secctx(sid, &ctx, &len)) - audit_log_format(ab, " ssid=%u", sid); - else { - audit_log_format(ab, " subj=%s", ctx); - security_release_secctx(ctx, len); - } - } + audit_log_format(ab, "auid=%u ses=%u" ,loginuid, sessionid); + audit_log_task_context(ab); audit_log_format(ab, " op="); audit_log_string(ab, action); audit_log_key(ab, rule->filterkey); @@ -1155,8 +1025,7 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid, * @sessionid: sessionid for netlink audit message * @sid: SE Linux Security ID of sender */ -int audit_receive_filter(int type, int pid, int seq, void *data, - size_t datasz, kuid_t loginuid, u32 sessionid, u32 sid) +int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz) { struct task_struct *tsk; struct audit_netlink_list *dest; @@ -1164,7 +1033,6 @@ int audit_receive_filter(int type, int pid, int seq, 
void *data, struct audit_entry *entry; switch (type) { - case AUDIT_LIST: case AUDIT_LIST_RULES: /* We can't just spew out the rules here because we might fill * the available socket buffer space and deadlock waiting for @@ -1179,10 +1047,7 @@ int audit_receive_filter(int type, int pid, int seq, void *data, skb_queue_head_init(&dest->q); mutex_lock(&audit_filter_mutex); - if (type == AUDIT_LIST) - audit_list(pid, seq, &dest->q); - else - audit_list_rules(pid, seq, &dest->q); + audit_list_rules(pid, seq, &dest->q); mutex_unlock(&audit_filter_mutex); tsk = kthread_run(audit_send_list, dest, "audit_send_list"); @@ -1192,35 +1057,23 @@ int audit_receive_filter(int type, int pid, int seq, void *data, err = PTR_ERR(tsk); } break; - case AUDIT_ADD: case AUDIT_ADD_RULE: - if (type == AUDIT_ADD) - entry = audit_rule_to_entry(data); - else - entry = audit_data_to_entry(data, datasz); + entry = audit_data_to_entry(data, datasz); if (IS_ERR(entry)) return PTR_ERR(entry); err = audit_add_rule(entry); - audit_log_rule_change(loginuid, sessionid, sid, "add rule", - &entry->rule, !err); - + audit_log_rule_change("add rule", &entry->rule, !err); if (err) audit_free_rule(entry); break; - case AUDIT_DEL: case AUDIT_DEL_RULE: - if (type == AUDIT_DEL) - entry = audit_rule_to_entry(data); - else - entry = audit_data_to_entry(data, datasz); + entry = audit_data_to_entry(data, datasz); if (IS_ERR(entry)) return PTR_ERR(entry); err = audit_del_rule(entry); - audit_log_rule_change(loginuid, sessionid, sid, "remove rule", - &entry->rule, !err); - + audit_log_rule_change("remove rule", &entry->rule, !err); audit_free_rule(entry); break; default: @@ -1358,7 +1211,7 @@ int audit_compare_dname_path(const char *dname, const char *path, int parentlen) return strncmp(p, dname, dlen); } -static int audit_filter_user_rules(struct audit_krule *rule, +static int audit_filter_user_rules(struct audit_krule *rule, int type, enum audit_state *state) { int i; @@ -1382,6 +1235,13 @@ static int audit_filter_user_rules(struct audit_krule *rule, result = audit_uid_comparator(audit_get_loginuid(current), f->op, f->uid); break; + case AUDIT_LOGINUID_SET: + result = audit_comparator(audit_loginuid_set(current), + f->op, f->val); + break; + case AUDIT_MSGTYPE: + result = audit_comparator(type, f->op, f->val); + break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: @@ -1408,7 +1268,7 @@ static int audit_filter_user_rules(struct audit_krule *rule, return 1; } -int audit_filter_user(void) +int audit_filter_user(int type) { enum audit_state state = AUDIT_DISABLED; struct audit_entry *e; @@ -1416,7 +1276,7 @@ int audit_filter_user(void) rcu_read_lock(); list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) { - if (audit_filter_user_rules(&e->rule, &state)) { + if (audit_filter_user_rules(&e->rule, type, &state)) { if (state == AUDIT_DISABLED) ret = 0; break; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index c68229411a7c..3c8a601324a2 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -76,11 +76,6 @@ #define AUDITSC_SUCCESS 1 #define AUDITSC_FAILURE 2 -/* AUDIT_NAMES is the number of slots we reserve in the audit_context - * for saving names from getname(). If we get more names we will allocate - * a name dynamically and also add those to the list anchored by names_list. 
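audit_filter_user() now passes the message type down so AUDIT_MSGTYPE (and AUDIT_LOGINUID_SET) can be matched per rule. The underlying rule evaluation is a plain AND over all fields: one failed comparison and the rule does not fire. A compact, self-contained C sketch of that evaluation loop, with invented field kinds and an invented event struct; it mirrors the shape of the kernel loop, not its actual types.

#include <stdbool.h>
#include <stdio.h>

enum field_kind { F_PID, F_UID, F_MSGTYPE };
enum op { OP_EQ, OP_NE };

struct field { enum field_kind kind; enum op op; unsigned val; };
struct rule  { int nfields; struct field f[4]; bool enabled_action; };

struct event { unsigned pid, uid, msgtype; };

static bool compare(unsigned lhs, enum op op, unsigned rhs)
{
    return op == OP_EQ ? lhs == rhs : lhs != rhs;
}

/* Every field must match for the rule to fire: a single failed
 * comparison makes the whole rule a non-match. */
static bool rule_matches(const struct rule *r, const struct event *ev)
{
    for (int i = 0; i < r->nfields; i++) {
        unsigned lhs;

        switch (r->f[i].kind) {
        case F_PID:     lhs = ev->pid;     break;
        case F_UID:     lhs = ev->uid;     break;
        case F_MSGTYPE: lhs = ev->msgtype; break;
        default:        return false;
        }
        if (!compare(lhs, r->f[i].op, r->f[i].val))
            return false;
    }
    return true;
}

int main(void)
{
    struct rule r = { 2,
        { { F_UID, OP_EQ, 1000 }, { F_MSGTYPE, OP_EQ, 7 } },
        false /* e.g. "never" -> suppress the record */ };
    struct event ev = { 4242, 1000, 7 };   /* arbitrary example values */

    printf("match=%d action=%d\n", rule_matches(&r, &ev), r.enabled_action);
    return 0;
}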
*/ -#define AUDIT_NAMES 5 - /* no execve audit message should be longer than this (userspace limits) */ #define MAX_EXECVE_AUDIT_LEN 7500 @@ -90,44 +85,6 @@ int audit_n_rules; /* determines whether we collect data for signals sent */ int audit_signals; -struct audit_cap_data { - kernel_cap_t permitted; - kernel_cap_t inheritable; - union { - unsigned int fE; /* effective bit of a file capability */ - kernel_cap_t effective; /* effective set of a process */ - }; -}; - -/* When fs/namei.c:getname() is called, we store the pointer in name and - * we don't let putname() free it (instead we free all of the saved - * pointers at syscall exit time). - * - * Further, in fs/namei.c:path_lookup() we store the inode and device. - */ -struct audit_names { - struct list_head list; /* audit_context->names_list */ - struct filename *name; - unsigned long ino; - dev_t dev; - umode_t mode; - kuid_t uid; - kgid_t gid; - dev_t rdev; - u32 osid; - struct audit_cap_data fcap; - unsigned int fcap_ver; - int name_len; /* number of name's characters to log */ - unsigned char type; /* record type */ - bool name_put; /* call __putname() for this name */ - /* - * This was an allocated audit_names and not from the array of - * names allocated in the task audit context. Thus this name - * should be freed on syscall exit - */ - bool should_free; -}; - struct audit_aux_data { struct audit_aux_data *next; int type; @@ -175,106 +132,6 @@ struct audit_tree_refs { struct audit_chunk *c[31]; }; -/* The per-task audit context. */ -struct audit_context { - int dummy; /* must be the first element */ - int in_syscall; /* 1 if task is in a syscall */ - enum audit_state state, current_state; - unsigned int serial; /* serial number for record */ - int major; /* syscall number */ - struct timespec ctime; /* time of syscall entry */ - unsigned long argv[4]; /* syscall arguments */ - long return_code;/* syscall return code */ - u64 prio; - int return_valid; /* return code is valid */ - /* - * The names_list is the list of all audit_names collected during this - * syscall. The first AUDIT_NAMES entries in the names_list will - * actually be from the preallocated_names array for performance - * reasons. Except during allocation they should never be referenced - * through the preallocated_names array and should only be found/used - * by running the names_list. 
- */ - struct audit_names preallocated_names[AUDIT_NAMES]; - int name_count; /* total records in names_list */ - struct list_head names_list; /* anchor for struct audit_names->list */ - char * filterkey; /* key for rule that triggered record */ - struct path pwd; - struct audit_aux_data *aux; - struct audit_aux_data *aux_pids; - struct sockaddr_storage *sockaddr; - size_t sockaddr_len; - /* Save things to print about task_struct */ - pid_t pid, ppid; - kuid_t uid, euid, suid, fsuid; - kgid_t gid, egid, sgid, fsgid; - unsigned long personality; - int arch; - - pid_t target_pid; - kuid_t target_auid; - kuid_t target_uid; - unsigned int target_sessionid; - u32 target_sid; - char target_comm[TASK_COMM_LEN]; - - struct audit_tree_refs *trees, *first_trees; - struct list_head killed_trees; - int tree_count; - - int type; - union { - struct { - int nargs; - long args[6]; - } socketcall; - struct { - kuid_t uid; - kgid_t gid; - umode_t mode; - u32 osid; - int has_perm; - uid_t perm_uid; - gid_t perm_gid; - umode_t perm_mode; - unsigned long qbytes; - } ipc; - struct { - mqd_t mqdes; - struct mq_attr mqstat; - } mq_getsetattr; - struct { - mqd_t mqdes; - int sigev_signo; - } mq_notify; - struct { - mqd_t mqdes; - size_t msg_len; - unsigned int msg_prio; - struct timespec abs_timeout; - } mq_sendrecv; - struct { - int oflag; - umode_t mode; - struct mq_attr attr; - } mq_open; - struct { - pid_t pid; - struct audit_cap_data cap; - } capset; - struct { - int fd; - int flags; - } mmap; - }; - int fds[2]; - -#if AUDIT_DEBUG - int put_count; - int ino_count; -#endif -}; - static inline int open_arg(int flags, int mask) { int n = ACC_MODE(flags); @@ -633,9 +490,23 @@ static int audit_filter_rules(struct task_struct *tsk, break; case AUDIT_GID: result = audit_gid_comparator(cred->gid, f->op, f->gid); + if (f->op == Audit_equal) { + if (!result) + result = in_group_p(f->gid); + } else if (f->op == Audit_not_equal) { + if (result) + result = !in_group_p(f->gid); + } break; case AUDIT_EGID: result = audit_gid_comparator(cred->egid, f->op, f->gid); + if (f->op == Audit_equal) { + if (!result) + result = in_egroup_p(f->gid); + } else if (f->op == Audit_not_equal) { + if (result) + result = !in_egroup_p(f->gid); + } break; case AUDIT_SGID: result = audit_gid_comparator(cred->sgid, f->op, f->gid); @@ -742,6 +613,9 @@ static int audit_filter_rules(struct task_struct *tsk, if (ctx) result = audit_uid_comparator(tsk->loginuid, f->op, f->uid); break; + case AUDIT_LOGINUID_SET: + result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val); + break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: @@ -987,6 +861,8 @@ static inline void audit_free_names(struct audit_context *context) #if AUDIT_DEBUG == 2 if (context->put_count + context->ino_count != context->name_count) { + int i = 0; + printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d" " name_count=%d put_count=%d" " ino_count=%d [NOT freeing]\n", @@ -995,7 +871,7 @@ static inline void audit_free_names(struct audit_context *context) context->name_count, context->put_count, context->ino_count); list_for_each_entry(n, &context->names_list, list) { - printk(KERN_ERR "names[%d] = %p = %s\n", i, + printk(KERN_ERR "names[%d] = %p = %s\n", i++, n->name, n->name->name ?: "(null)"); } dump_stack(); @@ -1010,7 +886,7 @@ static inline void audit_free_names(struct audit_context *context) list_for_each_entry_safe(n, next, &context->names_list, list) { list_del(&n->list); if (n->name && n->name_put) - __putname(n->name); + final_putname(n->name); if 
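The AUDIT_GID/AUDIT_EGID change above makes an "equals" (or "not equals") group test also consider supplementary group membership via in_group_p()/in_egroup_p(), not just the primary gid. A userspace analogue using getgroups(); the 64-entry buffer is an arbitrary simplification and the helper names are invented.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

static bool in_supplementary_groups(gid_t gid)
{
    gid_t groups[64];
    int n = getgroups(64, groups);   /* n may be -1 if >64 groups; treated as no match */

    for (int i = 0; i < n; i++)
        if (groups[i] == gid)
            return true;
    return false;
}

/* "gid equals" now matches primary *or* supplementary membership;
 * "gid not equals" requires neither to match. */
static bool gid_matches(gid_t wanted, bool negate)
{
    bool result = (getgid() == wanted) || in_supplementary_groups(wanted);

    return negate ? !result : result;
}

int main(void)
{
    printf("member of gid 0: %d\n", gid_matches(0, false));
    return 0;
}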
(n->should_free) kfree(n); } @@ -1093,88 +969,6 @@ static inline void audit_free_context(struct audit_context *context) kfree(context); } -void audit_log_task_context(struct audit_buffer *ab) -{ - char *ctx = NULL; - unsigned len; - int error; - u32 sid; - - security_task_getsecid(current, &sid); - if (!sid) - return; - - error = security_secid_to_secctx(sid, &ctx, &len); - if (error) { - if (error != -EINVAL) - goto error_path; - return; - } - - audit_log_format(ab, " subj=%s", ctx); - security_release_secctx(ctx, len); - return; - -error_path: - audit_panic("error in audit_log_task_context"); - return; -} - -EXPORT_SYMBOL(audit_log_task_context); - -void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) -{ - const struct cred *cred; - char name[sizeof(tsk->comm)]; - struct mm_struct *mm = tsk->mm; - char *tty; - - if (!ab) - return; - - /* tsk == current */ - cred = current_cred(); - - spin_lock_irq(&tsk->sighand->siglock); - if (tsk->signal && tsk->signal->tty) - tty = tsk->signal->tty->name; - else - tty = "(none)"; - spin_unlock_irq(&tsk->sighand->siglock); - - - audit_log_format(ab, - " ppid=%ld pid=%d auid=%u uid=%u gid=%u" - " euid=%u suid=%u fsuid=%u" - " egid=%u sgid=%u fsgid=%u ses=%u tty=%s", - sys_getppid(), - tsk->pid, - from_kuid(&init_user_ns, tsk->loginuid), - from_kuid(&init_user_ns, cred->uid), - from_kgid(&init_user_ns, cred->gid), - from_kuid(&init_user_ns, cred->euid), - from_kuid(&init_user_ns, cred->suid), - from_kuid(&init_user_ns, cred->fsuid), - from_kgid(&init_user_ns, cred->egid), - from_kgid(&init_user_ns, cred->sgid), - from_kgid(&init_user_ns, cred->fsgid), - tsk->sessionid, tty); - - get_task_comm(name, tsk); - audit_log_format(ab, " comm="); - audit_log_untrustedstring(ab, name); - - if (mm) { - down_read(&mm->mmap_sem); - if (mm->exe_file) - audit_log_d_path(ab, " exe=", &mm->exe_file->f_path); - up_read(&mm->mmap_sem); - } - audit_log_task_context(ab); -} - -EXPORT_SYMBOL(audit_log_task_info); - static int audit_log_pid_context(struct audit_context *context, pid_t pid, kuid_t auid, kuid_t uid, unsigned int sessionid, u32 sid, char *comm) @@ -1191,12 +985,14 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid, audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid, from_kuid(&init_user_ns, auid), from_kuid(&init_user_ns, uid), sessionid); - if (security_secid_to_secctx(sid, &ctx, &len)) { - audit_log_format(ab, " obj=(none)"); - rc = 1; - } else { - audit_log_format(ab, " obj=%s", ctx); - security_release_secctx(ctx, len); + if (sid) { + if (security_secid_to_secctx(sid, &ctx, &len)) { + audit_log_format(ab, " obj=(none)"); + rc = 1; + } else { + audit_log_format(ab, " obj=%s", ctx); + security_release_secctx(ctx, len); + } } audit_log_format(ab, " ocomm="); audit_log_untrustedstring(ab, comm); @@ -1390,35 +1186,6 @@ static void audit_log_execve_info(struct audit_context *context, kfree(buf); } -static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap) -{ - int i; - - audit_log_format(ab, " %s=", prefix); - CAP_FOR_EACH_U32(i) { - audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]); - } -} - -static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name) -{ - kernel_cap_t *perm = &name->fcap.permitted; - kernel_cap_t *inh = &name->fcap.inheritable; - int log = 0; - - if (!cap_isclear(*perm)) { - audit_log_cap(ab, "cap_fp", perm); - log = 1; - } - if (!cap_isclear(*inh)) { - audit_log_cap(ab, "cap_fi", inh); - log = 1; - } - - if (log) - 
audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver); -} - static void show_special(struct audit_context *context, int *call_panic) { struct audit_buffer *ab; @@ -1516,68 +1283,6 @@ static void show_special(struct audit_context *context, int *call_panic) audit_log_end(ab); } -static void audit_log_name(struct audit_context *context, struct audit_names *n, - int record_num, int *call_panic) -{ - struct audit_buffer *ab; - ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH); - if (!ab) - return; /* audit_panic has been called */ - - audit_log_format(ab, "item=%d", record_num); - - if (n->name) { - switch (n->name_len) { - case AUDIT_NAME_FULL: - /* log the full path */ - audit_log_format(ab, " name="); - audit_log_untrustedstring(ab, n->name->name); - break; - case 0: - /* name was specified as a relative path and the - * directory component is the cwd */ - audit_log_d_path(ab, " name=", &context->pwd); - break; - default: - /* log the name's directory component */ - audit_log_format(ab, " name="); - audit_log_n_untrustedstring(ab, n->name->name, - n->name_len); - } - } else - audit_log_format(ab, " name=(null)"); - - if (n->ino != (unsigned long)-1) { - audit_log_format(ab, " inode=%lu" - " dev=%02x:%02x mode=%#ho" - " ouid=%u ogid=%u rdev=%02x:%02x", - n->ino, - MAJOR(n->dev), - MINOR(n->dev), - n->mode, - from_kuid(&init_user_ns, n->uid), - from_kgid(&init_user_ns, n->gid), - MAJOR(n->rdev), - MINOR(n->rdev)); - } - if (n->osid != 0) { - char *ctx = NULL; - u32 len; - if (security_secid_to_secctx( - n->osid, &ctx, &len)) { - audit_log_format(ab, " osid=%u", n->osid); - *call_panic = 2; - } else { - audit_log_format(ab, " obj=%s", ctx); - security_release_secctx(ctx, len); - } - } - - audit_log_fcaps(ab, n); - - audit_log_end(ab); -} - static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { int i, call_panic = 0; @@ -1695,7 +1400,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts i = 0; list_for_each_entry(n, &context->names_list, list) - audit_log_name(context, n, i++, &call_panic); + audit_log_name(context, n, NULL, i++, &call_panic); /* Send end of event record to help user space know we are finished */ ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); @@ -2030,18 +1735,18 @@ void audit_putname(struct filename *name) BUG_ON(!context); if (!context->in_syscall) { #if AUDIT_DEBUG == 2 - printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n", + printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n", __FILE__, __LINE__, context->serial, name); if (context->name_count) { struct audit_names *n; - int i; + int i = 0; list_for_each_entry(n, &context->names_list, list) - printk(KERN_ERR "name[%d] = %p = %s\n", i, + printk(KERN_ERR "name[%d] = %p = %s\n", i++, n->name, n->name->name ?: "(null)"); } #endif - __putname(name); + final_putname(name); } #if AUDIT_DEBUG else { @@ -2060,41 +1765,6 @@ void audit_putname(struct filename *name) #endif } -static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry) -{ - struct cpu_vfs_cap_data caps; - int rc; - - if (!dentry) - return 0; - - rc = get_vfs_caps_from_disk(dentry, &caps); - if (rc) - return rc; - - name->fcap.permitted = caps.permitted; - name->fcap.inheritable = caps.inheritable; - name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); - name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; - - return 0; -} - - -/* Copy inode data into an audit_names. 
*/ -static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry, - const struct inode *inode) -{ - name->ino = inode->i_ino; - name->dev = inode->i_sb->s_dev; - name->mode = inode->i_mode; - name->uid = inode->i_uid; - name->gid = inode->i_gid; - name->rdev = inode->i_rdev; - security_inode_getsecid(inode, &name->osid); - audit_copy_fcaps(name, dentry); -} - /** * __audit_inode - store the inode and device from a lookup * @name: name being audited @@ -2303,7 +1973,7 @@ int audit_set_loginuid(kuid_t loginuid) unsigned int sessionid; #ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE - if (uid_valid(task->loginuid)) + if (audit_loginuid_set(task)) return -EPERM; #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */ if (!capable(CAP_AUDIT_CONTROL)) @@ -2471,17 +2141,20 @@ int __audit_bprm(struct linux_binprm *bprm) /** * audit_socketcall - record audit data for sys_socketcall - * @nargs: number of args + * @nargs: number of args, which should not be more than AUDITSC_ARGS. * @args: args array * */ -void __audit_socketcall(int nargs, unsigned long *args) +int __audit_socketcall(int nargs, unsigned long *args) { struct audit_context *context = current->audit_context; + if (nargs <= 0 || nargs > AUDITSC_ARGS || !args) + return -EINVAL; context->type = AUDIT_SOCKETCALL; context->socketcall.nargs = nargs; memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long)); + return 0; } /** diff --git a/kernel/params.c b/kernel/params.c index ed35345be536..53b958fcd639 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -613,10 +613,13 @@ static __modinit int add_sysfs_param(struct module_kobject *mk, sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1), GFP_KERNEL); if (!new) { - kfree(mk->mp); + kfree(attrs); err = -ENOMEM; goto fail; } + /* Despite looking like the typical realloc() bug, this is safe. + * We *want* the old 'attrs' to be freed either way, and we'll store + * the new one in the success case. */ attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL); if (!attrs) { err = -ENOMEM; diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index bfd6787b355a..7078052284fd 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -200,6 +200,7 @@ cond_syscall(sys_perf_event_open); /* fanotify! 
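__audit_socketcall() now returns an error instead of trusting its caller: nargs is validated against AUDITSC_ARGS before anything is copied into the fixed-size args array. The same validate-then-copy shape in plain C, with MAX_ARGS standing in for AUDITSC_ARGS and call_record standing in for the audit context union member:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ARGS 6   /* stands in for AUDITSC_ARGS */

struct call_record {
    int nargs;
    unsigned long args[MAX_ARGS];
};

/* Validate before copying into the fixed-size buffer; a bad nargs must be
 * rejected rather than silently truncated or allowed to overflow. */
static int record_socketcall(struct call_record *rec, int nargs,
                             const unsigned long *args)
{
    if (nargs <= 0 || nargs > MAX_ARGS || !args)
        return -EINVAL;

    rec->nargs = nargs;
    memcpy(rec->args, args, nargs * sizeof(unsigned long));
    return 0;
}

int main(void)
{
    struct call_record rec;
    unsigned long a[3] = { 2, 1, 6 };

    printf("ok=%d\n", record_socketcall(&rec, 3, a));
    printf("too many=%d\n", record_socketcall(&rec, 99, a));
    return 0;
}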
*/ cond_syscall(sys_fanotify_init); cond_syscall(sys_fanotify_mark); +cond_syscall(compat_sys_fanotify_mark); /* open by handle */ cond_syscall(sys_name_to_handle_at); diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index ebf72358e86a..aea4a9ea6fc8 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -15,6 +15,7 @@ #include <linux/netdevice.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/compat.h> #ifdef CONFIG_SYSCTL_SYSCALL @@ -1447,7 +1448,6 @@ SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args) #ifdef CONFIG_COMPAT -#include <asm/compat.h> struct compat_sysctl_args { compat_uptr_t name; @@ -1459,7 +1459,7 @@ struct compat_sysctl_args { compat_ulong_t __unused[4]; }; -asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args) +COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args) { struct compat_sysctl_args tmp; compat_size_t __user *compat_oldlenp; diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 5e9efd4b83a4..015f85aaca08 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -71,6 +71,7 @@ config TRACE_CLOCK config RING_BUFFER bool select TRACE_CLOCK + select IRQ_WORK config FTRACE_NMI_ENTER bool @@ -107,7 +108,6 @@ config TRACING select BINARY_PRINTF select EVENT_TRACING select TRACE_CLOCK - select IRQ_WORK config GENERIC_TRACER bool diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 8a5c017bb50c..b549b0f5b977 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -64,6 +64,13 @@ #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL) +#ifdef CONFIG_DYNAMIC_FTRACE +#define INIT_REGEX_LOCK(opsname) \ + .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock), +#else +#define INIT_REGEX_LOCK(opsname) +#endif + static struct ftrace_ops ftrace_list_end __read_mostly = { .func = ftrace_stub, .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, @@ -131,6 +138,16 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); while (likely(op = rcu_dereference_raw((op)->next)) && \ unlikely((op) != &ftrace_list_end)) +static inline void ftrace_ops_init(struct ftrace_ops *ops) +{ +#ifdef CONFIG_DYNAMIC_FTRACE + if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { + mutex_init(&ops->regex_lock); + ops->flags |= FTRACE_OPS_FL_INITIALIZED; + } +#endif +} + /** * ftrace_nr_registered_ops - return number of ops registered * @@ -907,7 +924,8 @@ static void unregister_ftrace_profiler(void) #else static struct ftrace_ops ftrace_profile_ops __read_mostly = { .func = function_profile_call, - .flags = FTRACE_OPS_FL_RECURSION_SAFE, + .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, + INIT_REGEX_LOCK(ftrace_profile_ops) }; static int register_ftrace_profiler(void) @@ -1103,11 +1121,10 @@ static struct ftrace_ops global_ops = { .func = ftrace_stub, .notrace_hash = EMPTY_HASH, .filter_hash = EMPTY_HASH, - .flags = FTRACE_OPS_FL_RECURSION_SAFE, + .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, + INIT_REGEX_LOCK(global_ops) }; -static DEFINE_MUTEX(ftrace_regex_lock); - struct ftrace_page { struct ftrace_page *next; struct dyn_ftrace *records; @@ -1247,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash) void ftrace_free_filter(struct ftrace_ops *ops) { + ftrace_ops_init(ops); free_ftrace_hash(ops->filter_hash); free_ftrace_hash(ops->notrace_hash); } @@ -2441,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || 
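INIT_REGEX_LOCK() plus ftrace_ops_init() replace the single global ftrace_regex_lock with a per-ftrace_ops mutex: statically defined ops get the lock at build time, dynamically created ones get it lazily the first time they are handed to the API, guarded by the FTRACE_OPS_FL_INITIALIZED flag. A userspace sketch of that per-object lazy-init pattern; like the kernel code, it assumes first use of a given object is not racy.

#include <pthread.h>
#include <stdio.h>

#define FL_INITIALIZED 0x1

struct ops {
    unsigned long flags;
    pthread_mutex_t lock;
};

/* Static objects get their lock set up at build time, like INIT_REGEX_LOCK. */
#define OPS_STATIC_INIT { FL_INITIALIZED, PTHREAD_MUTEX_INITIALIZER }

/* Dynamically created objects are initialized lazily, the first time they
 * are handed to the API, like ftrace_ops_init(). */
static void ops_init(struct ops *ops)
{
    if (!(ops->flags & FL_INITIALIZED)) {
        pthread_mutex_init(&ops->lock, NULL);
        ops->flags |= FL_INITIALIZED;
    }
}

static struct ops static_ops = OPS_STATIC_INIT;

int main(void)
{
    struct ops dynamic_ops = { 0 };

    ops_init(&static_ops);   /* no-op: already initialized */
    ops_init(&dynamic_ops);  /* sets up the per-object lock */

    pthread_mutex_lock(&dynamic_ops.lock);
    printf("per-object lock held\n");
    pthread_mutex_unlock(&dynamic_ops.lock);
    return 0;
}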
((iter->flags & FTRACE_ITER_ENABLED) && - !(rec->flags & ~FTRACE_FL_MASK))) { + !(rec->flags & FTRACE_FL_ENABLED))) { rec = NULL; goto retry; @@ -2624,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, struct ftrace_hash *hash; int ret = 0; + ftrace_ops_init(ops); + if (unlikely(ftrace_disabled)) return -ENODEV; @@ -2636,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, return -ENOMEM; } + iter->ops = ops; + iter->flags = flag; + + mutex_lock(&ops->regex_lock); + if (flag & FTRACE_ITER_NOTRACE) hash = ops->notrace_hash; else hash = ops->filter_hash; - iter->ops = ops; - iter->flags = flag; - if (file->f_mode & FMODE_WRITE) { - mutex_lock(&ftrace_lock); iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); - mutex_unlock(&ftrace_lock); - if (!iter->hash) { trace_parser_put(&iter->parser); kfree(iter); - return -ENOMEM; + ret = -ENOMEM; + goto out_unlock; } } - mutex_lock(&ftrace_regex_lock); - if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) ftrace_filter_reset(iter->hash); @@ -2677,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, } } else file->private_data = iter; - mutex_unlock(&ftrace_regex_lock); + + out_unlock: + mutex_unlock(&ops->regex_lock); return ret; } @@ -2910,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, static struct ftrace_ops trace_probe_ops __read_mostly = { .func = function_trace_probe_call, + .flags = FTRACE_OPS_FL_INITIALIZED, + INIT_REGEX_LOCK(trace_probe_ops) }; static int ftrace_probe_registered; @@ -2919,8 +2941,12 @@ static void __enable_ftrace_function_probe(void) int ret; int i; - if (ftrace_probe_registered) + if (ftrace_probe_registered) { + /* still need to update the function call sites */ + if (ftrace_enabled) + ftrace_run_update_code(FTRACE_UPDATE_CALLS); return; + } for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { struct hlist_head *hhd = &ftrace_func_hash[i]; @@ -2990,19 +3016,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, if (WARN_ON(not)) return -EINVAL; - mutex_lock(&ftrace_lock); + mutex_lock(&trace_probe_ops.regex_lock); hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); if (!hash) { count = -ENOMEM; - goto out_unlock; + goto out; } if (unlikely(ftrace_disabled)) { count = -ENODEV; - goto out_unlock; + goto out; } + mutex_lock(&ftrace_lock); + do_for_each_ftrace_rec(pg, rec) { if (!ftrace_match_record(rec, NULL, search, len, type)) @@ -3056,6 +3084,8 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, out_unlock: mutex_unlock(&ftrace_lock); + out: + mutex_unlock(&trace_probe_ops.regex_lock); free_ftrace_hash(hash); return count; @@ -3095,7 +3125,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, return; } - mutex_lock(&ftrace_lock); + mutex_lock(&trace_probe_ops.regex_lock); hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); if (!hash) @@ -3133,6 +3163,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, list_add(&entry->free_list, &free_list); } } + mutex_lock(&ftrace_lock); __disable_ftrace_function_probe(); /* * Remove after the disable is called. 
Otherwise, if the last @@ -3144,9 +3175,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, list_del(&entry->free_list); ftrace_free_entry(entry); } + mutex_unlock(&ftrace_lock); out_unlock: - mutex_unlock(&ftrace_lock); + mutex_unlock(&trace_probe_ops.regex_lock); free_ftrace_hash(hash); } @@ -3256,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, if (!cnt) return 0; - mutex_lock(&ftrace_regex_lock); - - ret = -ENODEV; - if (unlikely(ftrace_disabled)) - goto out_unlock; - if (file->f_mode & FMODE_READ) { struct seq_file *m = file->private_data; iter = m->private; } else iter = file->private_data; + if (unlikely(ftrace_disabled)) + return -ENODEV; + + /* iter->hash is a local copy, so we don't need regex_lock */ + parser = &iter->parser; read = trace_get_user(parser, ubuf, cnt, ppos); @@ -3276,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, ret = ftrace_process_regex(iter->hash, parser->buffer, parser->idx, enable); trace_parser_clear(parser); - if (ret) - goto out_unlock; + if (ret < 0) + goto out; } ret = read; -out_unlock: - mutex_unlock(&ftrace_regex_lock); - + out: return ret; } @@ -3335,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, if (unlikely(ftrace_disabled)) return -ENODEV; + mutex_lock(&ops->regex_lock); + if (enable) orig_hash = &ops->filter_hash; else orig_hash = &ops->notrace_hash; hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); - if (!hash) - return -ENOMEM; + if (!hash) { + ret = -ENOMEM; + goto out_regex_unlock; + } - mutex_lock(&ftrace_regex_lock); if (reset) ftrace_filter_reset(hash); if (buf && !ftrace_match_records(hash, buf, len)) { @@ -3366,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, mutex_unlock(&ftrace_lock); out_regex_unlock: - mutex_unlock(&ftrace_regex_lock); + mutex_unlock(&ops->regex_lock); free_ftrace_hash(hash); return ret; @@ -3392,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset) { + ftrace_ops_init(ops); return ftrace_set_addr(ops, ip, remove, reset, 1); } EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); @@ -3416,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset) { + ftrace_ops_init(ops); return ftrace_set_regex(ops, buf, len, reset, 1); } EXPORT_SYMBOL_GPL(ftrace_set_filter); @@ -3434,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter); int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, int len, int reset) { + ftrace_ops_init(ops); return ftrace_set_regex(ops, buf, len, reset, 0); } EXPORT_SYMBOL_GPL(ftrace_set_notrace); @@ -3524,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) { char *func; + ftrace_ops_init(ops); + while (buf) { func = strsep(&buf, ","); ftrace_set_regex(ops, func, strlen(func), 0, enable); @@ -3551,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file) int filter_hash; int ret; - mutex_lock(&ftrace_regex_lock); if (file->f_mode & FMODE_READ) { iter = m->private; - seq_release(inode, file); } else iter = file->private_data; @@ -3567,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file) trace_parser_put(parser); + mutex_lock(&iter->ops->regex_lock); + if (file->f_mode & FMODE_WRITE) { filter_hash = !!(iter->flags & 
FTRACE_ITER_FILTER); @@ -3584,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file) mutex_unlock(&ftrace_lock); } + + mutex_unlock(&iter->ops->regex_lock); free_ftrace_hash(iter->hash); kfree(iter); - mutex_unlock(&ftrace_regex_lock); return 0; } @@ -4126,7 +4164,8 @@ void __init ftrace_init(void) static struct ftrace_ops global_ops = { .func = ftrace_stub, - .flags = FTRACE_OPS_FL_RECURSION_SAFE, + .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, + INIT_REGEX_LOCK(global_ops) }; static int __init ftrace_nodyn_init(void) @@ -4180,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, } static struct ftrace_ops control_ops = { - .func = ftrace_ops_control_func, - .flags = FTRACE_OPS_FL_RECURSION_SAFE, + .func = ftrace_ops_control_func, + .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, + INIT_REGEX_LOCK(control_ops) }; static inline void @@ -4539,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops) { int ret = -1; + ftrace_ops_init(ops); + mutex_lock(&ftrace_lock); ret = __register_ftrace_function(ops); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 53582e982e51..7a0cf68027cc 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, switch (enable) { case 0: /* - * When soft_disable is set and enable is cleared, we want + * When soft_disable is set and enable is cleared, the sm_ref + * reference counter is decremented. If it reaches 0, we want * to clear the SOFT_DISABLED flag but leave the event in the * state that it was. That is, if the event was enabled and * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED @@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. 
*/ if (soft_disable) { + if (atomic_dec_return(&file->sm_ref) > 0) + break; disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); } else @@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, */ if (!soft_disable) clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); - else + else { + if (atomic_inc_return(&file->sm_ref) > 1) + break; set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); + } if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { @@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, if (file->flags & FTRACE_EVENT_FL_ENABLED) { if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED) buf = "0*\n"; + else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) + buf = "1*\n"; else buf = "1\n"; } else @@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod) return 0; } +static struct ftrace_event_file * +trace_create_new_event(struct ftrace_event_call *call, + struct trace_array *tr) +{ + struct ftrace_event_file *file; + + file = kmem_cache_alloc(file_cachep, GFP_TRACE); + if (!file) + return NULL; + + file->event_call = call; + file->tr = tr; + atomic_set(&file->sm_ref, 0); + list_add(&file->list, &tr->events); + + return file; +} + /* Add an event to a trace directory */ static int __trace_add_new_event(struct ftrace_event_call *call, @@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call, { struct ftrace_event_file *file; - file = kmem_cache_alloc(file_cachep, GFP_TRACE); + file = trace_create_new_event(call, tr); if (!file) return -ENOMEM; - file->event_call = call; - file->tr = tr; - list_add(&file->list, &tr->events); - return event_create_dir(tr->event_dir, file, id, enable, filter, format); } @@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call, { struct ftrace_event_file *file; - file = kmem_cache_alloc(file_cachep, GFP_TRACE); + file = trace_create_new_event(call, tr); if (!file) return -ENOMEM; - file->event_call = call; - file->tr = tr; - list_add(&file->list, &tr->events); - return 0; } @@ -2061,8 +2079,18 @@ event_enable_func(struct ftrace_hash *hash, if (ret < 0) goto out_put; ret = register_ftrace_function_probe(glob, ops, data); - if (!ret) + /* + * The above returns on success the # of functions enabled, + * but if it didn't find any functions it returns zero. + * Consider no functions a failure too. 
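The sm_ref counter introduced above lets several callers put an event file into soft mode, while only the first reference sets the SOFT_MODE bit and only the last one clears it. A small C11 sketch of that first-in/last-out counting, with illustrative names and the kernel's surrounding locking omitted:

#include <stdatomic.h>
#include <stdio.h>

#define FL_SOFT_MODE	0x1

struct event_file {
	unsigned int	flags;		/* updated under the caller's lock */
	atomic_int	sm_ref;		/* soft-mode reference count */
};

static void soft_enable(struct event_file *file)
{
	/* Only the 0 -> 1 transition changes the flag. */
	if (atomic_fetch_add(&file->sm_ref, 1) == 0)
		file->flags |= FL_SOFT_MODE;
}

static void soft_disable(struct event_file *file)
{
	/* Only the 1 -> 0 transition changes the flag. */
	if (atomic_fetch_sub(&file->sm_ref, 1) == 1)
		file->flags &= ~FL_SOFT_MODE;
}

int main(void)
{
	struct event_file f = { .flags = 0 };

	atomic_init(&f.sm_ref, 0);
	soft_enable(&f);	/* first user: flag set    */
	soft_enable(&f);	/* second user: no change  */
	soft_disable(&f);	/* one user still left     */
	printf("soft mode: %d\n", !!(f.flags & FL_SOFT_MODE));	/* 1 */
	soft_disable(&f);	/* last user: flag cleared */
	printf("soft mode: %d\n", !!(f.flags & FL_SOFT_MODE));	/* 0 */
	return 0;
}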
+ */ + if (!ret) { + ret = -ENOENT; + goto out_disable; + } else if (ret < 0) goto out_disable; + /* Just return zero, not the number of enabled functions */ + ret = 0; out: mutex_unlock(&event_mutex); return ret; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1865d5f76538..636d45fe69b3 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -27,7 +27,6 @@ /** * Kprobe event core functions */ - struct trace_probe { struct list_head list; struct kretprobe rp; /* Use rp.kp for kprobe use */ @@ -36,6 +35,7 @@ struct trace_probe { const char *symbol; /* symbol name */ struct ftrace_event_class class; struct ftrace_event_call call; + struct ftrace_event_file **files; ssize_t size; /* trace entry size */ unsigned int nr_args; struct probe_arg args[]; @@ -46,7 +46,7 @@ struct trace_probe { (sizeof(struct probe_arg) * (n))) -static __kprobes int trace_probe_is_return(struct trace_probe *tp) +static __kprobes bool trace_probe_is_return(struct trace_probe *tp) { return tp->rp.handler != NULL; } @@ -183,12 +183,57 @@ static struct trace_probe *find_trace_probe(const char *event, return NULL; } -/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */ -static int enable_trace_probe(struct trace_probe *tp, int flag) +static int trace_probe_nr_files(struct trace_probe *tp) +{ + struct ftrace_event_file **file = tp->files; + int ret = 0; + + if (file) + while (*(file++)) + ret++; + + return ret; +} + +static DEFINE_MUTEX(probe_enable_lock); + +/* + * Enable trace_probe + * if the file is NULL, enable "perf" handler, or enable "trace" handler. + */ +static int +enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) { int ret = 0; - tp->flags |= flag; + mutex_lock(&probe_enable_lock); + + if (file) { + struct ftrace_event_file **new, **old = tp->files; + int n = trace_probe_nr_files(tp); + + /* 1 is for new one and 1 is for stopper */ + new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *), + GFP_KERNEL); + if (!new) { + ret = -ENOMEM; + goto out_unlock; + } + memcpy(new, old, n * sizeof(struct ftrace_event_file *)); + new[n] = file; + /* The last one keeps a NULL */ + + rcu_assign_pointer(tp->files, new); + tp->flags |= TP_FLAG_TRACE; + + if (old) { + /* Make sure the probe is done with old files */ + synchronize_sched(); + kfree(old); + } + } else + tp->flags |= TP_FLAG_PROFILE; + if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) { if (trace_probe_is_return(tp)) @@ -197,19 +242,83 @@ static int enable_trace_probe(struct trace_probe *tp, int flag) ret = enable_kprobe(&tp->rp.kp); } + out_unlock: + mutex_unlock(&probe_enable_lock); + return ret; } -/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */ -static void disable_trace_probe(struct trace_probe *tp, int flag) +static int +trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) +{ + int i; + + if (tp->files) { + for (i = 0; tp->files[i]; i++) + if (tp->files[i] == file) + return i; + } + + return -1; +} + +/* + * Disable trace_probe + * if the file is NULL, disable "perf" handler, or disable "trace" handler. 
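enable_trace_probe() above grows a NULL-terminated array of event files, publishes it with rcu_assign_pointer(), and frees the old copy only after synchronize_sched() guarantees the kprobe handlers are done with it. A userspace approximation of that publish-then-reclaim scheme; wait_for_readers() is only a stand-in for the grace period, and every name below is hypothetical:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct file { const char *name; };

struct probe {
	struct file ** _Atomic files;	/* NULL-terminated array, or NULL */
};

static size_t nr_files(struct file **files)
{
	size_t n = 0;

	if (files)
		while (files[n])
			n++;
	return n;
}

/* No-op stand-in for an RCU grace period (synchronize_sched() in the
 * kernel); a no-op is only safe because this demo is single-threaded. */
static void wait_for_readers(void)
{
}

/* Writer: assumed to run under the probe's enable mutex (not shown). */
static int probe_add_file(struct probe *p, struct file *file)
{
	struct file **old = atomic_load(&p->files);
	size_t n = nr_files(old);
	struct file **new;

	/* n existing entries + the new one + the NULL stopper */
	new = calloc(n + 2, sizeof(*new));
	if (!new)
		return -1;
	if (n)
		memcpy(new, old, n * sizeof(*new));
	new[n] = file;

	/* Publish, then reclaim the old copy once readers are done. */
	atomic_store_explicit(&p->files, new, memory_order_release);
	wait_for_readers();
	free(old);
	return 0;
}

/* Reader: walks a consistent snapshot without taking any lock. */
static void probe_fire(struct probe *p)
{
	struct file **files =
		atomic_load_explicit(&p->files, memory_order_acquire);

	for (; files && *files; files++)
		printf("deliver event via %s\n", (*files)->name);
}

int main(void)
{
	struct probe p = { NULL };
	struct file a = { "trace" }, b = { "perf" };

	probe_add_file(&p, &a);
	probe_add_file(&p, &b);
	probe_fire(&p);
	return 0;
}

The extra NULL slot is the stopper the handlers test for, so readers never need to know the array length.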
+ */ +static int +disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) { - tp->flags &= ~flag; + int ret = 0; + + mutex_lock(&probe_enable_lock); + + if (file) { + struct ftrace_event_file **new, **old = tp->files; + int n = trace_probe_nr_files(tp); + int i, j; + + if (n == 0 || trace_probe_file_index(tp, file) < 0) { + ret = -EINVAL; + goto out_unlock; + } + + if (n == 1) { /* Remove the last file */ + tp->flags &= ~TP_FLAG_TRACE; + new = NULL; + } else { + new = kzalloc(n * sizeof(struct ftrace_event_file *), + GFP_KERNEL); + if (!new) { + ret = -ENOMEM; + goto out_unlock; + } + + /* This copy & check loop copies the NULL stopper too */ + for (i = 0, j = 0; j < n && i < n + 1; i++) + if (old[i] != file) + new[j++] = old[i]; + } + + rcu_assign_pointer(tp->files, new); + + /* Make sure the probe is done with old files */ + synchronize_sched(); + kfree(old); + } else + tp->flags &= ~TP_FLAG_PROFILE; + if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) { if (trace_probe_is_return(tp)) disable_kretprobe(&tp->rp); else disable_kprobe(&tp->rp.kp); } + + out_unlock: + mutex_unlock(&probe_enable_lock); + + return ret; } /* Internal register function - just handle k*probes and flags */ @@ -723,9 +832,10 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp, } /* Kprobe handler */ -static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) +static __kprobes void +__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, + struct ftrace_event_file *ftrace_file) { - struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct kprobe_trace_entry_head *entry; struct ring_buffer_event *event; struct ring_buffer *buffer; @@ -733,7 +843,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) unsigned long irq_flags; struct ftrace_event_call *call = &tp->call; - tp->nhit++; + WARN_ON(call != ftrace_file->event_call); + + if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) + return; local_save_flags(irq_flags); pc = preempt_count(); @@ -741,13 +854,14 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) dsize = __get_data_size(tp, regs); size = sizeof(*entry) + tp->size + dsize; - event = trace_current_buffer_lock_reserve(&buffer, call->event.type, - size, irq_flags, pc); + event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, + call->event.type, + size, irq_flags, pc); if (!event) return; entry = ring_buffer_event_data(event); - entry->ip = (unsigned long)kp->addr; + entry->ip = (unsigned long)tp->rp.kp.addr; store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); if (!filter_current_check_discard(buffer, call, entry, event)) @@ -755,11 +869,24 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) irq_flags, pc, regs); } +static __kprobes void +kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) +{ + struct ftrace_event_file **file = tp->files; + + /* Note: preempt is already disabled around the kprobe handler */ + while (*file) { + __kprobe_trace_func(tp, regs, *file); + file++; + } +} + /* Kretprobe handler */ -static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, - struct pt_regs *regs) +static __kprobes void +__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, + struct pt_regs *regs, + struct ftrace_event_file *ftrace_file) { - struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct 
kretprobe_trace_entry_head *entry; struct ring_buffer_event *event; struct ring_buffer *buffer; @@ -767,14 +894,20 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, unsigned long irq_flags; struct ftrace_event_call *call = &tp->call; + WARN_ON(call != ftrace_file->event_call); + + if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) + return; + local_save_flags(irq_flags); pc = preempt_count(); dsize = __get_data_size(tp, regs); size = sizeof(*entry) + tp->size + dsize; - event = trace_current_buffer_lock_reserve(&buffer, call->event.type, - size, irq_flags, pc); + event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, + call->event.type, + size, irq_flags, pc); if (!event) return; @@ -788,6 +921,19 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, irq_flags, pc, regs); } +static __kprobes void +kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + struct ftrace_event_file **file = tp->files; + + /* Note: preempt is already disabled around the kprobe handler */ + while (*file) { + __kretprobe_trace_func(tp, ri, regs, *file); + file++; + } +} + /* Event entry printers */ enum print_line_t print_kprobe_event(struct trace_iterator *iter, int flags, @@ -975,10 +1121,9 @@ static int set_print_fmt(struct trace_probe *tp) #ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ -static __kprobes void kprobe_perf_func(struct kprobe *kp, - struct pt_regs *regs) +static __kprobes void +kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) { - struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct ftrace_event_call *call = &tp->call; struct kprobe_trace_entry_head *entry; struct hlist_head *head; @@ -997,7 +1142,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, if (!entry) return; - entry->ip = (unsigned long)kp->addr; + entry->ip = (unsigned long)tp->rp.kp.addr; memset(&entry[1], 0, dsize); store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); @@ -1007,10 +1152,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, } /* Kretprobe profile handler */ -static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, - struct pt_regs *regs) +static __kprobes void +kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, + struct pt_regs *regs) { - struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry_head *entry; struct hlist_head *head; @@ -1044,20 +1189,19 @@ int kprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) { struct trace_probe *tp = (struct trace_probe *)event->data; + struct ftrace_event_file *file = data; switch (type) { case TRACE_REG_REGISTER: - return enable_trace_probe(tp, TP_FLAG_TRACE); + return enable_trace_probe(tp, file); case TRACE_REG_UNREGISTER: - disable_trace_probe(tp, TP_FLAG_TRACE); - return 0; + return disable_trace_probe(tp, file); #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: - return enable_trace_probe(tp, TP_FLAG_PROFILE); + return enable_trace_probe(tp, NULL); case TRACE_REG_PERF_UNREGISTER: - disable_trace_probe(tp, TP_FLAG_PROFILE); - return 0; + return disable_trace_probe(tp, NULL); case TRACE_REG_PERF_OPEN: case TRACE_REG_PERF_CLOSE: case TRACE_REG_PERF_ADD: @@ -1073,11 +1217,13 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); + 
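The dispatchers in this file recover the enclosing trace_probe from the embedded kprobe with container_of(kp, struct trace_probe, rp.kp). A simplified, self-contained version of that idiom (the extra type checking the kernel macro performs is omitted, and the structure names are invented):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kprobe_like { unsigned long addr; };

struct probe_like {
	const char		*name;
	struct kprobe_like	kp;	/* embedded member */
	unsigned long		nhit;
};

static void dispatcher(struct kprobe_like *kp)
{
	/* Walk back from the member to the structure that embeds it. */
	struct probe_like *tp = container_of(kp, struct probe_like, kp);

	tp->nhit++;
	printf("%s hit %lu time(s)\n", tp->name, tp->nhit);
}

int main(void)
{
	struct probe_like p = { .name = "testprobe" };

	dispatcher(&p.kp);	/* only the embedded member is passed around */
	return 0;
}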
tp->nhit++; + if (tp->flags & TP_FLAG_TRACE) - kprobe_trace_func(kp, regs); + kprobe_trace_func(tp, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kprobe_perf_func(kp, regs); + kprobe_perf_func(tp, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1087,11 +1233,13 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); + tp->nhit++; + if (tp->flags & TP_FLAG_TRACE) - kretprobe_trace_func(ri, regs); + kretprobe_trace_func(tp, ri, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kretprobe_perf_func(ri, regs); + kretprobe_perf_func(tp, ri, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1189,11 +1337,24 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, return a1 + a2 + a3 + a4 + a5 + a6; } +static struct ftrace_event_file * +find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr) +{ + struct ftrace_event_file *file; + + list_for_each_entry(file, &tr->events, list) + if (file->event_call == &tp->call) + return file; + + return NULL; +} + static __init int kprobe_trace_self_tests_init(void) { int ret, warn = 0; int (*target)(int, int, int, int, int, int); struct trace_probe *tp; + struct ftrace_event_file *file; target = kprobe_trace_selftest_target; @@ -1203,31 +1364,43 @@ static __init int kprobe_trace_self_tests_init(void) "$stack $stack0 +0($stack)", create_trace_probe); if (WARN_ON_ONCE(ret)) { - pr_warning("error on probing function entry.\n"); + pr_warn("error on probing function entry.\n"); warn++; } else { /* Enable trace point */ tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); if (WARN_ON_ONCE(tp == NULL)) { - pr_warning("error on getting new probe.\n"); + pr_warn("error on getting new probe.\n"); warn++; - } else - enable_trace_probe(tp, TP_FLAG_TRACE); + } else { + file = find_trace_probe_file(tp, top_trace_array()); + if (WARN_ON_ONCE(file == NULL)) { + pr_warn("error on getting probe file.\n"); + warn++; + } else + enable_trace_probe(tp, file); + } } ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target " "$retval", create_trace_probe); if (WARN_ON_ONCE(ret)) { - pr_warning("error on probing function return.\n"); + pr_warn("error on probing function return.\n"); warn++; } else { /* Enable trace point */ tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); if (WARN_ON_ONCE(tp == NULL)) { - pr_warning("error on getting new probe.\n"); + pr_warn("error on getting 2nd new probe.\n"); warn++; - } else - enable_trace_probe(tp, TP_FLAG_TRACE); + } else { + file = find_trace_probe_file(tp, top_trace_array()); + if (WARN_ON_ONCE(file == NULL)) { + pr_warn("error on getting probe file.\n"); + warn++; + } else + enable_trace_probe(tp, file); + } } if (warn) @@ -1238,27 +1411,39 @@ static __init int kprobe_trace_self_tests_init(void) /* Disable trace points before removing it */ tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); if (WARN_ON_ONCE(tp == NULL)) { - pr_warning("error on getting test probe.\n"); + pr_warn("error on getting test probe.\n"); warn++; - } else - disable_trace_probe(tp, TP_FLAG_TRACE); + } else { + file = find_trace_probe_file(tp, top_trace_array()); + if (WARN_ON_ONCE(file == NULL)) { + pr_warn("error on getting probe file.\n"); + warn++; + } else + disable_trace_probe(tp, file); + } tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); if (WARN_ON_ONCE(tp == NULL)) { - pr_warning("error on getting 2nd 
test probe.\n"); + pr_warn("error on getting 2nd test probe.\n"); warn++; - } else - disable_trace_probe(tp, TP_FLAG_TRACE); + } else { + file = find_trace_probe_file(tp, top_trace_array()); + if (WARN_ON_ONCE(file == NULL)) { + pr_warn("error on getting probe file.\n"); + warn++; + } else + disable_trace_probe(tp, file); + } ret = traceprobe_command("-:testprobe", create_trace_probe); if (WARN_ON_ONCE(ret)) { - pr_warning("error on deleting a probe.\n"); + pr_warn("error on deleting a probe.\n"); warn++; } ret = traceprobe_command("-:testprobe2", create_trace_probe); if (WARN_ON_ONCE(ret)) { - pr_warning("error on deleting a probe.\n"); + pr_warn("error on deleting a probe.\n"); warn++; } diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index 0785e95c9924..be7614b9ed27 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c @@ -85,7 +85,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, *cpos++ = *pos++ ^ e[i]; } - for (i = 0; i < CCMP_MIC_LEN; i++) + for (i = 0; i < IEEE80211_CCMP_MIC_LEN; i++) mic[i] = b[i] ^ s_0[i]; } @@ -123,7 +123,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, crypto_cipher_encrypt_one(tfm, a, a); } - for (i = 0; i < CCMP_MIC_LEN; i++) { + for (i = 0; i < IEEE80211_CCMP_MIC_LEN; i++) { if ((mic[i] ^ s_0[i]) != a[i]) return -1; } @@ -138,7 +138,7 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]) tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (!IS_ERR(tfm)) - crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN); + crypto_cipher_setkey(tfm, key, WLAN_KEY_LEN_CCMP); return tfm; } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 4fdb306e42e0..93120de776f0 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -73,16 +73,19 @@ static int ieee80211_change_iface(struct wiphy *wiphy, struct ieee80211_local *local = sdata->local; if (ieee80211_sdata_running(sdata)) { + u32 mask = MONITOR_FLAG_COOK_FRAMES | + MONITOR_FLAG_ACTIVE; + /* - * Prohibit MONITOR_FLAG_COOK_FRAMES to be - * changed while the interface is up. + * Prohibit MONITOR_FLAG_COOK_FRAMES and + * MONITOR_FLAG_ACTIVE to be changed while the + * interface is up. 
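The comparison that follows only refuses the interface change when a bit covered by the mask would actually flip, so monitor flags outside the mask can still be toggled on a running interface. The idiom in isolation, with invented flag names:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_COOK_FRAMES	0x1
#define FLAG_ACTIVE		0x2
#define FLAG_OTHER		0x4

static bool change_needs_restart(unsigned int cur, unsigned int req)
{
	unsigned int mask = FLAG_COOK_FRAMES | FLAG_ACTIVE;

	/* Differences outside the mask are tolerated while running. */
	return (cur & mask) != (req & mask);
}

int main(void)
{
	unsigned int cur = FLAG_ACTIVE;

	printf("%d\n", change_needs_restart(cur, FLAG_ACTIVE | FLAG_OTHER));	/* 0 */
	printf("%d\n", change_needs_restart(cur, FLAG_COOK_FRAMES));		/* 1 */
	return 0;
}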
* Else we would need to add a lot of cruft * to update everything: * cooked_mntrs, monitor and all fif_* counters * reconfigure hardware */ - if ((*flags & MONITOR_FLAG_COOK_FRAMES) != - (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) + if ((*flags & mask) != (sdata->u.mntr_flags & mask)) return -EBUSY; ieee80211_adjust_monitor_flags(sdata, -1); @@ -444,7 +447,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) struct ieee80211_local *local = sdata->local; struct timespec uptime; u64 packets = 0; - int ac; + int i, ac; sinfo->generation = sdata->local->sta_generation; @@ -488,6 +491,17 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->signal = (s8)sta->last_signal; sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); } + if (sta->chains) { + sinfo->filled |= STATION_INFO_CHAIN_SIGNAL | + STATION_INFO_CHAIN_SIGNAL_AVG; + + sinfo->chains = sta->chains; + for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { + sinfo->chain_signal[i] = sta->chain_signal_last[i]; + sinfo->chain_signal_avg[i] = + (s8) -ewma_read(&sta->chain_signal_avg[i]); + } + } sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate); sta_set_rate_info_rx(sta, &sinfo->rxrate); @@ -728,7 +742,7 @@ static void ieee80211_get_et_strings(struct wiphy *wiphy, if (sset == ETH_SS_STATS) { sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); - memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats); + memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats); } drv_get_et_strings(sdata, sset, &(data[sz_sta_stats])); } @@ -1741,6 +1755,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, ifmsh->mesh_pp_id = setup->path_sel_proto; ifmsh->mesh_pm_id = setup->path_metric; ifmsh->user_mpm = setup->user_mpm; + ifmsh->mesh_auth_id = setup->auth_id; ifmsh->security = IEEE80211_MESH_SEC_NONE; if (setup->is_authenticated) ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; @@ -1750,6 +1765,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, /* mcast rate setting in Mesh Node */ memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate, sizeof(setup->mcast_rate)); + sdata->vif.bss_conf.basic_rates = setup->basic_rates; sdata->vif.bss_conf.beacon_int = setup->beacon_interval; sdata->vif.bss_conf.dtim_period = setup->dtim_period; @@ -1862,6 +1878,8 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask)) conf->dot11MeshAwakeWindowDuration = nconf->dot11MeshAwakeWindowDuration; + if (_chg_mesh_attr(NL80211_MESHCONF_PLINK_TIMEOUT, mask)) + conf->plink_timeout = nconf->plink_timeout; ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); return 0; } @@ -2312,7 +2330,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode old_req; int err; - lockdep_assert_held(&sdata->u.mgd.mtx); + lockdep_assert_held(&sdata->wdev.mtx); old_req = sdata->u.mgd.req_smps; sdata->u.mgd.req_smps = smps_mode; @@ -2369,9 +2387,9 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, local->dynamic_ps_forced_timeout = timeout; /* no change, but if automatic follow powersave */ - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); @@ -2829,6 +2847,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev 
*wdev, return -EOPNOTSUPP; } + /* configurations requiring offchan cannot work if no channel has been + * specified + */ + if (need_offchan && !chan) + return -EINVAL; + mutex_lock(&local->mtx); /* Check if the operating channel is the requested channel */ @@ -2838,10 +2862,15 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (chanctx_conf) - need_offchan = chan != chanctx_conf->def.chan; - else + if (chanctx_conf) { + need_offchan = chan && (chan != chanctx_conf->def.chan); + } else if (!chan) { + ret = -EINVAL; + rcu_read_unlock(); + goto out_unlock; + } else { need_offchan = true; + } rcu_read_unlock(); } @@ -2901,19 +2930,8 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, u16 frame_type, bool reg) { struct ieee80211_local *local = wiphy_priv(wiphy); - struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); switch (frame_type) { - case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH: - if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { - struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - - if (reg) - ifibss->auth_frame_registrations++; - else - ifibss->auth_frame_registrations--; - } - break; case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ: if (reg) local->probe_req_reg++; diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 14abcf44f974..cafe614ef93d 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -228,9 +228,9 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); err = __ieee80211_request_smps(sdata, smps_mode); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); return err; } @@ -313,16 +313,16 @@ static ssize_t ieee80211_if_parse_tkip_mic_test( case NL80211_IFTYPE_STATION: fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); if (!sdata->u.mgd.associated) { - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); dev_kfree_skb(skb); return -ENOTCONN; } memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, addr, ETH_ALEN); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); break; default: dev_kfree_skb(skb); @@ -471,6 +471,8 @@ __IEEE80211_IF_FILE_W(tsf); IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); #ifdef CONFIG_MAC80211_MESH +IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC); + /* Mesh stats attributes */ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC); IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); @@ -480,7 +482,6 @@ IEEE80211_IF_FILE(dropped_frames_congestion, u.mesh.mshstats.dropped_frames_congestion, DEC); IEEE80211_IF_FILE(dropped_frames_no_route, u.mesh.mshstats.dropped_frames_no_route, DEC); -IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC); /* Mesh parameters */ IEEE80211_IF_FILE(dot11MeshMaxRetries, @@ -583,6 +584,7 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata) static void add_mesh_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD_MODE(tsf, 0600); + DEBUGFS_ADD_MODE(estab_plinks, 0400); } static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) @@ -598,7 +600,6 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) MESHSTATS_ADD(dropped_frames_ttl); 
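The debugfs hunks above are part of a broader conversion in this series: the per-mode mutexes (u.mgd.mtx and friends) give way to sdata_lock()/sdata_unlock() wrappers around the one wdev mutex, with lockdep asserting the lock where it used to be taken directly. A rough userspace model of that wrapper-plus-assert style; the owner tracking is only a best-effort stand-in for lockdep_assert_held(), and every name is made up:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct subif {
	pthread_mutex_t	mtx;	/* the single per-interface lock */
	pthread_t	owner;
	bool		held;
};

static inline void subif_lock(struct subif *s)
{
	pthread_mutex_lock(&s->mtx);
	s->owner = pthread_self();
	s->held = true;
}

static inline void subif_unlock(struct subif *s)
{
	s->held = false;
	pthread_mutex_unlock(&s->mtx);
}

/* Cheap stand-in for lockdep_assert_held(); only meaningful when the
 * caller is supposed to hold the lock already. */
static inline void subif_assert_lock(struct subif *s)
{
	assert(s->held && pthread_equal(s->owner, pthread_self()));
}

static void do_protected_work(struct subif *s)
{
	subif_assert_lock(s);	/* caller must hold the interface lock */
	/* ... touch state shared by the managed/IBSS/mesh paths ... */
}

int main(void)
{
	struct subif s = { .mtx = PTHREAD_MUTEX_INITIALIZER };

	subif_lock(&s);
	do_protected_work(&s);
	subif_unlock(&s);
	return 0;
}

Funnelling every mode through one lock plus an assertion at the entry points is what allows the later hunks to drop the mode-private mutexes entirely.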
MESHSTATS_ADD(dropped_frames_no_route); MESHSTATS_ADD(dropped_frames_congestion); - MESHSTATS_ADD(estab_plinks); #undef MESHSTATS_ADD } diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 169664c122e2..b931c96a596f 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -146,7 +146,8 @@ static inline int drv_add_interface(struct ieee80211_local *local, if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && - !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)))) + !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF) && + !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)))) return -EINVAL; trace_drv_add_interface(local, sdata); diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index af8cee06e4f3..f83534f6a2ee 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -281,13 +281,14 @@ void ieee80211_ba_session_work(struct work_struct *work) sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_UNSPECIFIED, true); + spin_lock_bh(&sta->lock); + tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; if (tid_tx) { /* * Assign it over to the normal tid_tx array * where it "goes live". */ - spin_lock_bh(&sta->lock); sta->ampdu_mlme.tid_start_tx[tid] = NULL; /* could there be a race? */ @@ -300,6 +301,7 @@ void ieee80211_ba_session_work(struct work_struct *work) ieee80211_tx_ba_session_handle_start(sta, tid); continue; } + spin_unlock_bh(&sta->lock); tid_tx = rcu_dereference_protected_tid_tx(sta, tid); if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP, @@ -429,9 +431,9 @@ void ieee80211_request_smps_work(struct work_struct *work) container_of(work, struct ieee80211_sub_if_data, u.mgd.request_smps_work); - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); __ieee80211_request_smps(sdata, sdata->u.mgd.driver_smps_mode); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); } void ieee80211_request_smps(struct ieee80211_vif *vif, diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 170f9a7fa319..ea7b9c2c7e66 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -54,7 +54,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct beacon_data *presp; int frame_len; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); /* Reset own TSF to allow time synchronization work. */ drv_reset_tsf(local, sdata); @@ -74,14 +74,14 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, } presp = rcu_dereference_protected(ifibss->presp, - lockdep_is_held(&ifibss->mtx)); + lockdep_is_held(&sdata->wdev.mtx)); rcu_assign_pointer(ifibss->presp, NULL); if (presp) kfree_rcu(presp, rcu_head); sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 
1 : 0; - cfg80211_chandef_create(&chandef, chan, ifibss->channel_type); + chandef = ifibss->chandef; if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { chandef.width = NL80211_CHAN_WIDTH_20; chandef.center_freq1 = chan->center_freq; @@ -176,6 +176,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, /* add HT capability and information IEs */ if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT && + chandef.width != NL80211_CHAN_WIDTH_5 && + chandef.width != NL80211_CHAN_WIDTH_10 && sband->ht_cap.ht_supported) { pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); @@ -263,7 +265,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, const struct cfg80211_bss_ies *ies; u64 tsf; - lockdep_assert_held(&sdata->u.ibss.mtx); + sdata_assert_lock(sdata); if (beacon_int < 10) beacon_int = 10; @@ -298,8 +300,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, tsf, false); } -static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta, - bool auth) +static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) __acquires(RCU) { struct ieee80211_sub_if_data *sdata = sta->sdata; @@ -321,26 +322,19 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta, /* If it fails, maybe we raced another insertion? */ if (sta_info_insert_rcu(sta)) return sta_info_get(sdata, addr); - if (auth && !sdata->u.ibss.auth_frame_registrations) { - ibss_dbg(sdata, - "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n", - sdata->vif.addr, addr, sdata->u.ibss.bssid); - ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0, - addr, sdata->u.ibss.bssid, NULL, 0, 0, 0); - } return sta; } static struct sta_info * -ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, - const u8 *bssid, const u8 *addr, - u32 supp_rates, bool auth) +ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, + const u8 *addr, u32 supp_rates) __acquires(RCU) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; int band; /* @@ -380,10 +374,11 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, sta->last_rx = jiffies; /* make sure mandatory rates are always added */ + sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(local, band); + ieee80211_mandatory_rates(sband); - return ieee80211_ibss_finish_sta(sta, auth); + return ieee80211_ibss_finish_sta(sta); } static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, @@ -405,10 +400,8 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, size_t len) { u16 auth_alg, auth_transaction; - struct sta_info *sta; - u8 deauth_frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; - lockdep_assert_held(&sdata->u.ibss.mtx); + sdata_assert_lock(sdata); if (len < 24 + 6) return; @@ -423,22 +416,6 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) return; - sta_info_destroy_addr(sdata, mgmt->sa); - sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false); - rcu_read_unlock(); - - /* - * if we have any problem in allocating the new station, we reply with a - * DEAUTH frame to tell the other end that we had a problem - */ - if (!sta) { - ieee80211_send_deauth_disassoc(sdata, sdata->u.ibss.bssid, - 
IEEE80211_STYPE_DEAUTH, - WLAN_REASON_UNSPECIFIED, true, - deauth_frame_buf); - return; - } - /* * IEEE 802.11 standard does not require authentication in IBSS * networks and most implementations do not seem to use it. @@ -492,7 +469,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, prev_rates = sta->sta.supp_rates[band]; /* make sure mandatory rates are always added */ sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(local, band); + ieee80211_mandatory_rates(sband); if (sta->sta.supp_rates[band] != prev_rates) { ibss_dbg(sdata, @@ -504,7 +481,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, } else { rcu_read_unlock(); sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, - mgmt->sa, supp_rates, true); + mgmt->sa, supp_rates); } } @@ -512,7 +489,9 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, set_sta_flag(sta, WLAN_STA_WME); if (sta && elems->ht_operation && elems->ht_cap_elem && - sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { + sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && + sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 && + sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) { /* we both use HT */ struct ieee80211_ht_cap htcap_ie; struct cfg80211_chan_def chandef; @@ -527,8 +506,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, * fall back to HT20 if we don't use or use * the other extension channel */ - if (cfg80211_get_chandef_type(&chandef) != - sdata->u.ibss.channel_type) + if (chandef.center_freq1 != + sdata->u.ibss.chandef.center_freq1) htcap_ie.cap_info &= cpu_to_le16(~IEEE80211_HT_CAP_SUP_WIDTH_20_40); @@ -567,7 +546,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, /* different channel */ if (sdata->u.ibss.fixed_channel && - sdata->u.ibss.channel != cbss->channel) + sdata->u.ibss.chandef.chan != cbss->channel) goto put_bss; /* different SSID */ @@ -608,7 +587,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, ieee80211_sta_join_ibss(sdata, bss); supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL); ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, - supp_rates, true); + supp_rates); rcu_read_unlock(); } @@ -624,6 +603,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; + struct ieee80211_supported_band *sband; int band; /* @@ -658,8 +638,9 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, sta->last_rx = jiffies; /* make sure mandatory rates are always added */ + sband = local->hw.wiphy->bands[band]; sta->sta.supp_rates[band] = supp_rates | - ieee80211_mandatory_rates(local, band); + ieee80211_mandatory_rates(sband); spin_lock(&ifibss->incomplete_lock); list_add(&sta->list, &ifibss->incomplete_stations); @@ -673,7 +654,7 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) int active = 0; struct sta_info *sta; - lockdep_assert_held(&sdata->u.ibss.mtx); + sdata_assert_lock(sdata); rcu_read_lock(); @@ -699,7 +680,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); @@ -730,7 +711,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) u16 capability; 
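The incomplete_stations handling here is a small deferral pattern: the receive path queues a half-built station under a spinlock, and the interface work later pops entries one at a time and finishes them outside the lock (see the drain loop further down). A userspace sketch with a pthread mutex standing in for the spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct station {
	struct station	*next;
	int		id;
};

static pthread_mutex_t incomplete_lock = PTHREAD_MUTEX_INITIALIZER;
static struct station *incomplete_list;

/* Fast path: just queue the entry, do the heavy lifting later. */
static void rx_no_sta(int id)
{
	struct station *sta = calloc(1, sizeof(*sta));

	if (!sta)
		return;
	sta->id = id;

	pthread_mutex_lock(&incomplete_lock);
	sta->next = incomplete_list;
	incomplete_list = sta;
	pthread_mutex_unlock(&incomplete_lock);
	/* a real driver would also kick its work item here */
}

/* Work handler: drain the list, finishing each entry unlocked. */
static void finish_incomplete_stations(void)
{
	pthread_mutex_lock(&incomplete_lock);
	while (incomplete_list) {
		struct station *sta = incomplete_list;

		incomplete_list = sta->next;
		pthread_mutex_unlock(&incomplete_lock);

		printf("finishing station %d\n", sta->id);	/* slow part */
		free(sta);

		pthread_mutex_lock(&incomplete_lock);
	}
	pthread_mutex_unlock(&incomplete_lock);
}

int main(void)
{
	rx_no_sta(1);
	rx_no_sta(2);
	finish_incomplete_stations();
	return 0;
}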
int i; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); if (ifibss->fixed_bssid) { memcpy(bssid, ifibss->bssid, ETH_ALEN); @@ -755,7 +736,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) sdata->drop_unencrypted = 0; __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, - ifibss->channel, ifibss->basic_rates, + ifibss->chandef.chan, ifibss->basic_rates, capability, 0, true); } @@ -773,7 +754,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) int active_ibss; u16 capability; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); active_ibss = ieee80211_sta_active_ibss(sdata); ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss); @@ -787,7 +768,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) if (ifibss->fixed_bssid) bssid = ifibss->bssid; if (ifibss->fixed_channel) - chan = ifibss->channel; + chan = ifibss->chandef.chan; if (!is_zero_ether_addr(ifibss->bssid)) bssid = ifibss->bssid; cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid, @@ -843,10 +824,10 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, struct beacon_data *presp; u8 *pos, *end; - lockdep_assert_held(&ifibss->mtx); + sdata_assert_lock(sdata); presp = rcu_dereference_protected(ifibss->presp, - lockdep_is_held(&ifibss->mtx)); + lockdep_is_held(&sdata->wdev.mtx)); if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || len < 24 + 2 || !presp) @@ -930,7 +911,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); - mutex_lock(&sdata->u.ibss.mtx); + sdata_lock(sdata); if (!sdata->u.ibss.ssid_len) goto mgmt_out; /* not ready to merge yet */ @@ -953,7 +934,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, } mgmt_out: - mutex_unlock(&sdata->u.ibss.mtx); + sdata_unlock(sdata); } void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) @@ -961,7 +942,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct sta_info *sta; - mutex_lock(&ifibss->mtx); + sdata_lock(sdata); /* * Work could be scheduled after scan or similar @@ -978,7 +959,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) list_del(&sta->list); spin_unlock_bh(&ifibss->incomplete_lock); - ieee80211_ibss_finish_sta(sta, true); + ieee80211_ibss_finish_sta(sta); rcu_read_unlock(); spin_lock_bh(&ifibss->incomplete_lock); } @@ -997,7 +978,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) } out: - mutex_unlock(&ifibss->mtx); + sdata_unlock(sdata); } static void ieee80211_ibss_timer(unsigned long data) @@ -1014,7 +995,6 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) setup_timer(&ifibss->timer, ieee80211_ibss_timer, (unsigned long) sdata); - mutex_init(&ifibss->mtx); INIT_LIST_HEAD(&ifibss->incomplete_stations); spin_lock_init(&ifibss->incomplete_lock); } @@ -1041,8 +1021,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, { u32 changed = 0; - mutex_lock(&sdata->u.ibss.mtx); - if (params->bssid) { memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); sdata->u.ibss.fixed_bssid = true; @@ -1057,9 +1035,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.beacon_int = params->beacon_interval; - sdata->u.ibss.channel = params->chandef.chan; - sdata->u.ibss.channel_type = - cfg80211_get_chandef_type(¶ms->chandef); + 
sdata->u.ibss.chandef = params->chandef; sdata->u.ibss.fixed_channel = params->channel_fixed; if (params->ie) { @@ -1075,8 +1051,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); sdata->u.ibss.ssid_len = params->ssid_len; - mutex_unlock(&sdata->u.ibss.mtx); - /* * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is * reserved, but an HT STA shall protect HT transmissions as though @@ -1112,8 +1086,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) struct sta_info *sta; struct beacon_data *presp; - mutex_lock(&sdata->u.ibss.mtx); - active_ibss = ieee80211_sta_active_ibss(sdata); if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { @@ -1122,7 +1094,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; - cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel, + cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY, @@ -1157,7 +1129,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) /* remove beacon */ kfree(sdata->u.ibss.ie); presp = rcu_dereference_protected(ifibss->presp, - lockdep_is_held(&sdata->u.ibss.mtx)); + lockdep_is_held(&sdata->wdev.mtx)); RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); sdata->vif.bss_conf.ibss_joined = false; sdata->vif.bss_conf.ibss_creator = false; @@ -1173,7 +1145,5 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) del_timer_sync(&sdata->u.ibss.timer); - mutex_unlock(&sdata->u.ibss.mtx); - return 0; } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 9ca8e3278cc0..8412a303993a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -94,6 +94,7 @@ struct ieee80211_bss { #define IEEE80211_MAX_SUPP_RATES 32 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; size_t supp_rates_len; + struct ieee80211_rate *beacon_rate; /* * During association, we save an ERP value from a probe response so @@ -366,7 +367,7 @@ struct ieee80211_mgd_assoc_data { u8 ssid_len; u8 supp_rates_len; bool wmm, uapsd; - bool have_beacon, need_beacon; + bool need_beacon; bool synced; bool timeout_started; @@ -394,7 +395,6 @@ struct ieee80211_if_managed { bool nullfunc_failed; bool connection_loss; - struct mutex mtx; struct cfg80211_bss *associated; struct ieee80211_mgd_auth_data *auth_data; struct ieee80211_mgd_assoc_data *assoc_data; @@ -405,6 +405,7 @@ struct ieee80211_if_managed { bool powersave; /* powersave requested for this iface */ bool broken_ap; /* AP is broken -- turn off powersave */ + bool have_beacon; u8 dtim_period; enum ieee80211_smps_mode req_smps, /* requested smps mode */ driver_smps_mode; /* smps mode request */ @@ -488,8 +489,6 @@ struct ieee80211_if_managed { struct ieee80211_if_ibss { struct timer_list timer; - struct mutex mtx; - unsigned long last_scan_completed; u32 basic_rates; @@ -499,14 +498,12 @@ struct ieee80211_if_ibss { bool privacy; bool control_port; - unsigned int auth_frame_registrations; u8 bssid[ETH_ALEN] __aligned(2); u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len, ie_len; u8 *ie; - struct ieee80211_channel *channel; - enum nl80211_channel_type channel_type; + struct cfg80211_chan_def chandef; unsigned long ibss_join_req; /* probe response/beacon for IBSS */ @@ -545,6 +542,7 @@ struct ieee80211_if_mesh { struct timer_list mesh_path_root_timer; unsigned long wrkq_flags; + unsigned long mbss_changed; u8 
mesh_id[IEEE80211_MAX_MESH_ID_LEN]; size_t mesh_id_len; @@ -580,8 +578,6 @@ struct ieee80211_if_mesh { bool accepting_plinks; int num_gates; struct beacon_data __rcu *beacon; - /* just protects beacon updates for now */ - struct mutex mtx; const u8 *ie; u8 ie_len; enum { @@ -778,6 +774,26 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p) return container_of(p, struct ieee80211_sub_if_data, vif); } +static inline void sdata_lock(struct ieee80211_sub_if_data *sdata) + __acquires(&sdata->wdev.mtx) +{ + mutex_lock(&sdata->wdev.mtx); + __acquire(&sdata->wdev.mtx); +} + +static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata) + __releases(&sdata->wdev.mtx) +{ + mutex_unlock(&sdata->wdev.mtx); + __release(&sdata->wdev.mtx); +} + +static inline void +sdata_assert_lock(struct ieee80211_sub_if_data *sdata) +{ + lockdep_assert_held(&sdata->wdev.mtx); +} + static inline enum ieee80211_band ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata) { @@ -1507,9 +1523,6 @@ static inline void ieee802_11_parse_elems(const u8 *start, size_t len, ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); } -u32 ieee80211_mandatory_rates(struct ieee80211_local *local, - enum ieee80211_band band); - void ieee80211_dynamic_ps_enable_work(struct work_struct *work); void ieee80211_dynamic_ps_disable_work(struct work_struct *work); void ieee80211_dynamic_ps_timer(unsigned long data); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 98d20c0f6fed..a2a8250e2f84 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -159,7 +159,8 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr) +static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr, + bool check_dup) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *iter; @@ -180,13 +181,16 @@ static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr) ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) | ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8); + if (!check_dup) + return ret; mutex_lock(&local->iflist_mtx); list_for_each_entry(iter, &local->interfaces, list) { if (iter == sdata) continue; - if (iter->vif.type == NL80211_IFTYPE_MONITOR) + if (iter->vif.type == NL80211_IFTYPE_MONITOR && + !(iter->u.mntr_flags & MONITOR_FLAG_ACTIVE)) continue; m = iter->vif.addr; @@ -208,12 +212,17 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sockaddr *sa = addr; + bool check_dup = true; int ret; if (ieee80211_sdata_running(sdata)) return -EBUSY; - ret = ieee80211_verify_mac(sdata, sa->sa_data); + if (sdata->vif.type == NL80211_IFTYPE_MONITOR && + !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)) + check_dup = false; + + ret = ieee80211_verify_mac(sdata, sa->sa_data, check_dup); if (ret) return ret; @@ -545,7 +554,11 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) break; } - if (local->monitors == 0 && local->open_count == 0) { + if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) { + res = drv_add_interface(local, sdata); + if (res) + goto err_stop; + } else if (local->monitors == 0 && local->open_count == 0) { res = ieee80211_add_virtual_monitor(local); if (res) goto err_stop; @@ -923,7 +936,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, mutex_lock(&local->mtx); ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); - break; + + 
if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)) + break; + + /* fall through */ default: if (going_down) drv_remove_interface(local, sdata); @@ -1072,7 +1089,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_start_xmit = ieee80211_monitor_start_xmit, .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_change_mtu = ieee80211_change_mtu, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_monitor_select_queue, }; diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 67059b88fea5..e39cc91d0cf1 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -335,12 +335,12 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - key->conf.iv_len = WEP_IV_LEN; - key->conf.icv_len = WEP_ICV_LEN; + key->conf.iv_len = IEEE80211_WEP_IV_LEN; + key->conf.icv_len = IEEE80211_WEP_ICV_LEN; break; case WLAN_CIPHER_SUITE_TKIP: - key->conf.iv_len = TKIP_IV_LEN; - key->conf.icv_len = TKIP_ICV_LEN; + key->conf.iv_len = IEEE80211_TKIP_IV_LEN; + key->conf.icv_len = IEEE80211_TKIP_ICV_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { key->u.tkip.rx[i].iv32 = @@ -352,13 +352,13 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, spin_lock_init(&key->u.tkip.txlock); break; case WLAN_CIPHER_SUITE_CCMP: - key->conf.iv_len = CCMP_HDR_LEN; - key->conf.icv_len = CCMP_MIC_LEN; + key->conf.iv_len = IEEE80211_CCMP_HDR_LEN; + key->conf.icv_len = IEEE80211_CCMP_MIC_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) - for (j = 0; j < CCMP_PN_LEN; j++) + for (j = 0; j < IEEE80211_CCMP_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = - seq[CCMP_PN_LEN - j - 1]; + seq[IEEE80211_CCMP_PN_LEN - j - 1]; } /* * Initialize AES key state here as an optimization so that @@ -375,9 +375,9 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, key->conf.iv_len = 0; key->conf.icv_len = sizeof(struct ieee80211_mmie); if (seq) - for (j = 0; j < CMAC_PN_LEN; j++) + for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++) key->u.aes_cmac.rx_pn[j] = - seq[CMAC_PN_LEN - j - 1]; + seq[IEEE80211_CMAC_PN_LEN - j - 1]; /* * Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. @@ -740,13 +740,13 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.ccmp.rx_pn[tid]; - memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN); + memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; - memcpy(seq->aes_cmac.pn, pn, CMAC_PN_LEN); + memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN); break; } } diff --git a/net/mac80211/key.h b/net/mac80211/key.h index e8de3e6d7804..036d57e76a5e 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -19,17 +19,6 @@ #define NUM_DEFAULT_KEYS 4 #define NUM_DEFAULT_MGMT_KEYS 2 -#define WEP_IV_LEN 4 -#define WEP_ICV_LEN 4 -#define ALG_CCMP_KEY_LEN 16 -#define CCMP_HDR_LEN 8 -#define CCMP_MIC_LEN 8 -#define CCMP_TK_LEN 16 -#define CCMP_PN_LEN 6 -#define TKIP_IV_LEN 8 -#define TKIP_ICV_LEN 4 -#define CMAC_PN_LEN 6 - struct ieee80211_local; struct ieee80211_sub_if_data; struct sta_info; @@ -93,13 +82,13 @@ struct ieee80211_key { * frames and the last counter is used with Robust * Management frames. 
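ieee80211_key_alloc() above loads the caller-supplied 6-byte packet numbers with a reversing loop, seq[PN_LEN - j - 1], rather than a straight memcpy. The reversal idiom on its own; the concrete byte orders here are assumptions made for the example, not a statement about mac80211's internal layout:

#include <stdio.h>

#define PN_LEN 6

static void pn_load_reversed(unsigned char dst[PN_LEN],
			     const unsigned char seq[PN_LEN])
{
	int j;

	for (j = 0; j < PN_LEN; j++)
		dst[j] = seq[PN_LEN - j - 1];
}

int main(void)
{
	const unsigned char seq[PN_LEN] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	unsigned char pn[PN_LEN];
	int j;

	pn_load_reversed(pn, seq);
	for (j = 0; j < PN_LEN; j++)
		printf("%02x ", pn[j]);
	printf("\n");	/* 06 05 04 03 02 01 */
	return 0;
}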
*/ - u8 rx_pn[IEEE80211_NUM_TIDS + 1][CCMP_PN_LEN]; + u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_CCMP_PN_LEN]; struct crypto_cipher *tfm; u32 replays; /* dot11RSNAStatsCCMPReplays */ } ccmp; struct { atomic64_t tx_pn; - u8 rx_pn[CMAC_PN_LEN]; + u8 rx_pn[IEEE80211_CMAC_PN_LEN]; struct crypto_cipher *tfm; u32 replays; /* dot11RSNAStatsCMACReplays */ u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 8a7bfc47d577..626c83c042d7 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -331,7 +331,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, return NOTIFY_DONE; ifmgd = &sdata->u.mgd; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); /* Copy the addresses to the bss_conf list */ ifa = idev->ifa_list; @@ -349,7 +349,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_ARP_FILTER); - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return NOTIFY_DONE; } @@ -686,8 +686,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) return -EINVAL; #ifdef CONFIG_PM - if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) && - (!local->ops->suspend || !local->ops->resume)) + if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) return -EINVAL; #endif diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 6952760881c8..447f41bbe744 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -271,8 +271,7 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, *pos++ = ifmsh->mesh_auth_id; /* Mesh Formation Info - number of neighbors */ neighbors = atomic_read(&ifmsh->estab_plinks); - /* Number of neighbor mesh STAs or 15 whichever is smaller */ - neighbors = (neighbors > 15) ? 15 : neighbors; + neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS); *pos++ = neighbors << 1; /* Mesh capability */ *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING; @@ -417,7 +416,9 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata, sband = local->hw.wiphy->bands[band]; if (!sband->ht_cap.ht_supported || - sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 || + sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10) return 0; if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) @@ -573,7 +574,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; u32 changed; - ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); + ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ); mesh_path_expire(sdata); changed = mesh_accept_plinks_update(sdata); @@ -697,38 +698,38 @@ out_free: } static int -ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh) +ieee80211_mesh_rebuild_beacon(struct ieee80211_sub_if_data *sdata) { struct beacon_data *old_bcn; int ret; - mutex_lock(&ifmsh->mtx); - - old_bcn = rcu_dereference_protected(ifmsh->beacon, - lockdep_is_held(&ifmsh->mtx)); - ret = ieee80211_mesh_build_beacon(ifmsh); + old_bcn = rcu_dereference_protected(sdata->u.mesh.beacon, + lockdep_is_held(&sdata->wdev.mtx)); + ret = ieee80211_mesh_build_beacon(&sdata->u.mesh); if (ret) /* just reuse old beacon */ - goto out; + return ret; if (old_bcn) kfree_rcu(old_bcn, rcu_head); -out: - mutex_unlock(&ifmsh->mtx); - return ret; + return 0; } void ieee80211_mbss_info_change_notify(struct 
ieee80211_sub_if_data *sdata, u32 changed) { - if (sdata->vif.bss_conf.enable_beacon && - (changed & (BSS_CHANGED_BEACON | - BSS_CHANGED_HT | - BSS_CHANGED_BASIC_RATES | - BSS_CHANGED_BEACON_INT))) - if (ieee80211_mesh_rebuild_beacon(&sdata->u.mesh)) - return; - ieee80211_bss_info_change_notify(sdata, changed); + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + unsigned long bits = changed; + u32 bit; + + if (!bits) + return; + + /* if we race with running work, worst case this work becomes a noop */ + for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE) + set_bit(bit, &ifmsh->mbss_changed); + set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags); + ieee80211_queue_work(&sdata->local->hw, &sdata->work); } int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) @@ -740,7 +741,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT; - enum ieee80211_band band = ieee80211_get_sdata_band(sdata); local->fif_other_bss++; /* mesh ifaces must set allmulti to forward mcast traffic */ @@ -748,7 +748,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) ieee80211_configure_filter(local); ifmsh->mesh_cc_id = 0; /* Disabled */ - ifmsh->mesh_auth_id = 0; /* Disabled */ /* register sync ops from extensible synchronization framework */ ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id); ifmsh->adjusting_tbtt = false; @@ -759,8 +758,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) sdata->vif.bss_conf.ht_operation_mode = ifmsh->mshcfg.ht_opmode; sdata->vif.bss_conf.enable_beacon = true; - sdata->vif.bss_conf.basic_rates = - ieee80211_mandatory_rates(local, band); changed |= ieee80211_mps_local_status_update(sdata); @@ -788,12 +785,10 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) sdata->vif.bss_conf.enable_beacon = false; clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); - mutex_lock(&ifmsh->mtx); bcn = rcu_dereference_protected(ifmsh->beacon, - lockdep_is_held(&ifmsh->mtx)); + lockdep_is_held(&sdata->wdev.mtx)); rcu_assign_pointer(ifmsh->beacon, NULL); kfree_rcu(bcn, rcu_head); - mutex_unlock(&ifmsh->mtx); /* flush STAs and mpaths on this iface */ sta_info_flush(sdata); @@ -806,14 +801,10 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) del_timer_sync(&sdata->u.mesh.housekeeping_timer); del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); del_timer_sync(&sdata->u.mesh.mesh_path_timer); - /* - * If the timer fired while we waited for it, it will have - * requeued the work. Now the work will be running again - * but will not rearm the timer again because it checks - * whether the interface is running, which, at this point, - * it no longer is. 
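ieee80211_mbss_info_change_notify() above no longer rebuilds the beacon inline: it folds the changed bits into ifmsh->mbss_changed, sets MESH_WORK_MBSS_CHANGED and queues the interface work, which later collects everything in one pass (mesh_bss_info_changed() below). A compact userspace model of that accumulate-and-drain scheme, using C11 atomics in place of set_bit()/for_each_set_bit() and made-up names:

#include <stdatomic.h>
#include <stdio.h>

#define CHANGED_BEACON		0x1u
#define CHANGED_BASIC_RATES	0x2u
#define CHANGED_BEACON_INT	0x4u

static _Atomic unsigned int mbss_changed;

/* Notify side: may be called from several paths; just accumulate. */
static void mbss_info_change_notify(unsigned int changed)
{
	if (!changed)
		return;
	atomic_fetch_or(&mbss_changed, changed);
	/* ...and schedule the interface work (not modeled here) */
}

/* Work side: runs later, under the interface lock, and takes
 * everything accumulated so far in one go. */
static void mbss_work(void)
{
	unsigned int changed = atomic_exchange(&mbss_changed, 0);

	if (!changed)
		return;
	if (changed & CHANGED_BEACON)
		printf("rebuild beacon\n");
	printf("notify driver of 0x%x\n", changed);
}

int main(void)
{
	mbss_info_change_notify(CHANGED_BEACON);
	mbss_info_change_notify(CHANGED_BEACON_INT);	/* coalesced */
	mbss_work();					/* handles 0x5 at once */
	return 0;
}

Coalescing into a pending bitmap means a burst of notifications costs one work run, and the work handler can take the interface lock that the notifier may not hold.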
- */ - cancel_work_sync(&sdata->work); + + /* clear any mesh work (for next join) we may have accrued */ + ifmsh->wrkq_flags = 0; + ifmsh->mbss_changed = 0; local->fif_other_bss--; atomic_dec(&local->iff_allmultis); @@ -954,6 +945,12 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt; u16 stype; + sdata_lock(sdata); + + /* mesh already went down */ + if (!sdata->wdev.mesh_id_len) + goto out; + rx_status = IEEE80211_SKB_RXCB(skb); mgmt = (struct ieee80211_mgmt *) skb->data; stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; @@ -971,12 +968,42 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); break; } +out: + sdata_unlock(sdata); +} + +static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + u32 bit, changed = 0; + + for_each_set_bit(bit, &ifmsh->mbss_changed, + sizeof(changed) * BITS_PER_BYTE) { + clear_bit(bit, &ifmsh->mbss_changed); + changed |= BIT(bit); + } + + if (sdata->vif.bss_conf.enable_beacon && + (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_HT | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_BEACON_INT))) + if (ieee80211_mesh_rebuild_beacon(sdata)) + return; + + ieee80211_bss_info_change_notify(sdata, changed); } void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + sdata_lock(sdata); + + /* mesh already went down */ + if (!sdata->wdev.mesh_id_len) + goto out; + if (ifmsh->preq_queue_len && time_after(jiffies, ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval))) @@ -996,6 +1023,11 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags)) mesh_sync_adjust_tbtt(sdata); + + if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags)) + mesh_bss_info_changed(sdata); +out: + sdata_unlock(sdata); } void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) @@ -1041,7 +1073,6 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) spin_lock_init(&ifmsh->mesh_preq_queue_lock); spin_lock_init(&ifmsh->sync_offset_lock); RCU_INIT_POINTER(ifmsh->beacon, NULL); - mutex_init(&ifmsh->mtx); sdata->vif.bss_conf.bssid = zero_addr; } diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index da158774eebb..2bc7fd2f787d 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -58,6 +58,7 @@ enum mesh_path_flags { * @MESH_WORK_ROOT: the mesh root station needs to send a frame * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other * mesh nodes + * @MESH_WORK_MBSS_CHANGED: rebuild beacon and notify driver of BSS changes */ enum mesh_deferred_task_flags { MESH_WORK_HOUSEKEEPING, @@ -65,6 +66,7 @@ enum mesh_deferred_task_flags { MESH_WORK_GROW_MPP_TABLE, MESH_WORK_ROOT, MESH_WORK_DRIFT_ADJUST, + MESH_WORK_MBSS_CHANGED, }; /** @@ -188,7 +190,6 @@ struct mesh_rmc { u32 idx_mask; }; -#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) #define MESH_PATH_EXPIRE (600 * HZ) @@ -324,14 +325,14 @@ static inline u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_inc(&sdata->u.mesh.estab_plinks); - return mesh_accept_plinks_update(sdata); + return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; } static inline u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data 
*sdata) { atomic_dec(&sdata->u.mesh.estab_plinks); - return mesh_accept_plinks_update(sdata); + return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; } static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 09bebed99416..02c05fa15c20 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -154,8 +154,14 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) u16 ht_opmode; bool non_ht_sta = false, ht20_sta = false; - if (sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) + switch (sdata->vif.bss_conf.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: return 0; + default: + break; + } rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 741448b30825..34d54fe81483 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -91,41 +91,6 @@ MODULE_PARM_DESC(probe_wait_ms, #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4 /* - * All cfg80211 functions have to be called outside a locked - * section so that they can acquire a lock themselves... This - * is much simpler than queuing up things in cfg80211, but we - * do need some indirection for that here. - */ -enum rx_mgmt_action { - /* no action required */ - RX_MGMT_NONE, - - /* caller must call cfg80211_send_deauth() */ - RX_MGMT_CFG80211_DEAUTH, - - /* caller must call cfg80211_send_disassoc() */ - RX_MGMT_CFG80211_DISASSOC, - - /* caller must call cfg80211_send_rx_auth() */ - RX_MGMT_CFG80211_RX_AUTH, - - /* caller must call cfg80211_send_rx_assoc() */ - RX_MGMT_CFG80211_RX_ASSOC, - - /* caller must call cfg80211_send_assoc_timeout() */ - RX_MGMT_CFG80211_ASSOC_TIMEOUT, - - /* used when a processed beacon causes a deauth */ - RX_MGMT_CFG80211_TX_DEAUTH, -}; - -/* utils */ -static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) -{ - lockdep_assert_held(&ifmgd->mtx); -} - -/* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only * reschedule it when it should fire _earlier_ than it was @@ -135,13 +100,14 @@ static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) * has happened -- the work that runs from this timer will * do that. 
*/ -static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout) +static void run_again(struct ieee80211_sub_if_data *sdata, + unsigned long timeout) { - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); - if (!timer_pending(&ifmgd->timer) || - time_before(timeout, ifmgd->timer.expires)) - mod_timer(&ifmgd->timer, timeout); + if (!timer_pending(&sdata->u.mgd.timer) || + time_before(timeout, sdata->u.mgd.timer.expires)) + mod_timer(&sdata->u.mgd.timer, timeout); } void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) @@ -224,6 +190,12 @@ static u32 chandef_downgrade(struct cfg80211_chan_def *c) c->width = NL80211_CHAN_WIDTH_20_NOHT; ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; break; + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + WARN_ON_ONCE(1); + /* keep c->width */ + ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; + break; } WARN_ON_ONCE(!cfg80211_chandef_valid(c)); @@ -652,7 +624,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) struct ieee80211_channel *chan; u32 rates = 0; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); @@ -914,6 +886,10 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local, IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_INTFL_OFFCHAN_TX_OK; + + if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | IEEE80211_STA_CONNECTION_POLL)) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE; @@ -962,7 +938,7 @@ static void ieee80211_chswitch_work(struct work_struct *work) if (!ieee80211_sdata_running(sdata)) return; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) goto out; @@ -985,7 +961,7 @@ static void ieee80211_chswitch_work(struct work_struct *work) IEEE80211_QUEUE_STOP_REASON_CSA); out: ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) @@ -1036,7 +1012,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, const struct ieee80211_ht_operation *ht_oper; int secondary_channel_offset = -1; - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); if (!cbss) return; @@ -1390,6 +1366,9 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata) IEEE80211_STA_CONNECTION_POLL)) return false; + if (!mgd->have_beacon) + return false; + rcu_read_lock(); sta = sta_info_get(sdata, mgd->bssid); if (sta) @@ -1798,7 +1777,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, ieee80211_led_assoc(local, 1); - if (sdata->u.mgd.assoc_data->have_beacon) { + if (sdata->u.mgd.have_beacon) { /* * If the AP is buggy we may get here with no DTIM period * known, so assume it's 1 which is the only safe assumption @@ -1806,8 +1785,10 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, * probably just won't work at all. 
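run_again(), reworked above to take the sdata and assert the new lock, rearms the shared MLME timer only when no timeout is pending or when the new deadline is earlier than the programmed one, so auth, assoc and probe timeouts can share one timer without pushing each other back. A standalone sketch of that earlier-only policy, using a wraparound-safe comparison in the spirit of the kernel's time_before():

#include <stdio.h>

/* wraparound-safe comparison, same idea as the kernel's time_before() */
#define time_before(a, b) ((long)((a) - (b)) < 0)

struct mlme_timer {
    int pending;
    unsigned long expires;   /* jiffies-like ticks */
};

static void run_again(struct mlme_timer *t, unsigned long timeout)
{
    if (!t->pending || time_before(timeout, t->expires)) {
        t->expires = timeout;   /* kernel: mod_timer() */
        t->pending = 1;
    }
}

int main(void)
{
    struct mlme_timer t = { 0, 0 };

    run_again(&t, 1000);   /* auth timeout                   */
    run_again(&t, 1500);   /* assoc timeout, later: ignored  */
    run_again(&t, 800);    /* probe timeout, earlier: wins   */
    printf("timer fires at %lu\n", t.expires);   /* prints 800 */
    return 0;
}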
*/ bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1; - bss_info_changed |= BSS_CHANGED_DTIM_PERIOD; + bss_conf->beacon_rate = bss->beacon_rate; + bss_info_changed |= BSS_CHANGED_BEACON_INFO; } else { + bss_conf->beacon_rate = NULL; bss_conf->dtim_period = 0; } @@ -1842,7 +1823,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; u32 changed = 0; - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); if (WARN_ON_ONCE(tx && !frame_buf)) return; @@ -1930,6 +1911,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.chswitch_timer); sdata->vif.bss_conf.dtim_period = 0; + sdata->vif.bss_conf.beacon_rate = NULL; + + ifmgd->have_beacon = false; ifmgd->flags = 0; ieee80211_vif_release_channel(sdata); @@ -2051,7 +2035,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) } ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); - run_again(ifmgd, ifmgd->probe_timeout); + run_again(sdata, ifmgd->probe_timeout); if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ieee80211_flush_queues(sdata->local, sdata); } @@ -2065,7 +2049,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, if (!ieee80211_sdata_running(sdata)) return; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) goto out; @@ -2119,7 +2103,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, ifmgd->probe_send_count = 0; ieee80211_mgd_probe_ap_send(sdata); out: - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, @@ -2135,7 +2119,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return NULL; - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); if (ifmgd->associated) cbss = ifmgd->associated; @@ -2168,9 +2152,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; } @@ -2181,13 +2165,10 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) ieee80211_wake_queues_by_reason(&sdata->local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); - mutex_unlock(&ifmgd->mtx); - /* - * must be outside lock due to cfg80211, - * but that's not a problem. 
- */ - cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); + sdata_unlock(sdata); } static void ieee80211_beacon_connection_loss_work(struct work_struct *work) @@ -2254,7 +2235,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata, { struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data; - lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata); if (!assoc) { sta_info_destroy_addr(sdata, auth_data->bss->bssid); @@ -2295,27 +2276,26 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata, auth_data->key_idx, tx_flags); } -static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 bssid[ETH_ALEN]; u16 auth_alg, auth_transaction, status_code; struct sta_info *sta; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (len < 24 + 6) - return RX_MGMT_NONE; + return; if (!ifmgd->auth_data || ifmgd->auth_data->done) - return RX_MGMT_NONE; + return; memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN); if (!ether_addr_equal(bssid, mgmt->bssid)) - return RX_MGMT_NONE; + return; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); @@ -2327,14 +2307,15 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, auth_transaction, ifmgd->auth_data->expected_transaction); - return RX_MGMT_NONE; + return; } if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); ieee80211_destroy_auth_data(sdata, false); - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); + return; } switch (ifmgd->auth_data->algorithm) { @@ -2347,20 +2328,20 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, if (ifmgd->auth_data->expected_transaction != 4) { ieee80211_auth_challenge(sdata, mgmt, len); /* need another frame */ - return RX_MGMT_NONE; + return; } break; default: WARN_ONCE(1, "invalid auth alg %d", ifmgd->auth_data->algorithm); - return RX_MGMT_NONE; + return; } sdata_info(sdata, "authenticated\n"); ifmgd->auth_data->done = true; ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && ifmgd->auth_data->expected_transaction != 2) { @@ -2368,7 +2349,8 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, * Report auth frame to user space for processing since another * round of Authentication frames is still needed. 
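ieee80211_rx_mgmt_auth() above keeps its early `len < 24 + 6` check: the 24-byte management header plus the three fixed little-endian fields (algorithm, transaction sequence, status code) must be present before any of them is read. A hedged sketch of that validate-then-parse step on a raw frame buffer; the helper names are illustrative:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define MGMT_HDR_LEN 24   /* fc, duration, addr1-3, seq ctrl */

/* read a little-endian 16-bit field at an offset into the frame */
static uint16_t get_le16(const uint8_t *buf, size_t off)
{
    return (uint16_t)(buf[off] | (buf[off + 1] << 8));
}

/* returns 0 and fills the fixed fields, or -1 if the frame is too short */
static int parse_auth(const uint8_t *frame, size_t len,
                      uint16_t *alg, uint16_t *trans, uint16_t *status)
{
    if (len < MGMT_HDR_LEN + 6)   /* same bound as the mac80211 check */
        return -1;

    *alg    = get_le16(frame, MGMT_HDR_LEN + 0);
    *trans  = get_le16(frame, MGMT_HDR_LEN + 2);
    *status = get_le16(frame, MGMT_HDR_LEN + 4);
    return 0;
}

int main(void)
{
    uint8_t frame[30] = { 0 };
    uint16_t alg, trans, status;

    frame[24] = 0x00;  /* auth algorithm 0 = open system        */
    frame[26] = 0x02;  /* transaction sequence 2 (the response) */
    frame[28] = 0x00;  /* status 0 = success                    */

    if (!parse_auth(frame, sizeof(frame), &alg, &trans, &status))
        printf("alg %u trans %u status %u\n", alg, trans, status);
    return 0;
}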
*/ - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); + return; } /* move station state to auth */ @@ -2384,30 +2366,29 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, } mutex_unlock(&sdata->local->sta_mtx); - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); + return; out_err: mutex_unlock(&sdata->local->sta_mtx); /* ignore frame -- wait for timeout */ - return RX_MGMT_NONE; } -static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const u8 *bssid = NULL; u16 reason_code; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (len < 24 + 2) - return RX_MGMT_NONE; + return; if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return; bssid = ifmgd->associated->bssid; @@ -2418,25 +2399,24 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, 0, 0, false, NULL); - return RX_MGMT_CFG80211_DEAUTH; + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); } -static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 reason_code; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (len < 24 + 2) - return RX_MGMT_NONE; + return; if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return; reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); @@ -2445,7 +2425,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, 0, 0, false, NULL); - return RX_MGMT_CFG80211_DISASSOC; + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); } static void ieee80211_get_rates(struct ieee80211_supported_band *sband, @@ -2495,7 +2475,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, { struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; - lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata); if (!assoc) { sta_info_destroy_addr(sdata, assoc_data->bss->bssid); @@ -2749,10 +2729,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, return ret; } -static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len, - struct cfg80211_bss **bss) +static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; @@ -2760,13 +2739,14 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems elems; u8 *pos; bool reassoc; + struct cfg80211_bss *bss; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (!assoc_data) - return RX_MGMT_NONE; + return; if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid)) - return RX_MGMT_NONE; + return; /* * AssocResp and ReassocResp have identical structure, so process both @@ 
-2774,7 +2754,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, */ if (len < 24 + 6) - return RX_MGMT_NONE; + return; reassoc = ieee80211_is_reassoc_req(mgmt->frame_control); capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); @@ -2801,22 +2781,23 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, assoc_data->timeout = jiffies + msecs_to_jiffies(ms); assoc_data->timeout_started = true; if (ms > IEEE80211_ASSOC_TIMEOUT) - run_again(ifmgd, assoc_data->timeout); - return RX_MGMT_NONE; + run_again(sdata, assoc_data->timeout); + return; } - *bss = assoc_data->bss; + bss = assoc_data->bss; if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied association (code=%d)\n", mgmt->sa, status_code); ieee80211_destroy_assoc_data(sdata, false); } else { - if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { + if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) { /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, false); - cfg80211_put_bss(sdata->local->hw.wiphy, *bss); - return RX_MGMT_CFG80211_ASSOC_TIMEOUT; + cfg80211_put_bss(sdata->local->hw.wiphy, bss); + cfg80211_assoc_timeout(sdata->dev, mgmt->bssid); + return; } sdata_info(sdata, "associated\n"); @@ -2828,7 +2809,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, ieee80211_destroy_assoc_data(sdata, true); } - return RX_MGMT_CFG80211_RX_ASSOC; + cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len); } static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, @@ -2840,23 +2821,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, int freq; struct ieee80211_bss *bss; struct ieee80211_channel *channel; - bool need_ps = false; - - lockdep_assert_held(&sdata->u.mgd.mtx); - if ((sdata->u.mgd.associated && - ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) || - (sdata->u.mgd.assoc_data && - ether_addr_equal(mgmt->bssid, - sdata->u.mgd.assoc_data->bss->bssid))) { - /* not previously set so we may need to recalc */ - need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period; - - if (elems->tim && !elems->parse_error) { - const struct ieee80211_tim_ie *tim_ie = elems->tim; - sdata->u.mgd.dtim_period = tim_ie->dtim_period; - } - } + sdata_assert_lock(sdata); if (elems->ds_params) freq = ieee80211_channel_to_frequency(elems->ds_params[0], @@ -2871,19 +2837,15 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, channel); - if (bss) + if (bss) { ieee80211_rx_bss_put(local, bss); + sdata->vif.bss_conf.beacon_rate = bss->beacon_rate; + } if (!sdata->u.mgd.associated || !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) return; - if (need_ps) { - mutex_lock(&local->iflist_mtx); - ieee80211_recalc_ps(local, -1); - mutex_unlock(&local->iflist_mtx); - } - ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, elems, true); @@ -2901,7 +2863,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, ifmgd = &sdata->u.mgd; - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata); if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return; /* ignore ProbeResp to foreign address */ @@ -2926,7 +2888,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, ifmgd->auth_data->tries = 0; ifmgd->auth_data->timeout = jiffies; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, 
ifmgd->auth_data->timeout); } } @@ -2951,10 +2913,9 @@ static const u64 care_about_ies = (1ULL << WLAN_EID_HT_CAPABILITY) | (1ULL << WLAN_EID_HT_OPERATION); -static enum rx_mgmt_action -ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len, - u8 *deauth_buf, struct ieee80211_rx_status *rx_status) +static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; @@ -2969,24 +2930,25 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, u8 erp_value = 0; u32 ncrc; u8 *bssid; + u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); /* Process beacon from the current BSS */ baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; if (baselen > len) - return RX_MGMT_NONE; + return; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); - return RX_MGMT_NONE; + return; } if (rx_status->freq != chanctx_conf->def.chan->center_freq) { rcu_read_unlock(); - return RX_MGMT_NONE; + return; } chan = chanctx_conf->def.chan; rcu_read_unlock(); @@ -2997,7 +2959,11 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, len - baselen, false, &elems); ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems); - ifmgd->assoc_data->have_beacon = true; + if (elems.tim && !elems.parse_error) { + const struct ieee80211_tim_ie *tim_ie = elems.tim; + ifmgd->dtim_period = tim_ie->dtim_period; + } + ifmgd->have_beacon = true; ifmgd->assoc_data->need_beacon = false; if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) { sdata->vif.bss_conf.sync_tsf = @@ -3013,13 +2979,13 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, /* continue assoc process */ ifmgd->assoc_data->timeout = jiffies; ifmgd->assoc_data->timeout_started = true; - run_again(ifmgd, ifmgd->assoc_data->timeout); - return RX_MGMT_NONE; + run_again(sdata, ifmgd->assoc_data->timeout); + return; } if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return; bssid = ifmgd->associated->bssid; /* Track average RSSI from the Beacon frames of the current AP */ @@ -3165,7 +3131,7 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, } if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) - return RX_MGMT_NONE; + return; ifmgd->beacon_crc = ncrc; ifmgd->beacon_crc_valid = true; @@ -3179,7 +3145,7 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, * If we haven't had a beacon before, tell the driver about the * DTIM period (and beacon timing if desired) now. 
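The `ncrc == ifmgd->beacon_crc` test above is how the beacon handler skips reprocessing beacons whose tracked elements (care_about_ies) did not change: a CRC over those IEs is compared with the cached value, and only a mismatch triggers the full path. A simplified standalone version of that change-detection idea, using a generic bitwise CRC-32 rather than the kernel's crc32_le over parsed elements:

#include <stdint.h>
#include <stdio.h>

/* bitwise CRC-32 (reflected, poly 0xEDB88320); fine for a demo, slow for real use */
static uint32_t crc32_buf(const uint8_t *buf, size_t len)
{
    uint32_t crc = 0xffffffffu;
    size_t i;
    int b;

    for (i = 0; i < len; i++) {
        crc ^= buf[i];
        for (b = 0; b < 8; b++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
    }
    return ~crc;
}

struct beacon_state {
    uint32_t crc;
    int crc_valid;
};

/* returns 1 if the tracked IEs changed and need full processing */
static int beacon_changed(struct beacon_state *st, const uint8_t *ies, size_t len)
{
    uint32_t crc = crc32_buf(ies, len);

    if (st->crc_valid && crc == st->crc)
        return 0;              /* nothing of interest changed: skip */
    st->crc = crc;
    st->crc_valid = 1;
    return 1;
}

int main(void)
{
    struct beacon_state st = { 0, 0 };
    uint8_t ies[] = { 0x00, 0x04, 'm', 'e', 's', 'h' };  /* fake SSID element */

    printf("%d\n", beacon_changed(&st, ies, sizeof(ies))); /* 1: first beacon */
    printf("%d\n", beacon_changed(&st, ies, sizeof(ies))); /* 0: unchanged    */
    ies[2] = 'M';
    printf("%d\n", beacon_changed(&st, ies, sizeof(ies))); /* 1: IE changed   */
    return 0;
}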
*/ - if (!bss_conf->dtim_period) { + if (!ifmgd->have_beacon) { /* a few bogus AP send dtim_period = 0 or no TIM IE */ if (elems.tim) bss_conf->dtim_period = elems.tim->dtim_period ?: 1; @@ -3198,7 +3164,14 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.sync_dtim_count = 0; } - changed |= BSS_CHANGED_DTIM_PERIOD; + changed |= BSS_CHANGED_BEACON_INFO; + ifmgd->have_beacon = true; + + mutex_lock(&local->iflist_mtx); + ieee80211_recalc_ps(local, -1); + mutex_unlock(&local->iflist_mtx); + + ieee80211_recalc_ps_vif(sdata); } if (elems.erp_info) { @@ -3220,7 +3193,9 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, true, deauth_buf); - return RX_MGMT_CFG80211_TX_DEAUTH; + cfg80211_tx_mlme_mgmt(sdata->dev, deauth_buf, + sizeof(deauth_buf)); + return; } if (sta && elems.opmode_notif) @@ -3237,19 +3212,13 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, elems.pwr_constr_elem); ieee80211_bss_info_change_notify(sdata, changed); - - return RX_MGMT_NONE; } void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; - struct cfg80211_bss *bss = NULL; - enum rx_mgmt_action rma = RX_MGMT_NONE; - u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; u16 fc; struct ieee802_11_elems elems; int ies_len; @@ -3258,28 +3227,27 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: - rma = ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, - deauth_buf, rx_status); + ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_PROBE_RESP: ieee80211_rx_mgmt_probe_resp(sdata, skb); break; case IEEE80211_STYPE_AUTH: - rma = ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: - rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DISASSOC: - rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ASSOC_RESP: case IEEE80211_STYPE_REASSOC_RESP: - rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss); + ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { @@ -3325,34 +3293,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, } break; } - mutex_unlock(&ifmgd->mtx); - - switch (rma) { - case RX_MGMT_NONE: - /* no action */ - break; - case RX_MGMT_CFG80211_DEAUTH: - cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_DISASSOC: - cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_RX_AUTH: - cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_RX_ASSOC: - cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_ASSOC_TIMEOUT: - cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); - break; - case RX_MGMT_CFG80211_TX_DEAUTH: - cfg80211_send_deauth(sdata->dev, deauth_buf, - 
sizeof(deauth_buf)); - break; - default: - WARN(1, "unexpected: %d", rma); - } + sdata_unlock(sdata); } static void ieee80211_sta_timer(unsigned long data) @@ -3366,20 +3307,13 @@ static void ieee80211_sta_timer(unsigned long data) static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, u8 *bssid, u8 reason, bool tx) { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, tx, frame_buf); - mutex_unlock(&ifmgd->mtx); - - /* - * must be outside lock due to cfg80211, - * but that's not a problem. - */ - cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); - mutex_lock(&ifmgd->mtx); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); } static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) @@ -3389,7 +3323,7 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; u32 tx_flags = 0; - lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata); if (WARN_ON_ONCE(!auth_data)) return -EINVAL; @@ -3462,7 +3396,7 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) if (tx_flags == 0) { auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, auth_data->timeout); + run_again(sdata, auth_data->timeout); } else { auth_data->timeout_started = false; } @@ -3475,7 +3409,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; struct ieee80211_local *local = sdata->local; - lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata); assoc_data->tries++; if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { @@ -3499,7 +3433,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; assoc_data->timeout_started = true; - run_again(&sdata->u.mgd, assoc_data->timeout); + run_again(sdata, assoc_data->timeout); } else { assoc_data->timeout_started = false; } @@ -3524,7 +3458,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (ifmgd->status_received) { __le16 fc = ifmgd->status_fc; @@ -3536,7 +3470,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) if (status_acked) { ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); } else { ifmgd->auth_data->timeout = jiffies - 1; } @@ -3547,7 +3481,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) if (status_acked) { ifmgd->assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT; - run_again(ifmgd, ifmgd->assoc_data->timeout); + run_again(sdata, ifmgd->assoc_data->timeout); } else { ifmgd->assoc_data->timeout = jiffies - 1; } @@ -3570,17 +3504,14 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) ieee80211_destroy_auth_data(sdata, false); - mutex_unlock(&ifmgd->mtx); - cfg80211_send_auth_timeout(sdata->dev, bssid); - mutex_lock(&ifmgd->mtx); + cfg80211_auth_timeout(sdata->dev, bssid); } } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) - run_again(ifmgd, ifmgd->auth_data->timeout); + 
run_again(sdata, ifmgd->auth_data->timeout); if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && time_after(jiffies, ifmgd->assoc_data->timeout)) { - if ((ifmgd->assoc_data->need_beacon && - !ifmgd->assoc_data->have_beacon) || + if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) || ieee80211_do_assoc(sdata)) { u8 bssid[ETH_ALEN]; @@ -3588,12 +3519,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) ieee80211_destroy_assoc_data(sdata, false); - mutex_unlock(&ifmgd->mtx); - cfg80211_send_assoc_timeout(sdata->dev, bssid); - mutex_lock(&ifmgd->mtx); + cfg80211_assoc_timeout(sdata->dev, bssid); } } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) - run_again(ifmgd, ifmgd->assoc_data->timeout); + run_again(sdata, ifmgd->assoc_data->timeout); if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | IEEE80211_STA_CONNECTION_POLL) && @@ -3627,7 +3556,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) false); } } else if (time_is_after_jiffies(ifmgd->probe_timeout)) - run_again(ifmgd, ifmgd->probe_timeout); + run_again(sdata, ifmgd->probe_timeout); else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) { mlme_dbg(sdata, "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", @@ -3656,7 +3585,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) } } - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } static void ieee80211_sta_bcn_mon_timer(unsigned long data) @@ -3717,9 +3646,9 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; } @@ -3730,10 +3659,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) ifmgd->associated->bssid, WLAN_REASON_UNSPECIFIED, true); - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; } - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } #endif @@ -3765,8 +3694,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len; ifmgd->p2p_noa_index = -1; - mutex_init(&ifmgd->mtx); - if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC; else @@ -3923,6 +3850,12 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, */ ret = ieee80211_vif_use_channel(sdata, &chandef, IEEE80211_CHANCTX_SHARED); + + /* don't downgrade for 5 and 10 MHz channels, though. 
*/ + if (chandef.width == NL80211_CHAN_WIDTH_5 || + chandef.width == NL80211_CHAN_WIDTH_10) + return ret; + while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) { ifmgd->flags |= chandef_downgrade(&chandef); ret = ieee80211_vif_use_channel(sdata, &chandef, @@ -4122,8 +4055,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, /* try to authenticate/probe */ - mutex_lock(&ifmgd->mtx); - if ((ifmgd->auth_data && !ifmgd->auth_data->done) || ifmgd->assoc_data) { err = -EBUSY; @@ -4143,8 +4074,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, WLAN_REASON_UNSPECIFIED, false, frame_buf); - __cfg80211_send_deauth(sdata->dev, frame_buf, - sizeof(frame_buf)); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + sizeof(frame_buf)); } sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@ -4161,8 +4092,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, /* hold our own reference */ cfg80211_ref_bss(local->hw.wiphy, auth_data->bss); - err = 0; - goto out_unlock; + return 0; err_clear: memset(ifmgd->bssid, 0, ETH_ALEN); @@ -4170,9 +4100,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, ifmgd->auth_data = NULL; err_free: kfree(auth_data); - out_unlock: - mutex_unlock(&ifmgd->mtx); - return err; } @@ -4203,8 +4130,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, assoc_data->ssid_len = ssidie[1]; rcu_read_unlock(); - mutex_lock(&ifmgd->mtx); - if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; @@ -4212,8 +4137,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, WLAN_REASON_UNSPECIFIED, false, frame_buf); - __cfg80211_send_deauth(sdata->dev, frame_buf, - sizeof(frame_buf)); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + sizeof(frame_buf)); } if (ifmgd->auth_data && !ifmgd->auth_data->done) { @@ -4360,6 +4285,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->assoc_data = assoc_data; ifmgd->dtim_period = 0; + ifmgd->have_beacon = false; err = ieee80211_prep_connection(sdata, req->bss, true); if (err) @@ -4391,7 +4317,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->dtim_period = tim->dtim_period; dtim_count = tim->dtim_count; } - assoc_data->have_beacon = true; + ifmgd->have_beacon = true; assoc_data->timeout = jiffies; assoc_data->timeout_started = true; @@ -4407,7 +4333,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, } rcu_read_unlock(); - run_again(ifmgd, assoc_data->timeout); + run_again(sdata, assoc_data->timeout); if (bss->corrupt_data) { char *corrupt_type = "data"; @@ -4423,17 +4349,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, corrupt_type); } - err = 0; - goto out; + return 0; err_clear: memset(ifmgd->bssid, 0, ETH_ALEN); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->assoc_data = NULL; err_free: kfree(assoc_data); - out: - mutex_unlock(&ifmgd->mtx); - return err; } @@ -4445,8 +4367,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, bool tx = !req->local_state_change; bool report_frame = false; - mutex_lock(&ifmgd->mtx); - sdata_info(sdata, "deauthenticating from %pM by local choice (reason=%d)\n", req->bssid, req->reason_code); @@ -4458,7 +4378,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, req->reason_code, tx, frame_buf); ieee80211_destroy_auth_data(sdata, false); - mutex_unlock(&ifmgd->mtx); report_frame = true; goto out; @@ -4470,12 +4389,11 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, req->reason_code, tx, frame_buf); 
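ieee80211_prep_channel() above now returns early for 5 and 10 MHz definitions instead of entering the downgrade loop, which otherwise retries channel-context acquisition at progressively narrower widths until it reaches 20 MHz no-HT. A rough sketch of that retry-and-downgrade strategy; the width enum and the acquire function are stand-ins, not cfg80211 API:

#include <stdio.h>

enum chan_width { W_5, W_10, W_20_NOHT, W_20, W_40, W_80, W_160 };

/* stand-in for ieee80211_vif_use_channel(): pretend anything wider than 40 fails */
static int try_use_channel(enum chan_width w)
{
    return (w > W_40) ? -1 : 0;
}

/* one step narrower; mirrors chandef_downgrade() leaving 5/10 MHz alone */
static enum chan_width downgrade(enum chan_width w)
{
    switch (w) {
    case W_160: return W_80;
    case W_80:  return W_40;
    case W_40:  return W_20;
    case W_20:  return W_20_NOHT;
    default:    return w;   /* 5/10/20-noHT: nothing left to strip */
    }
}

static int prep_channel(enum chan_width w)
{
    int ret = try_use_channel(w);

    /* 5 and 10 MHz have no HT/VHT component to strip: take the result as-is */
    if (w == W_5 || w == W_10)
        return ret;

    while (ret && w != W_20_NOHT) {
        w = downgrade(w);
        ret = try_use_channel(w);
    }
    return ret;
}

int main(void)
{
    printf("80 MHz request -> %s\n", prep_channel(W_80) ? "failed" : "ok (downgraded)");
    printf("10 MHz request -> %s\n", prep_channel(W_10) ? "failed" : "ok");
    return 0;
}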
report_frame = true; } - mutex_unlock(&ifmgd->mtx); out: if (report_frame) - __cfg80211_send_deauth(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); return 0; } @@ -4487,18 +4405,14 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, u8 bssid[ETH_ALEN]; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; - mutex_lock(&ifmgd->mtx); - /* * cfg80211 should catch this ... but it's racy since * we can receive a disassoc frame, process it, hand it * to cfg80211 while that's in a locked section already * trying to tell us that the user wants to disconnect. */ - if (ifmgd->associated != req->bss) { - mutex_unlock(&ifmgd->mtx); + if (ifmgd->associated != req->bss) return -ENOLINK; - } sdata_info(sdata, "disassociating from %pM by local choice (reason=%d)\n", @@ -4508,10 +4422,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC, req->reason_code, !req->local_state_change, frame_buf); - mutex_unlock(&ifmgd->mtx); - __cfg80211_send_disassoc(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN); return 0; } @@ -4531,13 +4444,13 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) cancel_work_sync(&ifmgd->csa_connection_drop_work); cancel_work_sync(&ifmgd->chswitch_work); - mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, false); if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); del_timer_sync(&ifmgd->timer); - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index a02bef35b134..30d58d2d13e2 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -397,8 +397,14 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, return; /* if HT BSS, and we handle a data frame, also try HT rates */ - if (chan_width == NL80211_CHAN_WIDTH_20_NOHT) + switch (chan_width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: return; + default: + break; + } alt_rate.idx = 0; /* keep protection flags */ diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8e2952620256..23dbcfc69b3b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -258,6 +258,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, pos += 2; if (status->flag & RX_FLAG_HT) { + unsigned int stbc; + rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); *pos++ = local->hw.radiotap_mcs_details; *pos = 0; @@ -267,6 +269,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, *pos |= IEEE80211_RADIOTAP_MCS_BW_40; if (status->flag & RX_FLAG_HT_GF) *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; + stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT; + *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; pos++; *pos++ = status->rate_idx; } @@ -1372,6 +1376,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int i; if (!sta) return RX_CONTINUE; @@ -1422,6 +1427,19 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) ewma_add(&sta->avg_signal, -status->signal); } + if (status->chains) { + sta->chains = status->chains; + for (i = 0; i < 
ARRAY_SIZE(status->chain_signal); i++) { + int signal = status->chain_signal[i]; + + if (!(status->chains & BIT(i))) + continue; + + sta->chain_signal_last[i] = signal; + ewma_add(&sta->chain_signal_avg[i], -signal); + } + } + /* * Change STA power saving mode only at the end of a frame * exchange sequence. @@ -1608,7 +1626,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) entry->ccmp = 1; memcpy(entry->last_pn, rx->key->u.ccmp.rx_pn[queue], - CCMP_PN_LEN); + IEEE80211_CCMP_PN_LEN); } return RX_QUEUED; } @@ -1627,21 +1645,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) * (IEEE 802.11i, 8.3.3.4.5) */ if (entry->ccmp) { int i; - u8 pn[CCMP_PN_LEN], *rpn; + u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; int queue; if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP) return RX_DROP_UNUSABLE; - memcpy(pn, entry->last_pn, CCMP_PN_LEN); - for (i = CCMP_PN_LEN - 1; i >= 0; i--) { + memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); + for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { pn[i]++; if (pn[i]) break; } queue = rx->security_idx; rpn = rx->key->u.ccmp.rx_pn[queue]; - if (memcmp(pn, rpn, CCMP_PN_LEN)) + if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) return RX_DROP_UNUSABLE; - memcpy(entry->last_pn, pn, CCMP_PN_LEN); + memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); } skb_pull(rx->skb, ieee80211_hdrlen(fc)); @@ -1729,27 +1747,21 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) if (unlikely(!ieee80211_has_protected(fc) && ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && rx->key)) { - if (ieee80211_is_deauth(fc)) - cfg80211_send_unprot_deauth(rx->sdata->dev, - rx->skb->data, - rx->skb->len); - else if (ieee80211_is_disassoc(fc)) - cfg80211_send_unprot_disassoc(rx->sdata->dev, - rx->skb->data, - rx->skb->len); + if (ieee80211_is_deauth(fc) || + ieee80211_is_disassoc(fc)) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + rx->skb->data, + rx->skb->len); return -EACCES; } /* BIP does not use Protected field, so need to check MMIE */ if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && ieee80211_get_mmie_keyidx(rx->skb) < 0)) { - if (ieee80211_is_deauth(fc)) - cfg80211_send_unprot_deauth(rx->sdata->dev, - rx->skb->data, - rx->skb->len); - else if (ieee80211_is_disassoc(fc)) - cfg80211_send_unprot_disassoc(rx->sdata->dev, - rx->skb->data, - rx->skb->len); + if (ieee80211_is_deauth(fc) || + ieee80211_is_disassoc(fc)) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + rx->skb->data, + rx->skb->len); return -EACCES; } /* diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 99b103921a4b..1b122a79b0d8 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -140,6 +140,15 @@ ieee80211_bss_info_update(struct ieee80211_local *local, bss->valid_data |= IEEE80211_BSS_VALID_WMM; } + if (beacon) { + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[rx_status->band]; + if (!(rx_status->flag & RX_FLAG_HT) && + !(rx_status->flag & RX_FLAG_VHT)) + bss->beacon_rate = + &sband->bitrates[rx_status->rate_idx]; + } + return bss; } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 11216bc13b27..aaf68d297221 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -149,6 +149,7 @@ static void cleanup_single_sta(struct sta_info *sta) * directly by station destruction. 
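The new sta->chains / chain_signal_avg[] fields above keep an exponentially weighted moving average of the per-chain signal reported in the RX status (the kernel stores the negated dBm value in a struct ewma initialised with factor 1024 and weight 8). Below is a simplified floating-point EWMA that carries the same smoothing idea; it is not the kernel's fixed-point ewma API:

#include <stdio.h>

#define MAX_CHAINS 4

struct chain_stats {
    unsigned int chains;      /* bitmap of chains seen so far */
    double avg[MAX_CHAINS];   /* smoothed signal, dBm         */
    int last[MAX_CHAINS];     /* last raw sample, dBm         */
    int have_avg[MAX_CHAINS];
};

/* weight 8: a new sample contributes roughly 1/8, like ewma_init(..., 1024, 8) */
static void chain_signal_add(struct chain_stats *cs, unsigned int chain, int dbm)
{
    cs->chains |= 1u << chain;
    cs->last[chain] = dbm;
    if (!cs->have_avg[chain]) {
        cs->avg[chain] = dbm;
        cs->have_avg[chain] = 1;
    } else {
        cs->avg[chain] = (cs->avg[chain] * 7 + dbm) / 8.0;
    }
}

int main(void)
{
    struct chain_stats cs = { 0 };
    int samples[] = { -45, -47, -44, -60, -46 };
    unsigned int i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        chain_signal_add(&cs, 0, samples[i]);

    printf("chain 0: last %d dBm, avg %.1f dBm\n", cs.last[0], cs.avg[0]);
    return 0;
}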
*/ for (i = 0; i < IEEE80211_NUM_TIDS; i++) { + kfree(sta->ampdu_mlme.tid_start_tx[i]); tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); if (!tid_tx) continue; @@ -358,6 +359,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, do_posix_clock_monotonic_gettime(&uptime); sta->last_connected = uptime.tv_sec; ewma_init(&sta->avg_signal, 1024, 8); + for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++) + ewma_init(&sta->chain_signal_avg[i], 1024, 8); if (sta_prepare_rate_control(local, sta, gfp)) { kfree(sta); @@ -1130,6 +1133,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, * ends the poll/service period. */ info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_CTL_PS_RESPONSE | IEEE80211_TX_STATUS_EOSP | IEEE80211_TX_CTL_REQ_TX_STATUS; @@ -1267,7 +1271,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta, * STA may still remain is PS mode after this frame * exchange. */ - info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_CTL_PS_RESPONSE; /* * Use MoreData flag to indicate whether there are diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index adc30045f99e..4208dbd5861f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -203,6 +203,7 @@ struct tid_ampdu_rx { * driver requested to close until the work for it runs * @mtx: mutex to protect all TX data (except non-NULL assignments * to tid_tx[idx], which are protected by the sta spinlock) + * tid_start_tx is also protected by sta->lock. */ struct sta_ampdu_mlme { struct mutex mtx; @@ -297,6 +298,9 @@ struct sta_ampdu_mlme { * @rcu_head: RCU head used for freeing this station struct * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, * taken from HT/VHT capabilities or VHT operating mode notification + * @chains: chains ever used for RX from this station + * @chain_signal_last: last signal (per chain) + * @chain_signal_avg: signal average (per chain) */ struct sta_info { /* General information, mostly static */ @@ -344,6 +348,11 @@ struct sta_info { int last_signal; struct ewma avg_signal; int last_ack_signal; + + u8 chains; + s8 chain_signal_last[IEEE80211_MAX_CHAINS]; + struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS]; + /* Plus 1 for non-QoS frames */ __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 9972e07a2f96..4105d0ca963e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -398,13 +398,14 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) if (ieee80211_has_order(hdr->frame_control)) return TX_CONTINUE; + if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) + info->hw_queue = tx->sdata->vif.cab_queue; + /* no stations in PS mode */ if (!atomic_read(&ps->num_sta_ps)) return TX_CONTINUE; info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; - if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) - info->hw_queue = tx->sdata->vif.cab_queue; /* device releases frame after DTIM beacon */ if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) @@ -1789,12 +1790,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: - if (!sdata->u.mesh.mshcfg.dot11MeshTTL) { - /* Do not send frames with mesh_ttl == 0 */ - sdata->u.mesh.mshstats.dropped_frames_ttl++; - goto fail_rcu; - } - if (!is_multicast_ether_addr(skb->data)) { struct sta_info *next_hop; bool mpp_lookup = true; diff --git a/net/mac80211/util.c 
b/net/mac80211/util.c index 72e6292955bb..22654452a561 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -560,6 +560,9 @@ void ieee80211_iterate_active_interfaces( list_for_each_entry(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: + if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)) + continue; + break; case NL80211_IFTYPE_AP_VLAN: continue; default: @@ -598,6 +601,9 @@ void ieee80211_iterate_active_interfaces_atomic( list_for_each_entry_rcu(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: + if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)) + continue; + break; case NL80211_IFTYPE_AP_VLAN: continue; default: @@ -1072,32 +1078,6 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, ieee80211_set_wmm_default(sdata, true); } -u32 ieee80211_mandatory_rates(struct ieee80211_local *local, - enum ieee80211_band band) -{ - struct ieee80211_supported_band *sband; - struct ieee80211_rate *bitrates; - u32 mandatory_rates; - enum ieee80211_rate_flags mandatory_flag; - int i; - - sband = local->hw.wiphy->bands[band]; - if (WARN_ON(!sband)) - return 1; - - if (band == IEEE80211_BAND_2GHZ) - mandatory_flag = IEEE80211_RATE_MANDATORY_B; - else - mandatory_flag = IEEE80211_RATE_MANDATORY_A; - - bitrates = sband->bitrates; - mandatory_rates = 0; - for (i = 0; i < sband->n_bitrates; i++) - if (bitrates[i].flags & mandatory_flag) - mandatory_rates |= BIT(i); - return mandatory_rates; -} - void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, const u8 *extra, size_t extra_len, const u8 *da, @@ -1604,12 +1584,13 @@ int ieee80211_reconfig(struct ieee80211_local *local) BSS_CHANGED_ARP_FILTER | BSS_CHANGED_PS; - if (sdata->u.mgd.dtim_period) - changed |= BSS_CHANGED_DTIM_PERIOD; + /* Re-send beacon info report to the driver */ + if (sdata->u.mgd.have_beacon) + changed |= BSS_CHANGED_BEACON_INFO; - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); ieee80211_bss_info_change_notify(sdata, changed); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 171344d4eb7c..97c289414e32 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -396,7 +396,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, new_bw = ieee80211_sta_cur_vht_bw(sta); if (new_bw != sta->sta.bandwidth) { sta->sta.bandwidth = new_bw; - changed |= IEEE80211_RC_NSS_CHANGED; + changed |= IEEE80211_RC_BW_CHANGED; } change: diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index c04d401dae92..6ee2b5863572 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -28,7 +28,7 @@ int ieee80211_wep_init(struct ieee80211_local *local) { /* start WEP IV from a random value */ - get_random_bytes(&local->wep_iv, WEP_IV_LEN); + get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN); local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(local->wep_tx_tfm)) { @@ -98,20 +98,21 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); - if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN || - skb_headroom(skb) < WEP_IV_LEN)) + if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN || + skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) return NULL; hdrlen = ieee80211_hdrlen(hdr->frame_control); - newhdr = skb_push(skb, WEP_IV_LEN); - memmove(newhdr, newhdr + WEP_IV_LEN, 
hdrlen); + newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN); + memmove(newhdr, newhdr + IEEE80211_WEP_IV_LEN, hdrlen); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return newhdr + hdrlen; - skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN); + skb_set_network_header(skb, skb_network_offset(skb) + + IEEE80211_WEP_IV_LEN); ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); return newhdr + hdrlen; } @@ -125,8 +126,8 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local, unsigned int hdrlen; hdrlen = ieee80211_hdrlen(hdr->frame_control); - memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); - skb_pull(skb, WEP_IV_LEN); + memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_WEP_IV_LEN); } @@ -146,7 +147,7 @@ int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, put_unaligned(icv, (__le32 *)(data + data_len)); crypto_cipher_setkey(tfm, rc4key, klen); - for (i = 0; i < data_len + WEP_ICV_LEN; i++) + for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) crypto_cipher_encrypt_one(tfm, data + i, data + i); return 0; @@ -172,7 +173,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, if (!iv) return -1; - len = skb->len - (iv + WEP_IV_LEN - skb->data); + len = skb->len - (iv + IEEE80211_WEP_IV_LEN - skb->data); /* Prepend 24-bit IV to RC4 key */ memcpy(rc4key, iv, 3); @@ -181,10 +182,10 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, memcpy(rc4key + 3, key, keylen); /* Add room for ICV */ - skb_put(skb, WEP_ICV_LEN); + skb_put(skb, IEEE80211_WEP_ICV_LEN); return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, - iv + WEP_IV_LEN, len); + iv + IEEE80211_WEP_IV_LEN, len); } @@ -201,11 +202,11 @@ int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, return -1; crypto_cipher_setkey(tfm, rc4key, klen); - for (i = 0; i < data_len + WEP_ICV_LEN; i++) + for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) crypto_cipher_decrypt_one(tfm, data + i, data + i); crc = cpu_to_le32(~crc32_le(~0, data, data_len)); - if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0) + if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0) /* ICV mismatch */ return -1; @@ -237,10 +238,10 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local, return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control); - if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN) + if (skb->len < hdrlen + IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN) return -1; - len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN; + len = skb->len - hdrlen - IEEE80211_WEP_IV_LEN - IEEE80211_WEP_ICV_LEN; keyidx = skb->data[hdrlen + 3] >> 6; @@ -256,16 +257,16 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local, memcpy(rc4key + 3, key->conf.key, key->conf.keylen); if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, - skb->data + hdrlen + WEP_IV_LEN, - len)) + skb->data + hdrlen + + IEEE80211_WEP_IV_LEN, len)) ret = -1; /* Trim ICV */ - skb_trim(skb, skb->len - WEP_ICV_LEN); + skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); /* Remove IV */ - memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen); - skb_pull(skb, WEP_IV_LEN); + memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_WEP_IV_LEN); return ret; } @@ -305,13 +306,14 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) return 
RX_DROP_UNUSABLE; } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { - if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + WEP_IV_LEN)) + if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + + IEEE80211_WEP_IV_LEN)) return RX_DROP_UNUSABLE; if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key)) rx->sta->wep_weak_iv_count++; ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); /* remove ICV */ - if (pskb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN)) + if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) return RX_DROP_UNUSABLE; } diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index c7c6d644486f..c9edfcb7a13b 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -62,10 +62,10 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) tail = MICHAEL_MIC_LEN; if (!info->control.hw_key) - tail += TKIP_ICV_LEN; + tail += IEEE80211_TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || - skb_headroom(skb) < TKIP_IV_LEN)) + skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) return TX_DROP; key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; @@ -198,15 +198,16 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) if (info->control.hw_key) tail = 0; else - tail = TKIP_ICV_LEN; + tail = IEEE80211_TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || - skb_headroom(skb) < TKIP_IV_LEN)) + skb_headroom(skb) < IEEE80211_TKIP_IV_LEN)) return -1; - pos = skb_push(skb, TKIP_IV_LEN); - memmove(pos, pos + TKIP_IV_LEN, hdrlen); - skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN); + pos = skb_push(skb, IEEE80211_TKIP_IV_LEN); + memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen); + skb_set_network_header(skb, skb_network_offset(skb) + + IEEE80211_TKIP_IV_LEN); pos += hdrlen; /* the HW only needs room for the IV, but not the actual IV */ @@ -227,7 +228,7 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) return 0; /* Add room for ICV */ - skb_put(skb, TKIP_ICV_LEN); + skb_put(skb, IEEE80211_TKIP_ICV_LEN); return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, key, skb, pos, len); @@ -290,11 +291,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; /* Trim ICV */ - skb_trim(skb, skb->len - TKIP_ICV_LEN); + skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); /* Remove IV */ - memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); - skb_pull(skb, TKIP_IV_LEN); + memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_TKIP_IV_LEN); return RX_CONTINUE; } @@ -337,9 +338,9 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, else qos_tid = 0; - data_len = skb->len - hdrlen - CCMP_HDR_LEN; + data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN; if (encrypted) - data_len -= CCMP_MIC_LEN; + data_len -= IEEE80211_CCMP_MIC_LEN; /* First block, b_0 */ b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ @@ -348,7 +349,7 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, */ b_0[1] = qos_tid | (mgmt << 4); memcpy(&b_0[2], hdr->addr2, ETH_ALEN); - memcpy(&b_0[8], pn, CCMP_PN_LEN); + memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN); /* l(m) */ put_unaligned_be16(data_len, &b_0[14]); @@ -424,15 +425,16 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) if (info->control.hw_key) tail = 0; else - tail = CCMP_MIC_LEN; + tail = IEEE80211_CCMP_MIC_LEN; if (WARN_ON(skb_tailroom(skb) < tail || - skb_headroom(skb) < CCMP_HDR_LEN)) + skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN)) return -1; - pos 
= skb_push(skb, CCMP_HDR_LEN); - memmove(pos, pos + CCMP_HDR_LEN, hdrlen); - skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN); + pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN); + memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen); + skb_set_network_header(skb, skb_network_offset(skb) + + IEEE80211_CCMP_HDR_LEN); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && @@ -457,10 +459,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) if (info->control.hw_key) return 0; - pos += CCMP_HDR_LEN; + pos += IEEE80211_CCMP_HDR_LEN; ccmp_special_blocks(skb, pn, scratch, 0); ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, pos, len, - pos, skb_put(skb, CCMP_MIC_LEN)); + pos, skb_put(skb, IEEE80211_CCMP_MIC_LEN)); return 0; } @@ -490,7 +492,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - u8 pn[CCMP_PN_LEN]; + u8 pn[IEEE80211_CCMP_PN_LEN]; int data_len; int queue; @@ -500,12 +502,13 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) !ieee80211_is_robust_mgmt_frame(hdr)) return RX_CONTINUE; - data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; + data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - + IEEE80211_CCMP_MIC_LEN; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; if (status->flag & RX_FLAG_DECRYPTED) { - if (!pskb_may_pull(rx->skb, hdrlen + CCMP_HDR_LEN)) + if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; } else { if (skb_linearize(rx->skb)) @@ -516,7 +519,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) queue = rx->security_idx; - if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) { + if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) { key->u.ccmp.replays++; return RX_DROP_UNUSABLE; } @@ -528,19 +531,20 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) if (ieee80211_aes_ccm_decrypt( key->u.ccmp.tfm, scratch, - skb->data + hdrlen + CCMP_HDR_LEN, data_len, - skb->data + skb->len - CCMP_MIC_LEN, - skb->data + hdrlen + CCMP_HDR_LEN)) + skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN, + data_len, + skb->data + skb->len - IEEE80211_CCMP_MIC_LEN, + skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; } - memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); + memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); /* Remove CCMP header and MIC */ - if (pskb_trim(skb, skb->len - CCMP_MIC_LEN)) + if (pskb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN)) return RX_DROP_UNUSABLE; - memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); - skb_pull(skb, CCMP_HDR_LEN); + memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen); + skb_pull(skb, IEEE80211_CCMP_HDR_LEN); return RX_CONTINUE; } diff --git a/net/socket.c b/net/socket.c index b416093997da..6b94633ca61d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2412,7 +2412,7 @@ static const unsigned char nargs[21] = { SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) { - unsigned long a[6]; + unsigned long a[AUDITSC_ARGS]; unsigned long a0, a1; int err; unsigned int len; @@ -2428,7 +2428,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) if (copy_from_user(a, args, len)) return -EFAULT; - audit_socketcall(nargs[call] / sizeof(unsigned long), a); + err = audit_socketcall(nargs[call] / sizeof(unsigned long), a); + if (err) + return err; a0 = 
a[0]; a1 = a[1]; diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index 5c4c61d527e2..357f613df7ff 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c @@ -21,16 +21,6 @@ #include <linux/sunrpc/svcauth.h> #include "gss_rpc_xdr.h" -static bool gssx_check_pointer(struct xdr_stream *xdr) -{ - __be32 *p; - - p = xdr_reserve_space(xdr, 4); - if (unlikely(p == NULL)) - return -ENOSPC; - return *p?true:false; -} - static int gssx_enc_bool(struct xdr_stream *xdr, int v) { __be32 *p; @@ -264,25 +254,27 @@ static int gssx_dec_option_array(struct xdr_stream *xdr, if (unlikely(p == NULL)) return -ENOSPC; count = be32_to_cpup(p++); - if (count != 0) { - /* we recognize only 1 currently: CREDS_VALUE */ - oa->count = 1; + if (!count) + return 0; - oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL); - if (!oa->data) - return -ENOMEM; + /* we recognize only 1 currently: CREDS_VALUE */ + oa->count = 1; - creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); - if (!creds) { - kfree(oa->data); - return -ENOMEM; - } + oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL); + if (!oa->data) + return -ENOMEM; - oa->data[0].option.data = CREDS_VALUE; - oa->data[0].option.len = sizeof(CREDS_VALUE); - oa->data[0].value.data = (void *)creds; - oa->data[0].value.len = 0; + creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); + if (!creds) { + kfree(oa->data); + return -ENOMEM; } + + oa->data[0].option.data = CREDS_VALUE; + oa->data[0].option.len = sizeof(CREDS_VALUE); + oa->data[0].value.data = (void *)creds; + oa->data[0].value.len = 0; + for (i = 0; i < count; i++) { gssx_buffer dummy = { 0, NULL }; u32 length; @@ -800,6 +792,7 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct gssx_res_accept_sec_context *res) { + u32 value_follows; int err; /* res->status */ @@ -808,7 +801,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, return err; /* res->context_handle */ - if (gssx_check_pointer(xdr)) { + err = gssx_dec_bool(xdr, &value_follows); + if (err) + return err; + if (value_follows) { err = gssx_dec_ctx(xdr, res->context_handle); if (err) return err; @@ -817,7 +813,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, } /* res->output_token */ - if (gssx_check_pointer(xdr)) { + err = gssx_dec_bool(xdr, &value_follows); + if (err) + return err; + if (value_follows) { err = gssx_dec_buffer(xdr, res->output_token); if (err) return err; @@ -826,7 +825,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, } /* res->delegated_cred_handle */ - if (gssx_check_pointer(xdr)) { + err = gssx_dec_bool(xdr, &value_follows); + if (err) + return err; + if (value_follows) { /* we do not support upcall servers sending this data. 
*/ return -EINVAL; } diff --git a/net/wireless/chan.c b/net/wireless/chan.c index fd556ac05fdb..50f6195c8b70 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -54,6 +54,8 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) control_freq = chandef->chan->center_freq; switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: if (chandef->center_freq1 != control_freq) @@ -152,6 +154,12 @@ static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) int width; switch (c->width) { + case NL80211_CHAN_WIDTH_5: + width = 5; + break; + case NL80211_CHAN_WIDTH_10: + width = 10; + break; case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: width = 20; @@ -194,6 +202,16 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1, if (c1->width == c2->width) return NULL; + /* + * can't be compatible if one of them is 5 or 10 MHz, + * but they don't have the same width. + */ + if (c1->width == NL80211_CHAN_WIDTH_5 || + c1->width == NL80211_CHAN_WIDTH_10 || + c2->width == NL80211_CHAN_WIDTH_5 || + c2->width == NL80211_CHAN_WIDTH_10) + return NULL; + if (c1->width == NL80211_CHAN_WIDTH_20_NOHT || c1->width == NL80211_CHAN_WIDTH_20) return c2; @@ -264,11 +282,17 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy, u32 bandwidth) { struct ieee80211_channel *c; - u32 freq; + u32 freq, start_freq, end_freq; + + if (bandwidth <= 20) { + start_freq = center_freq; + end_freq = center_freq; + } else { + start_freq = center_freq - bandwidth/2 + 10; + end_freq = center_freq + bandwidth/2 - 10; + } - for (freq = center_freq - bandwidth/2 + 10; - freq <= center_freq + bandwidth/2 - 10; - freq += 20) { + for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c) return -EINVAL; @@ -310,11 +334,17 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, u32 prohibited_flags) { struct ieee80211_channel *c; - u32 freq; + u32 freq, start_freq, end_freq; + + if (bandwidth <= 20) { + start_freq = center_freq; + end_freq = center_freq; + } else { + start_freq = center_freq - bandwidth/2 + 10; + end_freq = center_freq + bandwidth/2 - 10; + } - for (freq = center_freq - bandwidth/2 + 10; - freq <= center_freq + bandwidth/2 - 10; - freq += 20) { + for (freq = start_freq; freq <= end_freq; freq += 20) { c = ieee80211_get_channel(wiphy, freq); if (!c) return false; @@ -349,6 +379,12 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, control_freq = chandef->chan->center_freq; switch (chandef->width) { + case NL80211_CHAN_WIDTH_5: + width = 5; + break; + case NL80211_CHAN_WIDTH_10: + width = 10; + break; case NL80211_CHAN_WIDTH_20: if (!ht_cap->ht_supported) return false; @@ -405,6 +441,11 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, if (width > 20) prohibited_flags |= IEEE80211_CHAN_NO_OFDM; + /* 5 and 10 MHz are only defined for the OFDM PHY */ + if (width < 20) + prohibited_flags |= IEEE80211_CHAN_NO_OFDM; + + if (!cfg80211_secondary_chans_ok(wiphy, chandef->center_freq1, width, prohibited_flags)) return false; diff --git a/net/wireless/core.c b/net/wireless/core.c index 73405e00c800..4224e7554a76 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -34,13 +34,12 @@ MODULE_AUTHOR("Johannes Berg"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("wireless configuration support"); +MODULE_ALIAS_GENL_FAMILY(NL80211_GENL_NAME); -/* RCU-protected (and cfg80211_mutex for writers) */ +/* RCU-protected (and RTNL for 
writers) */ LIST_HEAD(cfg80211_rdev_list); int cfg80211_rdev_list_generation; -DEFINE_MUTEX(cfg80211_mutex); - /* for debugfs */ static struct dentry *ieee80211_debugfs_dir; @@ -52,12 +51,11 @@ module_param(cfg80211_disable_40mhz_24ghz, bool, 0644); MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz, "Disable 40MHz support in the 2.4GHz band"); -/* requires cfg80211_mutex to be held! */ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) { struct cfg80211_registered_device *result = NULL, *rdev; - assert_cfg80211_lock(); + ASSERT_RTNL(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (rdev->wiphy_idx == wiphy_idx) { @@ -76,12 +74,11 @@ int get_wiphy_idx(struct wiphy *wiphy) return rdev->wiphy_idx; } -/* requires cfg80211_rdev_mutex to be held! */ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) { struct cfg80211_registered_device *rdev; - assert_cfg80211_lock(); + ASSERT_RTNL(); rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx); if (!rdev) @@ -89,35 +86,13 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) return &rdev->wiphy; } -struct cfg80211_registered_device * -cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) -{ - struct cfg80211_registered_device *rdev = ERR_PTR(-ENODEV); - struct net_device *dev; - - mutex_lock(&cfg80211_mutex); - dev = dev_get_by_index(net, ifindex); - if (!dev) - goto out; - if (dev->ieee80211_ptr) { - rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); - mutex_lock(&rdev->mtx); - } else - rdev = ERR_PTR(-ENODEV); - dev_put(dev); - out: - mutex_unlock(&cfg80211_mutex); - return rdev; -} - -/* requires cfg80211_mutex to be held */ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, char *newname) { struct cfg80211_registered_device *rdev2; int wiphy_idx, taken = -1, result, digits; - assert_cfg80211_lock(); + ASSERT_RTNL(); /* prohibit calling the thing phy%d when %d is not its number */ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); @@ -215,8 +190,7 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { - lockdep_assert_held(&rdev->devlist_mtx); - lockdep_assert_held(&rdev->sched_scan_mtx); + ASSERT_RTNL(); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) return; @@ -230,18 +204,15 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, rdev->opencount--; if (rdev->scan_req && rdev->scan_req->wdev == wdev) { - bool busy = work_busy(&rdev->scan_done_wk); - /* - * If the work isn't pending or running (in which case it would - * be waiting for the lock we hold) the driver didn't properly - * cancel the scan when the interface was removed. In this case - * warn and leak the scan request object to not crash later. + * If the scan request wasn't notified as done, set it + * to aborted and leak it after a warning. The driver + * should have notified us that it ended at the latest + * during rdev_stop_p2p_device(). 
*/ - WARN_ON(!busy); - - rdev->scan_req->aborted = true; - ___cfg80211_scan_done(rdev, !busy); + if (WARN_ON(!rdev->scan_req->notified)) + rdev->scan_req->aborted = true; + ___cfg80211_scan_done(rdev, !rdev->scan_req->notified); } } @@ -255,8 +226,6 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) rtnl_lock(); - /* read-only iteration need not hold the devlist_mtx */ - list_for_each_entry(wdev, &rdev->wdev_list, list) { if (wdev->netdev) { dev_close(wdev->netdev); @@ -265,12 +234,7 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) /* otherwise, check iftype */ switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: - /* but this requires it */ - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); cfg80211_stop_p2p_device(rdev, wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); break; default: break; @@ -298,10 +262,7 @@ static void cfg80211_event_work(struct work_struct *work) event_work); rtnl_lock(); - cfg80211_lock_rdev(rdev); - cfg80211_process_rdev_events(rdev); - cfg80211_unlock_rdev(rdev); rtnl_unlock(); } @@ -309,7 +270,7 @@ static void cfg80211_event_work(struct work_struct *work) struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) { - static int wiphy_counter; + static atomic_t wiphy_counter = ATOMIC_INIT(0); struct cfg80211_registered_device *rdev; int alloc_size; @@ -331,26 +292,21 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) rdev->ops = ops; - mutex_lock(&cfg80211_mutex); - - rdev->wiphy_idx = wiphy_counter++; + rdev->wiphy_idx = atomic_inc_return(&wiphy_counter); if (unlikely(rdev->wiphy_idx < 0)) { - wiphy_counter--; - mutex_unlock(&cfg80211_mutex); /* ugh, wrapped! */ + atomic_dec(&wiphy_counter); kfree(rdev); return NULL; } - mutex_unlock(&cfg80211_mutex); + /* atomic_inc_return makes it start at 1, make it start at 0 */ + rdev->wiphy_idx--; /* give it a proper name */ dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); - mutex_init(&rdev->mtx); - mutex_init(&rdev->devlist_mtx); - mutex_init(&rdev->sched_scan_mtx); INIT_LIST_HEAD(&rdev->wdev_list); INIT_LIST_HEAD(&rdev->beacon_registrations); spin_lock_init(&rdev->beacon_registrations_lock); @@ -496,8 +452,13 @@ int wiphy_register(struct wiphy *wiphy) u16 ifmodes = wiphy->interface_modes; #ifdef CONFIG_PM - if (WARN_ON((wiphy->wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && - !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) + if (WARN_ON(wiphy->wowlan && + (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && + !(wiphy->wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) + return -EINVAL; + if (WARN_ON(wiphy->wowlan && + !wiphy->wowlan->flags && !wiphy->wowlan->n_patterns && + !wiphy->wowlan->tcp)) return -EINVAL; #endif @@ -587,25 +548,28 @@ int wiphy_register(struct wiphy *wiphy) } #ifdef CONFIG_PM - if (rdev->wiphy.wowlan.n_patterns) { - if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len || - rdev->wiphy.wowlan.pattern_min_len > - rdev->wiphy.wowlan.pattern_max_len)) - return -EINVAL; - } + if (WARN_ON(rdev->wiphy.wowlan && rdev->wiphy.wowlan->n_patterns && + (!rdev->wiphy.wowlan->pattern_min_len || + rdev->wiphy.wowlan->pattern_min_len > + rdev->wiphy.wowlan->pattern_max_len))) + return -EINVAL; #endif /* check and set up bitrates */ ieee80211_set_bitrate_flags(wiphy); - mutex_lock(&cfg80211_mutex); res = device_add(&rdev->wiphy.dev); + if (res) + return res; + + res = rfkill_register(rdev->rfkill); if (res) { - mutex_unlock(&cfg80211_mutex); + 
device_del(&rdev->wiphy.dev); return res; } + rtnl_lock(); /* set up regulatory info */ wiphy_regulatory_register(wiphy); @@ -631,25 +595,7 @@ int wiphy_register(struct wiphy *wiphy) } cfg80211_debugfs_rdev_add(rdev); - mutex_unlock(&cfg80211_mutex); - - /* - * due to a locking dependency this has to be outside of the - * cfg80211_mutex lock - */ - res = rfkill_register(rdev->rfkill); - if (res) { - device_del(&rdev->wiphy.dev); - - mutex_lock(&cfg80211_mutex); - debugfs_remove_recursive(rdev->wiphy.debugfsdir); - list_del_rcu(&rdev->list); - wiphy_regulatory_deregister(wiphy); - mutex_unlock(&cfg80211_mutex); - return res; - } - rtnl_lock(); rdev->wiphy.registered = true; rtnl_unlock(); return 0; @@ -679,25 +625,19 @@ void wiphy_unregister(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); - rtnl_lock(); - rdev->wiphy.registered = false; - rtnl_unlock(); - - rfkill_unregister(rdev->rfkill); - - /* protect the device list */ - mutex_lock(&cfg80211_mutex); - wait_event(rdev->dev_wait, ({ int __count; - mutex_lock(&rdev->devlist_mtx); + rtnl_lock(); __count = rdev->opencount; - mutex_unlock(&rdev->devlist_mtx); + rtnl_unlock(); __count == 0; })); - mutex_lock(&rdev->devlist_mtx); + rfkill_unregister(rdev->rfkill); + + rtnl_lock(); + rdev->wiphy.registered = false; + BUG_ON(!list_empty(&rdev->wdev_list)); - mutex_unlock(&rdev->devlist_mtx); /* * First remove the hardware from everywhere, this makes @@ -708,20 +648,6 @@ void wiphy_unregister(struct wiphy *wiphy) synchronize_rcu(); /* - * Try to grab rdev->mtx. If a command is still in progress, - * hopefully the driver will refuse it since it's tearing - * down the device already. We wait for this command to complete - * before unlinking the item from the list. - * Note: as codified by the BUG_ON above we cannot get here if - * a virtual interface is still present. Hence, we can only get - * to lock contention here if userspace issues a command that - * identified the hardware by wiphy index. 
- */ - cfg80211_lock_rdev(rdev); - /* nothing */ - cfg80211_unlock_rdev(rdev); - - /* * If this device got a regulatory hint tell core its * free to listen now to a new shiny device regulatory hint */ @@ -730,15 +656,17 @@ void wiphy_unregister(struct wiphy *wiphy) cfg80211_rdev_list_generation++; device_del(&rdev->wiphy.dev); - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); flush_work(&rdev->scan_done_wk); cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); - if (rdev->wowlan && rdev->ops->set_wakeup) +#ifdef CONFIG_PM + if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) rdev_set_wakeup(rdev, false); +#endif cfg80211_rdev_free_wowlan(rdev); } EXPORT_SYMBOL(wiphy_unregister); @@ -748,9 +676,6 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) struct cfg80211_internal_bss *scan, *tmp; struct cfg80211_beacon_registration *reg, *treg; rfkill_destroy(rdev->rfkill); - mutex_destroy(&rdev->mtx); - mutex_destroy(&rdev->devlist_mtx); - mutex_destroy(&rdev->sched_scan_mtx); list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { list_del(&reg->list); kfree(reg); } @@ -775,36 +700,6 @@ void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) } EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); -static void wdev_cleanup_work(struct work_struct *work) -{ - struct wireless_dev *wdev; - struct cfg80211_registered_device *rdev; - - wdev = container_of(work, struct wireless_dev, cleanup_work); - rdev = wiphy_to_dev(wdev->wiphy); - - mutex_lock(&rdev->sched_scan_mtx); - - if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { - rdev->scan_req->aborted = true; - ___cfg80211_scan_done(rdev, true); - } - - if (WARN_ON(rdev->sched_scan_req && - rdev->sched_scan_req->dev == wdev->netdev)) { - __cfg80211_stop_sched_scan(rdev, false); - } - - mutex_unlock(&rdev->sched_scan_mtx); - - mutex_lock(&rdev->devlist_mtx); - rdev->opencount--; - mutex_unlock(&rdev->devlist_mtx); - wake_up(&rdev->dev_wait); - - dev_put(wdev->netdev); -} - void cfg80211_unregister_wdev(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); @@ -814,8 +709,6 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) if (WARN_ON(wdev->netdev)) return; - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); list_del_rcu(&wdev->list); rdev->devlist_generation++; @@ -827,8 +720,6 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) WARN_ON_ONCE(1); break; } - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); } EXPORT_SYMBOL(cfg80211_unregister_wdev); @@ -847,7 +738,7 @@ void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, } void cfg80211_leave(struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev) + struct wireless_dev *wdev) { struct net_device *dev = wdev->netdev; @@ -857,9 +748,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: - mutex_lock(&rdev->sched_scan_mtx); __cfg80211_stop_sched_scan(rdev, false); - mutex_unlock(&rdev->sched_scan_mtx); wdev_lock(wdev); #ifdef CONFIG_CFG80211_WEXT @@ -868,8 +757,8 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, wdev->wext.ie_len = 0; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; #endif - __cfg80211_disconnect(rdev, dev, - WLAN_REASON_DEAUTH_LEAVING, true); + cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, true); wdev_unlock(wdev); break; case 
NL80211_IFTYPE_MESH_POINT: @@ -912,13 +801,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, * are added with nl80211. */ mutex_init(&wdev->mtx); - INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work); INIT_LIST_HEAD(&wdev->event_list); spin_lock_init(&wdev->event_lock); INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock); - mutex_lock(&rdev->devlist_mtx); wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wdev_list); rdev->devlist_generation++; @@ -930,8 +817,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, pr_err("failed to add phy80211 symlink to netdev!\n"); } wdev->netdev = dev; - wdev->sme_state = CFG80211_SME_IDLE; - mutex_unlock(&rdev->devlist_mtx); #ifdef CONFIG_CFG80211_WEXT wdev->wext.default_key = -1; wdev->wext.default_mgmt_key = -1; @@ -957,26 +842,22 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, break; case NETDEV_DOWN: cfg80211_update_iface_num(rdev, wdev->iftype, -1); - dev_hold(dev); - queue_work(cfg80211_wq, &wdev->cleanup_work); + if (rdev->scan_req && rdev->scan_req->wdev == wdev) { + if (WARN_ON(!rdev->scan_req->notified)) + rdev->scan_req->aborted = true; + ___cfg80211_scan_done(rdev, true); + } + + if (WARN_ON(rdev->sched_scan_req && + rdev->sched_scan_req->dev == wdev->netdev)) { + __cfg80211_stop_sched_scan(rdev, false); + } + + rdev->opencount--; + wake_up(&rdev->dev_wait); break; case NETDEV_UP: - /* - * If we have a really quick DOWN/UP succession we may - * have this work still pending ... cancel it and see - * if it was pending, in which case we need to account - * for some of the work it would have done. - */ - if (cancel_work_sync(&wdev->cleanup_work)) { - mutex_lock(&rdev->devlist_mtx); - rdev->opencount--; - mutex_unlock(&rdev->devlist_mtx); - dev_put(dev); - } cfg80211_update_iface_num(rdev, wdev->iftype, 1); - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); switch (wdev->iftype) { #ifdef CONFIG_CFG80211_WEXT @@ -1008,10 +889,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, break; } wdev_unlock(wdev); - mutex_unlock(&rdev->sched_scan_mtx); rdev->opencount++; - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); /* * Configure power management to the driver here so that its @@ -1028,12 +906,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, break; case NETDEV_UNREGISTER: /* - * NB: cannot take rdev->mtx here because this may be - * called within code protected by it when interfaces - * are removed with nl80211. - */ - mutex_lock(&rdev->devlist_mtx); - /* * It is possible to get NETDEV_UNREGISTER * multiple times. 
To detect that, check * that the interface is still on the list @@ -1049,7 +921,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, kfree(wdev->wext.keys); #endif } - mutex_unlock(&rdev->devlist_mtx); /* * synchronise (so that we won't find this netdev * from other code any more) and then clear the list @@ -1069,9 +940,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, return notifier_from_errno(-EOPNOTSUPP); if (rfkill_blocked(rdev->rfkill)) return notifier_from_errno(-ERFKILL); - mutex_lock(&rdev->devlist_mtx); ret = cfg80211_can_add_interface(rdev, wdev->iftype); - mutex_unlock(&rdev->devlist_mtx); if (ret) return notifier_from_errno(ret); break; @@ -1089,12 +958,10 @@ static void __net_exit cfg80211_pernet_exit(struct net *net) struct cfg80211_registered_device *rdev; rtnl_lock(); - mutex_lock(&cfg80211_mutex); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (net_eq(wiphy_net(&rdev->wiphy), net)) WARN_ON(cfg80211_switch_netns(rdev, &init_net)); } - mutex_unlock(&cfg80211_mutex); rtnl_unlock(); } diff --git a/net/wireless/core.h b/net/wireless/core.h index fd35dae547c4..a6b45bf00f33 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -5,7 +5,6 @@ */ #ifndef __NET_WIRELESS_CORE_H #define __NET_WIRELESS_CORE_H -#include <linux/mutex.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/rbtree.h> @@ -23,11 +22,6 @@ struct cfg80211_registered_device { const struct cfg80211_ops *ops; struct list_head list; - /* we hold this mutex during any call so that - * we cannot do multiple calls at once, and also - * to avoid the deregister call to proceed while - * any call is in progress */ - struct mutex mtx; /* rfkill support */ struct rfkill_ops rfkill_ops; @@ -49,9 +43,7 @@ struct cfg80211_registered_device { /* wiphy index, internal only */ int wiphy_idx; - /* associated wireless interfaces */ - struct mutex devlist_mtx; - /* protected by devlist_mtx or RCU */ + /* associated wireless interfaces, protected by rtnl or RCU */ struct list_head wdev_list; int devlist_generation, wdev_id; int opencount; /* also protected by devlist_mtx */ @@ -75,8 +67,6 @@ struct cfg80211_registered_device { struct work_struct scan_done_wk; struct work_struct sched_scan_results_wk; - struct mutex sched_scan_mtx; - #ifdef CONFIG_NL80211_TESTMODE struct genl_info *testmode_info; #endif @@ -84,8 +74,6 @@ struct cfg80211_registered_device { struct work_struct conn_work; struct work_struct event_work; - struct cfg80211_wowlan *wowlan; - struct delayed_work dfs_update_channels_wk; /* netlink port which started critical protocol (0 means not started) */ @@ -106,29 +94,26 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy) static inline void cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev) { +#ifdef CONFIG_PM int i; - if (!rdev->wowlan) + if (!rdev->wiphy.wowlan_config) return; - for (i = 0; i < rdev->wowlan->n_patterns; i++) - kfree(rdev->wowlan->patterns[i].mask); - kfree(rdev->wowlan->patterns); - if (rdev->wowlan->tcp && rdev->wowlan->tcp->sock) - sock_release(rdev->wowlan->tcp->sock); - kfree(rdev->wowlan->tcp); - kfree(rdev->wowlan); + for (i = 0; i < rdev->wiphy.wowlan_config->n_patterns; i++) + kfree(rdev->wiphy.wowlan_config->patterns[i].mask); + kfree(rdev->wiphy.wowlan_config->patterns); + if (rdev->wiphy.wowlan_config->tcp && + rdev->wiphy.wowlan_config->tcp->sock) + sock_release(rdev->wiphy.wowlan_config->tcp->sock); + kfree(rdev->wiphy.wowlan_config->tcp); + kfree(rdev->wiphy.wowlan_config); 
+#endif } extern struct workqueue_struct *cfg80211_wq; -extern struct mutex cfg80211_mutex; extern struct list_head cfg80211_rdev_list; extern int cfg80211_rdev_list_generation; -static inline void assert_cfg80211_lock(void) -{ - lockdep_assert_held(&cfg80211_mutex); -} - struct cfg80211_internal_bss { struct list_head list; struct list_head hidden_list; @@ -161,27 +146,11 @@ static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss) struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx); int get_wiphy_idx(struct wiphy *wiphy); -/* requires cfg80211_rdev_mutex to be held! */ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); -/* identical to cfg80211_get_dev_from_info but only operate on ifindex */ -extern struct cfg80211_registered_device * -cfg80211_get_dev_from_ifindex(struct net *net, int ifindex); - int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net); -static inline void cfg80211_lock_rdev(struct cfg80211_registered_device *rdev) -{ - mutex_lock(&rdev->mtx); -} - -static inline void cfg80211_unlock_rdev(struct cfg80211_registered_device *rdev) -{ - BUG_ON(IS_ERR(rdev) || !rdev); - mutex_unlock(&rdev->mtx); -} - static inline void wdev_lock(struct wireless_dev *wdev) __acquires(wdev) { @@ -196,7 +165,7 @@ static inline void wdev_unlock(struct wireless_dev *wdev) mutex_unlock(&wdev->mtx); } -#define ASSERT_RDEV_LOCK(rdev) lockdep_assert_held(&(rdev)->mtx) +#define ASSERT_RDEV_LOCK(rdev) ASSERT_RTNL() #define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx) static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev) @@ -314,38 +283,21 @@ int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev); /* MLME */ -int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, - struct net_device *dev, - struct ieee80211_channel *chan, - enum nl80211_auth_type auth_type, - const u8 *bssid, - const u8 *ssid, int ssid_len, - const u8 *ie, int ie_len, - const u8 *key, int key_len, int key_idx, - const u8 *sae_data, int sae_data_len); int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, - struct net_device *dev, struct ieee80211_channel *chan, - enum nl80211_auth_type auth_type, const u8 *bssid, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_auth_type auth_type, + const u8 *bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, const u8 *key, int key_len, int key_idx, const u8 *sae_data, int sae_data_len); -int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, - struct net_device *dev, - struct ieee80211_channel *chan, - const u8 *bssid, - const u8 *ssid, int ssid_len, - struct cfg80211_assoc_request *req); int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, const u8 *bssid, const u8 *ssid, int ssid_len, struct cfg80211_assoc_request *req); -int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, - struct net_device *dev, const u8 *bssid, - const u8 *ie, int ie_len, u16 reason, - bool local_state_change); int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *bssid, const u8 *ie, int ie_len, u16 reason, @@ -356,11 +308,6 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, bool local_state_change); void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, struct net_device *dev); -void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, - const u8 
*req_ie, size_t req_ie_len, - const u8 *resp_ie, size_t resp_ie_len, - u16 status, bool wextev, - struct cfg80211_bss *bss); int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid, u16 frame_type, const u8 *match_data, int match_len); @@ -376,19 +323,19 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, const struct ieee80211_vht_cap *vht_capa_mask); -/* SME */ -int __cfg80211_connect(struct cfg80211_registered_device *rdev, - struct net_device *dev, - struct cfg80211_connect_params *connect, - struct cfg80211_cached_keys *connkeys, - const u8 *prev_bssid); +/* SME events */ int cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, - struct cfg80211_cached_keys *connkeys); -int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, - struct net_device *dev, u16 reason, - bool wextev); + struct cfg80211_cached_keys *connkeys, + const u8 *prev_bssid); +void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, + const u8 *req_ie, size_t req_ie_len, + const u8 *resp_ie, size_t resp_ie_len, + u16 status, bool wextev, + struct cfg80211_bss *bss); +void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, + size_t ie_len, u16 reason, bool from_ap); int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev); @@ -399,21 +346,21 @@ void __cfg80211_roamed(struct wireless_dev *wdev, int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); +/* SME implementation */ void cfg80211_conn_work(struct work_struct *work); -void cfg80211_sme_failed_assoc(struct wireless_dev *wdev); -bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev); +void cfg80211_sme_scan_done(struct net_device *dev); +bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status); +void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len); +void cfg80211_sme_disassoc(struct wireless_dev *wdev); +void cfg80211_sme_deauth(struct wireless_dev *wdev); +void cfg80211_sme_auth_timeout(struct wireless_dev *wdev); +void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev); /* internal helpers */ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher); int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr); -void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, - size_t ie_len, u16 reason, bool from_ap); -void cfg80211_sme_scan_done(struct net_device *dev); -void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len); -void cfg80211_sme_disassoc(struct net_device *dev, - struct cfg80211_internal_bss *bss); void __cfg80211_scan_done(struct work_struct *wk); void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); void __cfg80211_sched_scan_results(struct work_struct *wk); diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c index 920cabe0461b..90d050036624 100644 --- a/net/wireless/debugfs.c +++ b/net/wireless/debugfs.c @@ -74,7 +74,7 @@ static ssize_t ht40allow_map_read(struct file *file, if (!buf) return -ENOMEM; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); for (band = 0; band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; @@ -85,7 +85,7 @@ static ssize_t ht40allow_map_read(struct file *file, buf, buf_size, offset); } - 
mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); r = simple_read_from_buffer(user_buf, count, ppos, buf, offset); diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index d80e47194d49..39bff7d36768 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -43,7 +43,6 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid) cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); - wdev->sme_state = CFG80211_SME_CONNECTED; cfg80211_upload_connect_keys(wdev); nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, @@ -64,8 +63,6 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp) trace_cfg80211_ibss_joined(dev, bssid); - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); - ev = kzalloc(sizeof(*ev), gfp); if (!ev) return; @@ -120,7 +117,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, #ifdef CONFIG_CFG80211_WEXT wdev->wext.ibss.chandef = params->chandef; #endif - wdev->sme_state = CFG80211_SME_CONNECTING; err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan, params->channel_fixed @@ -134,7 +130,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, err = rdev_join_ibss(rdev, dev, params); if (err) { wdev->connect_keys = NULL; - wdev->sme_state = CFG80211_SME_IDLE; return err; } @@ -152,11 +147,11 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev = dev->ieee80211_ptr; int err; - mutex_lock(&rdev->devlist_mtx); + ASSERT_RTNL(); + wdev_lock(wdev); err = __cfg80211_join_ibss(rdev, dev, params, connkeys); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -186,7 +181,6 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext) } wdev->current_bss = NULL; - wdev->sme_state = CFG80211_SME_IDLE; wdev->ssid_len = 0; #ifdef CONFIG_CFG80211_WEXT if (!nowext) @@ -359,11 +353,9 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev, wdev->wext.ibss.channel_fixed = false; } - mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -429,11 +421,9 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev, memcpy(wdev->wext.ibss.ssid, ssid, len); wdev->wext.ibss.ssid_len = len; - mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -512,11 +502,9 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev, } else wdev->wext.ibss.bssid = NULL; - mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = cfg80211_ibss_wext_join(rdev, wdev); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 0bb93f3061a4..30c49202ee4d 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -18,6 +18,7 @@ #define MESH_PATH_TO_ROOT_TIMEOUT 6000 #define MESH_ROOT_INTERVAL 5000 #define MESH_ROOT_CONFIRMATION_INTERVAL 2000 +#define MESH_DEFAULT_PLINK_TIMEOUT 1800 /* timeout in seconds */ /* * Minimum interval between two consecutive PREQs originated by the same @@ -75,6 +76,7 @@ const struct mesh_config default_mesh_config = { .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL, .power_mode = NL80211_MESH_POWER_ACTIVE, .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW, + .plink_timeout = MESH_DEFAULT_PLINK_TIMEOUT, }; const struct mesh_setup default_mesh_setup = { @@ -82,6 +84,7 @@ const struct 
mesh_setup default_mesh_setup = { .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, .path_metric = IEEE80211_PATH_METRIC_AIRTIME, + .auth_id = 0, /* open */ .ie = NULL, .ie_len = 0, .is_secure = false, @@ -159,6 +162,16 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, setup->chandef.center_freq1 = setup->chandef.chan->center_freq; } + /* + * check if basic rates are available otherwise use mandatory rates as + * basic rates + */ + if (!setup->basic_rates) { + struct ieee80211_supported_band *sband = + rdev->wiphy.bands[setup->chandef.chan->band]; + setup->basic_rates = ieee80211_mandatory_rates(sband); + } + if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef)) return -EINVAL; @@ -185,11 +198,9 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev = dev->ieee80211_ptr; int err; - mutex_lock(&rdev->devlist_mtx); wdev_lock(wdev); err = __cfg80211_join_mesh(rdev, dev, setup, conf); wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 0c7b7dd855f6..a61a44bc6cf0 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -18,37 +18,18 @@ #include "rdev-ops.h" -void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); - - trace_cfg80211_send_rx_auth(dev); - wdev_lock(wdev); - - nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); - cfg80211_sme_rx_auth(dev, buf, len); - - wdev_unlock(wdev); -} -EXPORT_SYMBOL(cfg80211_send_rx_auth); - -void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, +void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss, const u8 *buf, size_t len) { - u16 status_code; struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; u8 *ie = mgmt->u.assoc_resp.variable; int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); + u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); trace_cfg80211_send_rx_assoc(dev, bss); - wdev_lock(wdev); - - status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code); /* * This is a bit of a hack, we don't notify userspace of @@ -56,174 +37,131 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss, * and got a reject -- we only try again with an assoc * frame instead of reassoc. */ - if (status_code != WLAN_STATUS_SUCCESS && wdev->conn && - cfg80211_sme_failed_reassoc(wdev)) { + if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) { cfg80211_put_bss(wiphy, bss); - goto out; + return; } nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL); - - if (status_code != WLAN_STATUS_SUCCESS && wdev->conn) { - cfg80211_sme_failed_assoc(wdev); - /* - * do not call connect_result() now because the - * sme will schedule work that does it later. - */ - cfg80211_put_bss(wiphy, bss); - goto out; - } - - if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) { - /* - * This is for the userspace SME, the CONNECTING - * state will be changed to CONNECTED by - * __cfg80211_connect_result() below. 
- */ - wdev->sme_state = CFG80211_SME_CONNECTING; - } - - /* this consumes the bss reference */ + /* update current_bss etc., consumes the bss reference */ __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs, status_code, status_code == WLAN_STATUS_SUCCESS, bss); - out: - wdev_unlock(wdev); } -EXPORT_SYMBOL(cfg80211_send_rx_assoc); +EXPORT_SYMBOL(cfg80211_rx_assoc_resp); -void __cfg80211_send_deauth(struct net_device *dev, - const u8 *buf, size_t len) +static void cfg80211_process_auth(struct wireless_dev *wdev, + const u8 *buf, size_t len) { - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; - const u8 *bssid = mgmt->bssid; - bool was_current = false; - - trace___cfg80211_send_deauth(dev); - ASSERT_WDEV_LOCK(wdev); - - if (wdev->current_bss && - ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { - cfg80211_unhold_bss(wdev->current_bss); - cfg80211_put_bss(wiphy, &wdev->current_bss->pub); - wdev->current_bss = NULL; - was_current = true; - } + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL); - - if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) { - u16 reason_code; - bool from_ap; - - reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); - - from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); - __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); - } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { - __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - false, NULL); - } + nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL); + cfg80211_sme_rx_auth(wdev, buf, len); } -EXPORT_SYMBOL(__cfg80211_send_deauth); -void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len) +static void cfg80211_process_deauth(struct wireless_dev *wdev, + const u8 *buf, size_t len) { - struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; + const u8 *bssid = mgmt->bssid; + u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); + bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); + + nl80211_send_deauth(rdev, wdev->netdev, buf, len, GFP_KERNEL); + + if (!wdev->current_bss || + !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) + return; - wdev_lock(wdev); - __cfg80211_send_deauth(dev, buf, len); - wdev_unlock(wdev); + __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); + cfg80211_sme_deauth(wdev); } -EXPORT_SYMBOL(cfg80211_send_deauth); -void __cfg80211_send_disassoc(struct net_device *dev, - const u8 *buf, size_t len) +static void cfg80211_process_disassoc(struct wireless_dev *wdev, + const u8 *buf, size_t len) { - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; const u8 *bssid = mgmt->bssid; - u16 reason_code; - bool from_ap; - - trace___cfg80211_send_disassoc(dev); - ASSERT_WDEV_LOCK(wdev); + u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); + bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); - nl80211_send_disassoc(rdev, dev, 
buf, len, GFP_KERNEL); + nl80211_send_disassoc(rdev, wdev->netdev, buf, len, GFP_KERNEL); - if (wdev->sme_state != CFG80211_SME_CONNECTED) + if (WARN_ON(!wdev->current_bss || + !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return; - if (wdev->current_bss && - ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) { - cfg80211_sme_disassoc(dev, wdev->current_bss); - cfg80211_unhold_bss(wdev->current_bss); - cfg80211_put_bss(wiphy, &wdev->current_bss->pub); - wdev->current_bss = NULL; - } else - WARN_ON(1); - - - reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); - - from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr); - __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); + __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); + cfg80211_sme_disassoc(wdev); } -EXPORT_SYMBOL(__cfg80211_send_disassoc); -void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len) +void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; + struct ieee80211_mgmt *mgmt = (void *)buf; + + ASSERT_WDEV_LOCK(wdev); + + trace_cfg80211_rx_mlme_mgmt(dev, buf, len); + + if (WARN_ON(len < 2)) + return; - wdev_lock(wdev); - __cfg80211_send_disassoc(dev, buf, len); - wdev_unlock(wdev); + if (ieee80211_is_auth(mgmt->frame_control)) + cfg80211_process_auth(wdev, buf, len); + else if (ieee80211_is_deauth(mgmt->frame_control)) + cfg80211_process_deauth(wdev, buf, len); + else if (ieee80211_is_disassoc(mgmt->frame_control)) + cfg80211_process_disassoc(wdev, buf, len); } -EXPORT_SYMBOL(cfg80211_send_disassoc); +EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt); -void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) +void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); trace_cfg80211_send_auth_timeout(dev, addr); - wdev_lock(wdev); nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); - if (wdev->sme_state == CFG80211_SME_CONNECTING) - __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - false, NULL); - - wdev_unlock(wdev); + cfg80211_sme_auth_timeout(wdev); } -EXPORT_SYMBOL(cfg80211_send_auth_timeout); +EXPORT_SYMBOL(cfg80211_auth_timeout); -void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr) +void cfg80211_assoc_timeout(struct net_device *dev, const u8 *addr) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); trace_cfg80211_send_assoc_timeout(dev, addr); - wdev_lock(wdev); nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); - if (wdev->sme_state == CFG80211_SME_CONNECTING) - __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - false, NULL); + cfg80211_sme_assoc_timeout(wdev); +} +EXPORT_SYMBOL(cfg80211_assoc_timeout); + +void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct ieee80211_mgmt *mgmt = (void *)buf; + + ASSERT_WDEV_LOCK(wdev); + + trace_cfg80211_tx_mlme_mgmt(dev, buf, len); - wdev_unlock(wdev); + if (WARN_ON(len < 2)) + return; + + if (ieee80211_is_deauth(mgmt->frame_control)) + cfg80211_process_deauth(wdev, buf, len); + else + cfg80211_process_disassoc(wdev, buf, len); } -EXPORT_SYMBOL(cfg80211_send_assoc_timeout); 
+EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt); void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, enum nl80211_key_type key_type, int key_id, @@ -253,18 +191,27 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, EXPORT_SYMBOL(cfg80211_michael_mic_failure); /* some MLME handling for userspace SME */ -int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, - struct net_device *dev, - struct ieee80211_channel *chan, - enum nl80211_auth_type auth_type, - const u8 *bssid, - const u8 *ssid, int ssid_len, - const u8 *ie, int ie_len, - const u8 *key, int key_len, int key_idx, - const u8 *sae_data, int sae_data_len) +int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_auth_type auth_type, + const u8 *bssid, + const u8 *ssid, int ssid_len, + const u8 *ie, int ie_len, + const u8 *key, int key_len, int key_idx, + const u8 *sae_data, int sae_data_len) { struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_auth_request req; + struct cfg80211_auth_request req = { + .ie = ie, + .ie_len = ie_len, + .sae_data = sae_data, + .sae_data_len = sae_data_len, + .auth_type = auth_type, + .key = key, + .key_len = key_len, + .key_idx = key_idx, + }; int err; ASSERT_WDEV_LOCK(wdev); @@ -277,18 +224,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, ether_addr_equal(bssid, wdev->current_bss->pub.bssid)) return -EALREADY; - memset(&req, 0, sizeof(req)); - - req.ie = ie; - req.ie_len = ie_len; - req.sae_data = sae_data; - req.sae_data_len = sae_data_len; - req.auth_type = auth_type; req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); - req.key = key; - req.key_len = key_len; - req.key_idx = key_idx; if (!req.bss) return -ENOENT; @@ -304,28 +241,6 @@ out: return err; } -int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, - struct net_device *dev, struct ieee80211_channel *chan, - enum nl80211_auth_type auth_type, const u8 *bssid, - const u8 *ssid, int ssid_len, - const u8 *ie, int ie_len, - const u8 *key, int key_len, int key_idx, - const u8 *sae_data, int sae_data_len) -{ - int err; - - mutex_lock(&rdev->devlist_mtx); - wdev_lock(dev->ieee80211_ptr); - err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, - ssid, ssid_len, ie, ie_len, - key, key_len, key_idx, - sae_data, sae_data_len); - wdev_unlock(dev->ieee80211_ptr); - mutex_unlock(&rdev->devlist_mtx); - - return err; -} - /* Do a logical ht_capa &= ht_capa_mask. 
*/ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, const struct ieee80211_ht_cap *ht_capa_mask) @@ -360,30 +275,21 @@ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, p1[i] &= p2[i]; } -int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, - struct net_device *dev, - struct ieee80211_channel *chan, - const u8 *bssid, - const u8 *ssid, int ssid_len, - struct cfg80211_assoc_request *req) +int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct ieee80211_channel *chan, + const u8 *bssid, + const u8 *ssid, int ssid_len, + struct cfg80211_assoc_request *req) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; - bool was_connected = false; ASSERT_WDEV_LOCK(wdev); - if (wdev->current_bss && req->prev_bssid && - ether_addr_equal(wdev->current_bss->pub.bssid, req->prev_bssid)) { - /* - * Trying to reassociate: Allow this to proceed and let the old - * association to be dropped when the new one is completed. - */ - if (wdev->sme_state == CFG80211_SME_CONNECTED) { - was_connected = true; - wdev->sme_state = CFG80211_SME_CONNECTING; - } - } else if (wdev->current_bss) + if (wdev->current_bss && + (!req->prev_bssid || !ether_addr_equal(wdev->current_bss->pub.bssid, + req->prev_bssid))) return -EALREADY; cfg80211_oper_and_ht_capa(&req->ht_capa_mask, @@ -393,11 +299,8 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); - if (!req->bss) { - if (was_connected) - wdev->sme_state = CFG80211_SME_CONNECTED; + if (!req->bss) return -ENOENT; - } err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED); if (err) @@ -406,39 +309,16 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, err = rdev_assoc(rdev, dev, req); out: - if (err) { - if (was_connected) - wdev->sme_state = CFG80211_SME_CONNECTED; + if (err) cfg80211_put_bss(&rdev->wiphy, req->bss); - } - - return err; -} - -int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, - struct net_device *dev, - struct ieee80211_channel *chan, - const u8 *bssid, - const u8 *ssid, int ssid_len, - struct cfg80211_assoc_request *req) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - int err; - - mutex_lock(&rdev->devlist_mtx); - wdev_lock(wdev); - err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, - ssid, ssid_len, req); - wdev_unlock(wdev); - mutex_unlock(&rdev->devlist_mtx); return err; } -int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, - struct net_device *dev, const u8 *bssid, - const u8 *ie, int ie_len, u16 reason, - bool local_state_change) +int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason, + bool local_state_change) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_deauth_request req = { @@ -451,79 +331,51 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, ASSERT_WDEV_LOCK(wdev); - if (local_state_change && (!wdev->current_bss || - !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) + if (local_state_change && + (!wdev->current_bss || + !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) return 0; return rdev_deauth(rdev, dev, &req); } -int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, - struct net_device *dev, const u8 *bssid, - const u8 *ie, int ie_len, u16 reason, - bool local_state_change) +int 
cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *bssid, + const u8 *ie, int ie_len, u16 reason, + bool local_state_change) { struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_disassoc_request req = { + .reason_code = reason, + .local_state_change = local_state_change, + .ie = ie, + .ie_len = ie_len, + }; int err; - wdev_lock(wdev); - err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason, - local_state_change); - wdev_unlock(wdev); - - return err; -} - -static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, - struct net_device *dev, const u8 *bssid, - const u8 *ie, int ie_len, u16 reason, - bool local_state_change) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_disassoc_request req; - ASSERT_WDEV_LOCK(wdev); - if (wdev->sme_state != CFG80211_SME_CONNECTED) - return -ENOTCONN; - - if (WARN(!wdev->current_bss, "sme_state=%d\n", wdev->sme_state)) + if (!wdev->current_bss) return -ENOTCONN; - memset(&req, 0, sizeof(req)); - req.reason_code = reason; - req.local_state_change = local_state_change; - req.ie = ie; - req.ie_len = ie_len; if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) req.bss = &wdev->current_bss->pub; else return -ENOTCONN; - return rdev_disassoc(rdev, dev, &req); -} - -int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, - struct net_device *dev, const u8 *bssid, - const u8 *ie, int ie_len, u16 reason, - bool local_state_change) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - int err; - - wdev_lock(wdev); - err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason, - local_state_change); - wdev_unlock(wdev); + err = rdev_disassoc(rdev, dev, &req); + if (err) + return err; - return err; + /* driver should have reported the disassoc */ + WARN_ON(wdev->current_bss); + return 0; } void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_deauth_request req; u8 bssid[ETH_ALEN]; ASSERT_WDEV_LOCK(wdev); @@ -531,23 +383,12 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, if (!rdev->ops->deauth) return; - memset(&req, 0, sizeof(req)); - req.reason_code = WLAN_REASON_DEAUTH_LEAVING; - req.ie = NULL; - req.ie_len = 0; - if (!wdev->current_bss) return; memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); - req.bssid = bssid; - rdev_deauth(rdev, dev, &req); - - if (wdev->current_bss) { - cfg80211_unhold_bss(wdev->current_bss); - cfg80211_put_bss(&rdev->wiphy, &wdev->current_bss->pub); - wdev->current_bss = NULL; - } + cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, + WLAN_REASON_DEAUTH_LEAVING, false); } struct cfg80211_mgmt_registration { @@ -848,7 +689,7 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) dfs_update_channels_wk); wiphy = &rdev->wiphy; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) { sband = wiphy->bands[bandid]; if (!sband) @@ -881,7 +722,7 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work) check_again = true; } } - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); /* reschedule if there are other channels waiting to be cleared again */ if (check_again) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b14b7e3cb6e6..f8ffb9a59c83 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -37,10 +37,10 @@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, /* the 
netlink family */ static struct genl_family nl80211_fam = { - .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ - .name = "nl80211", /* have users key off the name instead */ - .hdrsize = 0, /* no private header */ - .version = 1, /* no particular meaning now */ + .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ + .name = NL80211_GENL_NAME, /* have users key off the name instead */ + .hdrsize = 0, /* no private header */ + .version = 1, /* no particular meaning now */ .maxattr = NL80211_ATTR_MAX, .netnsok = true, .pre_doit = nl80211_pre_doit, @@ -59,7 +59,7 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) int wiphy_idx = -1; int ifidx = -1; - assert_cfg80211_lock(); + ASSERT_RTNL(); if (!have_ifidx && !have_wdev_id) return ERR_PTR(-EINVAL); @@ -80,7 +80,6 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) if (have_wdev_id && rdev->wiphy_idx != wiphy_idx) continue; - mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { if (have_ifidx && wdev->netdev && wdev->netdev->ifindex == ifidx) { @@ -92,7 +91,6 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs) break; } } - mutex_unlock(&rdev->devlist_mtx); if (result) break; @@ -109,7 +107,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) struct cfg80211_registered_device *rdev = NULL, *tmp; struct net_device *netdev; - assert_cfg80211_lock(); + ASSERT_RTNL(); if (!attrs[NL80211_ATTR_WIPHY] && !attrs[NL80211_ATTR_IFINDEX] && @@ -128,14 +126,12 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32); if (tmp) { /* make sure wdev exists */ - mutex_lock(&tmp->devlist_mtx); list_for_each_entry(wdev, &tmp->wdev_list, list) { if (wdev->identifier != (u32)wdev_id) continue; found = true; break; } - mutex_unlock(&tmp->devlist_mtx); if (!found) tmp = NULL; @@ -182,19 +178,6 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) /* * This function returns a pointer to the driver * that the genl_info item that is passed refers to. - * If successful, it returns non-NULL and also locks - * the driver's mutex! - * - * This means that you need to call cfg80211_unlock_rdev() - * before being allowed to acquire &cfg80211_mutex! - * - * This is necessary because we need to lock the global - * mutex to get an item off the list safely, and then - * we lock the rdev mutex so it doesn't go away under us. - * - * We don't want to keep cfg80211_mutex locked - * for all the time in order to allow requests on - * other interfaces to go through at the same time. * * The result of this can be a PTR_ERR and hence must * be checked with IS_ERR() for errors. 
@@ -202,20 +185,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) static struct cfg80211_registered_device * cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info) { - struct cfg80211_registered_device *rdev; - - mutex_lock(&cfg80211_mutex); - rdev = __cfg80211_rdev_from_attrs(netns, info->attrs); - - /* if it is not an error we grab the lock on - * it to assure it won't be going away while - * we operate on it */ - if (!IS_ERR(rdev)) - mutex_lock(&rdev->mtx); - - mutex_unlock(&cfg80211_mutex); - - return rdev; + return __cfg80211_rdev_from_attrs(netns, info->attrs); } /* policy for the attributes */ @@ -378,6 +348,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 }, }; /* policy for the key attributes */ @@ -455,7 +426,6 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, int err; rtnl_lock(); - mutex_lock(&cfg80211_mutex); if (!cb->args[0]) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, @@ -484,14 +454,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, *rdev = wiphy_to_dev(wiphy); *wdev = NULL; - mutex_lock(&(*rdev)->devlist_mtx); list_for_each_entry(tmp, &(*rdev)->wdev_list, list) { if (tmp->identifier == cb->args[1]) { *wdev = tmp; break; } } - mutex_unlock(&(*rdev)->devlist_mtx); if (!*wdev) { err = -ENODEV; @@ -499,19 +467,14 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, } } - cfg80211_lock_rdev(*rdev); - - mutex_unlock(&cfg80211_mutex); return 0; out_unlock: - mutex_unlock(&cfg80211_mutex); rtnl_unlock(); return err; } static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev) { - cfg80211_unlock_rdev(rdev); rtnl_unlock(); } @@ -837,12 +800,9 @@ static int nl80211_key_allowed(struct wireless_dev *wdev) case NL80211_IFTYPE_MESH_POINT: break; case NL80211_IFTYPE_ADHOC: - if (!wdev->current_bss) - return -ENOLINK; - break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: - if (wdev->sme_state != CFG80211_SME_CONNECTED) + if (!wdev->current_bss) return -ENOLINK; break; default: @@ -945,7 +905,7 @@ nla_put_failure: static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { - const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp; + const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan->tcp; struct nlattr *nl_tcp; if (!tcp) @@ -988,37 +948,37 @@ static int nl80211_send_wowlan(struct sk_buff *msg, { struct nlattr *nl_wowlan; - if (!dev->wiphy.wowlan.flags && !dev->wiphy.wowlan.n_patterns) + if (!dev->wiphy.wowlan) return 0; nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); if (!nl_wowlan) return -ENOBUFS; - if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) && + if (((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || - 
((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || - ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) && + ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) return -ENOBUFS; - if (dev->wiphy.wowlan.n_patterns) { + if (dev->wiphy.wowlan->n_patterns) { struct nl80211_wowlan_pattern_support pat = { - .max_patterns = dev->wiphy.wowlan.n_patterns, - .min_pattern_len = dev->wiphy.wowlan.pattern_min_len, - .max_pattern_len = dev->wiphy.wowlan.pattern_max_len, - .max_pkt_offset = dev->wiphy.wowlan.max_pkt_offset, + .max_patterns = dev->wiphy.wowlan->n_patterns, + .min_pattern_len = dev->wiphy.wowlan->pattern_min_len, + .max_pattern_len = dev->wiphy.wowlan->pattern_max_len, + .max_pkt_offset = dev->wiphy.wowlan->max_pkt_offset, }; if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, @@ -1228,6 +1188,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev, if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) && nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) goto nla_put_failure; + if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) && + nla_put_flag(msg, WIPHY_FLAG_SUPPORTS_5_10_MHZ)) + goto nla_put_failure; (*split_start)++; if (split) @@ -1572,7 +1535,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) if (!tb) return -ENOMEM; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, tb, NL80211_ATTR_MAX, nl80211_policy); if (res == 0) { @@ -1587,7 +1550,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) netdev = dev_get_by_index(sock_net(skb->sk), ifidx); if (!netdev) { - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); kfree(tb); return -ENODEV; } @@ -1635,7 +1598,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) !skb->len && cb->min_dump_alloc < 4096) { cb->min_dump_alloc = 4096; - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); return 1; } idx--; @@ -1644,7 +1607,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) } while (cb->args[1] > 0); break; } - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); cb->args[0] = idx; @@ -1778,6 +1741,11 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, IEEE80211_CHAN_DISABLED)) return -EINVAL; + if ((chandef->width == NL80211_CHAN_WIDTH_5 || + chandef->width == NL80211_CHAN_WIDTH_10) && + !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ)) + return -EINVAL; + return 0; } @@ -1799,7 +1767,6 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, if (result) return result; - mutex_lock(&rdev->devlist_mtx); switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: @@ -1823,7 +1790,6 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, default: result = -EINVAL; } - mutex_unlock(&rdev->devlist_mtx); return result; } @@ -1872,6 +1838,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct 
genl_info *info) u32 frag_threshold = 0, rts_threshold = 0; u8 coverage_class = 0; + ASSERT_RTNL(); + /* * Try to find the wiphy and netdev. Normally this * function shouldn't need the netdev, but this is @@ -1881,31 +1849,25 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) * also passed a netdev to set_wiphy, so that it is * possible to let that go to the right netdev! */ - mutex_lock(&cfg80211_mutex); if (info->attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); netdev = dev_get_by_index(genl_info_net(info), ifindex); - if (netdev && netdev->ieee80211_ptr) { + if (netdev && netdev->ieee80211_ptr) rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy); - mutex_lock(&rdev->mtx); - } else + else netdev = NULL; } if (!netdev) { rdev = __cfg80211_rdev_from_attrs(genl_info_net(info), info->attrs); - if (IS_ERR(rdev)) { - mutex_unlock(&cfg80211_mutex); + if (IS_ERR(rdev)) return PTR_ERR(rdev); - } wdev = NULL; netdev = NULL; result = 0; - - mutex_lock(&rdev->mtx); } else wdev = netdev->ieee80211_ptr; @@ -1918,8 +1880,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) result = cfg80211_dev_rename( rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); - mutex_unlock(&cfg80211_mutex); - if (result) goto bad_res; @@ -2126,7 +2086,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) } bad_res: - mutex_unlock(&rdev->mtx); if (netdev) dev_put(netdev); return result; @@ -2224,7 +2183,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) continue; @@ -2234,7 +2193,6 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * } if_idx = 0; - mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { if (if_idx < if_start) { if_idx++; @@ -2243,17 +2201,15 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev) < 0) { - mutex_unlock(&rdev->devlist_mtx); goto out; } if_idx++; } - mutex_unlock(&rdev->devlist_mtx); wp_idx++; } out: - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); cb->args[0] = wp_idx; cb->args[1] = if_idx; @@ -2286,6 +2242,7 @@ static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = { [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG }, + [NL80211_MNTR_FLAG_ACTIVE] = { .type = NLA_FLAG }, }; static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) @@ -2397,6 +2354,10 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) change = true; } + if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) && + !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) + return -EOPNOTSUPP; + if (change) err = cfg80211_change_iface(rdev, dev, ntype, flags, ¶ms); else @@ -2454,6 +2415,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? 
info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, &flags); + + if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) && + !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) + return -EOPNOTSUPP; + wdev = rdev_add_virtual_intf(rdev, nla_data(info->attrs[NL80211_ATTR_IFNAME]), type, err ? NULL : &flags, ¶ms); @@ -2486,11 +2452,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock); - mutex_lock(&rdev->devlist_mtx); wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wdev_list); rdev->devlist_generation++; - mutex_unlock(&rdev->devlist_mtx); break; default: break; @@ -2933,61 +2897,58 @@ static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info) return err; } -static int nl80211_parse_beacon(struct genl_info *info, +static int nl80211_parse_beacon(struct nlattr *attrs[], struct cfg80211_beacon_data *bcn) { bool haveinfo = false; - if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) || - !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) || - !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_PROBE_RESP]) || - !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP])) + if (!is_valid_ie_attr(attrs[NL80211_ATTR_BEACON_TAIL]) || + !is_valid_ie_attr(attrs[NL80211_ATTR_IE]) || + !is_valid_ie_attr(attrs[NL80211_ATTR_IE_PROBE_RESP]) || + !is_valid_ie_attr(attrs[NL80211_ATTR_IE_ASSOC_RESP])) return -EINVAL; memset(bcn, 0, sizeof(*bcn)); - if (info->attrs[NL80211_ATTR_BEACON_HEAD]) { - bcn->head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]); - bcn->head_len = nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]); + if (attrs[NL80211_ATTR_BEACON_HEAD]) { + bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]); + bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]); if (!bcn->head_len) return -EINVAL; haveinfo = true; } - if (info->attrs[NL80211_ATTR_BEACON_TAIL]) { - bcn->tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]); - bcn->tail_len = - nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]); + if (attrs[NL80211_ATTR_BEACON_TAIL]) { + bcn->tail = nla_data(attrs[NL80211_ATTR_BEACON_TAIL]); + bcn->tail_len = nla_len(attrs[NL80211_ATTR_BEACON_TAIL]); haveinfo = true; } if (!haveinfo) return -EINVAL; - if (info->attrs[NL80211_ATTR_IE]) { - bcn->beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]); - bcn->beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]); + if (attrs[NL80211_ATTR_IE]) { + bcn->beacon_ies = nla_data(attrs[NL80211_ATTR_IE]); + bcn->beacon_ies_len = nla_len(attrs[NL80211_ATTR_IE]); } - if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) { + if (attrs[NL80211_ATTR_IE_PROBE_RESP]) { bcn->proberesp_ies = - nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]); + nla_data(attrs[NL80211_ATTR_IE_PROBE_RESP]); bcn->proberesp_ies_len = - nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]); + nla_len(attrs[NL80211_ATTR_IE_PROBE_RESP]); } - if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) { + if (attrs[NL80211_ATTR_IE_ASSOC_RESP]) { bcn->assocresp_ies = - nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]); + nla_data(attrs[NL80211_ATTR_IE_ASSOC_RESP]); bcn->assocresp_ies_len = - nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]); + nla_len(attrs[NL80211_ATTR_IE_ASSOC_RESP]); } - if (info->attrs[NL80211_ATTR_PROBE_RESP]) { - bcn->probe_resp = - nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]); - bcn->probe_resp_len = - nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]); + if (attrs[NL80211_ATTR_PROBE_RESP]) { + bcn->probe_resp = nla_data(attrs[NL80211_ATTR_PROBE_RESP]); + 
bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]); } return 0; @@ -2999,8 +2960,6 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev; bool ret = false; - mutex_lock(&rdev->devlist_mtx); - list_for_each_entry(wdev, &rdev->wdev_list, list) { if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) @@ -3014,8 +2973,6 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, break; } - mutex_unlock(&rdev->devlist_mtx); - return ret; } @@ -3070,7 +3027,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) !info->attrs[NL80211_ATTR_BEACON_HEAD]) return -EINVAL; - err = nl80211_parse_beacon(info, ¶ms.beacon); + err = nl80211_parse_beacon(info->attrs, ¶ms.beacon); if (err) return err; @@ -3177,13 +3134,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) params.radar_required = true; } - mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, params.chandef.chan, CHAN_MODE_SHARED, radar_detect_width); - mutex_unlock(&rdev->devlist_mtx); - if (err) return err; @@ -3225,7 +3179,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) if (!wdev->beacon_interval) return -EINVAL; - err = nl80211_parse_beacon(info, ¶ms); + err = nl80211_parse_beacon(info->attrs, ¶ms); if (err) return err; @@ -3383,6 +3337,32 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, return true; } +static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal, + int id) +{ + void *attr; + int i = 0; + + if (!mask) + return true; + + attr = nla_nest_start(msg, id); + if (!attr) + return false; + + for (i = 0; i < IEEE80211_MAX_CHAINS; i++) { + if (!(mask & BIT(i))) + continue; + + if (nla_put_u8(msg, i, signal[i])) + return false; + } + + nla_nest_end(msg, attr); + + return true; +} + static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct cfg80211_registered_device *rdev, @@ -3454,6 +3434,18 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq, default: break; } + if (sinfo->filled & STATION_INFO_CHAIN_SIGNAL) { + if (!nl80211_put_signal(msg, sinfo->chains, + sinfo->chain_signal, + NL80211_STA_INFO_CHAIN_SIGNAL)) + goto nla_put_failure; + } + if (sinfo->filled & STATION_INFO_CHAIN_SIGNAL_AVG) { + if (!nl80211_put_signal(msg, sinfo->chains, + sinfo->chain_signal_avg, + NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) + goto nla_put_failure; + } if (sinfo->filled & STATION_INFO_TX_BITRATE) { if (!nl80211_put_sta_rate(msg, &sinfo->txrate, NL80211_STA_INFO_TX_BITRATE)) @@ -3841,6 +3833,8 @@ static int nl80211_set_station_tdls(struct genl_info *info, struct station_parameters *params) { /* Dummy STA entry gets updated once the peer capabilities are known */ + if (info->attrs[NL80211_ATTR_PEER_AID]) + params->aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params->ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); @@ -3981,7 +3975,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) return -EINVAL; - if (!info->attrs[NL80211_ATTR_STA_AID]) + if (!info->attrs[NL80211_ATTR_STA_AID] && + !info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); @@ -3992,7 +3987,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) 
params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); - params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); + if (info->attrs[NL80211_ATTR_PEER_AID]) + params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); + else + params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); if (!params.aid || params.aid > IEEE80211_MAX_AID) return -EINVAL; @@ -4044,7 +4042,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; /* TDLS peers cannot be added */ - if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) || + info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; /* but don't bother the driver with it */ params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); @@ -4070,7 +4069,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) return -EINVAL; /* TDLS peers cannot be added */ - if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) + if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) || + info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; break; case NL80211_IFTYPE_STATION: @@ -4592,7 +4592,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE, cur_params.power_mode) || nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW, - cur_params.dot11MeshAwakeWindowDuration)) + cur_params.dot11MeshAwakeWindowDuration) || + nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, + cur_params.plink_timeout)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); @@ -4633,6 +4635,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 }, [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 }, [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, + [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, }; static const struct nla_policy @@ -4641,6 +4644,7 @@ static const struct nla_policy [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, + [NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG }, [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, @@ -4769,6 +4773,9 @@ do { \ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, 0, 65535, mask, NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 1, 0xffffffff, + mask, NL80211_MESHCONF_PLINK_TIMEOUT, + nla_get_u32); if (mask_out) *mask_out = mask; @@ -4826,6 +4833,13 @@ static int nl80211_parse_mesh_setup(struct genl_info *info, if (setup->is_secure) setup->user_mpm = true; + if (tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]) { + if (!setup->user_mpm) + return -EINVAL; + setup->auth_id = + nla_get_u8(tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]); + } + return 0; } @@ -4868,18 +4882,13 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) void *hdr = NULL; struct nlattr *nl_reg_rules; unsigned int i; - int err = -EINVAL; - - mutex_lock(&cfg80211_mutex); if (!cfg80211_regdomain) - goto out; + return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) { - err = -ENOBUFS; - goto out; - } + if (!msg) + return -ENOBUFS; 
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_REG); @@ -4938,8 +4947,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) nla_nest_end(msg, nl_reg_rules); genlmsg_end(msg, hdr); - err = genlmsg_reply(msg, info); - goto out; + return genlmsg_reply(msg, info); nla_put_failure_rcu: rcu_read_unlock(); @@ -4947,10 +4955,7 @@ nla_put_failure: genlmsg_cancel(msg, hdr); put_failure: nlmsg_free(msg); - err = -EMSGSIZE; -out: - mutex_unlock(&cfg80211_mutex); - return err; + return -EMSGSIZE; } static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) @@ -5016,12 +5021,9 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) } } - mutex_lock(&cfg80211_mutex); - r = set_regdom(rd); /* set_regdom took ownership */ rd = NULL; - mutex_unlock(&cfg80211_mutex); bad_reg: kfree(rd); @@ -5071,7 +5073,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->scan) return -EOPNOTSUPP; - mutex_lock(&rdev->sched_scan_mtx); if (rdev->scan_req) { err = -EBUSY; goto unlock; @@ -5257,7 +5258,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) } unlock: - mutex_unlock(&rdev->sched_scan_mtx); return err; } @@ -5329,8 +5329,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, if (ie_len > wiphy->max_sched_scan_ie_len) return -EINVAL; - mutex_lock(&rdev->sched_scan_mtx); - if (rdev->sched_scan_req) { err = -EINPROGRESS; goto out; @@ -5498,7 +5496,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb, out_free: kfree(request); out: - mutex_unlock(&rdev->sched_scan_mtx); return err; } @@ -5506,17 +5503,12 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; - int err; if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || !rdev->ops->sched_scan_stop) return -EOPNOTSUPP; - mutex_lock(&rdev->sched_scan_mtx); - err = __cfg80211_stop_sched_scan(rdev, false); - mutex_unlock(&rdev->sched_scan_mtx); - - return err; + return __cfg80211_stop_sched_scan(rdev, false); } static int nl80211_start_radar_detection(struct sk_buff *skb, @@ -5548,12 +5540,11 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, if (!rdev->ops->start_radar_detection) return -EOPNOTSUPP; - mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, chandef.chan, CHAN_MODE_SHARED, BIT(chandef.width)); if (err) - goto err_locked; + return err; err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef); if (!err) { @@ -5561,9 +5552,6 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, wdev->cac_started = true; wdev->cac_start_time = jiffies; } -err_locked: - mutex_unlock(&rdev->devlist_mtx); - return err; } @@ -5946,10 +5934,13 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) if (local_state_change) return 0; - return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, - ssid, ssid_len, ie, ie_len, - key.p.key, key.p.key_len, key.idx, - sae_data, sae_data_len); + wdev_lock(dev->ieee80211_ptr); + err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, + ssid, ssid_len, ie, ie_len, + key.p.key, key.p.key_len, key.idx, + sae_data, sae_data_len); + wdev_unlock(dev->ieee80211_ptr); + return err; } static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, @@ -6116,9 +6107,12 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) } err = 
nl80211_crypto_settings(rdev, info, &req.crypto, 1); - if (!err) + if (!err) { + wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, ssid, ssid_len, &req); + wdev_unlock(dev->ieee80211_ptr); + } return err; } @@ -6128,7 +6122,7 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; const u8 *ie = NULL, *bssid; - int ie_len = 0; + int ie_len = 0, err; u16 reason_code; bool local_state_change; @@ -6163,8 +6157,11 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; - return cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code, - local_state_change); + wdev_lock(dev->ieee80211_ptr); + err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code, + local_state_change); + wdev_unlock(dev->ieee80211_ptr); + return err; } static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) @@ -6172,7 +6169,7 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; const u8 *ie = NULL, *bssid; - int ie_len = 0; + int ie_len = 0, err; u16 reason_code; bool local_state_change; @@ -6207,8 +6204,11 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; - return cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code, - local_state_change); + wdev_lock(dev->ieee80211_ptr); + err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code, + local_state_change); + wdev_unlock(dev->ieee80211_ptr); + return err; } static bool @@ -6295,11 +6295,16 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef)) return -EINVAL; - if (ibss.chandef.width > NL80211_CHAN_WIDTH_40) - return -EINVAL; - if (ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && - !(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)) + switch (ibss.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + break; + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + if (rdev->wiphy.features & NL80211_FEATURE_HT_IBSS) + break; + default: return -EINVAL; + } ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; @@ -6426,6 +6431,8 @@ static int nl80211_testmode_dump(struct sk_buff *skb, void *data = NULL; int data_len = 0; + rtnl_lock(); + if (cb->args[0]) { /* * 0 is a valid index, but not valid for args[0], @@ -6437,18 +6444,16 @@ static int nl80211_testmode_dump(struct sk_buff *skb, nl80211_fam.attrbuf, nl80211_fam.maxattr, nl80211_policy); if (err) - return err; + goto out_err; - mutex_lock(&cfg80211_mutex); rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), nl80211_fam.attrbuf); if (IS_ERR(rdev)) { - mutex_unlock(&cfg80211_mutex); - return PTR_ERR(rdev); + err = PTR_ERR(rdev); + goto out_err; } phy_idx = rdev->wiphy_idx; rdev = NULL; - mutex_unlock(&cfg80211_mutex); if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]) cb->args[1] = @@ -6460,14 +6465,11 @@ static int nl80211_testmode_dump(struct sk_buff *skb, data_len = nla_len((void *)cb->args[1]); } - mutex_lock(&cfg80211_mutex); rdev = cfg80211_rdev_by_wiphy_idx(phy_idx); if (!rdev) { - mutex_unlock(&cfg80211_mutex); - return -ENOENT; + err = 
-ENOENT; + goto out_err; } - cfg80211_lock_rdev(rdev); - mutex_unlock(&cfg80211_mutex); if (!rdev->ops->testmode_dump) { err = -EOPNOTSUPP; @@ -6508,7 +6510,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb, /* see above */ cb->args[0] = phy_idx + 1; out_err: - cfg80211_unlock_rdev(rdev); + rtnl_unlock(); return err; } @@ -6716,7 +6718,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) sizeof(connect.vht_capa)); } - err = cfg80211_connect(rdev, dev, &connect, connkeys); + wdev_lock(dev->ieee80211_ptr); + err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL); + wdev_unlock(dev->ieee80211_ptr); if (err) kfree(connkeys); return err; @@ -6727,6 +6731,7 @@ static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u16 reason; + int ret; if (!info->attrs[NL80211_ATTR_REASON_CODE]) reason = WLAN_REASON_DEAUTH_LEAVING; @@ -6740,7 +6745,10 @@ static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info) dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; - return cfg80211_disconnect(rdev, dev, reason, true); + wdev_lock(dev->ieee80211_ptr); + ret = cfg80211_disconnect(rdev, dev, reason, true); + wdev_unlock(dev->ieee80211_ptr); + return ret; } static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) @@ -7159,6 +7167,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; switch (wdev->iftype) { + case NL80211_IFTYPE_P2P_DEVICE: + if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) + return -EINVAL; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_CLIENT: @@ -7166,7 +7177,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_GO: - case NL80211_IFTYPE_P2P_DEVICE: break; default: return -EOPNOTSUPP; @@ -7194,9 +7204,18 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); - err = nl80211_parse_chandef(rdev, info, &chandef); - if (err) - return err; + /* get the channel if any has been specified, otherwise pass NULL to + * the driver. 
The latter will use the current one + */ + chandef.chan = NULL; + if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { + err = nl80211_parse_chandef(rdev, info, &chandef); + if (err) + return err; + } + + if (!chandef.chan && offchan) + return -EINVAL; if (!dont_wait_for_ack) { msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); @@ -7501,6 +7520,23 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) setup.chandef.chan = NULL; } + if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { + u8 *rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); + int n_rates = + nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); + struct ieee80211_supported_band *sband; + + if (!setup.chandef.chan) + return -EINVAL; + + sband = rdev->wiphy.bands[setup.chandef.chan->band]; + + err = ieee80211_get_ratemask(sband, rates, n_rates, + &setup.basic_rates); + if (err) + return err; + } + return cfg80211_join_mesh(rdev, dev, &setup, &cfg); } @@ -7516,28 +7552,29 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info) static int nl80211_send_wowlan_patterns(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { + struct cfg80211_wowlan *wowlan = rdev->wiphy.wowlan_config; struct nlattr *nl_pats, *nl_pat; int i, pat_len; - if (!rdev->wowlan->n_patterns) + if (!wowlan->n_patterns) return 0; nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN); if (!nl_pats) return -ENOBUFS; - for (i = 0; i < rdev->wowlan->n_patterns; i++) { + for (i = 0; i < wowlan->n_patterns; i++) { nl_pat = nla_nest_start(msg, i + 1); if (!nl_pat) return -ENOBUFS; - pat_len = rdev->wowlan->patterns[i].pattern_len; + pat_len = wowlan->patterns[i].pattern_len; if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), - rdev->wowlan->patterns[i].mask) || + wowlan->patterns[i].mask) || nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN, - pat_len, rdev->wowlan->patterns[i].pattern) || + pat_len, wowlan->patterns[i].pattern) || nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET, - rdev->wowlan->patterns[i].pkt_offset)) + wowlan->patterns[i].pkt_offset)) return -ENOBUFS; nla_nest_end(msg, nl_pat); } @@ -7596,16 +7633,15 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) void *hdr; u32 size = NLMSG_DEFAULT_SIZE; - if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns && - !rdev->wiphy.wowlan.tcp) + if (!rdev->wiphy.wowlan) return -EOPNOTSUPP; - if (rdev->wowlan && rdev->wowlan->tcp) { + if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) { /* adjust size to have room for all the data */ - size += rdev->wowlan->tcp->tokens_size + - rdev->wowlan->tcp->payload_len + - rdev->wowlan->tcp->wake_len + - rdev->wowlan->tcp->wake_len / 8; + size += rdev->wiphy.wowlan_config->tcp->tokens_size + + rdev->wiphy.wowlan_config->tcp->payload_len + + rdev->wiphy.wowlan_config->tcp->wake_len + + rdev->wiphy.wowlan_config->tcp->wake_len / 8; } msg = nlmsg_new(size, GFP_KERNEL); @@ -7617,33 +7653,34 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) if (!hdr) goto nla_put_failure; - if (rdev->wowlan) { + if (rdev->wiphy.wowlan_config) { struct nlattr *nl_wowlan; nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!nl_wowlan) goto nla_put_failure; - if ((rdev->wowlan->any && + if ((rdev->wiphy.wowlan_config->any && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || - (rdev->wowlan->disconnect && + (rdev->wiphy.wowlan_config->disconnect && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || - (rdev->wowlan->magic_pkt && + 
(rdev->wiphy.wowlan_config->magic_pkt && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || - (rdev->wowlan->gtk_rekey_failure && + (rdev->wiphy.wowlan_config->gtk_rekey_failure && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || - (rdev->wowlan->eap_identity_req && + (rdev->wiphy.wowlan_config->eap_identity_req && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || - (rdev->wowlan->four_way_handshake && + (rdev->wiphy.wowlan_config->four_way_handshake && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || - (rdev->wowlan->rfkill_release && + (rdev->wiphy.wowlan_config->rfkill_release && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) goto nla_put_failure; if (nl80211_send_wowlan_patterns(msg, rdev)) goto nla_put_failure; - if (nl80211_send_wowlan_tcp(msg, rdev->wowlan->tcp)) + if (nl80211_send_wowlan_tcp(msg, + rdev->wiphy.wowlan_config->tcp)) goto nla_put_failure; nla_nest_end(msg, nl_wowlan); @@ -7669,7 +7706,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, u32 data_size, wake_size, tokens_size = 0, wake_mask_size; int err, port; - if (!rdev->wiphy.wowlan.tcp) + if (!rdev->wiphy.wowlan->tcp) return -EINVAL; err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP, @@ -7689,16 +7726,16 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, return -EINVAL; data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]); - if (data_size > rdev->wiphy.wowlan.tcp->data_payload_max) + if (data_size > rdev->wiphy.wowlan->tcp->data_payload_max) return -EINVAL; if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > - rdev->wiphy.wowlan.tcp->data_interval_max || + rdev->wiphy.wowlan->tcp->data_interval_max || nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0) return -EINVAL; wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); - if (wake_size > rdev->wiphy.wowlan.tcp->wake_payload_max) + if (wake_size > rdev->wiphy.wowlan->tcp->wake_payload_max) return -EINVAL; wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]); @@ -7713,13 +7750,13 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, if (!tok->len || tokens_size % tok->len) return -EINVAL; - if (!rdev->wiphy.wowlan.tcp->tok) + if (!rdev->wiphy.wowlan->tcp->tok) return -EINVAL; - if (tok->len > rdev->wiphy.wowlan.tcp->tok->max_len) + if (tok->len > rdev->wiphy.wowlan->tcp->tok->max_len) return -EINVAL; - if (tok->len < rdev->wiphy.wowlan.tcp->tok->min_len) + if (tok->len < rdev->wiphy.wowlan->tcp->tok->min_len) return -EINVAL; - if (tokens_size > rdev->wiphy.wowlan.tcp->tok->bufsize) + if (tokens_size > rdev->wiphy.wowlan->tcp->tok->bufsize) return -EINVAL; if (tok->offset + tok->len > data_size) return -EINVAL; @@ -7727,7 +7764,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) { seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]); - if (!rdev->wiphy.wowlan.tcp->seq) + if (!rdev->wiphy.wowlan->tcp->seq) return -EINVAL; if (seq->len == 0 || seq->len > 4) return -EINVAL; @@ -7808,17 +7845,16 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; struct cfg80211_wowlan new_triggers = {}; struct cfg80211_wowlan *ntrig; - struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; + const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan; int err, i; - bool prev_enabled = rdev->wowlan; + bool prev_enabled = rdev->wiphy.wowlan_config; - if (!rdev->wiphy.wowlan.flags && 
!rdev->wiphy.wowlan.n_patterns && - !rdev->wiphy.wowlan.tcp) + if (!wowlan) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { cfg80211_rdev_free_wowlan(rdev); - rdev->wowlan = NULL; + rdev->wiphy.wowlan_config = NULL; goto set_wakeup; } @@ -7954,11 +7990,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) goto error; } cfg80211_rdev_free_wowlan(rdev); - rdev->wowlan = ntrig; + rdev->wiphy.wowlan_config = ntrig; set_wakeup: - if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) - rdev_set_wakeup(rdev, rdev->wowlan); + if (rdev->ops->set_wakeup && + prev_enabled != !!rdev->wiphy.wowlan_config) + rdev_set_wakeup(rdev, rdev->wiphy.wowlan_config); return 0; error: @@ -8143,9 +8180,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) if (wdev->p2p_started) return 0; - mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_add_interface(rdev, wdev->iftype); - mutex_unlock(&rdev->devlist_mtx); if (err) return err; @@ -8154,9 +8189,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) return err; wdev->p2p_started = true; - mutex_lock(&rdev->devlist_mtx); rdev->opencount++; - mutex_unlock(&rdev->devlist_mtx); return 0; } @@ -8172,11 +8205,7 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->stop_p2p_device) return -EOPNOTSUPP; - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); cfg80211_stop_p2p_device(rdev, wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); return 0; } @@ -8319,11 +8348,11 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, info->user_ptr[0] = rdev; } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV || ops->internal_flags & NL80211_FLAG_NEED_WDEV) { - mutex_lock(&cfg80211_mutex); + ASSERT_RTNL(); + wdev = __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs); if (IS_ERR(wdev)) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return PTR_ERR(wdev); @@ -8334,7 +8363,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) { if (!dev) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -EINVAL; @@ -8348,7 +8376,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, if (dev) { if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP && !netif_running(dev)) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -ENETDOWN; @@ -8357,17 +8384,12 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, dev_hold(dev); } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) { if (!wdev->p2p_started) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -ENETDOWN; } } - cfg80211_lock_rdev(rdev); - - mutex_unlock(&cfg80211_mutex); - info->user_ptr[0] = rdev; } @@ -8377,8 +8399,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { - if (info->user_ptr[0]) - cfg80211_unlock_rdev(info->user_ptr[0]); if (info->user_ptr[1]) { if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) { struct wireless_dev *wdev = info->user_ptr[1]; @@ -8400,7 +8420,8 @@ static struct genl_ops nl80211_ops[] = { .dumpit = nl80211_dump_wiphy, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ - .internal_flags = NL80211_FLAG_NEED_WIPHY, + .internal_flags = NL80211_FLAG_NEED_WIPHY 
| + NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_WIPHY, @@ -8415,7 +8436,8 @@ static struct genl_ops nl80211_ops[] = { .dumpit = nl80211_dump_interface, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ - .internal_flags = NL80211_FLAG_NEED_WDEV, + .internal_flags = NL80211_FLAG_NEED_WDEV | + NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_INTERFACE, @@ -8574,6 +8596,7 @@ static struct genl_ops nl80211_ops[] = { .cmd = NL80211_CMD_GET_REG, .doit = nl80211_get_reg, .policy = nl80211_policy, + .internal_flags = NL80211_FLAG_NEED_RTNL, /* can be retrieved by unprivileged users */ }, { @@ -8581,6 +8604,7 @@ static struct genl_ops nl80211_ops[] = { .doit = nl80211_set_reg, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_REQ_SET_REG, @@ -9036,8 +9060,6 @@ static int nl80211_add_scan_req(struct sk_buff *msg, struct nlattr *nest; int i; - lockdep_assert_held(&rdev->sched_scan_mtx); - if (WARN_ON(!req)) return 0; @@ -9344,31 +9366,27 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, NL80211_CMD_DISASSOCIATE, gfp); } -void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf, - size_t len) +void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, + size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + const struct ieee80211_mgmt *mgmt = (void *)buf; + u32 cmd; - trace_cfg80211_send_unprot_deauth(dev); - nl80211_send_mlme_event(rdev, dev, buf, len, - NL80211_CMD_UNPROT_DEAUTHENTICATE, GFP_ATOMIC); -} -EXPORT_SYMBOL(cfg80211_send_unprot_deauth); + if (WARN_ON(len < 2)) + return; -void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf, - size_t len) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + if (ieee80211_is_deauth(mgmt->frame_control)) + cmd = NL80211_CMD_UNPROT_DEAUTHENTICATE; + else + cmd = NL80211_CMD_UNPROT_DISASSOCIATE; - trace_cfg80211_send_unprot_disassoc(dev); - nl80211_send_mlme_event(rdev, dev, buf, len, - NL80211_CMD_UNPROT_DISASSOCIATE, GFP_ATOMIC); + trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len); + nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC); } -EXPORT_SYMBOL(cfg80211_send_unprot_disassoc); +EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt); static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, int cmd, @@ -9879,7 +9897,6 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct sk_buff *msg; void *hdr; - int err; u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid); if (!nlportid) @@ -9900,12 +9917,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) goto nla_put_failure; - err = genlmsg_end(msg, hdr); - if (err < 0) { - nlmsg_free(msg); - return true; - } - + genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); return true; @@ -10348,10 +10360,7 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev, if (nl80211_send_chandef(msg, chandef)) goto nla_put_failure; - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, 
gfp); @@ -10417,7 +10426,6 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct sk_buff *msg; void *hdr; - int err; trace_cfg80211_probe_status(dev, addr, cookie, acked); @@ -10439,11 +10447,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, (acked && nla_put_flag(msg, NL80211_ATTR_ACK))) goto nla_put_failure; - err = genlmsg_end(msg, hdr); - if (err < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -10509,7 +10513,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct sk_buff *msg; void *hdr; - int err, size = 200; + int size = 200; trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup); @@ -10595,9 +10599,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, nla_nest_end(msg, reasons); } - err = genlmsg_end(msg, hdr); - if (err < 0) - goto free_msg; + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -10617,7 +10619,6 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct sk_buff *msg; void *hdr; - int err; trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper, reason_code); @@ -10640,11 +10641,7 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code))) goto nla_put_failure; - err = genlmsg_end(msg, hdr); - if (err < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -10702,7 +10699,6 @@ void cfg80211_ft_event(struct net_device *netdev, struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct sk_buff *msg; void *hdr; - int err; trace_cfg80211_ft_event(wiphy, netdev, ft_event); @@ -10728,11 +10724,7 @@ void cfg80211_ft_event(struct net_device *netdev, nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, ft_event->ric_ies); - err = genlmsg_end(msg, hdr); - if (err < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index cc35fbaa4578..5a24c986f34b 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -81,7 +81,10 @@ static struct regulatory_request core_request_world = { .country_ie_env = ENVIRON_ANY, }; -/* Receipt of information from last regulatory request */ +/* + * Receipt of information from last regulatory request, + * protected by RTNL (and can be accessed with RCU protection) + */ static struct regulatory_request __rcu *last_request = (void __rcu *)&core_request_world; @@ -96,39 +99,25 @@ static struct device_type reg_device_type = { * Central wireless core regulatory domains, we only need two, * the current one and a world regulatory domain in case we have no * information to give us an alpha2. 
+ * (protected by RTNL, can be read under RCU) */ const struct ieee80211_regdomain __rcu *cfg80211_regdomain; /* - * Protects static reg.c components: - * - cfg80211_regdomain (if not used with RCU) - * - cfg80211_world_regdom - * - last_request (if not used with RCU) - * - reg_num_devs_support_basehint - */ -static DEFINE_MUTEX(reg_mutex); - -/* * Number of devices that registered to the core * that support cellular base station regulatory hints + * (protected by RTNL) */ static int reg_num_devs_support_basehint; -static inline void assert_reg_lock(void) -{ - lockdep_assert_held(®_mutex); -} - static const struct ieee80211_regdomain *get_cfg80211_regdom(void) { - return rcu_dereference_protected(cfg80211_regdomain, - lockdep_is_held(®_mutex)); + return rtnl_dereference(cfg80211_regdomain); } static const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy) { - return rcu_dereference_protected(wiphy->regd, - lockdep_is_held(®_mutex)); + return rtnl_dereference(wiphy->regd); } static void rcu_free_regdom(const struct ieee80211_regdomain *r) @@ -140,8 +129,7 @@ static void rcu_free_regdom(const struct ieee80211_regdomain *r) static struct regulatory_request *get_last_request(void) { - return rcu_dereference_check(last_request, - lockdep_is_held(®_mutex)); + return rcu_dereference_rtnl(last_request); } /* Used to queue up regulatory hints */ @@ -200,6 +188,7 @@ static const struct ieee80211_regdomain world_regdom = { } }; +/* protected by RTNL */ static const struct ieee80211_regdomain *cfg80211_world_regdom = &world_regdom; @@ -215,7 +204,7 @@ static void reset_regdomains(bool full_reset, const struct ieee80211_regdomain *r; struct regulatory_request *lr; - assert_reg_lock(); + ASSERT_RTNL(); r = get_cfg80211_regdom(); @@ -377,7 +366,7 @@ static void reg_regdb_search(struct work_struct *work) const struct ieee80211_regdomain *curdom, *regdom = NULL; int i; - mutex_lock(&cfg80211_mutex); + rtnl_lock(); mutex_lock(®_regdb_search_mutex); while (!list_empty(®_regdb_search_list)) { @@ -402,7 +391,7 @@ static void reg_regdb_search(struct work_struct *work) if (!IS_ERR_OR_NULL(regdom)) set_regdom(regdom); - mutex_unlock(&cfg80211_mutex); + rtnl_unlock(); } static DECLARE_WORK(reg_regdb_work, reg_regdb_search); @@ -936,13 +925,7 @@ static bool reg_request_cell_base(struct regulatory_request *request) bool reg_last_request_cell_base(void) { - bool val; - - mutex_lock(®_mutex); - val = reg_request_cell_base(get_last_request()); - mutex_unlock(®_mutex); - - return val; + return reg_request_cell_base(get_last_request()); } #ifdef CONFIG_CFG80211_CERTIFICATION_ONUS @@ -1225,7 +1208,7 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) struct cfg80211_registered_device *rdev; struct wiphy *wiphy; - assert_cfg80211_lock(); + ASSERT_RTNL(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { wiphy = &rdev->wiphy; @@ -1362,7 +1345,7 @@ get_reg_request_treatment(struct wiphy *wiphy, return REG_REQ_OK; return REG_REQ_ALREADY_SET; } - return 0; + return REG_REQ_OK; case NL80211_REGDOM_SET_BY_DRIVER: if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) { if (regdom_changes(pending_request->alpha2)) @@ -1444,8 +1427,6 @@ static void reg_set_request_processed(void) * what it believes should be the current regulatory domain. * * Returns one of the different reg request treatment values. 
- * - * Caller must hold ®_mutex */ static enum reg_request_treatment __regulatory_hint(struct wiphy *wiphy, @@ -1570,21 +1551,19 @@ static void reg_process_pending_hints(void) { struct regulatory_request *reg_request, *lr; - mutex_lock(&cfg80211_mutex); - mutex_lock(®_mutex); lr = get_last_request(); /* When last_request->processed becomes true this will be rescheduled */ if (lr && !lr->processed) { REG_DBG_PRINT("Pending regulatory request, waiting for it to be processed...\n"); - goto out; + return; } spin_lock(®_requests_lock); if (list_empty(®_requests_list)) { spin_unlock(®_requests_lock); - goto out; + return; } reg_request = list_first_entry(®_requests_list, @@ -1595,10 +1574,6 @@ static void reg_process_pending_hints(void) spin_unlock(®_requests_lock); reg_process_hint(reg_request, reg_request->initiator); - -out: - mutex_unlock(®_mutex); - mutex_unlock(&cfg80211_mutex); } /* Processes beacon hints -- this has nothing to do with country IEs */ @@ -1607,9 +1582,6 @@ static void reg_process_pending_beacon_hints(void) struct cfg80211_registered_device *rdev; struct reg_beacon *pending_beacon, *tmp; - mutex_lock(&cfg80211_mutex); - mutex_lock(®_mutex); - /* This goes through the _pending_ beacon list */ spin_lock_bh(®_pending_beacons_lock); @@ -1626,14 +1598,14 @@ static void reg_process_pending_beacon_hints(void) } spin_unlock_bh(®_pending_beacons_lock); - mutex_unlock(®_mutex); - mutex_unlock(&cfg80211_mutex); } static void reg_todo(struct work_struct *work) { + rtnl_lock(); reg_process_pending_hints(); reg_process_pending_beacon_hints(); + rtnl_unlock(); } static void queue_regulatory_request(struct regulatory_request *request) @@ -1717,29 +1689,23 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) } EXPORT_SYMBOL(regulatory_hint); -/* - * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and - * therefore cannot iterate over the rdev list here. - */ void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band, const u8 *country_ie, u8 country_ie_len) { char alpha2[2]; enum environment_cap env = ENVIRON_ANY; - struct regulatory_request *request, *lr; - - mutex_lock(®_mutex); - lr = get_last_request(); - - if (unlikely(!lr)) - goto out; + struct regulatory_request *request = NULL, *lr; /* IE len must be evenly divisible by 2 */ if (country_ie_len & 0x01) - goto out; + return; if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) - goto out; + return; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (!request) + return; alpha2[0] = country_ie[0]; alpha2[1] = country_ie[1]; @@ -1749,19 +1715,21 @@ void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band, else if (country_ie[2] == 'O') env = ENVIRON_OUTDOOR; + rcu_read_lock(); + lr = get_last_request(); + + if (unlikely(!lr)) + goto out; + /* * We will run this only upon a successful connection on cfg80211. * We leave conflict resolution to the workqueue, where can hold - * cfg80211_mutex. + * the RTNL. 
*/ if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && lr->wiphy_idx != WIPHY_IDX_INVALID) goto out; - request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); - if (!request) - goto out; - request->wiphy_idx = get_wiphy_idx(wiphy); request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; @@ -1769,8 +1737,10 @@ void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band, request->country_ie_env = env; queue_regulatory_request(request); + request = NULL; out: - mutex_unlock(®_mutex); + kfree(request); + rcu_read_unlock(); } static void restore_alpha2(char *alpha2, bool reset_user) @@ -1858,8 +1828,7 @@ static void restore_regulatory_settings(bool reset_user) LIST_HEAD(tmp_reg_req_list); struct cfg80211_registered_device *rdev; - mutex_lock(&cfg80211_mutex); - mutex_lock(®_mutex); + ASSERT_RTNL(); reset_regdomains(true, &world_regdom); restore_alpha2(alpha2, reset_user); @@ -1914,9 +1883,6 @@ static void restore_regulatory_settings(bool reset_user) list_splice_tail_init(&tmp_reg_req_list, ®_requests_list); spin_unlock(®_requests_lock); - mutex_unlock(®_mutex); - mutex_unlock(&cfg80211_mutex); - REG_DBG_PRINT("Kicking the queue\n"); schedule_work(®_work); @@ -2231,7 +2197,6 @@ int set_regdom(const struct ieee80211_regdomain *rd) struct regulatory_request *lr; int r; - mutex_lock(®_mutex); lr = get_last_request(); /* Note that this doesn't update the wiphys, this is done below */ @@ -2241,14 +2206,12 @@ int set_regdom(const struct ieee80211_regdomain *rd) reg_set_request_processed(); kfree(rd); - goto out; + return r; } /* This would make this whole thing pointless */ - if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom())) { - r = -EINVAL; - goto out; - } + if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom())) + return -EINVAL; /* update all wiphys now with the new established regulatory domain */ update_all_wiphy_regulatory(lr->initiator); @@ -2259,10 +2222,7 @@ int set_regdom(const struct ieee80211_regdomain *rd) reg_set_request_processed(); - out: - mutex_unlock(®_mutex); - - return r; + return 0; } int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env) @@ -2287,23 +2247,17 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env) void wiphy_regulatory_register(struct wiphy *wiphy) { - mutex_lock(®_mutex); - if (!reg_dev_ignore_cell_hint(wiphy)) reg_num_devs_support_basehint++; wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); - - mutex_unlock(®_mutex); } -/* Caller must hold cfg80211_mutex */ void wiphy_regulatory_deregister(struct wiphy *wiphy) { struct wiphy *request_wiphy = NULL; struct regulatory_request *lr; - mutex_lock(®_mutex); lr = get_last_request(); if (!reg_dev_ignore_cell_hint(wiphy)) @@ -2316,12 +2270,10 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy) request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); if (!request_wiphy || request_wiphy != wiphy) - goto out; + return; lr->wiphy_idx = WIPHY_IDX_INVALID; lr->country_ie_env = ENVIRON_ANY; -out: - mutex_unlock(®_mutex); } static void reg_timeout_work(struct work_struct *work) @@ -2385,9 +2337,9 @@ void regulatory_exit(void) cancel_delayed_work_sync(®_timeout); /* Lock to suppress warnings */ - mutex_lock(®_mutex); + rtnl_lock(); reset_regdomains(true, NULL); - mutex_unlock(®_mutex); + rtnl_unlock(); dev_set_uevent_suppress(®_pdev->dev, true); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index fd99ea495b7e..dd01b58fa78c 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -169,7 +169,7 @@ void 
___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) union iwreq_data wrqu; #endif - lockdep_assert_held(&rdev->sched_scan_mtx); + ASSERT_RTNL(); request = rdev->scan_req; @@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk) rdev = container_of(wk, struct cfg80211_registered_device, scan_done_wk); - mutex_lock(&rdev->sched_scan_mtx); + rtnl_lock(); ___cfg80211_scan_done(rdev, false); - mutex_unlock(&rdev->sched_scan_mtx); + rtnl_unlock(); } void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) @@ -241,6 +241,7 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); request->aborted = aborted; + request->notified = true; queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk); } EXPORT_SYMBOL(cfg80211_scan_done); @@ -255,7 +256,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk) request = rdev->sched_scan_req; - mutex_lock(&rdev->sched_scan_mtx); + rtnl_lock(); /* we don't have sched_scan_req anymore if the scan is stopping */ if (request) { @@ -270,7 +271,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk) nl80211_send_sched_scan_results(rdev, request->dev); } - mutex_unlock(&rdev->sched_scan_mtx); + rtnl_unlock(); } void cfg80211_sched_scan_results(struct wiphy *wiphy) @@ -289,9 +290,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy) trace_cfg80211_sched_scan_stopped(wiphy); - mutex_lock(&rdev->sched_scan_mtx); + rtnl_lock(); __cfg80211_stop_sched_scan(rdev, true); - mutex_unlock(&rdev->sched_scan_mtx); + rtnl_unlock(); } EXPORT_SYMBOL(cfg80211_sched_scan_stopped); @@ -300,7 +301,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev, { struct net_device *dev; - lockdep_assert_held(&rdev->sched_scan_mtx); + ASSERT_RTNL(); if (!rdev->sched_scan_req) return -ENOENT; @@ -1040,6 +1041,25 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) EXPORT_SYMBOL(cfg80211_unlink_bss); #ifdef CONFIG_CFG80211_WEXT +static struct cfg80211_registered_device * +cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) +{ + struct cfg80211_registered_device *rdev; + struct net_device *dev; + + ASSERT_RTNL(); + + dev = dev_get_by_index(net, ifindex); + if (!dev) + return ERR_PTR(-ENODEV); + if (dev->ieee80211_ptr) + rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); + else + rdev = ERR_PTR(-ENODEV); + dev_put(dev); + return rdev; +} + int cfg80211_wext_siwscan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) @@ -1062,7 +1082,6 @@ int cfg80211_wext_siwscan(struct net_device *dev, if (IS_ERR(rdev)) return PTR_ERR(rdev); - mutex_lock(&rdev->sched_scan_mtx); if (rdev->scan_req) { err = -EBUSY; goto out; @@ -1169,9 +1188,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, dev_hold(dev); } out: - mutex_unlock(&rdev->sched_scan_mtx); kfree(creq); - cfg80211_unlock_rdev(rdev); return err; } EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan); @@ -1470,10 +1487,8 @@ int cfg80211_wext_giwscan(struct net_device *dev, if (IS_ERR(rdev)) return PTR_ERR(rdev); - if (rdev->scan_req) { - res = -EAGAIN; - goto out; - } + if (rdev->scan_req) + return -EAGAIN; res = ieee80211_scan_results(rdev, info, extra, data->length); data->length = 0; @@ -1482,8 +1497,6 @@ int cfg80211_wext_giwscan(struct net_device *dev, res = 0; } - out: - cfg80211_unlock_rdev(rdev); return res; } EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan); diff --git a/net/wireless/sme.c b/net/wireless/sme.c 
index 3ed35c345cae..ae7e2cbf45cb 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -1,5 +1,7 @@ /* - * SME code for cfg80211's connect emulation. + * SME code for cfg80211 + * both driver SME event handling and the SME implementation + * (for nl80211's connect() and wext) * * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2009 Intel Corporation. All rights reserved. @@ -18,18 +20,24 @@ #include "reg.h" #include "rdev-ops.h" +/* + * Software SME in cfg80211, using auth/assoc/deauth calls to the + * driver. This is is for implementing nl80211's connect/disconnect + * and wireless extensions (if configured.) + */ + struct cfg80211_conn { struct cfg80211_connect_params params; /* these are sub-states of the _CONNECTING sme_state */ enum { - CFG80211_CONN_IDLE, CFG80211_CONN_SCANNING, CFG80211_CONN_SCAN_AGAIN, CFG80211_CONN_AUTHENTICATE_NEXT, CFG80211_CONN_AUTHENTICATING, CFG80211_CONN_ASSOCIATE_NEXT, CFG80211_CONN_ASSOCIATING, - CFG80211_CONN_DEAUTH_ASSOC_FAIL, + CFG80211_CONN_DEAUTH, + CFG80211_CONN_CONNECTED, } state; u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; u8 *ie; @@ -37,45 +45,16 @@ struct cfg80211_conn { bool auto_auth, prev_bssid_valid; }; -static bool cfg80211_is_all_idle(void) +static void cfg80211_sme_free(struct wireless_dev *wdev) { - struct cfg80211_registered_device *rdev; - struct wireless_dev *wdev; - bool is_all_idle = true; - - mutex_lock(&cfg80211_mutex); - - /* - * All devices must be idle as otherwise if you are actively - * scanning some new beacon hints could be learned and would - * count as new regulatory hints. - */ - list_for_each_entry(rdev, &cfg80211_rdev_list, list) { - cfg80211_lock_rdev(rdev); - list_for_each_entry(wdev, &rdev->wdev_list, list) { - wdev_lock(wdev); - if (wdev->sme_state != CFG80211_SME_IDLE) - is_all_idle = false; - wdev_unlock(wdev); - } - cfg80211_unlock_rdev(rdev); - } - - mutex_unlock(&cfg80211_mutex); - - return is_all_idle; -} - -static void disconnect_work(struct work_struct *work) -{ - if (!cfg80211_is_all_idle()) + if (!wdev->conn) return; - regulatory_hint_disconnect(); + kfree(wdev->conn->ie); + kfree(wdev->conn); + wdev->conn = NULL; } -static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); - static int cfg80211_conn_scan(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); @@ -85,7 +64,6 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) ASSERT_RTNL(); ASSERT_RDEV_LOCK(rdev); ASSERT_WDEV_LOCK(wdev); - lockdep_assert_held(&rdev->sched_scan_mtx); if (rdev->scan_req) return -EBUSY; @@ -171,18 +149,21 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) params = &wdev->conn->params; switch (wdev->conn->state) { + case CFG80211_CONN_SCANNING: + /* didn't find it during scan ... 
*/ + return -ENOENT; case CFG80211_CONN_SCAN_AGAIN: return cfg80211_conn_scan(wdev); case CFG80211_CONN_AUTHENTICATE_NEXT: BUG_ON(!rdev->ops->auth); wdev->conn->state = CFG80211_CONN_AUTHENTICATING; - return __cfg80211_mlme_auth(rdev, wdev->netdev, - params->channel, params->auth_type, - params->bssid, - params->ssid, params->ssid_len, - NULL, 0, - params->key, params->key_len, - params->key_idx, NULL, 0); + return cfg80211_mlme_auth(rdev, wdev->netdev, + params->channel, params->auth_type, + params->bssid, + params->ssid, params->ssid_len, + NULL, 0, + params->key, params->key_len, + params->key_idx, NULL, 0); case CFG80211_CONN_ASSOCIATE_NEXT: BUG_ON(!rdev->ops->assoc); wdev->conn->state = CFG80211_CONN_ASSOCIATING; @@ -198,21 +179,20 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) req.vht_capa = params->vht_capa; req.vht_capa_mask = params->vht_capa_mask; - err = __cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel, - params->bssid, params->ssid, - params->ssid_len, &req); + err = cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel, + params->bssid, params->ssid, + params->ssid_len, &req); if (err) - __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, - NULL, 0, - WLAN_REASON_DEAUTH_LEAVING, - false); + cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, + NULL, 0, + WLAN_REASON_DEAUTH_LEAVING, + false); return err; - case CFG80211_CONN_DEAUTH_ASSOC_FAIL: - __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, - NULL, 0, - WLAN_REASON_DEAUTH_LEAVING, false); - /* return an error so that we call __cfg80211_connect_result() */ - return -EINVAL; + case CFG80211_CONN_DEAUTH: + cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, + NULL, 0, + WLAN_REASON_DEAUTH_LEAVING, false); + return 0; default: return 0; } @@ -226,9 +206,6 @@ void cfg80211_conn_work(struct work_struct *work) u8 bssid_buf[ETH_ALEN], *bssid = NULL; rtnl_lock(); - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { if (!wdev->netdev) @@ -239,7 +216,8 @@ void cfg80211_conn_work(struct work_struct *work) wdev_unlock(wdev); continue; } - if (wdev->sme_state != CFG80211_SME_CONNECTING || !wdev->conn) { + if (!wdev->conn || + wdev->conn->state == CFG80211_CONN_CONNECTED) { wdev_unlock(wdev); continue; } @@ -247,18 +225,17 @@ void cfg80211_conn_work(struct work_struct *work) memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); bssid = bssid_buf; } - if (cfg80211_conn_do_work(wdev)) + if (cfg80211_conn_do_work(wdev)) { __cfg80211_connect_result( wdev->netdev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); + cfg80211_sme_free(wdev); + } wdev_unlock(wdev); } - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); rtnl_unlock(); } @@ -299,9 +276,6 @@ static void __cfg80211_sme_scan_done(struct net_device *dev) ASSERT_WDEV_LOCK(wdev); - if (wdev->sme_state != CFG80211_SME_CONNECTING) - return; - if (!wdev->conn) return; @@ -310,20 +284,10 @@ static void __cfg80211_sme_scan_done(struct net_device *dev) return; bss = cfg80211_get_conn_bss(wdev); - if (bss) { + if (bss) cfg80211_put_bss(&rdev->wiphy, bss); - } else { - /* not found */ - if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) - schedule_work(&rdev->conn_work); - else - __cfg80211_connect_result( - wdev->netdev, - wdev->conn->params.bssid, - NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - false, NULL); - } + else + schedule_work(&rdev->conn_work); } void 
cfg80211_sme_scan_done(struct net_device *dev) @@ -335,10 +299,8 @@ void cfg80211_sme_scan_done(struct net_device *dev) wdev_unlock(wdev); } -void cfg80211_sme_rx_auth(struct net_device *dev, - const u8 *buf, size_t len) +void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) { - struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; @@ -346,11 +308,7 @@ void cfg80211_sme_rx_auth(struct net_device *dev, ASSERT_WDEV_LOCK(wdev); - /* should only RX auth frames when connecting */ - if (wdev->sme_state != CFG80211_SME_CONNECTING) - return; - - if (WARN_ON(!wdev->conn)) + if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED) return; if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && @@ -379,46 +337,226 @@ void cfg80211_sme_rx_auth(struct net_device *dev, wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); } else if (status_code != WLAN_STATUS_SUCCESS) { - __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, + __cfg80211_connect_result(wdev->netdev, mgmt->bssid, + NULL, 0, NULL, 0, status_code, false, NULL); - } else if (wdev->sme_state == CFG80211_SME_CONNECTING && - wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { + } else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); } } -bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev) +bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status) { - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - if (WARN_ON(!wdev->conn)) + if (!wdev->conn) return false; - if (!wdev->conn->prev_bssid_valid) + if (status == WLAN_STATUS_SUCCESS) { + wdev->conn->state = CFG80211_CONN_CONNECTED; return false; + } - /* - * Some stupid APs don't accept reassoc, so we - * need to fall back to trying regular assoc. - */ - wdev->conn->prev_bssid_valid = false; - wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; + if (wdev->conn->prev_bssid_valid) { + /* + * Some stupid APs don't accept reassoc, so we + * need to fall back to trying regular assoc; + * return true so no event is sent to userspace. 
+ */ + wdev->conn->prev_bssid_valid = false; + wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; + schedule_work(&rdev->conn_work); + return true; + } + + wdev->conn->state = CFG80211_CONN_DEAUTH; schedule_work(&rdev->conn_work); + return false; +} - return true; +void cfg80211_sme_deauth(struct wireless_dev *wdev) +{ + cfg80211_sme_free(wdev); } -void cfg80211_sme_failed_assoc(struct wireless_dev *wdev) +void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) { - struct wiphy *wiphy = wdev->wiphy; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + cfg80211_sme_free(wdev); +} + +void cfg80211_sme_disassoc(struct wireless_dev *wdev) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL; + if (!wdev->conn) + return; + + wdev->conn->state = CFG80211_CONN_DEAUTH; schedule_work(&rdev->conn_work); } +void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) +{ + cfg80211_sme_disassoc(wdev); +} + +static int cfg80211_sme_connect(struct wireless_dev *wdev, + struct cfg80211_connect_params *connect, + const u8 *prev_bssid) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct cfg80211_bss *bss; + int err; + + if (!rdev->ops->auth || !rdev->ops->assoc) + return -EOPNOTSUPP; + + if (wdev->current_bss) + return -EALREADY; + + if (WARN_ON(wdev->conn)) + return -EINPROGRESS; + + wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); + if (!wdev->conn) + return -ENOMEM; + + /* + * Copy all parameters, and treat explicitly IEs, BSSID, SSID. + */ + memcpy(&wdev->conn->params, connect, sizeof(*connect)); + if (connect->bssid) { + wdev->conn->params.bssid = wdev->conn->bssid; + memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); + } + + if (connect->ie) { + wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, + GFP_KERNEL); + wdev->conn->params.ie = wdev->conn->ie; + if (!wdev->conn->ie) { + kfree(wdev->conn); + wdev->conn = NULL; + return -ENOMEM; + } + } + + if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { + wdev->conn->auto_auth = true; + /* start with open system ... should mostly work */ + wdev->conn->params.auth_type = + NL80211_AUTHTYPE_OPEN_SYSTEM; + } else { + wdev->conn->auto_auth = false; + } + + wdev->conn->params.ssid = wdev->ssid; + wdev->conn->params.ssid_len = connect->ssid_len; + + /* see if we have the bss already */ + bss = cfg80211_get_conn_bss(wdev); + + if (prev_bssid) { + memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); + wdev->conn->prev_bssid_valid = true; + } + + /* we're good if we have a matching bss struct */ + if (bss) { + wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; + err = cfg80211_conn_do_work(wdev); + cfg80211_put_bss(wdev->wiphy, bss); + } else { + /* otherwise we'll need to scan for the AP first */ + err = cfg80211_conn_scan(wdev); + + /* + * If we can't scan right now, then we need to scan again + * after the current scan finished, since the parameters + * changed (unless we find a good AP anyway). 
+ */ + if (err == -EBUSY) { + err = 0; + wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; + } + } + + if (err) + cfg80211_sme_free(wdev); + + return err; +} + +static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + int err; + + if (!wdev->conn) + return 0; + + if (!rdev->ops->deauth) + return -EOPNOTSUPP; + + if (wdev->conn->state == CFG80211_CONN_SCANNING || + wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) { + err = 0; + goto out; + } + + /* wdev->conn->params.bssid must be set if > SCANNING */ + err = cfg80211_mlme_deauth(rdev, wdev->netdev, + wdev->conn->params.bssid, + NULL, 0, reason, false); + out: + cfg80211_sme_free(wdev); + return err; +} + +/* + * code shared for in-device and software SME + */ + +static bool cfg80211_is_all_idle(void) +{ + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + bool is_all_idle = true; + + /* + * All devices must be idle as otherwise if you are actively + * scanning some new beacon hints could be learned and would + * count as new regulatory hints. + */ + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + list_for_each_entry(wdev, &rdev->wdev_list, list) { + wdev_lock(wdev); + if (wdev->conn || wdev->current_bss) + is_all_idle = false; + wdev_unlock(wdev); + } + } + + return is_all_idle; +} + +static void disconnect_work(struct work_struct *work) +{ + rtnl_lock(); + if (cfg80211_is_all_idle()) + regulatory_hint_disconnect(); + rtnl_unlock(); +} + +static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); + + +/* + * API calls for drivers implementing connect/disconnect and + * SME event handling + */ + void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, @@ -437,9 +575,6 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; - if (wdev->sme_state != CFG80211_SME_CONNECTING) - return; - nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, @@ -476,15 +611,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, wdev->current_bss = NULL; } - if (wdev->conn) - wdev->conn->state = CFG80211_CONN_IDLE; - if (status != WLAN_STATUS_SUCCESS) { - wdev->sme_state = CFG80211_SME_IDLE; - if (wdev->conn) - kfree(wdev->conn->ie); - kfree(wdev->conn); - wdev->conn = NULL; kfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; @@ -493,21 +620,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, } if (!bss) - bss = cfg80211_get_bss(wdev->wiphy, - wdev->conn ? 
wdev->conn->params.channel : - NULL, - bssid, + bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); - if (WARN_ON(!bss)) return; cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); - wdev->sme_state = CFG80211_SME_CONNECTED; cfg80211_upload_connect_keys(wdev); rcu_read_lock(); @@ -543,8 +665,6 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, struct cfg80211_event *ev; unsigned long flags; - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); - ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); if (!ev) return; @@ -585,14 +705,9 @@ void __cfg80211_roamed(struct wireless_dev *wdev, wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) goto out; - if (wdev->sme_state != CFG80211_SME_CONNECTED) + if (WARN_ON(!wdev->current_bss)) goto out; - /* internal error -- how did we get to CONNECTED w/o BSS? */ - if (WARN_ON(!wdev->current_bss)) { - goto out; - } - cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); wdev->current_bss = NULL; @@ -641,8 +756,6 @@ void cfg80211_roamed(struct net_device *dev, struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss; - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); - bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); @@ -664,8 +777,6 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_event *ev; unsigned long flags; - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); - if (WARN_ON(!bss)) return; @@ -707,25 +818,14 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; - if (wdev->sme_state != CFG80211_SME_CONNECTED) - return; - if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); } wdev->current_bss = NULL; - wdev->sme_state = CFG80211_SME_IDLE; wdev->ssid_len = 0; - if (wdev->conn) { - kfree(wdev->conn->ie); - wdev->conn->ie = NULL; - kfree(wdev->conn); - wdev->conn = NULL; - } - nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); /* @@ -754,8 +854,6 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason, struct cfg80211_event *ev; unsigned long flags; - CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); - ev = kzalloc(sizeof(*ev) + ie_len, gfp); if (!ev) return; @@ -773,21 +871,20 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason, } EXPORT_SYMBOL(cfg80211_disconnected); -int __cfg80211_connect(struct cfg80211_registered_device *rdev, - struct net_device *dev, - struct cfg80211_connect_params *connect, - struct cfg80211_cached_keys *connkeys, - const u8 *prev_bssid) +/* + * API calls for nl80211/wext compatibility code + */ +int cfg80211_connect(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_connect_params *connect, + struct cfg80211_cached_keys *connkeys, + const u8 *prev_bssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_bss *bss = NULL; int err; ASSERT_WDEV_LOCK(wdev); - if (wdev->sme_state != CFG80211_SME_IDLE) - return -EALREADY; - if (WARN_ON(wdev->connect_keys)) { kfree(wdev->connect_keys); wdev->connect_keys = NULL; @@ -823,219 +920,43 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, } } - if (!rdev->ops->connect) { - if (!rdev->ops->auth || !rdev->ops->assoc) - return -EOPNOTSUPP; 
- - if (WARN_ON(wdev->conn)) - return -EINPROGRESS; - - wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); - if (!wdev->conn) - return -ENOMEM; - - /* - * Copy all parameters, and treat explicitly IEs, BSSID, SSID. - */ - memcpy(&wdev->conn->params, connect, sizeof(*connect)); - if (connect->bssid) { - wdev->conn->params.bssid = wdev->conn->bssid; - memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); - } - - if (connect->ie) { - wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, - GFP_KERNEL); - wdev->conn->params.ie = wdev->conn->ie; - if (!wdev->conn->ie) { - kfree(wdev->conn); - wdev->conn = NULL; - return -ENOMEM; - } - } - - if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { - wdev->conn->auto_auth = true; - /* start with open system ... should mostly work */ - wdev->conn->params.auth_type = - NL80211_AUTHTYPE_OPEN_SYSTEM; - } else { - wdev->conn->auto_auth = false; - } - - memcpy(wdev->ssid, connect->ssid, connect->ssid_len); - wdev->ssid_len = connect->ssid_len; - wdev->conn->params.ssid = wdev->ssid; - wdev->conn->params.ssid_len = connect->ssid_len; + wdev->connect_keys = connkeys; + memcpy(wdev->ssid, connect->ssid, connect->ssid_len); + wdev->ssid_len = connect->ssid_len; - /* see if we have the bss already */ - bss = cfg80211_get_conn_bss(wdev); - - wdev->sme_state = CFG80211_SME_CONNECTING; - wdev->connect_keys = connkeys; - - if (prev_bssid) { - memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); - wdev->conn->prev_bssid_valid = true; - } - - /* we're good if we have a matching bss struct */ - if (bss) { - wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; - err = cfg80211_conn_do_work(wdev); - cfg80211_put_bss(wdev->wiphy, bss); - } else { - /* otherwise we'll need to scan for the AP first */ - err = cfg80211_conn_scan(wdev); - /* - * If we can't scan right now, then we need to scan again - * after the current scan finished, since the parameters - * changed (unless we find a good AP anyway). 
- */ - if (err == -EBUSY) { - err = 0; - wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; - } - } - if (err) { - kfree(wdev->conn->ie); - kfree(wdev->conn); - wdev->conn = NULL; - wdev->sme_state = CFG80211_SME_IDLE; - wdev->connect_keys = NULL; - wdev->ssid_len = 0; - } - - return err; - } else { - wdev->sme_state = CFG80211_SME_CONNECTING; - wdev->connect_keys = connkeys; + if (!rdev->ops->connect) + err = cfg80211_sme_connect(wdev, connect, prev_bssid); + else err = rdev_connect(rdev, dev, connect); - if (err) { - wdev->connect_keys = NULL; - wdev->sme_state = CFG80211_SME_IDLE; - return err; - } - memcpy(wdev->ssid, connect->ssid, connect->ssid_len); - wdev->ssid_len = connect->ssid_len; - - return 0; + if (err) { + wdev->connect_keys = NULL; + wdev->ssid_len = 0; + return err; } -} -int cfg80211_connect(struct cfg80211_registered_device *rdev, - struct net_device *dev, - struct cfg80211_connect_params *connect, - struct cfg80211_cached_keys *connkeys) -{ - int err; - - mutex_lock(&rdev->devlist_mtx); - /* might request scan - scan_mtx -> wdev_mtx dependency */ - mutex_lock(&rdev->sched_scan_mtx); - wdev_lock(dev->ieee80211_ptr); - err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); - wdev_unlock(dev->ieee80211_ptr); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - - return err; + return 0; } -int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, - struct net_device *dev, u16 reason, bool wextev) +int cfg80211_disconnect(struct cfg80211_registered_device *rdev, + struct net_device *dev, u16 reason, bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); - if (wdev->sme_state == CFG80211_SME_IDLE) - return -EINVAL; - kfree(wdev->connect_keys); wdev->connect_keys = NULL; - if (!rdev->ops->disconnect) { - if (!rdev->ops->deauth) - return -EOPNOTSUPP; - - /* was it connected by userspace SME? 
*/ - if (!wdev->conn) { - cfg80211_mlme_down(rdev, dev); - goto disconnect; - } - - if (wdev->sme_state == CFG80211_SME_CONNECTING && - (wdev->conn->state == CFG80211_CONN_SCANNING || - wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) { - wdev->sme_state = CFG80211_SME_IDLE; - kfree(wdev->conn->ie); - kfree(wdev->conn); - wdev->conn = NULL; - wdev->ssid_len = 0; - return 0; - } - - /* wdev->conn->params.bssid must be set if > SCANNING */ - err = __cfg80211_mlme_deauth(rdev, dev, - wdev->conn->params.bssid, - NULL, 0, reason, false); - if (err) - return err; + if (wdev->conn) { + err = cfg80211_sme_disconnect(wdev, reason); + } else if (!rdev->ops->disconnect) { + cfg80211_mlme_down(rdev, dev); + err = 0; } else { err = rdev_disconnect(rdev, dev, reason); - if (err) - return err; } - disconnect: - if (wdev->sme_state == CFG80211_SME_CONNECTED) - __cfg80211_disconnected(dev, NULL, 0, 0, false); - else if (wdev->sme_state == CFG80211_SME_CONNECTING) - __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - wextev, NULL); - - return 0; -} - -int cfg80211_disconnect(struct cfg80211_registered_device *rdev, - struct net_device *dev, - u16 reason, bool wextev) -{ - int err; - - wdev_lock(dev->ieee80211_ptr); - err = __cfg80211_disconnect(rdev, dev, reason, wextev); - wdev_unlock(dev->ieee80211_ptr); - return err; } - -void cfg80211_sme_disassoc(struct net_device *dev, - struct cfg80211_internal_bss *bss) -{ - struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); - u8 bssid[ETH_ALEN]; - - ASSERT_WDEV_LOCK(wdev); - - if (!wdev->conn) - return; - - if (wdev->conn->state == CFG80211_CONN_IDLE) - return; - - /* - * Ok, so the association was made by this SME -- we don't - * want it any more so deauthenticate too. 
- */ - - memcpy(bssid, bss->pub.bssid, ETH_ALEN); - - __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, - WLAN_REASON_DEAUTH_LEAVING, false); -} diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 8f28b9f798d8..360a42c6f694 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -91,6 +91,7 @@ static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) cfg80211_leave(rdev, wdev); } +#ifdef CONFIG_PM static int wiphy_suspend(struct device *dev, pm_message_t state) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); @@ -100,10 +101,10 @@ static int wiphy_suspend(struct device *dev, pm_message_t state) rtnl_lock(); if (rdev->wiphy.registered) { - if (!rdev->wowlan) + if (!rdev->wiphy.wowlan_config) cfg80211_leave_all(rdev); if (rdev->ops->suspend) - ret = rdev_suspend(rdev, rdev->wowlan); + ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); if (ret == 1) { /* Driver refuse to configure wowlan */ cfg80211_leave_all(rdev); @@ -132,6 +133,7 @@ static int wiphy_resume(struct device *dev) return ret; } +#endif static const void *wiphy_namespace(struct device *d) { @@ -146,8 +148,10 @@ struct class ieee80211_class = { .dev_release = wiphy_dev_release, .dev_attrs = ieee80211_dev_attrs, .dev_uevent = wiphy_uevent, +#ifdef CONFIG_PM .suspend = wiphy_suspend, .resume = wiphy_resume, +#endif .ns_type = &net_ns_type_operations, .namespace = wiphy_namespace, }; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 5755bc14abbd..e1534baf2ebb 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1911,24 +1911,46 @@ TRACE_EVENT(cfg80211_send_rx_assoc, NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG) ); -DEFINE_EVENT(netdev_evt_only, __cfg80211_send_deauth, - TP_PROTO(struct net_device *netdev), - TP_ARGS(netdev) +DECLARE_EVENT_CLASS(netdev_frame_event, + TP_PROTO(struct net_device *netdev, const u8 *buf, int len), + TP_ARGS(netdev, buf, len), + TP_STRUCT__entry( + NETDEV_ENTRY + __dynamic_array(u8, frame, len) + ), + TP_fast_assign( + NETDEV_ASSIGN; + memcpy(__get_dynamic_array(frame), buf, len); + ), + TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", + NETDEV_PR_ARG, + le16_to_cpup((__le16 *)__get_dynamic_array(frame))) ); -DEFINE_EVENT(netdev_evt_only, __cfg80211_send_disassoc, - TP_PROTO(struct net_device *netdev), - TP_ARGS(netdev) +DEFINE_EVENT(netdev_frame_event, cfg80211_rx_unprot_mlme_mgmt, + TP_PROTO(struct net_device *netdev, const u8 *buf, int len), + TP_ARGS(netdev, buf, len) ); -DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_deauth, - TP_PROTO(struct net_device *netdev), - TP_ARGS(netdev) +DEFINE_EVENT(netdev_frame_event, cfg80211_rx_mlme_mgmt, + TP_PROTO(struct net_device *netdev, const u8 *buf, int len), + TP_ARGS(netdev, buf, len) ); -DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_disassoc, - TP_PROTO(struct net_device *netdev), - TP_ARGS(netdev) +TRACE_EVENT(cfg80211_tx_mlme_mgmt, + TP_PROTO(struct net_device *netdev, const u8 *buf, int len), + TP_ARGS(netdev, buf, len), + TP_STRUCT__entry( + NETDEV_ENTRY + __dynamic_array(u8, frame, len) + ), + TP_fast_assign( + NETDEV_ASSIGN; + memcpy(__get_dynamic_array(frame), buf, len); + ), + TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", + NETDEV_PR_ARG, + le16_to_cpup((__le16 *)__get_dynamic_array(frame))) ); DECLARE_EVENT_CLASS(netdev_mac_evt, diff --git a/net/wireless/util.c b/net/wireless/util.c index f5ad4d94ba88..74458b7f61eb 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -33,6 +33,29 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband, } 
EXPORT_SYMBOL(ieee80211_get_response_rate); +u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband) +{ + struct ieee80211_rate *bitrates; + u32 mandatory_rates = 0; + enum ieee80211_rate_flags mandatory_flag; + int i; + + if (WARN_ON(!sband)) + return 1; + + if (sband->band == IEEE80211_BAND_2GHZ) + mandatory_flag = IEEE80211_RATE_MANDATORY_B; + else + mandatory_flag = IEEE80211_RATE_MANDATORY_A; + + bitrates = sband->bitrates; + for (i = 0; i < sband->n_bitrates; i++) + if (bitrates[i].flags & mandatory_flag) + mandatory_rates |= BIT(i); + return mandatory_rates; +} +EXPORT_SYMBOL(ieee80211_mandatory_rates); + int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band) { /* see 802.11 17.3.8.3.2 and Annex J @@ -785,12 +808,8 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev) ASSERT_RTNL(); ASSERT_RDEV_LOCK(rdev); - mutex_lock(&rdev->devlist_mtx); - list_for_each_entry(wdev, &rdev->wdev_list, list) cfg80211_process_wdev_events(wdev); - - mutex_unlock(&rdev->devlist_mtx); } int cfg80211_change_iface(struct cfg80211_registered_device *rdev, @@ -822,10 +841,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, return -EBUSY; if (ntype != otype && netif_running(dev)) { - mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr, ntype); - mutex_unlock(&rdev->devlist_mtx); if (err) return err; @@ -841,8 +858,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: + wdev_lock(dev->ieee80211_ptr); cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, true); + wdev_unlock(dev->ieee80211_ptr); break; case NL80211_IFTYPE_MESH_POINT: /* mesh should be handled? */ @@ -1169,6 +1188,9 @@ bool ieee80211_operating_class_to_band(u8 operating_class, case 84: *band = IEEE80211_BAND_2GHZ; return true; + case 180: + *band = IEEE80211_BAND_60GHZ; + return true; } return false; @@ -1184,8 +1206,6 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, if (!beacon_int) return -EINVAL; - mutex_lock(&rdev->devlist_mtx); - list_for_each_entry(wdev, &rdev->wdev_list, list) { if (!wdev->beacon_interval) continue; @@ -1195,8 +1215,6 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, } } - mutex_unlock(&rdev->devlist_mtx); - return res; } @@ -1220,7 +1238,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, int i, j; ASSERT_RTNL(); - lockdep_assert_held(&rdev->devlist_mtx); if (WARN_ON(hweight32(radar_detect) > 1)) return -EINVAL; diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index d997d0f0c54a..e7c6e862580d 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -72,7 +72,6 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, struct cfg80211_registered_device *rdev; struct vif_params vifparams; enum nl80211_iftype type; - int ret; rdev = wiphy_to_dev(wdev->wiphy); @@ -98,11 +97,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info, memset(&vifparams, 0, sizeof(vifparams)); - cfg80211_lock_rdev(rdev); - ret = cfg80211_change_iface(rdev, dev, type, NULL, &vifparams); - cfg80211_unlock_rdev(rdev); - - return ret; + return cfg80211_change_iface(rdev, dev, type, NULL, &vifparams); } EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode); @@ -579,13 +574,10 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev, { int err; - /* devlist mutex needed 
for possible IBSS re-join */ - mutex_lock(&rdev->devlist_mtx); wdev_lock(dev->ieee80211_ptr); err = __cfg80211_set_encryption(rdev, dev, pairwise, addr, remove, tx_key, idx, params); wdev_unlock(dev->ieee80211_ptr); - mutex_unlock(&rdev->devlist_mtx); return err; } @@ -787,7 +779,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, struct cfg80211_chan_def chandef = { .width = NL80211_CHAN_WIDTH_20_NOHT, }; - int freq, err; + int freq; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: @@ -804,10 +796,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (!chandef.chan) return -EINVAL; - mutex_lock(&rdev->devlist_mtx); - err = cfg80211_set_monitor_channel(rdev, &chandef); - mutex_unlock(&rdev->devlist_mtx); - return err; + return cfg80211_set_monitor_channel(rdev, &chandef); case NL80211_IFTYPE_MESH_POINT: freq = cfg80211_wext_freq(wdev->wiphy, wextfreq); if (freq < 0) @@ -818,10 +807,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev, chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); if (!chandef.chan) return -EINVAL; - mutex_lock(&rdev->devlist_mtx); - err = cfg80211_set_mesh_channel(rdev, wdev, &chandef); - mutex_unlock(&rdev->devlist_mtx); - return err; + return cfg80211_set_mesh_channel(rdev, wdev, &chandef); default: return -EOPNOTSUPP; } diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index e79cb5c0655a..14c9a2583ba0 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -54,8 +54,8 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, if (wdev->wext.prev_bssid_valid) prev_bssid = wdev->wext.prev_bssid; - err = __cfg80211_connect(rdev, wdev->netdev, - &wdev->wext.connect, ck, prev_bssid); + err = cfg80211_connect(rdev, wdev->netdev, + &wdev->wext.connect, ck, prev_bssid); if (err) kfree(ck); @@ -87,12 +87,9 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, return -EINVAL; } - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); - if (wdev->sme_state != CFG80211_SME_IDLE) { + if (wdev->conn) { bool event = true; if (wdev->wext.connect.channel == chan) { @@ -103,8 +100,8 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, /* if SSID set, we'll try right again, avoid event */ if (wdev->wext.connect.ssid_len) event = false; - err = __cfg80211_disconnect(rdev, dev, - WLAN_REASON_DEAUTH_LEAVING, event); + err = cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } @@ -136,9 +133,6 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); return err; } @@ -190,14 +184,11 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, if (len > 0 && ssid[len - 1] == '\0') len--; - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); err = 0; - if (wdev->sme_state != CFG80211_SME_IDLE) { + if (wdev->conn) { bool event = true; if (wdev->wext.connect.ssid && len && @@ -208,8 +199,8 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, /* if SSID set now, we'll try to connect, avoid event */ if (len) event = false; - err = __cfg80211_disconnect(rdev, dev, - WLAN_REASON_DEAUTH_LEAVING, event); + err = cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } @@ -226,9 +217,6 @@ int 
cfg80211_mgd_wext_siwessid(struct net_device *dev, err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); return err; } @@ -287,12 +275,9 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; - cfg80211_lock_rdev(rdev); - mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); - if (wdev->sme_state != CFG80211_SME_IDLE) { + if (wdev->conn) { err = 0; /* both automatic */ if (!bssid && !wdev->wext.connect.bssid) @@ -303,8 +288,8 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, ether_addr_equal(bssid, wdev->wext.connect.bssid)) goto out; - err = __cfg80211_disconnect(rdev, dev, - WLAN_REASON_DEAUTH_LEAVING, false); + err = cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } @@ -318,9 +303,6 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx); - cfg80211_unlock_rdev(rdev); return err; } @@ -382,9 +364,9 @@ int cfg80211_wext_siwgenie(struct net_device *dev, wdev->wext.ie = ie; wdev->wext.ie_len = ie_len; - if (wdev->sme_state != CFG80211_SME_IDLE) { - err = __cfg80211_disconnect(rdev, dev, - WLAN_REASON_DEAUTH_LEAVING, false); + if (wdev->conn) { + err = cfg80211_disconnect(rdev, dev, + WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } @@ -420,8 +402,7 @@ int cfg80211_wext_siwmlme(struct net_device *dev, switch (mlme->cmd) { case IW_MLME_DEAUTH: case IW_MLME_DISASSOC: - err = __cfg80211_disconnect(rdev, dev, mlme->reason_code, - true); + err = cfg80211_disconnect(rdev, dev, mlme->reason_code, true); break; default: err = -EOPNOTSUPP; diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index 071ce1b5f2b4..872d59e35ee2 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c @@ -583,8 +583,6 @@ static int atmel_abdac_remove(struct platform_device *pdev) free_irq(dac->irq, dac); snd_card_free(card); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c index 6b7e2b5a72de..ae63d22c0f88 100644 --- a/sound/atmel/ac97c.c +++ b/sound/atmel/ac97c.c @@ -1199,8 +1199,6 @@ static int atmel_ac97c_remove(struct platform_device *pdev) snd_card_set_dev(card, NULL); snd_card_free(card); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c index 7420c59444ab..2b7f6e8bdd24 100644 --- a/sound/mips/hal2.c +++ b/sound/mips/hal2.c @@ -922,7 +922,6 @@ static int hal2_remove(struct platform_device *pdev) struct snd_card *card = platform_get_drvdata(pdev); snd_card_free(card); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c index 01a03efdc8b0..cfe99ae149fe 100644 --- a/sound/mips/sgio2audio.c +++ b/sound/mips/sgio2audio.c @@ -963,7 +963,6 @@ static int snd_sgio2audio_remove(struct platform_device *pdev) struct snd_card *card = platform_get_drvdata(pdev); snd_card_free(card); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig index 5849b129e50d..51c4ba95a32d 100644 --- a/sound/oss/Kconfig +++ b/sound/oss/Kconfig @@ -250,6 +250,7 @@ config MSND_FIFOSIZE menuconfig SOUND_OSS tristate "OSS sound modules" depends on ISA_DMA_API && VIRT_TO_BUS + depends on !ISA_DMA_SUPPORT_BROKEN help OSS is the 
Open Sound System suite of sound card drivers. They make sound programming easier since they provide a common API. Say Y or diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 6f9b64700f6e..55108b5fb291 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -681,6 +681,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex) struct hda_bus_unsolicited *unsol; unsigned int wp; + if (!bus || !bus->workq) + return 0; + trace_hda_unsol_event(bus, res, res_ex); unsol = bus->unsol; if (!unsol) @@ -1580,7 +1583,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n", nid, stream_tag, channel_id, format); p = get_hda_cvt_setup(codec, nid); - if (!p || p->active) + if (!p) return; if (codec->pcm_format_first) @@ -1627,7 +1630,7 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid, snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid); p = get_hda_cvt_setup(codec, nid); - if (p && p->active) { + if (p) { /* here we just clear the active flag when do_now isn't set; * actual clean-ups will be done later in * purify_inactive_streams() called from snd_hda_codec_prpapre() diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 7b213d589ef6..de18722c4873 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -615,7 +615,7 @@ enum { /* quirks for Nvidia */ #define AZX_DCAPS_PRESET_NVIDIA \ (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\ - AZX_DCAPS_ALIGN_BUFSIZE) + AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT) #define AZX_DCAPS_PRESET_CTHDA \ (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY) diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 84b81c874a4a..b314d3e6d7fa 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -64,6 +64,7 @@ struct conexant_spec { /* extra EAPD pins */ unsigned int num_eapds; hda_nid_t eapds[4]; + bool dynamic_eapd; #ifdef ENABLE_CXT_STATIC_QUIRKS const struct snd_kcontrol_new *mixers[5]; @@ -3155,7 +3156,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec) * thus it might control over all pins. 
*/ if (spec->num_eapds > 2) - spec->gen.own_eapd_ctl = 1; + spec->dynamic_eapd = 1; } static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins, @@ -3194,10 +3195,19 @@ static int cx_auto_build_controls(struct hda_codec *codec) return 0; } +static int cx_auto_init(struct hda_codec *codec) +{ + struct conexant_spec *spec = codec->spec; + snd_hda_gen_init(codec); + if (!spec->dynamic_eapd) + cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true); + return 0; +} + static const struct hda_codec_ops cx_auto_patch_ops = { .build_controls = cx_auto_build_controls, .build_pcms = snd_hda_gen_build_pcms, - .init = snd_hda_gen_init, + .init = cx_auto_init, .free = snd_hda_gen_free, .unsol_event = snd_hda_jack_unsol_event, #ifdef CONFIG_PM @@ -3348,7 +3358,8 @@ static int patch_conexant_auto(struct hda_codec *codec) cx_auto_parse_beep(codec); cx_auto_parse_eapd(codec); - if (spec->gen.own_eapd_ctl) + spec->gen.own_eapd_ctl = 1; + if (spec->dynamic_eapd) spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook; switch (codec->vendor_id) { diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 32930e668854..e12f7a030c58 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1832,12 +1832,10 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec, #define INTEL_EN_ALL_PIN_CVTS 0x01 /* enable 2nd & 3rd pins and convertors */ static void intel_haswell_enable_all_pins(struct hda_codec *codec, - const struct hda_fixup *fix, int action) + bool update_tree) { unsigned int vendor_param; - if (action != HDA_FIXUP_ACT_PRE_PROBE) - return; vendor_param = snd_hda_codec_read(codec, INTEL_VENDOR_NID, 0, INTEL_GET_VENDOR_VERB, 0); if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS) @@ -1849,8 +1847,8 @@ static void intel_haswell_enable_all_pins(struct hda_codec *codec, if (vendor_param == -1) return; - snd_hda_codec_update_widgets(codec); - return; + if (update_tree) + snd_hda_codec_update_widgets(codec); } static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec) @@ -1868,30 +1866,20 @@ static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec) INTEL_SET_VENDOR_VERB, vendor_param); } +/* Haswell needs to re-issue the vendor-specific verbs before turning to D0. + * Otherwise you may get severe h/w communication errors. 
+ */ +static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg, + unsigned int power_state) +{ + if (power_state == AC_PWRST_D0) { + intel_haswell_enable_all_pins(codec, false); + intel_haswell_fixup_enable_dp12(codec); + } - -/* available models for fixup */ -enum { - INTEL_HASWELL, -}; - -static const struct hda_model_fixup hdmi_models[] = { - {.id = INTEL_HASWELL, .name = "Haswell"}, - {} -}; - -static const struct snd_pci_quirk hdmi_fixup_tbl[] = { - SND_PCI_QUIRK(0x8086, 0x2010, "Haswell", INTEL_HASWELL), - {} /* terminator */ -}; - -static const struct hda_fixup hdmi_fixups[] = { - [INTEL_HASWELL] = { - .type = HDA_FIXUP_FUNC, - .v.func = intel_haswell_enable_all_pins, - }, -}; - + snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE, power_state); + snd_hda_codec_set_power_to_all(codec, fg, power_state); +} static int patch_generic_hdmi(struct hda_codec *codec) { @@ -1904,11 +1892,10 @@ static int patch_generic_hdmi(struct hda_codec *codec) codec->spec = spec; hdmi_array_init(spec, 4); - snd_hda_pick_fixup(codec, hdmi_models, hdmi_fixup_tbl, hdmi_fixups); - snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); - - if (codec->vendor_id == 0x80862807) + if (codec->vendor_id == 0x80862807) { + intel_haswell_enable_all_pins(codec, true); intel_haswell_fixup_enable_dp12(codec); + } if (hdmi_parse_codec(codec) < 0) { codec->spec = NULL; @@ -1916,6 +1903,9 @@ static int patch_generic_hdmi(struct hda_codec *codec) return -EINVAL; } codec->patch_ops = generic_hdmi_patch_ops; + if (codec->vendor_id == 0x80862807) + codec->patch_ops.set_power_state = haswell_set_power_state; + generic_hdmi_init_per_pins(codec); init_channel_allocations(); diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 14094f558e03..1eb152cb1097 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -2882,6 +2882,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream, default: return 0; } + break; default: return 0; } diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 8b85049daab0..56ecfc72f2e9 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c @@ -505,7 +505,10 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai, mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE); mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE); - mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, ACLKX | AFSX); + mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, + ACLKX | ACLKR); + mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, + AFSX | AFSR); break; case SND_SOC_DAIFMT_CBM_CFS: /* codec is clock master and frame slave */ @@ -565,7 +568,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai, mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL); mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL); - mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL); + mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL); mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL); break; diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 21779a6a781a..a80c883bb8be 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -1095,9 +1095,9 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w, #ifdef CONFIG_HAVE_CLK if (SND_SOC_DAPM_EVENT_ON(event)) { - return clk_enable(w->clk); + return clk_prepare_enable(w->clk); } else { - clk_disable(w->clk); + clk_disable_unprepare(w->clk); return 0; } #endif diff --git 
a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 321e066a0753..9e9d34871195 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -46,6 +46,7 @@ unsigned int skip_c0; unsigned int skip_c1; unsigned int do_nhm_cstates; unsigned int do_snb_cstates; +unsigned int do_c8_c9_c10; unsigned int has_aperf; unsigned int has_epb; unsigned int units = 1000000000; /* Ghz etc */ @@ -120,6 +121,9 @@ struct pkg_data { unsigned long long pc3; unsigned long long pc6; unsigned long long pc7; + unsigned long long pc8; + unsigned long long pc9; + unsigned long long pc10; unsigned int package_id; unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */ @@ -282,6 +286,11 @@ void print_header(void) outp += sprintf(outp, " %%pc6"); if (do_snb_cstates) outp += sprintf(outp, " %%pc7"); + if (do_c8_c9_c10) { + outp += sprintf(outp, " %%pc8"); + outp += sprintf(outp, " %%pc9"); + outp += sprintf(outp, " %%pc10"); + } if (do_rapl & RAPL_PKG) outp += sprintf(outp, " Pkg_W"); @@ -336,6 +345,9 @@ int dump_counters(struct thread_data *t, struct core_data *c, fprintf(stderr, "pc3: %016llX\n", p->pc3); fprintf(stderr, "pc6: %016llX\n", p->pc6); fprintf(stderr, "pc7: %016llX\n", p->pc7); + fprintf(stderr, "pc8: %016llX\n", p->pc8); + fprintf(stderr, "pc9: %016llX\n", p->pc9); + fprintf(stderr, "pc10: %016llX\n", p->pc10); fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg); fprintf(stderr, "Joules COR: %0X\n", p->energy_cores); fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx); @@ -493,6 +505,11 @@ int format_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc); if (do_snb_cstates) outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc); + if (do_c8_c9_c10) { + outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc); + outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc); + outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc); + } /* * If measurement interval exceeds minimum RAPL Joule Counter range, @@ -569,6 +586,9 @@ delta_package(struct pkg_data *new, struct pkg_data *old) old->pc3 = new->pc3 - old->pc3; old->pc6 = new->pc6 - old->pc6; old->pc7 = new->pc7 - old->pc7; + old->pc8 = new->pc8 - old->pc8; + old->pc9 = new->pc9 - old->pc9; + old->pc10 = new->pc10 - old->pc10; old->pkg_temp_c = new->pkg_temp_c; DELTA_WRAP32(new->energy_pkg, old->energy_pkg); @@ -702,6 +722,9 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data p->pc3 = 0; p->pc6 = 0; p->pc7 = 0; + p->pc8 = 0; + p->pc9 = 0; + p->pc10 = 0; p->energy_pkg = 0; p->energy_dram = 0; @@ -740,6 +763,9 @@ int sum_counters(struct thread_data *t, struct core_data *c, average.packages.pc3 += p->pc3; average.packages.pc6 += p->pc6; average.packages.pc7 += p->pc7; + average.packages.pc8 += p->pc8; + average.packages.pc9 += p->pc9; + average.packages.pc10 += p->pc10; average.packages.energy_pkg += p->energy_pkg; average.packages.energy_dram += p->energy_dram; @@ -781,6 +807,10 @@ void compute_average(struct thread_data *t, struct core_data *c, average.packages.pc3 /= topo.num_packages; average.packages.pc6 /= topo.num_packages; average.packages.pc7 /= topo.num_packages; + + average.packages.pc8 /= topo.num_packages; + average.packages.pc9 /= topo.num_packages; + average.packages.pc10 /= topo.num_packages; } static unsigned long long rdtsc(void) @@ -880,6 +910,14 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) if 
(get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) return -12; } + if (do_c8_c9_c10) { + if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) + return -13; + if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) + return -13; + if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) + return -13; + } if (do_rapl & RAPL_PKG) { if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) return -13; @@ -1762,6 +1800,19 @@ int is_snb(unsigned int family, unsigned int model) return 0; } +int has_c8_c9_c10(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + + switch (model) { + case 0x45: + return 1; + } + return 0; +} + + double discover_bclk(unsigned int family, unsigned int model) { if (is_snb(family, model)) @@ -1918,6 +1969,7 @@ void check_cpuid() do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */ do_smi = do_nhm_cstates; do_snb_cstates = is_snb(family, model); + do_c8_c9_c10 = has_c8_c9_c10(family, model); bclk = discover_bclk(family, model); do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model); @@ -2279,7 +2331,7 @@ int main(int argc, char **argv) cmdline(argc, argv); if (verbose) - fprintf(stderr, "turbostat v3.3 March 15, 2013" + fprintf(stderr, "turbostat v3.4 April 17, 2013" " - Len Brown <lenb@kernel.org>\n"); turbostat_init(); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 45f09362ee7b..302681c4aa44 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1978,7 +1978,7 @@ static long kvm_vcpu_ioctl(struct file *filp, if (vcpu->kvm->mm != current->mm) return -EIO; -#if defined(CONFIG_S390) || defined(CONFIG_PPC) +#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) /* * Special cases: vcpu ioctls that are asynchronous to vcpu execution, * so vcpu_load() would break it. @@ -3105,13 +3105,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, int r; int cpu; - r = kvm_irqfd_init(); - if (r) - goto out_irqfd; r = kvm_arch_init(opaque); if (r) goto out_fail; + /* + * kvm_arch_init makes sure there's at most one caller + * for architectures that support multiple implementations, + * like intel and amd on x86. + * kvm_arch_init must be called before kvm_irqfd_init to avoid creating + * conflicts in case kvm is already setup for another implementation. + */ + r = kvm_irqfd_init(); + if (r) + goto out_irqfd; + if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { r = -ENOMEM; goto out_free_0; @@ -3186,10 +3194,10 @@ out_free_1: out_free_0a: free_cpumask_var(cpus_hardware_enabled); out_free_0: - kvm_arch_exit(); -out_fail: kvm_irqfd_exit(); out_irqfd: + kvm_arch_exit(); +out_fail: return r; } EXPORT_SYMBOL_GPL(kvm_init); |