author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-15 20:59:54 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-15 20:59:54 +0300
commit | 568d135d337d3114688fef9fdbce7fb6dbbd04c7 (patch)
tree | b9355b94182a51eec5cfc69dd335e39a2e97ac7d /arch/mips/mm
parent | 4ecd4ff55ac5c7fe9e232f34a41c4d54f2d825c1 (diff)
parent | d40e0d4fb5613099a58c95a9403f51b03e40e861 (diff)
download | linux-568d135d337d3114688fef9fdbce7fb6dbbd04c7.tar.xz
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
"Boston platform support:
- Document DT bindings
- Add CLK driver for board clocks
CM:
- Avoid per-core locking with CM3 & higher
- WARN on attempt to lock invalid VP, not BUG
CPS:
- Select CONFIG_SYS_SUPPORTS_SCHED_SMT for MIPSr6
- Prevent multi-core with dcache aliasing
- Handle cores not powering down more gracefully
- Handle spurious VP starts more gracefully
DSP:
- Add lwx & lhx misaligned access support
eBPF:
- Add MIPS support along with many supporting changes to add the
required infrastructure
Generic arch code:
- Misc sysmips MIPS_ATOMIC_SET fixes
- Drop duplicate HAVE_SYSCALL_TRACEPOINTS
- Negate error syscall return in trace
- Correct forced syscall errors
- Traced negative syscalls should return -ENOSYS
- Allow samples/bpf/tracex5 to access syscall arguments for sane
traces
- Cleanup from old Kconfig options in defconfigs
- Fix PREF instruction usage by memcpy for MIPS R6
- Fix various special cases in the FPU emulation
- Fix some special cases in MIPS16e2 support
- Fix MIPS I ISA /proc/cpuinfo reporting
- Sort MIPS Kconfig alphabetically
- Fix minimum alignment requirement of IRQ stack as required by
ABI / GCC
- Fix special cases in the module loader
- Perform post-DMA cache flushes on systems with MAARs
- Probe the I6500 CPU
- Cleanup cmpxchg and add support for 1 and 2 byte operations (the
usual sub-word emulation technique is sketched after the quoted
summary below)
- Use queued read/write locks (qrwlock)
- Use queued spinlocks (qspinlock)
- Add CPU shared FTLB feature detection
- Handle tlbex-tlbp race condition
- Allow storing pgd in C0_CONTEXT for MIPSr6
- Use current_cpu_type() in m4kc_tlbp_war()
- Support Boston in the generic kernel
Generic platform:
- yamon-dt: Pull YAMON DT shim code out of SEAD-3 board
- yamon-dt: Support > 256MB of RAM
- yamon-dt: Use serial* rather than uart* aliases
- Abstract FDT fixup application
- Set RTC_ALWAYS_BCD to 0
- Add a MAINTAINERS entry
core kernel:
- qspinlock.c: include linux/prefetch.h
Loongson 3:
- Add support
Perf:
- Add I6500 support
SEAD-3:
- Remove GIC timer from DT
- Set interrupt-parent per-device, not at root node
- Fix GIC interrupt specifiers
SMP:
- Skip IPI setup if we only have a single CPU
VDSO:
- Make comment match reality
- Improvements to time code in VDSO"
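
The cmpxchg item in the summary above adds 1- and 2-byte operations. MIPS ll/sc only operate on 32-bit (and 64-bit) words, so sub-word compare-and-swap is normally emulated by read-modify-writing the aligned 32-bit word that contains the target byte or halfword. The following is a minimal sketch of that masking technique with hypothetical helper names, not the kernel's actual code; it assumes a little-endian CPU and uses a GCC builtin in place of a real ll/sc loop.

```c
#include <stdint.h>

/* Assumed primitive: a working 32-bit compare-and-swap (GCC builtin used
 * purely for illustration; the kernel would use an ll/sc loop). */
static uint32_t cmpxchg32(volatile uint32_t *p, uint32_t old, uint32_t new)
{
	return __sync_val_compare_and_swap(p, old, new);
}

/* Hypothetical sketch: emulate a 1- or 2-byte cmpxchg (size = 1 or 2) by
 * operating on the aligned 32-bit word containing the target. */
static uint32_t cmpxchg_small(volatile void *ptr, uint32_t old, uint32_t new,
			      unsigned int size)
{
	uint32_t mask = (size == 1) ? 0xff : 0xffff;
	/* Byte offset of the target within its word; this simple form
	 * assumes little-endian, big-endian needs the offset mirrored. */
	unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
	volatile uint32_t *word =
		(volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
	uint32_t loaded, store, ret;

	for (;;) {
		loaded = *word;
		ret = (loaded >> shift) & mask;
		if (ret != (old & mask))
			return ret;	/* mismatch: fail like cmpxchg */

		store = (loaded & ~(mask << shift)) | ((new & mask) << shift);
		if (cmpxchg32(word, loaded, store) == loaded)
			return ret;	/* success */
		/* Another CPU changed the word (perhaps a neighbouring
		 * byte); retry with the fresh value. */
	}
}

int main(void)
{
	uint32_t word = 0x11223344;
	uint8_t *bytes = (uint8_t *)&word;

	/* Swap the low-order byte 0x44 for 0xaa. */
	cmpxchg_small(&bytes[0], 0x44, 0xaa, 1);
	return word == 0x112233aa ? 0 : 1;
}
```

The retry loop is what makes the emulation safe: a failed 32-bit cmpxchg may simply mean a neighbouring byte in the same word changed, so the operation is retried rather than reported as a failure.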
* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (86 commits)
locking/qspinlock: Include linux/prefetch.h
MIPS: Fix MIPS I ISA /proc/cpuinfo reporting
MIPS: Fix minimum alignment requirement of IRQ stack
MIPS: generic: Support MIPS Boston development boards
MIPS: DTS: img: Don't attempt to build-in all .dtb files
clk: boston: Add a driver for MIPS Boston board clocks
dt-bindings: Document img,boston-clock binding
MIPS: Traced negative syscalls should return -ENOSYS
MIPS: Correct forced syscall errors
MIPS: Negate error syscall return in trace
MIPS: Drop duplicate HAVE_SYSCALL_TRACEPOINTS select
MIPS16e2: Provide feature overrides for non-MIPS16 systems
MIPS: MIPS16e2: Report ASE presence in /proc/cpuinfo
MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
MIPS: MIPS16e2: Identify ASE presence
MIPS: VDSO: Fix a mismatch between comment and preprocessor constant
MIPS: VDSO: Add implementation of gettimeofday() fallback
MIPS: VDSO: Add implementation of clock_gettime() fallback
MIPS: VDSO: Fix conversions in do_monotonic()/do_monotonic_coarse()
MIPS: Use current_cpu_type() in m4kc_tlbp_war()
...
Diffstat (limited to 'arch/mips/mm')
-rw-r--r-- | arch/mips/mm/c-r4k.c | 2
-rw-r--r-- | arch/mips/mm/tlbex.c | 41
-rw-r--r-- | arch/mips/mm/uasm-micromips.c | 188
-rw-r--r-- | arch/mips/mm/uasm-mips.c | 238
-rw-r--r-- | arch/mips/mm/uasm.c | 61
5 files changed, 305 insertions, 225 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 3fe99cb271a9..81d6a15c93d0 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1453,6 +1453,7 @@ static void probe_pcache(void) case CPU_20KC: case CPU_25KF: case CPU_I6400: + case CPU_I6500: case CPU_SB1: case CPU_SB1A: case CPU_XLR: @@ -1512,6 +1513,7 @@ static void probe_pcache(void) case CPU_ALCHEMY: case CPU_I6400: + case CPU_I6500: c->icache.flags |= MIPS_CACHE_IC_F_DC; break; diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index ed1c5297547a..5aadc69c8ce3 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -153,8 +153,7 @@ static int scratchpad_offset(int i) */ static int m4kc_tlbp_war(void) { - return (current_cpu_data.processor_id & 0xffff00) == - (PRID_COMP_MIPS | PRID_IMP_4KC); + return current_cpu_type() == CPU_4KC; } /* Handle labels (which must be positive integers). */ @@ -2015,6 +2014,26 @@ static void build_r3000_tlb_modify_handler(void) } #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */ +static bool cpu_has_tlbex_tlbp_race(void) +{ + /* + * When a Hardware Table Walker is running it can replace TLB entries + * at any time, leading to a race between it & the CPU. + */ + if (cpu_has_htw) + return true; + + /* + * If the CPU shares FTLB RAM with its siblings then our entry may be + * replaced at any time by a sibling performing a write to the FTLB. + */ + if (cpu_has_shared_ftlb_ram) + return true; + + /* In all other cases there ought to be no race condition to handle */ + return false; +} + /* * R4000 style TLB load/store/modify handlers. */ @@ -2051,7 +2070,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ if (!m4kc_tlbp_war()) { build_tlb_probe_entry(p); - if (cpu_has_htw) { + if (cpu_has_tlbex_tlbp_race()) { /* race condition happens, leaving */ uasm_i_ehb(p); uasm_i_mfc0(p, wr.r3, C0_INDEX); @@ -2125,6 +2144,14 @@ static void build_r4000_tlb_load_handler(void) } uasm_i_nop(&p); + /* + * Warn if something may race with us & replace the TLB entry + * before we read it here. Everything with such races should + * also have dedicated RiXi exception handlers, so this + * shouldn't be hit. + */ + WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path"); + uasm_i_tlbr(&p); switch (current_cpu_type()) { @@ -2192,6 +2219,14 @@ static void build_r4000_tlb_load_handler(void) } uasm_i_nop(&p); + /* + * Warn if something may race with us & replace the TLB entry + * before we read it here. Everything with such races should + * also have dedicated RiXi exception handlers, so this + * shouldn't be hit. 
+ */ + WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path"); + uasm_i_tlbr(&p); switch (current_cpu_type()) { diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 277cf52d80e1..c28ff53c8da0 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -40,93 +40,92 @@ #include "uasm.c" -static struct insn insn_table_MM[] = { - { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD }, - { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, - { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD }, - { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, - { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_beql, 0, 0 }, - { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM }, - { insn_bgezl, 0, 0 }, - { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM }, - { insn_bltzl, 0, 0 }, - { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM }, - { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM }, - { insn_cfc1, M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS }, - { insn_cfcmsa, M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE }, - { insn_ctc1, M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS }, - { insn_ctcmsa, M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE }, - { insn_daddu, 0, 0 }, - { insn_daddiu, 0, 0 }, - { insn_di, M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS }, - { insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS }, - { insn_dmfc0, 0, 0 }, - { insn_dmtc0, 0, 0 }, - { insn_dsll, 0, 0 }, - { insn_dsll32, 0, 0 }, - { insn_dsra, 0, 0 }, - { insn_dsrl, 0, 0 }, - { insn_dsrl32, 0, 0 }, - { insn_drotr, 0, 0 }, - { insn_drotr32, 0, 0 }, - { insn_dsubu, 0, 0 }, - { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 }, - { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE }, - { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE }, - { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM }, - { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM }, - { insn_jalr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS }, - { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS }, - { insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, - { insn_ld, 0, 0 }, - { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM }, - { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM }, - { insn_lld, 0, 0 }, - { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM }, - { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, - { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD }, - { insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS }, - { insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS }, - { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD }, - { insn_mthi, M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS }, - { insn_mtlo, M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS }, - { insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD }, - { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD }, - { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, - { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM }, - { insn_rfe, 0, 0 }, - { insn_sc, M(mm_pool32c_op, 0, 0, 
(mm_sc_func << 1), 0, 0), RT | RS | SIMM }, - { insn_scd, 0, 0 }, - { insn_sd, 0, 0 }, - { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, - { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD }, - { insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD }, - { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, - { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD }, - { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD }, - { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD }, - { insn_srlv, M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD }, - { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD }, - { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD }, - { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, - { insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS }, - { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 }, - { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, - { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, - { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, - { insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM }, - { insn_wsbh, M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS }, - { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD }, - { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, - { insn_dins, 0, 0 }, - { insn_dinsm, 0, 0 }, - { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM}, - { insn_bbit0, 0, 0 }, - { insn_bbit1, 0, 0 }, - { insn_lwx, 0, 0 }, - { insn_ldx, 0, 0 }, - { insn_invalid, 0, 0 } +static const struct insn const insn_table_MM[insn_invalid] = { + [insn_addu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD}, + [insn_addiu] = {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, + [insn_and] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD}, + [insn_andi] = {M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM}, + [insn_beq] = {M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM}, + [insn_beql] = {0, 0}, + [insn_bgez] = {M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM}, + [insn_bgezl] = {0, 0}, + [insn_bltz] = {M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM}, + [insn_bltzl] = {0, 0}, + [insn_bne] = {M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM}, + [insn_cache] = {M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM}, + [insn_cfc1] = {M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS}, + [insn_cfcmsa] = {M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE}, + [insn_ctc1] = {M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS}, + [insn_ctcmsa] = {M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE}, + [insn_daddu] = {0, 0}, + [insn_daddiu] = {0, 0}, + [insn_di] = {M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS}, + [insn_divu] = {M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS}, + [insn_dmfc0] = {0, 0}, + [insn_dmtc0] = {0, 0}, + [insn_dsll] = {0, 0}, + [insn_dsll32] = {0, 0}, + [insn_dsra] = {0, 0}, + [insn_dsrl] = {0, 0}, + [insn_dsrl32] = {0, 0}, + [insn_drotr] = {0, 0}, + [insn_drotr32] = {0, 0}, + [insn_dsubu] = {0, 0}, + [insn_eret] = {M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0}, + [insn_ins] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE}, + [insn_ext] = {M(mm_pool32a_op, 0, 
0, 0, 0, mm_ext_op), RT | RS | RD | RE}, + [insn_j] = {M(mm_j32_op, 0, 0, 0, 0, 0), JIMM}, + [insn_jal] = {M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM}, + [insn_jalr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS}, + [insn_jr] = {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS}, + [insn_lb] = {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, + [insn_ld] = {0, 0}, + [insn_lh] = {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM}, + [insn_ll] = {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM}, + [insn_lld] = {0, 0}, + [insn_lui] = {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM}, + [insn_lw] = {M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, + [insn_mfc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD}, + [insn_mfhi] = {M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS}, + [insn_mflo] = {M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS}, + [insn_mtc0] = {M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD}, + [insn_mthi] = {M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS}, + [insn_mtlo] = {M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS}, + [insn_mul] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD}, + [insn_or] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD}, + [insn_ori] = {M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM}, + [insn_pref] = {M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM}, + [insn_rfe] = {0, 0}, + [insn_sc] = {M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM}, + [insn_scd] = {0, 0}, + [insn_sd] = {0, 0}, + [insn_sll] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD}, + [insn_sllv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD}, + [insn_slt] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD}, + [insn_sltiu] = {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, + [insn_sltu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD}, + [insn_sra] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD}, + [insn_srl] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD}, + [insn_srlv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD}, + [insn_rotr] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD}, + [insn_subu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD}, + [insn_sw] = {M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM}, + [insn_sync] = {M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS}, + [insn_tlbp] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0}, + [insn_tlbr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0}, + [insn_tlbwi] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0}, + [insn_tlbwr] = {M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0}, + [insn_wait] = {M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM}, + [insn_wsbh] = {M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS}, + [insn_xor] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD}, + [insn_xori] = {M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM}, + [insn_dins] = {0, 0}, + [insn_dinsm] = {0, 0}, + [insn_syscall] = {M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM}, + [insn_bbit0] = {0, 0}, + [insn_bbit1] = {0, 0}, + [insn_lwx] = {0, 0}, + [insn_ldx] = {0, 0}, }; #undef M @@ -156,20 +155,17 @@ static inline u32 build_jimm(u32 arg) */ static void build_insn(u32 **buf, enum opcode opc, ...) 
{ - struct insn *ip = NULL; - unsigned int i; + const struct insn *ip; va_list ap; u32 op; - for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++) - if (insn_table_MM[i].opcode == opc) { - ip = &insn_table_MM[i]; - break; - } - - if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) + if (opc < 0 || opc >= insn_invalid || + (opc == insn_daddiu && r4k_daddiu_bug()) || + (insn_table_MM[opc].match == 0 && insn_table_MM[opc].fields == 0)) panic("Unsupported Micro-assembler instruction %d", opc); + ip = &insn_table_MM[opc]; + op = ip->match; va_start(ap, opc); if (ip->fields & RS) { diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 2277499fe6ae..3f74f6c1f065 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -48,126 +48,145 @@ #include "uasm.c" -static struct insn insn_table[] = { - { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, - { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, - { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, - { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, - { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, - { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, - { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, +static const struct insn const insn_table[insn_invalid] = { + [insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD}, + [insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD}, + [insn_andi] = {M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM}, + [insn_bbit0] = {M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM}, + [insn_bbit1] = {M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM}, + [insn_beq] = {M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM}, + [insn_beql] = {M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM}, + [insn_bgez] = {M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM}, + [insn_bgezl] = {M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM}, + [insn_bgtz] = {M(bgtz_op, 0, 0, 0, 0, 0), RS | BIMM}, + [insn_blez] = {M(blez_op, 0, 0, 0, 0, 0), RS | BIMM}, + [insn_bltz] = {M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM}, + [insn_bltzl] = {M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM}, + [insn_bne] = {M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM}, + [insn_break] = {M(spec_op, 0, 0, 0, 0, break_op), SCIMM}, #ifndef CONFIG_CPU_MIPSR6 - { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + [insn_cache] = {M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, #else - { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, + [insn_cache] = {M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9}, #endif - { insn_cfc1, M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD }, - { insn_cfcmsa, M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE }, - { insn_ctc1, M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD }, - { insn_ctcmsa, M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE }, - { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, - { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, - { insn_di, M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT }, - { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD 
| RE }, - { insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT }, - { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, - { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE }, - { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, - { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, - { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, - { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, - { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, - { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, - { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, - { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE }, - { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, - { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, - { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, - { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, - { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, + [insn_cfc1] = {M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD}, + [insn_cfcmsa] = {M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE}, + [insn_ctc1] = {M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD}, + [insn_ctcmsa] = {M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE}, + [insn_daddiu] = {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_daddu] = {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD}, + [insn_ddivu] = {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT}, + [insn_di] = {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT}, + [insn_dins] = {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE}, + [insn_dinsm] = {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE}, + [insn_dinsu] = {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE}, + [insn_divu] = {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT}, + [insn_dmfc0] = {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, + [insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, + [insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT}, + [insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE}, + [insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE}, + [insn_dsbh] = {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD}, + [insn_dshd] = {M(spec3_op, 0, 0, 0, dshd_op, dbshfl_op), RT | RD}, + [insn_dsll] = {M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE}, + [insn_dsll32] = {M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE}, + [insn_dsllv] = {M(spec_op, 0, 0, 0, 0, dsllv_op), RS | RT | RD}, + [insn_dsra] = {M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE}, + [insn_dsra32] = {M(spec_op, 0, 0, 0, 0, dsra32_op), RT | RD | RE}, + [insn_dsrav] = {M(spec_op, 0, 0, 0, 0, dsrav_op), RS | RT | RD}, + [insn_dsrl] = {M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE}, + [insn_dsrl32] = {M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE}, + [insn_dsrlv] = {M(spec_op, 0, 0, 0, 0, dsrlv_op), RS | RT | RD}, + [insn_dsubu] = {M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD}, + [insn_eret] = {M(cop0_op, cop_op, 0, 0, 0, eret_op), 0}, + [insn_ext] = {M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE}, + [insn_ins] = {M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE}, + [insn_j] = {M(j_op, 0, 0, 0, 0, 0), JIMM}, + [insn_jal] = {M(jal_op, 0, 0, 0, 0, 0), JIMM}, + [insn_jalr] = {M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD}, #ifndef CONFIG_CPU_MIPSR6 - { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, + [insn_jr] = {M(spec_op, 0, 0, 0, 0, jr_op), RS}, #else - { insn_jr, M(spec_op, 
0, 0, 0, 0, jalr_op), RS }, + [insn_jr] = {M(spec_op, 0, 0, 0, 0, jalr_op), RS}, #endif - { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, - { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_lhu, M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + [insn_lb] = {M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_lbu] = {M(lbu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_ld] = {M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_lddir] = {M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD}, + [insn_ldpte] = {M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD}, + [insn_ldx] = {M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD}, + [insn_lh] = {M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_lhu] = {M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, #ifndef CONFIG_CPU_MIPSR6 - { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + [insn_ll] = {M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_lld] = {M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, #else - { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 }, - { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 }, + [insn_ll] = {M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9}, + [insn_lld] = {M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9}, #endif - { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, - { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, - { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_mfhc0, M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD }, - { insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD }, - { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_mthc0, M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_mthi, M(spec_op, 0, 0, 0, 0, mthi_op), RS }, - { insn_mtlo, M(spec_op, 0, 0, 0, 0, mtlo_op), RS }, + [insn_lui] = {M(lui_op, 0, 0, 0, 0, 0), RT | SIMM}, + [insn_lw] = {M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_lwu] = {M(lwu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_lwx] = {M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD}, + [insn_mfc0] = {M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, + [insn_mfhc0] = {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET}, + [insn_mfhi] = {M(spec_op, 0, 0, 0, 0, mfhi_op), RD}, + [insn_mflo] = {M(spec_op, 0, 0, 0, 0, mflo_op), RD}, + [insn_movn] = {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD}, + [insn_movz] = {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD}, + [insn_mtc0] = {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, + [insn_mthc0] = {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET}, + [insn_mthi] = {M(spec_op, 0, 0, 0, 0, mthi_op), RS}, + [insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS}, #ifndef CONFIG_CPU_MIPSR6 - { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, + [insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, #else - { insn_mul, M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD}, + [insn_mul] = {M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD}, #endif - { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, + [insn_multu] = {M(spec_op, 0, 0, 0, 0, multu_op), RS | RT}, + [insn_nor] = {M(spec_op, 0, 0, 0, 0, nor_op), RS | RT | RD}, + [insn_or] = {M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD}, + [insn_ori] = {M(ori_op, 0, 
0, 0, 0, 0), RS | RT | UIMM}, #ifndef CONFIG_CPU_MIPSR6 - { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + [insn_pref] = {M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, #else - { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 }, + [insn_pref] = {M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9}, #endif - { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, - { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, + [insn_rfe] = {M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0}, + [insn_rotr] = {M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE}, + [insn_sb] = {M(sb_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, #ifndef CONFIG_CPU_MIPSR6 - { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + [insn_sc] = {M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_scd] = {M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, #else - { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 }, - { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 }, + [insn_sc] = {M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9}, + [insn_scd] = {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9}, #endif - { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, - { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, - { insn_slt, M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD }, - { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD }, - { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, - { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, - { insn_srlv, M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD }, - { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, - { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE }, - { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, - { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, - { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, - { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, - { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, - { insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM }, - { insn_wsbh, M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD }, - { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, - { insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD }, - { insn_ldpte, M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD }, - { insn_lddir, M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD }, - { insn_invalid, 0, 0 } + [insn_sd] = {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_sh] = {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_sll] = {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE}, + [insn_sllv] = {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD}, + [insn_slt] = {M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD}, + [insn_slti] = {M(slti_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_sltiu] = {M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_sltu] = {M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD}, + [insn_sra] = {M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE}, + [insn_srl] = {M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE}, + [insn_srlv] = {M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD}, + [insn_subu] = {M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD}, + [insn_sw] = {M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM}, + [insn_sync] = {M(spec_op, 0, 0, 0, 0, sync_op), RE}, + [insn_syscall] = {M(spec_op, 0, 0, 
0, 0, syscall_op), SCIMM}, + [insn_tlbp] = {M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0}, + [insn_tlbr] = {M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0}, + [insn_tlbwi] = {M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0}, + [insn_tlbwr] = {M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0}, + [insn_wait] = {M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM}, + [insn_wsbh] = {M(spec3_op, 0, 0, 0, wsbh_op, bshfl_op), RT | RD}, + [insn_xor] = {M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD}, + [insn_xori] = {M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM}, + [insn_yield] = {M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD}, }; #undef M @@ -196,20 +215,17 @@ static inline u32 build_jimm(u32 arg) */ static void build_insn(u32 **buf, enum opcode opc, ...) { - struct insn *ip = NULL; - unsigned int i; + const struct insn *ip; va_list ap; u32 op; - for (i = 0; insn_table[i].opcode != insn_invalid; i++) - if (insn_table[i].opcode == opc) { - ip = &insn_table[i]; - break; - } - - if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) + if (opc < 0 || opc >= insn_invalid || + (opc == insn_daddiu && r4k_daddiu_bug()) || + (insn_table[opc].match == 0 && insn_table[opc].fields == 0)) panic("Unsupported Micro-assembler instruction %d", opc); + ip = &insn_table[opc]; + op = ip->match; va_start(ap, opc); if (ip->fields & RS) diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 730363b59bac..57570c0649b4 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -46,26 +46,29 @@ enum fields { #define SIMM9_MASK 0x1ff enum opcode { - insn_invalid, insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1, - insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, - insn_bne, insn_cache, insn_cfc1, insn_cfcmsa, insn_ctc1, insn_ctcmsa, - insn_daddiu, insn_daddu, insn_di, insn_dins, insn_dinsm, insn_divu, - insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll, - insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, - insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, - insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw, - insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_mtc0, - insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_or, insn_ori, - insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, - insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl, + insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez, + insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1, + insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu, + insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, insn_dmfc0, + insn_dmtc0, insn_dmultu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, + insn_dsll, insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, + insn_dsrl, insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, + insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, + insn_ld, insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, + insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, + insn_mfhc0, insn_mfhi, insn_mflo, insn_movn, insn_movz, insn_mtc0, + insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor, + insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, + insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv, + insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor, - insn_xori, insn_yield, insn_lddir, 
insn_ldpte, insn_lhu, + insn_xori, insn_yield, + insn_invalid /* insn_invalid must be last */ }; struct insn { - enum opcode opcode; u32 match; enum fields fields; }; @@ -215,6 +218,13 @@ Ip_u2u1msbu3(op) \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); +#define I_u2u1msb32msb3(op) \ +Ip_u2u1msbu3(op) \ +{ \ + build_insn(buf, insn##op, b, a, c+d-33, c-32); \ +} \ +UASM_EXPORT_SYMBOL(uasm_i##op); + #define I_u2u1msbdu3(op) \ Ip_u2u1msbu3(op) \ { \ @@ -265,25 +275,36 @@ I_u1u2s3(_beq) I_u1u2s3(_beql) I_u1s2(_bgez) I_u1s2(_bgezl) +I_u1s2(_bgtz) +I_u1s2(_blez) I_u1s2(_bltz) I_u1s2(_bltzl) I_u1u2s3(_bne) +I_u1(_break) I_u2s3u1(_cache) I_u1u2(_cfc1) I_u2u1(_cfcmsa) I_u1u2(_ctc1) I_u2u1(_ctcmsa) +I_u1u2(_ddivu) I_u1u2u3(_dmfc0) I_u1u2u3(_dmtc0) +I_u1u2(_dmultu) I_u2u1s3(_daddiu) I_u3u1u2(_daddu) I_u1(_di); I_u1u2(_divu) +I_u2u1(_dsbh); +I_u2u1(_dshd); I_u2u1u3(_dsll) I_u2u1u3(_dsll32) +I_u3u2u1(_dsllv) I_u2u1u3(_dsra) +I_u2u1u3(_dsra32) +I_u3u2u1(_dsrav) I_u2u1u3(_dsrl) I_u2u1u3(_dsrl32) +I_u3u2u1(_dsrlv) I_u2u1u3(_drotr) I_u2u1u3(_drotr32) I_u3u1u2(_dsubu) @@ -295,6 +316,7 @@ I_u1(_jal) I_u2u1(_jalr) I_u1(_jr) I_u2s3u1(_lb) +I_u2s3u1(_lbu) I_u2s3u1(_ld) I_u2s3u1(_lh) I_u2s3u1(_lhu) @@ -302,8 +324,11 @@ I_u2s3u1(_ll) I_u2s3u1(_lld) I_u1s2(_lui) I_u2s3u1(_lw) +I_u2s3u1(_lwu) I_u1u2u3(_mfc0) I_u1u2u3(_mfhc0) +I_u3u1u2(_movn) +I_u3u1u2(_movz) I_u1(_mfhi) I_u1(_mflo) I_u1u2u3(_mtc0) @@ -311,15 +336,20 @@ I_u1u2u3(_mthc0) I_u1(_mthi) I_u1(_mtlo) I_u3u1u2(_mul) -I_u2u1u3(_ori) +I_u1u2(_multu) +I_u3u1u2(_nor) I_u3u1u2(_or) +I_u2u1u3(_ori) I_0(_rfe) +I_u2s3u1(_sb) I_u2s3u1(_sc) I_u2s3u1(_scd) I_u2s3u1(_sd) +I_u2s3u1(_sh) I_u2u1u3(_sll) I_u3u2u1(_sllv) I_s3s1s2(_slt) +I_u2u1s3(_slti) I_u2u1s3(_sltiu) I_u3u1u2(_sltu) I_u2u1u3(_sra) @@ -340,6 +370,7 @@ I_u2u1u3(_xori) I_u2u1(_yield) I_u2u1msbu3(_dins); I_u2u1msb32u3(_dinsm); +I_u2u1msb32msb3(_dinsu); I_u1(_syscall); I_u1u2s3(_bbit0); I_u1u2s3(_bbit1); |
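
The bulk of the diff above converts the uasm instruction tables in uasm-mips.c and uasm-micromips.c from linearly-searched lists terminated by an insn_invalid sentinel into arrays indexed directly by the opcode enum: an entry left all-zero marks an opcode the ISA flavour does not support, and build_insn() rejects it (or an out-of-range opcode) with a panic. Below is a small, self-contained sketch of that lookup pattern with made-up encodings rather than the real instruction fields.

```c
#include <stdio.h>

/* Simplified stand-ins for the uasm types; field values are invented. */
enum opcode { insn_addu, insn_and, insn_or, insn_invalid /* must be last */ };

struct insn {
	unsigned int match;	/* fixed opcode bits */
	unsigned int fields;	/* which operand fields the encoding uses */
};

/* Indexed by enum opcode: lookup is O(1), and an entry left {0, 0} marks an
 * instruction this ISA flavour does not implement. */
static const struct insn insn_table[insn_invalid] = {
	[insn_addu] = { 0x00000021, 0x7 },
	[insn_or]   = { 0x00000025, 0x7 },
	/* insn_and deliberately omitted: it stays all-zero. */
};

static const struct insn *lookup(enum opcode opc)
{
	if (opc < 0 || opc >= insn_invalid)
		return NULL;			/* out of range */
	if (insn_table[opc].match == 0 && insn_table[opc].fields == 0)
		return NULL;			/* unsupported on this ISA */
	return &insn_table[opc];
}

int main(void)
{
	printf("addu supported: %s\n", lookup(insn_addu) ? "yes" : "no");
	printf("and  supported: %s\n", lookup(insn_and) ? "yes" : "no");
	return 0;
}
```

The cost is one table slot per enum value whether or not it is implemented; in exchange, the per-entry opcode member and the linear scan in build_insn() go away.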