Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 19
-rw-r--r--  arch/powerpc/Kconfig.debug | 7
-rw-r--r--  arch/powerpc/Makefile | 1
-rw-r--r--  arch/powerpc/boot/.gitignore | 1
-rw-r--r--  arch/powerpc/boot/Makefile | 11
-rw-r--r--  arch/powerpc/boot/dts/a4m072.dts | 168
-rw-r--r--  arch/powerpc/boot/dts/bluestone.dts | 129
-rw-r--r--  arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi | 8
-rw-r--r--  arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi | 16
-rw-r--r--  arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1010si-post.dtsi | 6
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1020si-post.dtsi | 10
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1021si-post.dtsi | 7
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1022si-post.dtsi | 15
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1023si-post.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/p2020si-post.dtsi | 7
-rw-r--r--  arch/powerpc/boot/dts/fsl/p2041si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/p3041si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/p3060si-post.dtsi | 6
-rw-r--r--  arch/powerpc/boot/dts/fsl/p5020si-post.dtsi | 2
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi | 10
-rw-r--r--  arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi | 6
-rw-r--r--  arch/powerpc/boot/dts/ge_imp3a.dts | 255
-rw-r--r--  arch/powerpc/boot/dts/mpc836x_mds.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/mpc8536ds.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/mpc8536ds.dtsi | 93
-rw-r--r--  arch/powerpc/boot/dts/mpc8536ds_36b.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/mpc8548cds.dts | 306
-rw-r--r--  arch/powerpc/boot/dts/mpc8548cds.dtsi | 306
-rw-r--r--  arch/powerpc/boot/dts/mpc8548cds_32b.dts | 86
-rw-r--r--  arch/powerpc/boot/dts/mpc8548cds_36b.dts | 86
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds.dtsi | 50
-rw-r--r--  arch/powerpc/boot/dts/p1010rdb.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb-pc.dtsi | 247
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb-pc_32b.dts | 90
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb-pc_36b.dts | 90
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb-pc_camp_core0.dts | 64
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb-pc_camp_core1.dts | 142
-rw-r--r--  arch/powerpc/boot/dts/p1020rdb.dtsi | 13
-rw-r--r--  arch/powerpc/boot/dts/p1021mds.dts | 3
-rw-r--r--  arch/powerpc/boot/dts/p1021rdb.dts | 96
-rw-r--r--  arch/powerpc/boot/dts/p1021rdb.dtsi | 236
-rw-r--r--  arch/powerpc/boot/dts/p1021rdb_36b.dts | 96
-rw-r--r--  arch/powerpc/boot/dts/p1022ds.dts | 274
-rw-r--r--  arch/powerpc/boot/dts/p1022ds.dtsi | 234
-rw-r--r--  arch/powerpc/boot/dts/p1022ds_32b.dts | 103
-rw-r--r--  arch/powerpc/boot/dts/p1022ds_36b.dts | 103
-rw-r--r--  arch/powerpc/boot/dts/p1025rdb.dtsi | 286
-rw-r--r--  arch/powerpc/boot/dts/p1025rdb_32b.dts | 135
-rw-r--r--  arch/powerpc/boot/dts/p1025rdb_36b.dts | 88
-rw-r--r--  arch/powerpc/boot/dts/p2020ds.dtsi | 3
-rw-r--r--  arch/powerpc/boot/dts/p2020rdb-pc.dtsi | 241
-rw-r--r--  arch/powerpc/boot/dts/p2020rdb-pc_32b.dts | 96
-rw-r--r--  arch/powerpc/boot/dts/p2020rdb-pc_36b.dts | 96
-rw-r--r--  arch/powerpc/boot/dts/p2020rdb.dts | 7
-rwxr-xr-x  arch/powerpc/boot/wrapper | 22
-rw-r--r--  arch/powerpc/configs/85xx/ge_imp3a_defconfig | 257
-rw-r--r--  arch/powerpc/configs/86xx/gef_ppc9a_defconfig | 1
-rw-r--r--  arch/powerpc/configs/86xx/gef_sbc310_defconfig | 1
-rw-r--r--  arch/powerpc/configs/86xx/gef_sbc610_defconfig | 2
-rw-r--r--  arch/powerpc/configs/iseries_defconfig | 236
-rw-r--r--  arch/powerpc/configs/mpc5200_defconfig | 27
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 5
-rw-r--r--  arch/powerpc/include/asm/abs_addr.h | 21
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 59
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 12
-rw-r--r--  arch/powerpc/include/asm/device.h | 3
-rw-r--r--  arch/powerpc/include/asm/dma.h | 4
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 134
-rw-r--r--  arch/powerpc/include/asm/eeh_event.h | 33
-rw-r--r--  arch/powerpc/include/asm/ehv_pic.h | 2
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 113
-rw-r--r--  arch/powerpc/include/asm/fadump.h | 218
-rw-r--r--  arch/powerpc/include/asm/firmware.h | 9
-rw-r--r--  arch/powerpc/include/asm/fsl_guts.h | 6
-rw-r--r--  arch/powerpc/include/asm/highmem.h | 2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 63
-rw-r--r--  arch/powerpc/include/asm/i8259.h | 2
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 1
-rw-r--r--  arch/powerpc/include/asm/irq.h | 253
-rw-r--r--  arch/powerpc/include/asm/irqflags.h | 37
-rw-r--r--  arch/powerpc/include/asm/iseries/alpaca.h | 31
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_call.h | 111
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_call_event.h | 201
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_call_sc.h | 50
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_call_xm.h | 61
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_lp_config.h | 128
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_lp_event.h | 162
-rw-r--r--  arch/powerpc/include/asm/iseries/hv_types.h | 112
-rw-r--r--  arch/powerpc/include/asm/iseries/iommu.h | 37
-rw-r--r--  arch/powerpc/include/asm/iseries/it_lp_queue.h | 78
-rw-r--r--  arch/powerpc/include/asm/iseries/lpar_map.h | 85
-rw-r--r--  arch/powerpc/include/asm/iseries/mf.h | 51
-rw-r--r--  arch/powerpc/include/asm/iseries/vio.h | 265
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 2
-rw-r--r--  arch/powerpc/include/asm/keylargo.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm.h | 46
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 98
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_32.h | 6
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 180
-rw-r--r--  arch/powerpc/include/asm/kvm_e500.h | 52
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 90
-rw-r--r--  arch/powerpc/include/asm/kvm_para.h | 41
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 25
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 8
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 4
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 6
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 14
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 11
-rw-r--r--  arch/powerpc/include/asm/mpic_msgr.h | 132
-rw-r--r--  arch/powerpc/include/asm/paca.h | 2
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 16
-rw-r--r--  arch/powerpc/include/asm/pci.h | 9
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 4
-rw-r--r--  arch/powerpc/include/asm/phyp_dump.h | 47
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 4
-rw-r--r--  arch/powerpc/include/asm/ppc-pci.h | 96
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 2
-rw-r--r--  arch/powerpc/include/asm/ptrace.h | 20
-rw-r--r--  arch/powerpc/include/asm/reg.h | 27
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 1
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 34
-rw-r--r--  arch/powerpc/include/asm/smp.h | 1
-rw-r--r--  arch/powerpc/include/asm/socket.h | 4
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 5
-rw-r--r--  arch/powerpc/include/asm/system.h | 38
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 9
-rw-r--r--  arch/powerpc/include/asm/time.h | 15
-rw-r--r--  arch/powerpc/include/asm/udbg.h | 1
-rw-r--r--  arch/powerpc/include/asm/vio.h | 10
-rw-r--r--  arch/powerpc/include/asm/xics.h | 2
-rw-r--r--  arch/powerpc/kernel/Makefile | 10
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 32
-rw-r--r--  arch/powerpc/kernel/cputable.c | 20
-rw-r--r--  arch/powerpc/kernel/crash.c | 2
-rw-r--r--  arch/powerpc/kernel/dbell.c | 2
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 248
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 236
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 324
-rw-r--r--  arch/powerpc/kernel/fadump.c | 1315
-rw-r--r--  arch/powerpc/kernel/head_32.S | 4
-rw-r--r--  arch/powerpc/kernel/head_40x.S | 4
-rw-r--r--  arch/powerpc/kernel/head_64.S | 62
-rw-r--r--  arch/powerpc/kernel/head_8xx.S | 4
-rw-r--r--  arch/powerpc/kernel/head_booke.h | 4
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 2
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 2
-rw-r--r--  arch/powerpc/kernel/idle.c | 14
-rw-r--r--  arch/powerpc/kernel/idle_book3e.S | 25
-rw-r--r--  arch/powerpc/kernel/idle_power4.S | 24
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 23
-rw-r--r--  arch/powerpc/kernel/iommu.c | 8
-rw-r--r--  arch/powerpc/kernel/irq.c | 829
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c | 3
-rw-r--r--  arch/powerpc/kernel/kvm.c | 307
-rw-r--r--  arch/powerpc/kernel/kvm_emul.S | 112
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 108
-rw-r--r--  arch/powerpc/kernel/misc.S | 1
-rw-r--r--  arch/powerpc/kernel/of_platform.c | 6
-rw-r--r--  arch/powerpc/kernel/paca.c | 12
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 101
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 6
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 7
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c | 12
-rw-r--r--  arch/powerpc/kernel/pmc.c | 1
-rw-r--r--  arch/powerpc/kernel/process.c | 33
-rw-r--r--  arch/powerpc/kernel/prom.c | 98
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 17
-rw-r--r--  arch/powerpc/kernel/rtas.c | 39
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 13
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 14
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 2
-rw-r--r--  arch/powerpc/kernel/signal.c | 25
-rw-r--r--  arch/powerpc/kernel/signal.h | 2
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 11
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 7
-rw-r--r--  arch/powerpc/kernel/time.c | 116
-rw-r--r--  arch/powerpc/kernel/traps.c | 6
-rw-r--r--  arch/powerpc/kernel/udbg.c | 3
-rw-r--r--  arch/powerpc/kernel/vdso.c | 14
-rw-r--r--  arch/powerpc/kernel/vio.c | 30
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 5
-rw-r--r--  arch/powerpc/kvm/Kconfig | 1
-rw-r--r--  arch/powerpc/kvm/book3s.c | 57
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 21
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 66
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 919
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 466
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 209
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 835
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 176
-rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c | 9
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 182
-rw-r--r--  arch/powerpc/kvm/booke.c | 150
-rw-r--r--  arch/powerpc/kvm/booke.h | 4
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c | 23
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 18
-rw-r--r--  arch/powerpc/kvm/e500.c | 32
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c | 38
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c | 775
-rw-r--r--  arch/powerpc/kvm/e500_tlb.h | 80
-rw-r--r--  arch/powerpc/kvm/emulate.c | 61
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 148
-rw-r--r--  arch/powerpc/kvm/trace.h | 62
-rw-r--r--  arch/powerpc/lib/locks.c | 24
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c | 5
-rw-r--r--  arch/powerpc/mm/fault.c | 181
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c | 19
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 20
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 9
-rw-r--r--  arch/powerpc/mm/icswx.c | 23
-rw-r--r--  arch/powerpc/mm/icswx.h | 6
-rw-r--r--  arch/powerpc/mm/mem.c | 4
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 2
-rw-r--r--  arch/powerpc/mm/slb.c | 6
-rw-r--r--  arch/powerpc/mm/slb_low.S | 16
-rw-r--r--  arch/powerpc/mm/stab.c | 9
-rw-r--r--  arch/powerpc/oprofile/common.c | 3
-rw-r--r--  arch/powerpc/perf/Makefile | 14
-rw-r--r--  arch/powerpc/perf/callchain.c (renamed from arch/powerpc/kernel/perf_callchain.c) | 2
-rw-r--r--  arch/powerpc/perf/core-book3s.c (renamed from arch/powerpc/kernel/perf_event.c) | 64
-rw-r--r--  arch/powerpc/perf/core-fsl-emb.c (renamed from arch/powerpc/kernel/perf_event_fsl_emb.c) | 0
-rw-r--r--  arch/powerpc/perf/e500-pmu.c (renamed from arch/powerpc/kernel/e500-pmu.c) | 0
-rw-r--r--  arch/powerpc/perf/mpc7450-pmu.c (renamed from arch/powerpc/kernel/mpc7450-pmu.c) | 0
-rw-r--r--  arch/powerpc/perf/power4-pmu.c (renamed from arch/powerpc/kernel/power4-pmu.c) | 1
-rw-r--r--  arch/powerpc/perf/power5+-pmu.c (renamed from arch/powerpc/kernel/power5+-pmu.c) | 0
-rw-r--r--  arch/powerpc/perf/power5-pmu.c (renamed from arch/powerpc/kernel/power5-pmu.c) | 0
-rw-r--r--  arch/powerpc/perf/power6-pmu.c (renamed from arch/powerpc/kernel/power6-pmu.c) | 2
-rw-r--r--  arch/powerpc/perf/power7-pmu.c (renamed from arch/powerpc/kernel/power7-pmu.c) | 0
-rw-r--r--  arch/powerpc/perf/ppc970-pmu.c (renamed from arch/powerpc/kernel/ppc970-pmu.c) | 3
-rw-r--r--  arch/powerpc/platforms/44x/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/44x/currituck.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/iss4xx.c | 3
-rw-r--r--  arch/powerpc/platforms/44x/ppc44x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 12
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c | 15
-rw-r--r--  arch/powerpc/platforms/52xx/mpc5200_simple.c | 1
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_common.c | 10
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 16
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 12
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 14
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 27
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_ds.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/ge_imp3a.c | 246
-rw-r--r--  arch/powerpc/platforms/85xx/ksi8560.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/mpc8536_ds.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c | 84
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 6
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 40
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 222
-rw-r--r--  arch/powerpc/platforms/85xx/p1010rdb.c | 5
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 208
-rw-r--r--  arch/powerpc/platforms/85xx/p1023_rds.c | 5
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8548.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8560.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/socrates.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 15
-rw-r--r--  arch/powerpc/platforms/85xx/stx_gp3.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/tqm85xx.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/xes_mpc85xx.c | 4
-rw-r--r--  arch/powerpc/platforms/86xx/Kconfig | 3
-rw-r--r--  arch/powerpc/platforms/86xx/Makefile | 7
-rw-r--r--  arch/powerpc/platforms/86xx/gef_gpio.c | 171
-rw-r--r--  arch/powerpc/platforms/86xx/gef_ppc9a.c | 2
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc310.c | 2
-rw-r--r--  arch/powerpc/platforms/86xx/gef_sbc610.c | 2
-rw-r--r--  arch/powerpc/platforms/86xx/pic.c | 5
-rw-r--r--  arch/powerpc/platforms/8xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 29
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 16
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 16
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 3
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c | 14
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 15
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c | 2
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 3
-rw-r--r--  arch/powerpc/platforms/embedded6xx/flipper-pic.c | 24
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 29
-rw-r--r--  arch/powerpc/platforms/embedded6xx/holly.c | 6
-rw-r--r--  arch/powerpc/platforms/embedded6xx/linkstation.c | 3
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 6
-rw-r--r--  arch/powerpc/platforms/embedded6xx/storcenter.c | 3
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 38
-rw-r--r--  arch/powerpc/platforms/iseries/Makefile | 9
-rw-r--r--  arch/powerpc/platforms/iseries/call_hpt.h | 102
-rw-r--r--  arch/powerpc/platforms/iseries/call_pci.h | 309
-rw-r--r--  arch/powerpc/platforms/iseries/call_sm.h | 37
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c | 643
-rw-r--r--  arch/powerpc/platforms/iseries/exception.S | 311
-rw-r--r--  arch/powerpc/platforms/iseries/exception.h | 58
-rw-r--r--  arch/powerpc/platforms/iseries/htab.c | 257
-rw-r--r--  arch/powerpc/platforms/iseries/hvcall.S | 94
-rw-r--r--  arch/powerpc/platforms/iseries/hvlog.c | 35
-rw-r--r--  arch/powerpc/platforms/iseries/hvlpconfig.c | 39
-rw-r--r--  arch/powerpc/platforms/iseries/iommu.c | 260
-rw-r--r--  arch/powerpc/platforms/iseries/ipl_parms.h | 68
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 400
-rw-r--r--  arch/powerpc/platforms/iseries/irq.h | 13
-rw-r--r--  arch/powerpc/platforms/iseries/it_exp_vpd_panel.h | 51
-rw-r--r--  arch/powerpc/platforms/iseries/it_lp_naca.h | 80
-rw-r--r--  arch/powerpc/platforms/iseries/ksyms.c | 21
-rw-r--r--  arch/powerpc/platforms/iseries/lpardata.c | 318
-rw-r--r--  arch/powerpc/platforms/iseries/lpevents.c | 341
-rw-r--r--  arch/powerpc/platforms/iseries/main_store.h | 165
-rw-r--r--  arch/powerpc/platforms/iseries/mf.c | 1275
-rw-r--r--  arch/powerpc/platforms/iseries/misc.S | 26
-rw-r--r--  arch/powerpc/platforms/iseries/naca.h | 24
-rw-r--r--  arch/powerpc/platforms/iseries/pci.c | 919
-rw-r--r--  arch/powerpc/platforms/iseries/pci.h | 58
-rw-r--r--  arch/powerpc/platforms/iseries/proc.c | 120
-rw-r--r--  arch/powerpc/platforms/iseries/processor_vpd.h | 85
-rw-r--r--  arch/powerpc/platforms/iseries/release_data.h | 63
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 722
-rw-r--r--  arch/powerpc/platforms/iseries/setup.h | 27
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c | 88
-rw-r--r--  arch/powerpc/platforms/iseries/spcomm_area.h | 34
-rw-r--r--  arch/powerpc/platforms/iseries/vio.c | 556
-rw-r--r--  arch/powerpc/platforms/iseries/viopath.c | 677
-rw-r--r--  arch/powerpc/platforms/iseries/vpd_areas.h | 88
-rw-r--r--  arch/powerpc/platforms/maple/pci.c | 2
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/pci.c | 3
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/nvram.c | 42
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 3
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 27
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 9
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 48
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 28
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 1
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 11
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile | 4
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 1059
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c | 44
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_dev.c | 102
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 213
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_event.c | 55
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 565
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_sysfs.c | 25
-rw-r--r--  arch/powerpc/platforms/pseries/io_event_irq.c | 68
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 29
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/phyp_dump.c | 513
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c | 18
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 195
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 15
-rw-r--r--  arch/powerpc/platforms/pseries/suspend.c | 6
-rw-r--r--  arch/powerpc/platforms/wsp/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 2
-rw-r--r--  arch/powerpc/platforms/wsp/opb_pic.c | 26
-rw-r--r--  arch/powerpc/platforms/wsp/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c | 9
-rw-r--r--  arch/powerpc/sysdev/Kconfig | 4
-rw-r--r--  arch/powerpc/sysdev/Makefile | 4
-rw-r--r--  arch/powerpc/sysdev/cpm1.c | 9
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c | 23
-rw-r--r--  arch/powerpc/sysdev/ehv_pic.c | 14
-rw-r--r--  arch/powerpc/sysdev/fsl_85xx_cache_sram.c | 1
-rw-r--r--  arch/powerpc/sysdev/fsl_85xx_l2ctlr.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 11
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.h | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 53
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_rmu.c | 42
-rw-r--r--  arch/powerpc/sysdev/ge/Makefile | 1
-rw-r--r--  arch/powerpc/sysdev/ge/ge_pic.c (renamed from arch/powerpc/platforms/86xx/gef_pic.c) | 17
-rw-r--r--  arch/powerpc/sysdev/ge/ge_pic.h (renamed from arch/powerpc/platforms/86xx/gef_pic.h) | 0
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 15
-rw-r--r--  arch/powerpc/sysdev/ipic.c | 31
-rw-r--r--  arch/powerpc/sysdev/ipic.h | 2
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c | 11
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 121
-rw-r--r--  arch/powerpc/sysdev/mpic_msgr.c | 282
-rw-r--r--  arch/powerpc/sysdev/mpic_msi.c | 6
-rw-r--r--  arch/powerpc/sysdev/mv64x60_pic.c | 11
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 70
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c | 26
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.h | 2
-rw-r--r--  arch/powerpc/sysdev/tsi108_pci.c | 13
-rw-r--r--  arch/powerpc/sysdev/uic.c | 26
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 25
-rw-r--r--  arch/powerpc/sysdev/xilinx_intc.c | 19
-rw-r--r--  arch/powerpc/xmon/ppc-opc.c | 1
-rw-r--r--  arch/powerpc/xmon/spu-opc.c | 1
-rw-r--r--  arch/powerpc/xmon/xmon.c | 33
403 files changed, 14531 insertions, 16686 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1919634a9b32..d219ebecabf0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -134,7 +134,9 @@ config PPC
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
+ select SPARSE_IRQ
select IRQ_PER_CPU
+ select IRQ_DOMAIN
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select IRQ_FORCED_THREADING
@@ -376,13 +378,16 @@ config CRASH_DUMP
The same kernel binary can be used as production kernel and dump
capture kernel.
-config PHYP_DUMP
- bool "Hypervisor-assisted dump (EXPERIMENTAL)"
- depends on PPC_PSERIES && EXPERIMENTAL
+config FA_DUMP
+ bool "Firmware-assisted dump"
+ depends on PPC64 && PPC_RTAS && CRASH_DUMP
help
- Hypervisor-assisted dump is meant to be a kdump replacement
- offering robustness and speed not possible without system
- hypervisor assistance.
+ A robust mechanism to get reliable kernel crash dump with
+ assistance from firmware. This approach does not use kexec,
+ instead firmware assists in booting the kdump kernel
+ while preserving memory contents. Firmware-assisted dump
+ is meant to be a kdump replacement offering robustness and
+ speed not possible without system firmware assistance.
If unsure, say "N"
@@ -611,7 +616,7 @@ endmenu
config ISA_DMA_API
bool
- default !PPC_ISERIES || PCI
+ default PCI
menu "Bus options"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 4ccb2a009f74..72d55dbc6119 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -196,13 +196,6 @@ config PPC_EARLY_DEBUG_MAPLE
help
Select this to enable early debugging for Maple.
-config PPC_EARLY_DEBUG_ISERIES
- bool "iSeries HV Console"
- depends on PPC_ISERIES
- help
- Select this to enable early debugging for legacy iSeries. You need
- to hit "Ctrl-x Ctrl-x" to see the messages on the console.
-
config PPC_EARLY_DEBUG_PAS_REALMODE
bool "PA Semi real mode"
depends on PPC_PASEMI
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index b8b105c01c64..6524c6e21896 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -157,6 +157,7 @@ core-y += arch/powerpc/kernel/ \
arch/powerpc/net/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
core-$(CONFIG_KVM) += arch/powerpc/kvm/
+core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 12da77ec0228..1c1aadc8c48f 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -27,7 +27,6 @@ zImage.bin.*
zImage.chrp
zImage.coff
zImage.holly
-zImage.iseries
zImage.*lds
zImage.miboot
zImage.pmac
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8844a17ce8ed..e8461cb18d04 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -184,7 +184,6 @@ image-$(CONFIG_PPC_EFIKA) += zImage.chrp
image-$(CONFIG_PPC_PMAC) += zImage.pmac
image-$(CONFIG_PPC_HOLLY) += dtbImage.holly
image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800
-image-$(CONFIG_PPC_ISERIES) += zImage.iseries
image-$(CONFIG_DEFAULT_UIMAGE) += uImage
image-$(CONFIG_EPAPR_BOOT) += zImage.epapr
@@ -247,7 +246,7 @@ image-$(CONFIG_ASP834x) += dtbImage.asp834x-redboot
image-$(CONFIG_MPC8540_ADS) += cuImage.mpc8540ads
image-$(CONFIG_MPC8560_ADS) += cuImage.mpc8560ads
image-$(CONFIG_MPC85xx_CDS) += cuImage.mpc8541cds \
- cuImage.mpc8548cds \
+ cuImage.mpc8548cds_32b \
cuImage.mpc8555cds
image-$(CONFIG_MPC85xx_MDS) += cuImage.mpc8568mds
image-$(CONFIG_MPC85xx_DS) += cuImage.mpc8544ds \
@@ -311,12 +310,6 @@ $(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb
$(obj)/vmlinux.strip: vmlinux
$(STRIP) -s -R .comment $< -o $@
-# The iseries hypervisor won't take an ET_DYN executable, so this
-# changes the type (byte 17) in the file to ET_EXEC (2).
-$(obj)/zImage.iseries: vmlinux
- $(STRIP) -s -R .comment $< -o $@
- printf "\x02" | dd of=$@ conv=notrunc bs=1 seek=17
-
$(obj)/uImage: vmlinux $(wrapperbits)
$(call if_changed,wrap,uboot)
@@ -364,7 +357,7 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
# anything not in $(targets)
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
- zImage.iseries zImage.miboot zImage.pmac zImage.pseries \
+ zImage.miboot zImage.pmac zImage.pseries \
zImage.maple simpleImage.* otheros.bld *.dtb
# clean up files cached by wrapper
diff --git a/arch/powerpc/boot/dts/a4m072.dts b/arch/powerpc/boot/dts/a4m072.dts
new file mode 100644
index 000000000000..fabe7b7d5f13
--- /dev/null
+++ b/arch/powerpc/boot/dts/a4m072.dts
@@ -0,0 +1,168 @@
+/*
+ * a4m072 board Device Tree Source
+ *
+ * Copyright (C) 2011 DENX Software Engineering GmbH
+ * Heiko Schocher <hs@denx.de>
+ *
+ * Copyright (C) 2007 Semihalf
+ * Marian Balakowicz <m8@semihalf.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "mpc5200b.dtsi"
+
+/ {
+ model = "anonymous,a4m072";
+ compatible = "anonymous,a4m072";
+
+ soc5200@f0000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc5200b-immr";
+ ranges = <0 0xf0000000 0x0000c000>;
+ reg = <0xf0000000 0x00000100>;
+ bus-frequency = <0>; /* From boot loader */
+ system-frequency = <0>; /* From boot loader */
+
+ cdm@200 {
+ fsl,init-ext-48mhz-en = <0x0>;
+ fsl,init-fd-enable = <0x01>;
+ fsl,init-fd-counters = <0x3333>;
+ };
+
+ timer@600 {
+ fsl,has-wdt;
+ };
+
+ gpt3: timer@630 { /* General Purpose Timer in GPIO mode */
+ compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpt4: timer@640 { /* General Purpose Timer in GPIO mode */
+ compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpt5: timer@650 { /* General Purpose Timer in GPIO mode */
+ compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ spi@f00 {
+ status = "disabled";
+ };
+
+ psc@2000 {
+ compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ reg = <0x2000 0x100>;
+ interrupts = <2 1 0>;
+ };
+
+ psc@2200 {
+ compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ reg = <0x2200 0x100>;
+ interrupts = <2 2 0>;
+ };
+
+ psc@2400 {
+ compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ reg = <0x2400 0x100>;
+ interrupts = <2 3 0>;
+ };
+
+ psc@2600 {
+ status = "disabled";
+ };
+
+ psc@2800 {
+ status = "disabled";
+ };
+
+ psc@2c00 {
+ compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+ reg = <0x2c00 0x100>;
+ interrupts = <2 4 0>;
+ };
+
+ ethernet@3000 {
+ phy-handle = <&phy0>;
+ };
+
+ mdio@3000 {
+ phy0: ethernet-phy@1f {
+ reg = <0x1f>;
+ interrupts = <1 2 0>; /* IRQ 2 active low */
+ };
+ };
+
+ i2c@3d00 {
+ status = "disabled";
+ };
+
+ i2c@3d40 {
+ hwmon@2e {
+ compatible = "nsc,lm87";
+ reg = <0x2e>;
+ };
+ rtc@51 {
+ compatible = "nxp,rtc8564";
+ reg = <0x51>;
+ };
+ };
+ };
+
+ localbus {
+ compatible = "fsl,mpc5200b-lpb","simple-bus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0xfe000000 0x02000000
+ 1 0 0x62000000 0x00400000
+ 2 0 0x64000000 0x00200000
+ 3 0 0x66000000 0x01000000
+ 6 0 0x68000000 0x01000000
+ 7 0 0x6a000000 0x00000004>;
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x02000000>;
+ bank-width = <2>;
+ #size-cells = <1>;
+ #address-cells = <1>;
+ };
+ sram0@1,0 {
+ compatible = "mtd-ram";
+ reg = <1 0x00000 0x00400000>;
+ bank-width = <2>;
+ };
+ };
+
+ pci@f0000d00 {
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ compatible = "fsl,mpc5200-pci";
+ reg = <0xf0000d00 0x100>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x16 */
+ 0xc000 0 0 1 &mpc5200_pic 1 3 3
+ 0xc000 0 0 2 &mpc5200_pic 1 3 3
+ 0xc000 0 0 3 &mpc5200_pic 1 3 3
+ 0xc000 0 0 4 &mpc5200_pic 1 3 3>;
+ clock-frequency = <0>; /* From boot loader */
+ interrupts = <2 8 0 2 9 0 2 10 0>;
+ bus-range = <0 0>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
+ 0x02000000 0 0x90000000 0x90000000 0 0x10000000
+ 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ };
+};
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index 2a56a0dbd1f7..7bda373f10ef 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -33,7 +33,7 @@
aliases {
ethernet0 = &EMAC0;
serial0 = &UART0;
- //serial1 = &UART1; --gcl missing UART1 label
+ serial1 = &UART1;
};
cpus {
@@ -52,7 +52,7 @@
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
- //next-level-cache = <&L2C0>; --gcl missing L2C0 label
+ next-level-cache = <&L2C0>;
};
};
@@ -117,6 +117,16 @@
dcr-reg = <0x00c 0x002>;
};
+ L2C0: l2c {
+ compatible = "ibm,l2-cache-apm82181", "ibm,l2-cache";
+ dcr-reg = <0x020 0x008
+ 0x030 0x008>;
+ cache-line-size = <32>;
+ cache-size = <262144>;
+ interrupt-parent = <&UIC1>;
+ interrupts = <11 1>;
+ };
+
plb {
compatible = "ibm,plb4";
#address-cells = <2>;
@@ -182,6 +192,53 @@
reg = <0x001a0000 0x00060000>;
};
};
+
+ ndfc@1,0 {
+ compatible = "ibm,ndfc";
+ reg = <0x00000003 0x00000000 0x00002000>;
+ ccr = <0x00001000>;
+ bank-settings = <0x80002222>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* 2Gb Nand Flash */
+ nand {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "firmware";
+ reg = <0x00000000 0x00C00000>;
+ };
+ partition@c00000 {
+ label = "environment";
+ reg = <0x00C00000 0x00B00000>;
+ };
+ partition@1700000 {
+ label = "kernel";
+ reg = <0x01700000 0x00E00000>;
+ };
+ partition@2500000 {
+ label = "root";
+ reg = <0x02500000 0x08200000>;
+ };
+ partition@a700000 {
+ label = "device-tree";
+ reg = <0x0A700000 0x00B00000>;
+ };
+ partition@b200000 {
+ label = "config";
+ reg = <0x0B200000 0x00D00000>;
+ };
+ partition@bf00000 {
+ label = "diag";
+ reg = <0x0BF00000 0x00C00000>;
+ };
+ partition@cb00000 {
+ label = "vendor";
+ reg = <0x0CB00000 0x3500000>;
+ };
+ };
+ };
};
UART0: serial@ef600300 {
@@ -195,11 +252,36 @@
interrupts = <0x1 0x4>;
};
+ UART1: serial@ef600400 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0xef600400 0x00000008>;
+ virtual-reg = <0xef600400>;
+ clock-frequency = <0>; /* Filled in by U-Boot */
+ current-speed = <0>; /* Filled in by U-Boot */
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x1 0x4>;
+ };
+
IIC0: i2c@ef600700 {
compatible = "ibm,iic";
reg = <0xef600700 0x00000014>;
interrupt-parent = <&UIC0>;
interrupts = <0x2 0x4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ rtc@68 {
+ compatible = "stm,m41t80";
+ reg = <0x68>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x9 0x8>;
+ };
+ sttm@4C {
+ compatible = "adm,adm1032";
+ reg = <0x4C>;
+ interrupt-parent = <&UIC1>;
+ interrupts = <0x1E 0x8>; /* CPU_THERNAL_L */
+ };
};
IIC1: i2c@ef600800 {
@@ -222,7 +304,7 @@
EMAC0: ethernet@ef600c00 {
device_type = "network";
- compatible = "ibm,emac4sync";
+ compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
interrupt-parent = <&EMAC0>;
interrupts = <0x0 0x1>;
#interrupt-cells = <1>;
@@ -250,5 +332,46 @@
};
};
+ PCIE0: pciex@d00000000 {
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "ibm,plb-pciex-apm821xx", "ibm,plb-pciex";
+ primary;
+ port = <0x0>; /* port number */
+ reg = <0x0000000d 0x00000000 0x20000000 /* Config space access */
+ 0x0000000c 0x08010000 0x00001000>; /* Registers */
+ dcr-reg = <0x100 0x020>;
+ sdr-base = <0x300>;
+
+ /* Outbound ranges, one memory and one IO,
+ * later cannot be changed
+ */
+ ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
+ 0x02000000 0x00000000 0x00000000 0x0000000f 0x00000000 0x00000000 0x00100000
+ 0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
+
+ /* Inbound 2GB range starting at 0 */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+
+ /* This drives busses 40 to 0x7f */
+ bus-range = <0x40 0x7f>;
+
+ /* Legacy interrupts (note the weird polarity, the bridge seems
+ * to invert PCIe legacy interrupts).
+ * We are de-swizzling here because the numbers are actually for
+ * port of the root complex virtual P2P bridge. But I want
+ * to avoid putting a node for it in the tree, so the numbers
+ * below are basically de-swizzled numbers.
+ * The real slot is on idsel 0, so the swizzling is 1:1
+ */
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x0 0x0 0x0 0x1 &UIC3 0xc 0x4 /* swizzled int A */
+ 0x0 0x0 0x0 0x2 &UIC3 0xd 0x4 /* swizzled int B */
+ 0x0 0x0 0x0 0x3 &UIC3 0xe 0x4 /* swizzled int C */
+ 0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
+ };
};
};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
index 89af62637707..c8b2daa40ac8 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
@@ -202,7 +202,7 @@
/include/ "pq3-etsec1-timer-0.dtsi"
usb@22000 {
- compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
+ compatible = "fsl-usb2-mph-v1.2", "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
reg = <0x22000 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
@@ -210,7 +210,7 @@
};
usb@23000 {
- compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
+ compatible = "fsl-usb2-mph-v1.2", "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
reg = <0x23000 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
@@ -236,6 +236,10 @@
};
/include/ "pq3-esdhc-0.dtsi"
+ sdhc@2e000 {
+ compatible = "fsl,mpc8536-esdhc", "fsl,esdhc";
+ };
+
/include/ "pq3-sec3.0-0.dtsi"
/include/ "pq3-mpic.dtsi"
/include/ "pq3-mpic-timer-B.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
index 9d8023a69d7d..579d76cb8e32 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
@@ -89,6 +89,21 @@
};
};
+&rio {
+ compatible = "fsl,srio";
+ interrupts = <48 2 0 0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ fsl,srio-rmu-handle = <&rmu>;
+ ranges;
+
+ port1 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ cell-index = <1>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -134,6 +149,7 @@
/include/ "pq3-sec2.1-0.dtsi"
/include/ "pq3-mpic.dtsi"
+/include/ "pq3-rmu-0.dtsi"
global-utilities@e0000 {
compatible = "fsl,mpc8548-guts";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
index 289f1218d755..720422d83529 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
@@ -43,7 +43,9 @@
serial0 = &serial0;
serial1 = &serial1;
ethernet0 = &enet0;
- ethernet1 = &enet2;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ ethernet3 = &enet3;
pci0 = &pci0;
pci1 = &pci1;
pci2 = &pci2;
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index bd9e163c764b..0bde9ee8afaf 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -156,9 +156,13 @@
/include/ "pq3-dma-0.dtsi"
/include/ "pq3-usb2-dr-0.dtsi"
+ usb@22000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
+ };
/include/ "pq3-esdhc-0.dtsi"
sdhc@2e000 {
- fsl,sdhci-auto-cmd12;
+ compatible = "fsl,p1010-esdhc", "fsl,esdhc";
+ sdhci,auto-cmd12;
};
/include/ "pq3-sec4.4-0.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
index fc924c5ffebe..68cc5e7f6477 100644
--- a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
@@ -142,9 +142,19 @@
/include/ "pq3-dma-0.dtsi"
/include/ "pq3-usb2-dr-0.dtsi"
+ usb@22000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
+ };
/include/ "pq3-usb2-dr-1.dtsi"
+ usb@23000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
+ };
/include/ "pq3-esdhc-0.dtsi"
+ sdhc@2e000 {
+ compatible = "fsl,p1020-esdhc", "fsl,esdhc";
+ sdhci,auto-cmd12;
+ };
/include/ "pq3-sec3.3-0.dtsi"
/include/ "pq3-mpic.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
index 38ba54d1e32e..4252ef85fb7a 100644
--- a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -142,8 +142,15 @@
/include/ "pq3-dma-0.dtsi"
/include/ "pq3-usb2-dr-0.dtsi"
+ usb@22000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
+ };
/include/ "pq3-esdhc-0.dtsi"
+ sdhc@2e000 {
+ sdhci,auto-cmd12;
+ };
+
/include/ "pq3-sec3.3-0.dtsi"
/include/ "pq3-mpic.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index 16239b199d0a..06216b8c0af5 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -35,7 +35,11 @@
&lbc {
#address-cells = <2>;
#size-cells = <1>;
- compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus";
+ /*
+ * The localbus on the P1022 is not a simple-bus because of the eLBC
+ * pin muxing when the DIU is enabled.
+ */
+ compatible = "fsl,p1022-elbc", "fsl,elbc";
interrupts = <19 2 0 0>;
};
@@ -199,11 +203,18 @@
/include/ "pq3-dma-0.dtsi"
/include/ "pq3-usb2-dr-0.dtsi"
+ usb@22000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
+ };
/include/ "pq3-usb2-dr-1.dtsi"
+ usb@23000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
+ };
/include/ "pq3-esdhc-0.dtsi"
sdhc@2e000 {
- fsl,sdhci-auto-cmd12;
+ compatible = "fsl,p1022-esdhc", "fsl,esdhc";
+ sdhci,auto-cmd12;
};
/include/ "pq3-sec3.3-0.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index b06bb4cc1fe8..941fa159cefb 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -142,6 +142,9 @@
/include/ "pq3-dma-0.dtsi"
/include/ "pq3-usb2-dr-0.dtsi"
+ usb@22000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
+ };
crypto: crypto@300000 {
compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
index c041050561a7..884e01bcb243 100644
--- a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
@@ -171,6 +171,9 @@
/include/ "pq3-dma-0.dtsi"
/include/ "pq3-usb2-dr-0.dtsi"
+ usb@22000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
+ };
/include/ "pq3-etsec1-0.dtsi"
/include/ "pq3-etsec1-timer-0.dtsi"
@@ -182,6 +185,10 @@
/include/ "pq3-etsec1-1.dtsi"
/include/ "pq3-etsec1-2.dtsi"
/include/ "pq3-esdhc-0.dtsi"
+ sdhc@2e000 {
+ compatible = "fsl,p2020-esdhc", "fsl,esdhc";
+ };
+
/include/ "pq3-sec3.1-0.dtsi"
/include/ "pq3-mpic.dtsi"
/include/ "pq3-mpic-timer-B.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 234a399ddeb2..531eab82c6c9 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -309,12 +309,14 @@
/include/ "qoriq-gpio-0.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
usb0: usb@210000 {
+ compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
phy_type = "utmi";
port0;
};
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
dr_mode = "host";
phy_type = "utmi";
};
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index d41d08de7f7e..af4ebc8009e3 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -336,12 +336,14 @@
/include/ "qoriq-gpio-0.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
usb0: usb@210000 {
+ compatible = "fsl-usb2-mph-v1.6", "fsl-usb2-mph";
phy_type = "utmi";
port0;
};
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
dr_mode = "host";
phy_type = "utmi";
};
diff --git a/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
index a63edd195ae5..b3e56929eee2 100644
--- a/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
@@ -291,6 +291,12 @@
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
+ usb@210000 {
+ compatible = "fsl-usb2-mph-v2.2", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ };
/include/ "qoriq-usb2-dr-0.dtsi"
+ usb@211000 {
+ compatible = "fsl-usb2-dr-v2.2", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ };
/include/ "qoriq-sec4.1-0.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 914074b91a85..64b6abea8464 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -339,12 +339,14 @@
/include/ "qoriq-gpio-0.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
usb0: usb@210000 {
+ compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
phy_type = "utmi";
port0;
};
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
dr_mode = "host";
phy_type = "utmi";
};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
index a1979ae334a7..3b0650a98478 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
@@ -1,7 +1,7 @@
/*
* PQ3 eTSEC device tree stub [ @ offsets 0x24000 ]
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@ ethernet@24000 {
compatible = "gianfar";
reg = <0x24000 0x1000>;
ranges = <0x0 0x24000 0x1000>;
+ fsl,magic-packet;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
index 4c4fdde1ec2a..96693b41f0f1 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
@@ -1,7 +1,7 @@
/*
* PQ3 eTSEC device tree stub [ @ offsets 0x25000 ]
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@ ethernet@25000 {
compatible = "gianfar";
reg = <0x25000 0x1000>;
ranges = <0x0 0x25000 0x1000>;
+ fsl,magic-packet;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
index 4b8ab438668a..6b3fab19da1f 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
@@ -1,7 +1,7 @@
/*
* PQ3 eTSEC device tree stub [ @ offsets 0x26000 ]
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@ ethernet@26000 {
compatible = "gianfar";
reg = <0x26000 0x1000>;
ranges = <0x0 0x26000 0x1000>;
+ fsl,magic-packet;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
index 40c9137729ae..0da592d93ddd 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
@@ -1,7 +1,7 @@
/*
* PQ3 eTSEC device tree stub [ @ offsets 0x27000 ]
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@ ethernet@27000 {
compatible = "gianfar";
reg = <0x27000 0x1000>;
ranges = <0x0 0x27000 0x1000>;
+ fsl,magic-packet;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <37 2 0 0 38 2 0 0 39 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
index 5c8046065844..fdedf7b1fe0f 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
@@ -39,6 +39,9 @@ mpic: pic@40000 {
reg = <0x40000 0x40000>;
compatible = "fsl,mpic";
device_type = "open-pic";
+ big-endian;
+ single-cpu-affinity;
+ last-interrupt-source = <255>;
};
timer@41100 {
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
index bf957a7fca2a..d4c9d5daab21 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
@@ -33,32 +33,32 @@
*/
crypto@30000 {
- compatible = "fsl,sec4.4", "fsl,sec4.0";
+ compatible = "fsl,sec-v4.4", "fsl,sec-v4.0";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x30000 0x10000>;
interrupts = <58 2 0 0>;
sec_jr0: jr@1000 {
- compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+ compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <45 2 0 0>;
};
sec_jr1: jr@2000 {
- compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+ compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <45 2 0 0>;
};
sec_jr2: jr@3000 {
- compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+ compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
reg = <0x3000 0x1000>;
interrupts = <45 2 0 0>;
};
sec_jr3: jr@4000 {
- compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring";
+ compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
reg = <0x4000 0x1000>;
interrupts = <45 2 0 0>;
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
index b9bada6a87dc..08f42271f86a 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
@@ -53,7 +53,7 @@ timer@41100 {
msi0: msi@41600 {
compatible = "fsl,mpic-msi";
- reg = <0x41600 0x200>;
+ reg = <0x41600 0x200 0x44140 4>;
msi-available-ranges = <0 0x100>;
interrupts = <
0xe0 0 0 0
@@ -68,7 +68,7 @@ msi0: msi@41600 {
msi1: msi@41800 {
compatible = "fsl,mpic-msi";
- reg = <0x41800 0x200>;
+ reg = <0x41800 0x200 0x45140 4>;
msi-available-ranges = <0 0x100>;
interrupts = <
0xe8 0 0 0
@@ -83,7 +83,7 @@ msi1: msi@41800 {
msi2: msi@41a00 {
compatible = "fsl,mpic-msi";
- reg = <0x41a00 0x200>;
+ reg = <0x41a00 0x200 0x46140 4>;
msi-available-ranges = <0 0x100>;
interrupts = <
0xf0 0 0 0
diff --git a/arch/powerpc/boot/dts/ge_imp3a.dts b/arch/powerpc/boot/dts/ge_imp3a.dts
new file mode 100644
index 000000000000..fefae416a097
--- /dev/null
+++ b/arch/powerpc/boot/dts/ge_imp3a.dts
@@ -0,0 +1,255 @@
+/*
+ * GE IMP3A Device Tree Source
+ *
+ * Copyright 2010-2011 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Based on: P2020 DS Device Tree Source
+ * Copyright 2009 Freescale Semiconductor Inc.
+ */
+
+/include/ "fsl/p2020si-pre.dtsi"
+
+/ {
+ model = "GE_IMP3A";
+ compatible = "ge,imp3a";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@fef05000 {
+ reg = <0 0xfef05000 0 0x1000>;
+
+ ranges = <0x0 0x0 0x0 0xff000000 0x01000000
+ 0x1 0x0 0x0 0xe0000000 0x08000000
+ 0x2 0x0 0x0 0xe8000000 0x08000000
+ 0x3 0x0 0x0 0xfc100000 0x00020000
+ 0x4 0x0 0x0 0xfc000000 0x00008000
+ 0x5 0x0 0x0 0xfc008000 0x00008000
+ 0x6 0x0 0x0 0xfee00000 0x00040000
+ 0x7 0x0 0x0 0xfee80000 0x00040000>;
+
+ /* nor@0,0 is a mirror of part of the memory in nor@1,0
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ge,imp3a-firmware-mirror", "cfi-flash";
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ label = "firmware";
+ reg = <0x0 0x1000000>;
+ read-only;
+ };
+ };
+ */
+
+ nor@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ge,imp3a-paged-flash", "cfi-flash";
+ reg = <0x1 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ label = "user";
+ reg = <0x0 0x7800000>;
+ };
+
+ partition@7800000 {
+ label = "firmware";
+ reg = <0x7800000 0x800000>;
+ read-only;
+ };
+ };
+
+ nvram@3,0 {
+ device_type = "nvram";
+ compatible = "simtek,stk14ca8";
+ reg = <0x3 0x0 0x20000>;
+ };
+
+ fpga@4,0 {
+ compatible = "ge,imp3a-fpga-regs";
+ reg = <0x4 0x0 0x20>;
+ };
+
+ gef_pic: pic@4,20 {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ compatible = "ge,imp3a-fpga-pic", "gef,fpga-pic-1.00";
+ reg = <0x4 0x20 0x20>;
+ interrupts = <6 7 0 0>;
+ };
+
+ gef_gpio: gpio@4,400 {
+ #gpio-cells = <2>;
+ compatible = "ge,imp3a-gpio";
+ reg = <0x4 0x400 0x24>;
+ gpio-controller;
+ };
+
+ wdt@4,800 {
+ compatible = "ge,imp3a-fpga-wdt", "gef,fpga-wdt-1.00",
+ "gef,fpga-wdt";
+ reg = <0x4 0x800 0x8>;
+ interrupts = <10 4>;
+ interrupt-parent = <&gef_pic>;
+ };
+
+ /* Second watchdog available, driver currently supports one.
+ wdt@4,808 {
+ compatible = "gef,imp3a-fpga-wdt", "gef,fpga-wdt-1.00",
+ "gef,fpga-wdt";
+ reg = <0x4 0x808 0x8>;
+ interrupts = <9 4>;
+ interrupt-parent = <&gef_pic>;
+ };
+ */
+
+ nand@6,0 {
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x6 0x0 0x40000>;
+ };
+
+ nand@7,0 {
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x7 0x0 0x40000>;
+ };
+ };
+
+ soc: soc@fef00000 {
+ ranges = <0x0 0 0xfef00000 0x100000>;
+
+ i2c@3000 {
+ hwmon@48 {
+ compatible = "national,lm92";
+ reg = <0x48>;
+ };
+
+ hwmon@4c {
+ compatible = "adi,adt7461";
+ reg = <0x4c>;
+ };
+
+ rtc@51 {
+ compatible = "epson,rx8581";
+ reg = <0x51>;
+ };
+
+ eti@6b {
+ compatible = "dallas,ds1682";
+ reg = <0x6b>;
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ dr_mode = "host";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&gef_pic>;
+ interrupts = <0xc 0x4>;
+ reg = <0x1>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&gef_pic>;
+ interrupts = <0xb 0x4>;
+ reg = <0x2>;
+ };
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ status = "disabled";
+ };
+
+ enet0: ethernet@24000 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy0>;
+ phy-connection-type = "gmii";
+ };
+
+ enet1: ethernet@25000 {
+ tbi-handle = <&tbi1>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "gmii";
+ };
+
+ enet2: ethernet@26000 {
+ status = "disabled";
+ };
+ };
+
+ pci0: pcie@fef08000 {
+ ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xfe020000 0x0 0x10000>;
+ reg = <0 0xfef08000 0 0x1000>;
+
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x10000>;
+ };
+ };
+
+ pci1: pcie@fef09000 {
+ reg = <0 0xfef09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xfe010000 0x0 0x10000>;
+
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x10000>;
+ };
+
+ };
+
+ pci2: pcie@fef0a000 {
+ reg = <0 0xfef0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xfe000000 0x0 0x10000>;
+
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x10000>;
+ };
+ };
+};
+
+/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index c0e450a551bf..81dd513d6308 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -405,6 +405,10 @@
reg = <0x1>;
device_type = "ethernet-phy";
};
+ tbi-phy@2 {
+ device_type = "tbi-phy";
+ reg = <0x2>;
+ };
};
qeic: interrupt-controller@80 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index c15881574fdc..19736222a0b9 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -1,7 +1,7 @@
/*
* MPC8536 DS Device Tree Source
*
- * Copyright 2008 Freescale Semiconductor, Inc.
+ * Copyright 2008, 2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -34,6 +34,10 @@
lbc: localbus@ffe05000 {
reg = <0 0xffe05000 0 0x1000>;
+
+ ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
+ 0x2 0x0 0x0 0xffa00000 0x00040000
+ 0x3 0x0 0x0 0xffdf0000 0x00008000>;
};
board_soc: soc: soc@ffe00000 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dtsi b/arch/powerpc/boot/dts/mpc8536ds.dtsi
index 1462e4cf49d7..cc46dbd9746d 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8536ds.dtsi
@@ -32,6 +32,99 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ reg = <0x0 0x03000000>;
+ label = "ramdisk-nor";
+ };
+
+ partition@3000000 {
+ reg = <0x03000000 0x00e00000>;
+ label = "diagnostic-nor";
+ read-only;
+ };
+
+ partition@3e00000 {
+ reg = <0x03e00000 0x00200000>;
+ label = "dink-nor";
+ read-only;
+ };
+
+ partition@4000000 {
+ reg = <0x04000000 0x00400000>;
+ label = "kernel-nor";
+ };
+
+ partition@4400000 {
+ reg = <0x04400000 0x03b00000>;
+ label = "fs-nor";
+ };
+
+ partition@7f00000 {
+ reg = <0x07f00000 0x00080000>;
+ label = "dtb-nor";
+ };
+
+ partition@7f80000 {
+ reg = <0x07f80000 0x00080000>;
+ label = "u-boot-nor";
+ read-only;
+ };
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8536-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x2 0x0 0x40000>;
+
+ partition@0 {
+ reg = <0x0 0x02000000>;
+ label = "u-boot-nand";
+ read-only;
+ };
+
+ partition@2000000 {
+ reg = <0x02000000 0x10000000>;
+ label = "fs-nand";
+ };
+
+ partition@12000000 {
+ reg = <0x12000000 0x08000000>;
+ label = "ramdisk-nand";
+ };
+
+ partition@1a000000 {
+ reg = <0x1a000000 0x04000000>;
+ label = "kernel-nand";
+ };
+
+ partition@1e000000 {
+ reg = <0x1e000000 0x01000000>;
+ label = "dtb-nand";
+ };
+
+ partition@1f000000 {
+ reg = <0x1f000000 0x21000000>;
+ label = "empty-nand";
+ };
+ };
+
+ board-control@3,0 {
+ compatible = "fsl,mpc8536ds-fpga-pixis";
+ reg = <0x3 0x0 0x8000>;
+ };
+};
+
&board_soc {
i2c@3100 {
rtc@68 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds_36b.dts b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
index 8f4b929b1d1d..f8a3b3413176 100644
--- a/arch/powerpc/boot/dts/mpc8536ds_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
@@ -1,7 +1,7 @@
/*
* MPC8536DS Device Tree Source (36-bit address map)
*
- * Copyright 2008-2009 Freescale Semiconductor, Inc.
+ * Copyright 2008-2009, 2011 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -33,7 +33,11 @@
};
lbc: localbus@ffe05000 {
- reg = <0 0xffe05000 0 0x1000>;
+ reg = <0xf 0xffe05000 0 0x1000>;
+
+ ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
+ 0x2 0x0 0xf 0xffa00000 0x00040000
+ 0x3 0x0 0xf 0xffdf0000 0x00008000>;
};
board_soc: soc: soc@fffe00000 {
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
deleted file mode 100644
index 07b8dae0f46e..000000000000
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * MPC8548 CDS Device Tree Source
- *
- * Copyright 2006, 2008 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/include/ "fsl/mpc8548si-pre.dtsi"
-
-/ {
- model = "MPC8548CDS";
- compatible = "MPC8548CDS", "MPC85xxCDS";
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- ethernet3 = &enet3;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- pci2 = &pci2;
- };
-
- memory {
- device_type = "memory";
- reg = <0 0 0x0 0x8000000>; // 128M at 0x0
- };
-
- lbc: localbus@e0005000 {
- reg = <0 0xe0005000 0 0x1000>;
- };
-
- soc: soc8548@e0000000 {
- ranges = <0 0x0 0xe0000000 0x100000>;
-
- i2c@3000 {
- eeprom@50 {
- compatible = "atmel,24c64";
- reg = <0x50>;
- };
-
- eeprom@56 {
- compatible = "atmel,24c64";
- reg = <0x56>;
- };
-
- eeprom@57 {
- compatible = "atmel,24c64";
- reg = <0x57>;
- };
- };
-
- i2c@3100 {
- eeprom@50 {
- compatible = "atmel,24c64";
- reg = <0x50>;
- };
- };
-
- enet0: ethernet@24000 {
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
- };
-
- mdio@24520 {
- phy0: ethernet-phy@0 {
- interrupts = <5 1 0 0>;
- reg = <0x0>;
- device_type = "ethernet-phy";
- };
- phy1: ethernet-phy@1 {
- interrupts = <5 1 0 0>;
- reg = <0x1>;
- device_type = "ethernet-phy";
- };
- phy2: ethernet-phy@2 {
- interrupts = <5 1 0 0>;
- reg = <0x2>;
- device_type = "ethernet-phy";
- };
- phy3: ethernet-phy@3 {
- interrupts = <5 1 0 0>;
- reg = <0x3>;
- device_type = "ethernet-phy";
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- enet1: ethernet@25000 {
- tbi-handle = <&tbi1>;
- phy-handle = <&phy1>;
- };
-
- mdio@25520 {
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- enet2: ethernet@26000 {
- tbi-handle = <&tbi2>;
- phy-handle = <&phy2>;
- };
-
- mdio@26520 {
- tbi2: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- enet3: ethernet@27000 {
- tbi-handle = <&tbi3>;
- phy-handle = <&phy3>;
- };
-
- mdio@27520 {
- tbi3: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- pci0: pci@e0008000 {
- reg = <0 0xe0008000 0 0x1000>;
- ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x10000000
- 0x1000000 0x0 0x00000000 0 0xe2000000 0x0 0x800000>;
- clock-frequency = <66666666>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x4 (PCIX Slot 2) */
- 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
- 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
-
- /* IDSEL 0x5 (PCIX Slot 3) */
- 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
- 0x2800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
- 0x2800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
- 0x2800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
-
- /* IDSEL 0x6 (PCIX Slot 4) */
- 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
- 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
- 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
- 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
-
- /* IDSEL 0x8 (PCIX Slot 5) */
- 0x4000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
- 0x4000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0x4000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0x4000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
-
- /* IDSEL 0xC (Tsi310 bridge) */
- 0x6000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
- 0x6000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0x6000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0x6000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
-
- /* IDSEL 0x14 (Slot 2) */
- 0xa000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
- 0xa000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0xa000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0xa000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
-
- /* IDSEL 0x15 (Slot 3) */
- 0xa800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
- 0xa800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
- 0xa800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
- 0xa800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
-
- /* IDSEL 0x16 (Slot 4) */
- 0xb000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
- 0xb000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
- 0xb000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
- 0xb000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
-
- /* IDSEL 0x18 (Slot 5) */
- 0xc000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
- 0xc000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0xc000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0xc000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
-
- /* IDSEL 0x1C (Tsi310 bridge PCI primary) */
- 0xe000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
- 0xe000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0xe000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0xe000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
-
- pci_bridge@1c {
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
-
- /* IDSEL 0x00 (PrPMC Site) */
- 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
- 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
-
- /* IDSEL 0x04 (VIA chip) */
- 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
- 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
-
- /* IDSEL 0x05 (8139) */
- 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
-
- /* IDSEL 0x06 (Slot 6) */
- 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
- 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
- 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
- 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
-
- /* IDESL 0x07 (Slot 7) */
- 0x3800 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
- 0x3800 0x0 0x0 0x2 &mpic 0x0 0x1 0 0
- 0x3800 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
- 0x3800 0x0 0x0 0x4 &mpic 0x2 0x1 0 0>;
-
- reg = <0xe000 0x0 0x0 0x0 0x0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- ranges = <0x2000000 0x0 0x80000000
- 0x2000000 0x0 0x80000000
- 0x0 0x20000000
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x80000>;
- clock-frequency = <33333333>;
-
- isa@4 {
- device_type = "isa";
- #interrupt-cells = <2>;
- #size-cells = <1>;
- #address-cells = <2>;
- reg = <0x2000 0x0 0x0 0x0 0x0>;
- ranges = <0x1 0x0 0x1000000 0x0 0x0 0x1000>;
- interrupt-parent = <&i8259>;
-
- i8259: interrupt-controller@20 {
- interrupt-controller;
- device_type = "interrupt-controller";
- reg = <0x1 0x20 0x2
- 0x1 0xa0 0x2
- 0x1 0x4d0 0x2>;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,iic";
- interrupts = <0 1 0 0>;
- interrupt-parent = <&mpic>;
- };
-
- rtc@70 {
- compatible = "pnpPNP,b00";
- reg = <0x1 0x70 0x2>;
- };
- };
- };
- };
-
- pci1: pci@e0009000 {
- reg = <0 0xe0009000 0 0x1000>;
- ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x10000000
- 0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x800000>;
- clock-frequency = <66666666>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
-
- /* IDSEL 0x15 */
- 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0
- 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
- 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
- 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
- };
-
- pci2: pcie@e000a000 {
- reg = <0 0xe000a000 0 0x1000>;
- ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0 0xe3000000 0x0 0x100000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xa0000000
- 0x2000000 0x0 0xa0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-};
-
-/include/ "fsl/mpc8548si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dtsi b/arch/powerpc/boot/dts/mpc8548cds.dtsi
new file mode 100644
index 000000000000..c61f525e4740
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8548cds.dtsi
@@ -0,0 +1,306 @@
+/*
+ * MPC8548CDS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x01000000>;
+ bank-width = <2>;
+ device-width = <2>;
+
+ partition@0 {
+ reg = <0x0 0x0b00000>;
+ label = "ramdisk-nor";
+ };
+
+		partition@b00000 {
+ reg = <0x0b00000 0x0400000>;
+ label = "kernel-nor";
+ };
+
+		partition@f00000 {
+ reg = <0x0f00000 0x060000>;
+ label = "dtb-nor";
+ };
+
+		partition@f60000 {
+ reg = <0x0f60000 0x020000>;
+ label = "env-nor";
+ read-only;
+ };
+
+		partition@f80000 {
+ reg = <0x0f80000 0x080000>;
+ label = "u-boot-nor";
+ read-only;
+ };
+ };
+
+ board-control@1,0 {
+ compatible = "fsl,mpc8548cds-fpga";
+ reg = <0x1 0x0 0x1000>;
+ };
+};
+
+&board_soc {
+ i2c@3000 {
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ eeprom@56 {
+ compatible = "atmel,24c64";
+ reg = <0x56>;
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c64";
+ reg = <0x57>;
+ };
+ };
+
+ i2c@3100 {
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ enet0: ethernet@24000 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy0>;
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupts = <5 1 0 0>;
+ reg = <0x0>;
+ device_type = "ethernet-phy";
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <5 1 0 0>;
+ reg = <0x1>;
+ device_type = "ethernet-phy";
+ };
+ phy2: ethernet-phy@2 {
+ interrupts = <5 1 0 0>;
+ reg = <0x2>;
+ device_type = "ethernet-phy";
+ };
+ phy3: ethernet-phy@3 {
+ interrupts = <5 1 0 0>;
+ reg = <0x3>;
+ device_type = "ethernet-phy";
+ };
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet1: ethernet@25000 {
+ tbi-handle = <&tbi1>;
+ phy-handle = <&phy1>;
+ };
+
+ mdio@25520 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet2: ethernet@26000 {
+ tbi-handle = <&tbi2>;
+ phy-handle = <&phy2>;
+ };
+
+ mdio@26520 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet3: ethernet@27000 {
+ tbi-handle = <&tbi3>;
+ phy-handle = <&phy3>;
+ };
+
+ mdio@27520 {
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+};
+
+&board_pci0 {
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x4 (PCIX Slot 2) */
+ 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
+
+ /* IDSEL 0x5 (PCIX Slot 3) */
+ 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
+ 0x2800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
+ 0x2800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
+ 0x2800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
+
+ /* IDSEL 0x6 (PCIX Slot 4) */
+ 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+ 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x8 (PCIX Slot 5) */
+ 0x4000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0x4000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0x4000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x4000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
+
+ /* IDSEL 0xC (Tsi310 bridge) */
+ 0x6000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0x6000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0x6000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x6000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
+
+ /* IDSEL 0x14 (Slot 2) */
+ 0xa000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0xa000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xa000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xa000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
+
+ /* IDSEL 0x15 (Slot 3) */
+ 0xa800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
+ 0xa800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
+ 0xa800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
+ 0xa800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
+
+ /* IDSEL 0x16 (Slot 4) */
+ 0xb000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0xb000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0xb000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+ 0xb000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x18 (Slot 5) */
+ 0xc000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0xc000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xc000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xc000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
+
+ /* IDSEL 0x1C (Tsi310 bridge PCI primary) */
+ 0xe000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0xe000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xe000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xe000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
+
+ pci_bridge@1c {
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+
+ /* IDSEL 0x00 (PrPMC Site) */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
+
+ /* IDSEL 0x04 (VIA chip) */
+ 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
+ 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
+
+ /* IDSEL 0x05 (8139) */
+ 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
+
+ /* IDSEL 0x06 (Slot 6) */
+ 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
+ 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
+ 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
+ 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
+
+		/* IDSEL 0x07 (Slot 7) */
+ 0x3800 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
+ 0x3800 0x0 0x0 0x2 &mpic 0x0 0x1 0 0
+ 0x3800 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
+ 0x3800 0x0 0x0 0x4 &mpic 0x2 0x1 0 0>;
+
+ reg = <0xe000 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x80000>;
+ clock-frequency = <33333333>;
+
+ isa@4 {
+ device_type = "isa";
+ #interrupt-cells = <2>;
+ #size-cells = <1>;
+ #address-cells = <2>;
+ reg = <0x2000 0x0 0x0 0x0 0x0>;
+ ranges = <0x1 0x0 0x1000000 0x0 0x0 0x1000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ reg = <0x1 0x20 0x2
+ 0x1 0xa0 0x2
+ 0x1 0x4d0 0x2>;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ compatible = "chrp,iic";
+ interrupts = <0 1 0 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ rtc@70 {
+ compatible = "pnpPNP,b00";
+ reg = <0x1 0x70 0x2>;
+ };
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8548cds_32b.dts b/arch/powerpc/boot/dts/mpc8548cds_32b.dts
new file mode 100644
index 000000000000..6fd63163fc6b
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8548cds_32b.dts
@@ -0,0 +1,86 @@
+/*
+ * MPC8548 CDS Device Tree Source (32-bit address map)
+ *
+ * Copyright 2006, 2008, 2011-2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/mpc8548si-pre.dtsi"
+
+/ {
+ model = "MPC8548CDS";
+ compatible = "MPC8548CDS", "MPC85xxCDS";
+
+ memory {
+ device_type = "memory";
+ reg = <0 0 0x0 0x8000000>; // 128M at 0x0
+ };
+
+ board_lbc: lbc: localbus@e0005000 {
+ reg = <0 0xe0005000 0 0x1000>;
+
+ ranges = <0x0 0x0 0x0 0xff000000 0x01000000
+ 0x1 0x0 0x0 0xf8004000 0x00001000>;
+
+ };
+
+ board_soc: soc: soc8548@e0000000 {
+ ranges = <0 0x0 0xe0000000 0x100000>;
+ };
+
+ board_pci0: pci0: pci@e0008000 {
+ reg = <0 0xe0008000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0 0xe2000000 0x0 0x800000>;
+ clock-frequency = <66666666>;
+ };
+
+ pci1: pci@e0009000 {
+ reg = <0 0xe0009000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x800000>;
+ clock-frequency = <66666666>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+
+ /* IDSEL 0x15 */
+ 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0
+ 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
+ };
+
+ pci2: pcie@e000a000 {
+ reg = <0 0xe000a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xe3000000 0x0 0x100000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ rio: rapidio@e00c0000 {
+ reg = <0x0 0xe00c0000 0x0 0x20000>;
+ port1 {
+ ranges = <0x0 0x0 0x0 0xc0000000 0x0 0x20000000>;
+ };
+ };
+};
+
+/*
+ * mpc8548cds.dtsi must be last to ensure board_pci0 overrides pci0 settings
+ * for interrupt-map & interrupt-map-mask.
+ */
+
+/include/ "fsl/mpc8548si-post.dtsi"
+/include/ "mpc8548cds.dtsi"
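
The include-order comment above relies on how dtc merges source files: a node can be extended through any of its labels (&pci0 and &board_pci0 name the same node here), and when a property is assigned more than once the last assignment wins. Because mpc8548cds.dtsi is pulled in after fsl/mpc8548si-post.dtsi, its &board_pci0 interrupt-map and interrupt-map-mask replace whatever the SoC include set. A toy illustration of that mechanism, not taken from the patch:

	/dts-v1/;

	/ {
		board_pci0: pci0: pci@e0008000 {
			clock-frequency = <33333333>;	/* from the first include */
		};
	};

	&board_pci0 {
		clock-frequency = <66666666>;	/* later assignment: this value ends up in the dtb */
	};
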
diff --git a/arch/powerpc/boot/dts/mpc8548cds_36b.dts b/arch/powerpc/boot/dts/mpc8548cds_36b.dts
new file mode 100644
index 000000000000..10e551b11bd6
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8548cds_36b.dts
@@ -0,0 +1,86 @@
+/*
+ * MPC8548 CDS Device Tree Source (36-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/mpc8548si-pre.dtsi"
+
+/ {
+ model = "MPC8548CDS";
+ compatible = "MPC8548CDS", "MPC85xxCDS";
+
+ memory {
+ device_type = "memory";
+ reg = <0 0 0x0 0x8000000>; // 128M at 0x0
+ };
+
+ board_lbc: lbc: localbus@fe0005000 {
+ reg = <0xf 0xe0005000 0 0x1000>;
+
+ ranges = <0x0 0x0 0xf 0xff000000 0x01000000
+ 0x1 0x0 0xf 0xf8004000 0x00001000>;
+
+ };
+
+ board_soc: soc: soc8548@fe0000000 {
+ ranges = <0 0xf 0xe0000000 0x100000>;
+ };
+
+ board_pci0: pci0: pci@fe0008000 {
+ reg = <0xf 0xe0008000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0xf 0xe2000000 0x0 0x800000>;
+ clock-frequency = <66666666>;
+ };
+
+ pci1: pci@fe0009000 {
+ reg = <0xf 0xe0009000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x10000000 0x0 0x10000000
+ 0x1000000 0x0 0x00000000 0xf 0xe2800000 0x0 0x800000>;
+ clock-frequency = <66666666>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+
+ /* IDSEL 0x15 */
+ 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0
+ 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
+ 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
+ 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
+ };
+
+ pci2: pcie@fe000a000 {
+ reg = <0xf 0xe000a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xe3000000 0x0 0x100000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ rio: rapidio@fe00c0000 {
+ reg = <0xf 0xe00c0000 0x0 0x20000>;
+ port1 {
+ ranges = <0x0 0x0 0xc 0x40000000 0x0 0x20000000>;
+ };
+ };
+};
+
+/*
+ * mpc8548cds.dtsi must be last to ensure board_pci0 overrides pci0 settings
+ * for interrupt-map & interrupt-map-mask.
+ */
+
+/include/ "fsl/mpc8548si-post.dtsi"
+/include/ "mpc8548cds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dtsi b/arch/powerpc/boot/dts/mpc8572ds.dtsi
index c3d4fac0532a..14178944e220 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8572ds.dtsi
@@ -41,37 +41,47 @@
bank-width = <2>;
device-width = <1>;
- ramdisk@0 {
+ partition@0 {
reg = <0x0 0x03000000>;
- read-only;
+ label = "ramdisk-nor";
};
- diagnostic@3000000 {
+ partition@3000000 {
reg = <0x03000000 0x00e00000>;
+ label = "diagnostic-nor";
read-only;
};
- dink@3e00000 {
+ partition@3e00000 {
reg = <0x03e00000 0x00200000>;
+ label = "dink-nor";
read-only;
};
- kernel@4000000 {
+ partition@4000000 {
reg = <0x04000000 0x00400000>;
- read-only;
+ label = "kernel-nor";
};
- jffs2@4400000 {
+ partition@4400000 {
reg = <0x04400000 0x03b00000>;
+ label = "fs-nor";
+ };
+
+ partition@7f00000 {
+ reg = <0x07f00000 0x00060000>;
+ label = "dtb-nor";
};
- dtb@7f00000 {
- reg = <0x07f00000 0x00080000>;
+ partition@7f60000 {
+ reg = <0x07f60000 0x00020000>;
+ label = "env-nor";
read-only;
};
- u-boot@7f80000 {
+ partition@7f80000 {
reg = <0x07f80000 0x00080000>;
+ label = "u-boot-nor";
read-only;
};
};
@@ -83,31 +93,35 @@
"fsl,elbc-fcm-nand";
reg = <0x2 0x0 0x40000>;
- u-boot@0 {
+ partition@0 {
reg = <0x0 0x02000000>;
+ label = "u-boot-nand";
read-only;
};
- jffs2@2000000 {
+ partition@2000000 {
reg = <0x02000000 0x10000000>;
+ label = "fs-nand";
};
- ramdisk@12000000 {
+ partition@12000000 {
reg = <0x12000000 0x08000000>;
- read-only;
+ label = "ramdisk-nand";
};
- kernel@1a000000 {
+ partition@1a000000 {
reg = <0x1a000000 0x04000000>;
+ label = "kernel-nand";
};
- dtb@1e000000 {
+ partition@1e000000 {
reg = <0x1e000000 0x01000000>;
- read-only;
+ label = "dtb-nand";
};
- empty@1f000000 {
+ partition@1f000000 {
reg = <0x1f000000 0x21000000>;
+ label = "empty-nand";
};
};
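
The relabelling above moves the flash layout to the generic MTD partition convention: each child is named partition@<offset>, its reg gives <offset size> within the flash, the human-readable name lives in a label property (which the kernel's ofpart parser uses for the mtd name), and read-only marks partitions the kernel must not write. In sketch form, using the u-boot partition values from the hunk above:

	nor@0,0 {
		#address-cells = <1>;
		#size-cells = <1>;

		partition@7f80000 {
			reg = <0x07f80000 0x00080000>;	/* offset, size */
			label = "u-boot-nor";
			read-only;
		};
	};
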
diff --git a/arch/powerpc/boot/dts/p1010rdb.dtsi b/arch/powerpc/boot/dts/p1010rdb.dtsi
index d4c4a7730285..49776143a1b8 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1010rdb.dtsi
@@ -138,7 +138,7 @@
#size-cells = <1>;
compatible = "spansion,s25sl12801";
reg = <0>;
- spi-max-frequency = <50000000>;
+ spi-max-frequency = <40000000>;
partition@0 {
/* 1MB for u-boot Bootloader Image */
@@ -196,7 +196,7 @@
};
tbi-phy@3 {
- device-type = "tbi-phy";
+ device_type = "tbi-phy";
reg = <0x3>;
};
};
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc.dtsi b/arch/powerpc/boot/dts/p1020rdb-pc.dtsi
new file mode 100644
index 000000000000..c952cd37cf6d
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc.dtsi
@@ -0,0 +1,247 @@
+/*
+ * P1020 RDB-PC Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 3.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00380000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 11MB for JFFS2 based Root file System */
+ reg = <0x00400000 0x00b00000>;
+ label = "NOR JFFS2 Root File System";
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x00f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1020-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x40000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 7MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00700000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1100000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x01100000 0x00f00000>;
+ label = "NAND Writable User area";
+ };
+ };
+
+ L2switch@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "vitesse-7385";
+ reg = <0x2 0x0 0x20000>;
+ };
+
+ cpld@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cpld";
+ reg = <0x3 0x0 0x20000>;
+ read-only;
+ };
+};
+
+&soc {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "pericom,pt7c4338";
+ reg = <0x68>;
+ };
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+
+ partition@u-boot {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "u-boot";
+ read-only;
+ };
+
+ partition@dtb {
+ /* 512KB for DTB Image*/
+ reg = <0x00080000 0x00080000>;
+ label = "dtb";
+ };
+
+ partition@kernel {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "kernel";
+ };
+
+ partition@fs {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "file system";
+ };
+
+ partition@jffs-fs {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "file system jffs2";
+ };
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ /* USB2 is shared with localbus, so it must be disabled
+ by default. We can't put 'status = "disabled";' here
+ since U-Boot doesn't clear the status property when
+ it enables USB2. OTOH, U-Boot does create a new node
+ when there isn't any. So, just comment it out.
+ usb@23000 {
+ phy_type = "ulpi";
+ };
+ */
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1>;
+ reg = <0x0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1>;
+ reg = <0x1>;
+ };
+
+ tbi0: tbi-phy@11 {
+ device_type = "tbi-phy";
+ reg = <0x11>;
+ };
+ };
+
+ mdio@25000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi1>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+};
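
A note on the enet0 settings in this stub: the first eTSEC is wired straight to the on-board Vitesse 7385 switch, so there is no MDIO-managed PHY to attach to. The legacy fixed-link binding used here encodes <emulated-phy-id full-duplex speed pause asym-pause>, so <1 1 1000 0 0> describes a fixed full-duplex gigabit link with pause frames disabled. Annotated form of the same node (comments added, properties unchanged):

	enet0: ethernet@b0000 {
		/* no PHY: the MAC is hard-wired to the Vitesse 7385 L2 switch */
		/* <emulated-phy-id  full-duplex  speed  pause  asym-pause> */
		fixed-link = <1 1 1000 0 0>;
		phy-connection-type = "rgmii-id";
	};
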
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc_32b.dts b/arch/powerpc/boot/dts/p1020rdb-pc_32b.dts
new file mode 100644
index 000000000000..4de69b726dc5
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc_32b.dts
@@ -0,0 +1,90 @@
+/*
+ * P1020 RDB-PC Device Tree Source (32-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1020RDB-PC";
+ compatible = "fsl,P1020RDB-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
+ ranges = <0x0 0x0 0x0 0xef000000 0x01000000
+ 0x1 0x0 0x0 0xff800000 0x00040000
+ 0x2 0x0 0x0 0xffb00000 0x00020000
+ 0x3 0x0 0x0 0xffa00000 0x00020000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ reg = <0 0xffe09000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1020rdb-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc_36b.dts b/arch/powerpc/boot/dts/p1020rdb-pc_36b.dts
new file mode 100644
index 000000000000..5237da7441bc
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc_36b.dts
@@ -0,0 +1,90 @@
+/*
+ * P1020 RDB-PC Device Tree Source (36-bit address map)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1020RDB-PC";
+ compatible = "fsl,P1020RDB-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0 0x1000>;
+
+ /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
+ ranges = <0x0 0x0 0xf 0xef000000 0x01000000
+ 0x1 0x0 0xf 0xff800000 0x00040000
+ 0x2 0x0 0xf 0xffb00000 0x00040000
+ 0x3 0x0 0xf 0xffa00000 0x00020000>;
+ };
+
+ soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1020rdb-pc.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc_camp_core0.dts b/arch/powerpc/boot/dts/p1020rdb-pc_camp_core0.dts
new file mode 100644
index 000000000000..f411515937ec
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc_camp_core0.dts
@@ -0,0 +1,64 @@
+/*
+ * P1020 RDB-PC Core0 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs its own dts. Only the mpic and the L2 cache
+ * can be shared; all other devices must be assigned to one core only.
+ * This dts file allows core0 to have memory, l2, i2c, spi, gpio, tdm, dma, usb,
+ * eth1, eth2, sdhc, crypto, global-util, message, pci0, pci1, msi.
+ *
+ * Note: add "-b 0" when compiling core0's dts.
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "p1020rdb-pc_32b.dts"
+
+/ {
+ model = "fsl,P1020RDB-PC";
+ compatible = "fsl,P1020RDB-PC";
+
+ aliases {
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ serial0 = &serial0;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ PowerPC,P1020@1 {
+ status = "disabled";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ localbus@ffe05000 {
+ status = "disabled";
+ };
+
+ soc@ffe00000 {
+ serial1: serial@4600 {
+ status = "disabled";
+ };
+
+ enet0: ethernet@b0000 {
+ status = "disabled";
+ };
+
+ mpic: pic@40000 {
+ protected-sources = <
+ 42 29 30 34 /* serial1, enet0-queue-group0 */
+ 17 18 24 45 /* enet0-queue-group1, crypto */
+ >;
+ pic-no-reset;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc_camp_core1.dts b/arch/powerpc/boot/dts/p1020rdb-pc_camp_core1.dts
new file mode 100644
index 000000000000..a91335ad82c2
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc_camp_core1.dts
@@ -0,0 +1,142 @@
+/*
+ * P1020 RDB-PC Core1 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs its own dts. Only the mpic and the L2 cache
+ * can be shared; all other devices must be assigned to one core only.
+ * This dts allows core1 to have l2, eth0, crypto.
+ *
+ * Note: add "-b 1" when compiling core1's dts.
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "p1020rdb-pc_32b.dts"
+
+/ {
+ model = "fsl,P1020RDB-PC";
+ compatible = "fsl,P1020RDB-PC";
+
+ aliases {
+ ethernet0 = &enet0;
+ serial0 = &serial1;
+ };
+
+ cpus {
+ PowerPC,P1020@0 {
+ status = "disabled";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ localbus@ffe05000 {
+ status = "disabled";
+ };
+
+ soc@ffe00000 {
+ ecm-law@0 {
+ status = "disabled";
+ };
+
+ ecm@1000 {
+ status = "disabled";
+ };
+
+ memory-controller@2000 {
+ status = "disabled";
+ };
+
+ i2c@3000 {
+ status = "disabled";
+ };
+
+ i2c@3100 {
+ status = "disabled";
+ };
+
+ serial0: serial@4500 {
+ status = "disabled";
+ };
+
+ spi@7000 {
+ status = "disabled";
+ };
+
+ gpio: gpio-controller@f000 {
+ status = "disabled";
+ };
+
+ dma@21300 {
+ status = "disabled";
+ };
+
+ mdio@24000 {
+ status = "disabled";
+ };
+
+ mdio@25000 {
+ status = "disabled";
+ };
+
+ enet1: ethernet@b1000 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@b2000 {
+ status = "disabled";
+ };
+
+ usb@22000 {
+ status = "disabled";
+ };
+
+ sdhci@2e000 {
+ status = "disabled";
+ };
+
+ mpic: pic@40000 {
+ protected-sources = <
+ 16 /* ecm, mem, L2, pci0, pci1 */
+ 43 42 59 /* i2c, serial0, spi */
+ 47 63 62 /* gpio, tdm */
+ 20 21 22 23 /* dma */
+ 03 02 /* mdio */
+ 35 36 40 /* enet1-queue-group0 */
+ 51 52 67 /* enet1-queue-group1 */
+ 31 32 33 /* enet2-queue-group0 */
+ 25 26 27 /* enet2-queue-group1 */
+ 28 72 58 /* usb, sdhci, crypto */
+ 0xb0 0xb1 0xb2 /* message */
+ 0xb3 0xb4 0xb5
+ 0xb6 0xb7
+ 0xe0 0xe1 0xe2 /* msi */
+ 0xe3 0xe4 0xe5
+			0xe6 0xe7 /* sdhci, crypto, pci */
+ >;
+ pic-no-reset;
+ };
+
+ msi@41600 {
+ status = "disabled";
+ };
+
+ global-utilities@e0000 { //global utilities block
+ status = "disabled";
+ };
+ };
+
+ pci0: pcie@ffe09000 {
+ status = "disabled";
+ };
+
+ pci1: pcie@ffe0a000 {
+ status = "disabled";
+ };
+};
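
Taken together, the two CAMP trees partition the SoC: every device owned by the other core is hidden with status = "disabled", and its interrupt sources are listed in the shared MPIC's protected-sources so this kernel leaves them alone; pic-no-reset keeps either kernel from resetting the shared MPIC underneath the other. As a sketch, giving enet1 to core0 means core1's tree carries something like the following (interrupt numbers copied from the protected-sources list above):

	enet1: ethernet@b1000 {
		status = "disabled";		/* owned by the other core */
	};

	mpic: pic@40000 {
		/* enet1 queue-group 0 and 1 interrupts belong to core0 */
		protected-sources = <35 36 40 51 52 67>;
		pic-no-reset;
	};
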
diff --git a/arch/powerpc/boot/dts/p1020rdb.dtsi b/arch/powerpc/boot/dts/p1020rdb.dtsi
index b5bd86f4baf2..1fb7e0e0940f 100644
--- a/arch/powerpc/boot/dts/p1020rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1020rdb.dtsi
@@ -1,7 +1,7 @@
/*
* P1020 RDB Device Tree Source stub (no addresses or top-level ranges)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -190,17 +190,16 @@
usb@22000 {
phy_type = "ulpi";
+ dr_mode = "host";
};
- /* USB2 is shared with localbus, so it must be disabled
- by default. We can't put 'status = "disabled";' here
- since U-Boot doesn't clear the status property when
- it enables USB2. OTOH, U-Boot does create a new node
- when there isn't any. So, just comment it out.
+	/* USB2 is shared with the localbus. It is used
+	   only for SPI and SD boot, after the appropriate
+	   device-tree fixup has been done by U-Boot. */
usb@23000 {
phy_type = "ulpi";
+ dr_mode = "host";
};
- */
mdio@24000 {
phy0: ethernet-phy@0 {
diff --git a/arch/powerpc/boot/dts/p1021mds.dts b/arch/powerpc/boot/dts/p1021mds.dts
index d9540791e434..97116f198a37 100644
--- a/arch/powerpc/boot/dts/p1021mds.dts
+++ b/arch/powerpc/boot/dts/p1021mds.dts
@@ -1,7 +1,7 @@
/*
* P1021 MDS Device Tree Source
*
- * Copyright 2010 Freescale Semiconductor Inc.
+ * Copyright 2010,2012 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -151,6 +151,7 @@
usb@22000 {
phy_type = "ulpi";
+ dr_mode = "host";
};
mdio@24000 {
diff --git a/arch/powerpc/boot/dts/p1021rdb.dts b/arch/powerpc/boot/dts/p1021rdb.dts
new file mode 100644
index 000000000000..90b6b4caa273
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1021rdb.dts
@@ -0,0 +1,96 @@
+/*
+ * P1021 RDB Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1021si-pre.dtsi"
+/ {
+ model = "fsl,P1021RDB";
+ compatible = "fsl,P1021RDB-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
+ ranges = <0x0 0x0 0x0 0xef000000 0x01000000
+ 0x1 0x0 0x0 0xff800000 0x00040000
+ 0x2 0x0 0x0 0xffb00000 0x00020000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ reg = <0 0xffe09000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ qe: qe@ffe80000 {
+ ranges = <0x0 0x0 0xffe80000 0x40000>;
+ reg = <0 0xffe80000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+ };
+};
+
+/include/ "p1021rdb.dtsi"
+/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1021rdb.dtsi b/arch/powerpc/boot/dts/p1021rdb.dtsi
new file mode 100644
index 000000000000..b973461ab751
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1021rdb.dtsi
@@ -0,0 +1,236 @@
+/*
+ * P1021 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 3.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00380000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 11MB for JFFS2 based Root file System */
+ reg = <0x00400000 0x00b00000>;
+ label = "NOR JFFS2 Root File System";
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x00f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1021-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x40000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 7MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00700000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1100000 {
+ /* 15MB for User Writable Area */
+ reg = <0x01100000 0x00f00000>;
+ label = "NAND Writable User area";
+ };
+ };
+
+ L2switch@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "vitesse-7385";
+ reg = <0x2 0x0 0x20000>;
+ };
+};
+
+&soc {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "pericom,pt7c4338";
+ reg = <0x68>;
+ };
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+
+ partition@u-boot {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "SPI Flash U-Boot Image";
+ read-only;
+ };
+
+ partition@dtb {
+ /* 512KB for DTB Image */
+ reg = <0x00080000 0x00080000>;
+ label = "SPI Flash DTB Image";
+ };
+
+ partition@kernel {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "SPI Flash Linux Kernel Image";
+ };
+
+ partition@fs {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "SPI Flash Compressed RFSImage";
+ };
+
+ partition@jffs-fs {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "SPI Flash JFFS2 RFS";
+ };
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1 0 0>;
+ reg = <0x0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1 0 0>;
+ reg = <0x1>;
+ };
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi1>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ tbi-handle = <&tbi2>;
+ phy-connection-type = "rgmii-id";
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1021rdb_36b.dts b/arch/powerpc/boot/dts/p1021rdb_36b.dts
new file mode 100644
index 000000000000..ea6d8b5fa10b
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1021rdb_36b.dts
@@ -0,0 +1,96 @@
+/*
+ * P1021 RDB Device Tree Source (36-bit address map)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1021si-pre.dtsi"
+/ {
+ model = "fsl,P1021RDB";
+ compatible = "fsl,P1021RDB-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0 0x1000>;
+
+ /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
+ ranges = <0x0 0x0 0xf 0xef000000 0x01000000
+ 0x1 0x0 0xf 0xff800000 0x00040000
+ 0x2 0x0 0xf 0xffb00000 0x00020000>;
+ };
+
+ soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ reg = <0xf 0xffe09000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ qe: qe@fffe80000 {
+ ranges = <0x0 0xf 0xffe80000 0x40000>;
+ reg = <0xf 0xffe80000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+ };
+};
+
+/include/ "p1021rdb.dtsi"
+/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
deleted file mode 100644
index ef95717db4bc..000000000000
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * P1022 DS 36Bit Physical Address Map Device Tree Source
- *
- * Copyright 2010 Freescale Semiconductor, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-/include/ "fsl/p1022si-pre.dtsi"
-/ {
- model = "fsl,P1022DS";
- compatible = "fsl,P1022DS";
-
- memory {
- device_type = "memory";
- };
-
- lbc: localbus@fffe05000 {
- reg = <0xf 0xffe05000 0 0x1000>;
- ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
- 0x1 0x0 0xf 0xe0000000 0x08000000
- 0x2 0x0 0xf 0xff800000 0x00040000
- 0x3 0x0 0xf 0xffdf0000 0x00008000>;
-
- /*
- * This node is used to access the pixis via "indirect" mode,
- * which is done by writing the pixis register index to chip
- * select 0 and the value to/from chip select 1. Indirect
- * mode is the only way to access the pixis when DIU video
- * is enabled. Note that this assumes that the first column
- * of the 'ranges' property above is the chip select number.
- */
- board-control@0,0 {
- compatible = "fsl,p1022ds-indirect-pixis";
- reg = <0x0 0x0 1 /* CS0 */
- 0x1 0x0 1>; /* CS1 */
- };
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
- bank-width = <2>;
- device-width = <1>;
-
- partition@0 {
- reg = <0x0 0x03000000>;
- label = "ramdisk-nor";
- read-only;
- };
-
- partition@3000000 {
- reg = <0x03000000 0x00e00000>;
- label = "diagnostic-nor";
- read-only;
- };
-
- partition@3e00000 {
- reg = <0x03e00000 0x00200000>;
- label = "dink-nor";
- read-only;
- };
-
- partition@4000000 {
- reg = <0x04000000 0x00400000>;
- label = "kernel-nor";
- read-only;
- };
-
- partition@4400000 {
- reg = <0x04400000 0x03b00000>;
- label = "jffs2-nor";
- };
-
- partition@7f00000 {
- reg = <0x07f00000 0x00080000>;
- label = "dtb-nor";
- read-only;
- };
-
- partition@7f80000 {
- reg = <0x07f80000 0x00080000>;
- label = "u-boot-nor";
- read-only;
- };
- };
-
- nand@2,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,elbc-fcm-nand";
- reg = <0x2 0x0 0x40000>;
-
- partition@0 {
- reg = <0x0 0x02000000>;
- label = "u-boot-nand";
- read-only;
- };
-
- partition@2000000 {
- reg = <0x02000000 0x10000000>;
- label = "jffs2-nand";
- };
-
- partition@12000000 {
- reg = <0x12000000 0x10000000>;
- label = "ramdisk-nand";
- read-only;
- };
-
- partition@22000000 {
- reg = <0x22000000 0x04000000>;
- label = "kernel-nand";
- };
-
- partition@26000000 {
- reg = <0x26000000 0x01000000>;
- label = "dtb-nand";
- read-only;
- };
-
- partition@27000000 {
- reg = <0x27000000 0x19000000>;
- label = "reserved-nand";
- };
- };
-
- board-control@3,0 {
- compatible = "fsl,p1022ds-fpga", "fsl,fpga-ngpixis";
- reg = <3 0 0x30>;
- interrupt-parent = <&mpic>;
- /*
- * IRQ8 is generated if the "EVENT" switch is pressed
- * and PX_CTL[EVESEL] is set to 00.
- */
- interrupts = <8 8 0 0>;
- };
- };
-
- soc: soc@fffe00000 {
- ranges = <0x0 0xf 0xffe00000 0x100000>;
-
- i2c@3100 {
- wm8776:codec@1a {
- compatible = "wlf,wm8776";
- reg = <0x1a>;
- /*
- * clock-frequency will be set by U-Boot if
- * the clock is enabled.
- */
- };
- };
-
- spi@7000 {
- flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "spansion,s25sl12801";
- reg = <0>;
- spi-max-frequency = <40000000>; /* input clock */
-
- partition@0 {
- label = "u-boot-spi";
- reg = <0x00000000 0x00100000>;
- read-only;
- };
- partition@100000 {
- label = "kernel-spi";
- reg = <0x00100000 0x00500000>;
- read-only;
- };
- partition@600000 {
- label = "dtb-spi";
- reg = <0x00600000 0x00100000>;
- read-only;
- };
- partition@700000 {
- label = "file system-spi";
- reg = <0x00700000 0x00900000>;
- };
- };
- };
-
- ssi@15000 {
- fsl,mode = "i2s-slave";
- codec-handle = <&wm8776>;
- fsl,ssi-asynchronous;
- };
-
- usb@22000 {
- phy_type = "ulpi";
- };
-
- usb@23000 {
- status = "disabled";
- };
-
- mdio@24000 {
- phy0: ethernet-phy@0 {
- interrupts = <3 1 0 0>;
- reg = <0x1>;
- };
- phy1: ethernet-phy@1 {
- interrupts = <9 1 0 0>;
- reg = <0x2>;
- };
- tbi-phy@2 {
- device_type = "tbi-phy";
- reg = <0x2>;
- };
- };
-
- ethernet@b0000 {
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
- };
-
- ethernet@b1000 {
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
- };
- };
-
- pci0: pcie@fffe09000 {
- reg = <0xf 0xffe09000 0 0x1000>;
- ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xe0000000
- 0x2000000 0x0 0xe0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-
- pci1: pcie@fffe0a000 {
- reg = <0xf 0xffe0a000 0 0x1000>;
- ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>;
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- ranges = <0x2000000 0x0 0xe0000000
- 0x2000000 0x0 0xe0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-
- pci2: pcie@fffe0b000 {
- reg = <0xf 0xffe0b000 0 0x1000>;
- ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
- 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
- pcie@0 {
- ranges = <0x2000000 0x0 0xe0000000
- 0x2000000 0x0 0xe0000000
- 0x0 0x20000000
-
- 0x1000000 0x0 0x0
- 0x1000000 0x0 0x0
- 0x0 0x100000>;
- };
- };
-};
-
-/include/ "fsl/p1022si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
new file mode 100644
index 000000000000..7cdb505036bb
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1022ds.dtsi
@@ -0,0 +1,234 @@
+/*
+ * P1022 DS Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_lbc {
+ /*
+ * This node is used to access the pixis via "indirect" mode,
+ * which is done by writing the pixis register index to chip
+ * select 0 and the value to/from chip select 1. Indirect
+ * mode is the only way to access the pixis when DIU video
+ * is enabled. Note that this assumes that the first column
+ * of the 'ranges' property above is the chip select number.
+ */
+ board-control@0,0 {
+ compatible = "fsl,p1022ds-indirect-pixis";
+ reg = <0x0 0x0 1 /* CS0 */
+ 0x1 0x0 1>; /* CS1 */
+ interrupt-parent = <&mpic>;
+ interrupts = <8 0 0 0>;
+ };
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ reg = <0x0 0x03000000>;
+ label = "ramdisk-nor";
+ read-only;
+ };
+
+ partition@3000000 {
+ reg = <0x03000000 0x00e00000>;
+ label = "diagnostic-nor";
+ read-only;
+ };
+
+ partition@3e00000 {
+ reg = <0x03e00000 0x00200000>;
+ label = "dink-nor";
+ read-only;
+ };
+
+ partition@4000000 {
+ reg = <0x04000000 0x00400000>;
+ label = "kernel-nor";
+ read-only;
+ };
+
+ partition@4400000 {
+ reg = <0x04400000 0x03b00000>;
+ label = "jffs2-nor";
+ };
+
+ partition@7f00000 {
+ reg = <0x07f00000 0x00080000>;
+ label = "dtb-nor";
+ read-only;
+ };
+
+ partition@7f80000 {
+ reg = <0x07f80000 0x00080000>;
+ label = "u-boot-nor";
+ read-only;
+ };
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x2 0x0 0x40000>;
+
+ partition@0 {
+ reg = <0x0 0x02000000>;
+ label = "u-boot-nand";
+ read-only;
+ };
+
+ partition@2000000 {
+ reg = <0x02000000 0x10000000>;
+ label = "jffs2-nand";
+ };
+
+ partition@12000000 {
+ reg = <0x12000000 0x10000000>;
+ label = "ramdisk-nand";
+ read-only;
+ };
+
+ partition@22000000 {
+ reg = <0x22000000 0x04000000>;
+ label = "kernel-nand";
+ };
+
+ partition@26000000 {
+ reg = <0x26000000 0x01000000>;
+ label = "dtb-nand";
+ read-only;
+ };
+
+ partition@27000000 {
+ reg = <0x27000000 0x19000000>;
+ label = "reserved-nand";
+ };
+ };
+
+ board-control@3,0 {
+ compatible = "fsl,p1022ds-fpga", "fsl,fpga-ngpixis";
+ reg = <3 0 0x30>;
+ interrupt-parent = <&mpic>;
+ /*
+ * IRQ8 is generated if the "EVENT" switch is pressed
+ * and PX_CTL[EVESEL] is set to 00.
+ */
+ interrupts = <8 0 0 0>;
+ };
+};
+
+&board_soc {
+ i2c@3100 {
+ wm8776: codec@1a {
+ compatible = "wlf,wm8776";
+ reg = <0x1a>;
+ /*
+ * clock-frequency will be set by U-Boot if
+ * the clock is enabled.
+ */
+ };
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+
+ partition@0 {
+ label = "u-boot-spi";
+ reg = <0x00000000 0x00100000>;
+ read-only;
+ };
+ partition@100000 {
+ label = "kernel-spi";
+ reg = <0x00100000 0x00500000>;
+ read-only;
+ };
+ partition@600000 {
+ label = "dtb-spi";
+ reg = <0x00600000 0x00100000>;
+ read-only;
+ };
+ partition@700000 {
+ label = "file system-spi";
+ reg = <0x00700000 0x00900000>;
+ };
+ };
+ };
+
+ ssi@15000 {
+ fsl,mode = "i2s-slave";
+ codec-handle = <&wm8776>;
+ fsl,ssi-asynchronous;
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ usb@23000 {
+ status = "disabled";
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupts = <3 1 0 0>;
+ reg = <0x1>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <9 1 0 0>;
+ reg = <0x2>;
+ };
+ tbi-phy@2 {
+ device_type = "tbi-phy";
+ reg = <0x2>;
+ };
+ };
+
+ ethernet@b0000 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ ethernet@b1000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+};
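
The board-control@0,0 comment above describes the "indirect" pixis access mode: the register index is written through chip select 0 and the value is transferred through chip select 1. Below is a hedged Python sketch of that access pattern; write8/read8 and the cs0/cs1 base addresses are hypothetical placeholders, not a real kernel or U-Boot API.

# Sketch of the indirect pixis access pattern described above.  write8 and
# read8 stand in for whatever MMIO accessors the caller provides; they are
# hypothetical, as are the chip-select base addresses.
def pixis_read(write8, read8, cs0_base, cs1_base, reg_index):
    write8(cs0_base, reg_index)    # select the pixis register via CS0
    return read8(cs1_base)         # read its value back via CS1

def pixis_write(write8, cs0_base, cs1_base, reg_index, value):
    write8(cs0_base, reg_index)    # select the pixis register via CS0
    write8(cs1_base, value)        # write the new value via CS1
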
diff --git a/arch/powerpc/boot/dts/p1022ds_32b.dts b/arch/powerpc/boot/dts/p1022ds_32b.dts
new file mode 100644
index 000000000000..d96cae00a9e3
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1022ds_32b.dts
@@ -0,0 +1,103 @@
+/*
+ * P1022 DS 32-bit Physical Address Map Device Tree Source
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1022si-pre.dtsi"
+/ {
+ model = "fsl,P1022DS";
+ compatible = "fsl,P1022DS";
+
+ memory {
+ device_type = "memory";
+ };
+
+ board_lbc: lbc: localbus@ffe05000 {
+ ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
+ 0x1 0x0 0x0 0xe0000000 0x08000000
+ 0x2 0x0 0x0 0xff800000 0x00040000
+ 0x3 0x0 0x0 0xffdf0000 0x00008000>;
+ reg = <0x0 0xffe05000 0 0x1000>;
+ };
+
+ board_soc: soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ ranges = <0x2000000 0x0 0xe0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ reg = <0x0 0xffe09000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ ranges = <0x2000000 0x0 0xe0000000 0 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
+ reg = <0 0xffe0a000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci2: pcie@ffe0b000 {
+ ranges = <0x2000000 0x0 0xe0000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ reg = <0 0xffe0b000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "fsl/p1022si-post.dtsi"
+/include/ "p1022ds.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds_36b.dts b/arch/powerpc/boot/dts/p1022ds_36b.dts
new file mode 100644
index 000000000000..f7aacce40bf6
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1022ds_36b.dts
@@ -0,0 +1,103 @@
+/*
+ * P1022 DS 36-bit Physical Address Map Device Tree Source
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1022si-pre.dtsi"
+/ {
+ model = "fsl,P1022DS";
+ compatible = "fsl,P1022DS";
+
+ memory {
+ device_type = "memory";
+ };
+
+ board_lbc: lbc: localbus@fffe05000 {
+ ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
+ 0x1 0x0 0xf 0xe0000000 0x08000000
+ 0x2 0x0 0xf 0xff800000 0x00040000
+ 0x3 0x0 0xf 0xffdf0000 0x00008000>;
+ reg = <0xf 0xffe05000 0 0x1000>;
+ };
+
+ board_soc: soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ reg = <0xf 0xffe09000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>;
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci2: pcie@fffe0b000 {
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ reg = <0xf 0xffe0b000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "fsl/p1022si-post.dtsi"
+/include/ "p1022ds.dtsi"
diff --git a/arch/powerpc/boot/dts/p1025rdb.dtsi b/arch/powerpc/boot/dts/p1025rdb.dtsi
new file mode 100644
index 000000000000..cf3676fc714b
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025rdb.dtsi
@@ -0,0 +1,286 @@
+/*
+ * P1025 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 3.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00380000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 11MB for JFFS2 based Root file System */
+ reg = <0x00400000 0x00b00000>;
+ label = "NOR JFFS2 Root File System";
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x00f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1025-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x40000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 7MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00700000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1100000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x01100000 0x00f00000>;
+ label = "NAND Writable User area";
+ };
+ };
+
+};
+
+&soc {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <40000000>; /* input clock */
+
+ partition@u-boot {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "u-boot";
+ read-only;
+ };
+
+ partition@dtb {
+ /* 512KB for DTB Image */
+ reg = <0x00080000 0x00080000>;
+ label = "dtb";
+ };
+
+ partition@kernel {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "kernel";
+ };
+
+ partition@fs {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "file system";
+ };
+
+ partition@jffs-fs {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "file system jffs2";
+ };
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ /* USB2 is shared with the localbus, so it must be disabled
+ by default. We can't put 'status = "disabled";' here,
+ because U-Boot doesn't clear the status property when it
+ enables USB2. On the other hand, U-Boot does create the
+ node when it is missing, so the node is simply commented out.
+ usb@23000 {
+ phy_type = "ulpi";
+ };
+ */
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1>;
+ reg = <0x0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <2 1>;
+ reg = <0x1>;
+ };
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet0: ethernet@b0000 {
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi1>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ par_io@e0100 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xe0100 0x60>;
+ ranges = <0x0 0xe0100 0x60>;
+ device_type = "par_io";
+ num-ports = <3>;
+ pio1: ucc_pin@01 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
+ 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
+ 0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */
+ 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 */
+ 0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */
+ 0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */
+ 0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */
+ 0x0 0xc 0x1 0x0 0x2 0x0 /* ENET1_TXD3_SER1_TXD3 */
+ 0x0 0x6 0x2 0x0 0x2 0x0 /* ENET1_RXD0_SER1_RXD0 */
+ 0x0 0xa 0x2 0x0 0x2 0x0 /* ENET1_RXD1_SER1_RXD1 */
+ 0x0 0xe 0x2 0x0 0x2 0x0 /* ENET1_RXD2_SER1_RXD2 */
+ 0x0 0xf 0x2 0x0 0x2 0x0 /* ENET1_RXD3_SER1_RXD3 */
+ 0x0 0x5 0x1 0x0 0x2 0x0 /* ENET1_TX_EN_SER1_RTS_B */
+ 0x0 0xd 0x1 0x0 0x2 0x0 /* ENET1_TX_ER */
+ 0x0 0x4 0x2 0x0 0x2 0x0 /* ENET1_RX_DV_SER1_CTS_B */
+ 0x0 0x8 0x2 0x0 0x2 0x0 /* ENET1_RX_ER_SER1_CD_B */
+ 0x0 0x11 0x2 0x0 0x2 0x0 /* ENET1_CRS */
+ 0x0 0x10 0x2 0x0 0x2 0x0>; /* ENET1_COL */
+ };
+
+ pio2: ucc_pin@02 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
+ 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
+ 0x1 0xb 0x2 0x0 0x1 0x0 /* CLK13 */
+ 0x1 0x7 0x1 0x0 0x2 0x0 /* ENET5_TXD0_SER5_TXD0 */
+ 0x1 0xa 0x1 0x0 0x2 0x0 /* ENET5_TXD1_SER5_TXD1 */
+ 0x1 0x6 0x2 0x0 0x2 0x0 /* ENET5_RXD0_SER5_RXD0 */
+ 0x1 0x9 0x2 0x0 0x2 0x0 /* ENET5_RXD1_SER5_RXD1 */
+ 0x1 0x5 0x1 0x0 0x2 0x0 /* ENET5_TX_EN_SER5_RTS_B */
+ 0x1 0x4 0x2 0x0 0x2 0x0 /* ENET5_RX_DV_SER5_CTS_B */
+ 0x1 0x8 0x2 0x0 0x2 0x0>; /* ENET5_RX_ER_SER5_CD_B */
+ };
+ };
+};
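
The NOR partition map in the stub above is meant to tile the whole 16 MiB flash (reg size 0x1000000) with no gaps or overlap; a quick Python consistency check of the offsets and sizes listed in nor@0,0:

# Consistency check of the NOR partition map above (offset, size) pairs.
parts = [
    (0x000000, 0x040000),   # Vitesse-7385 firmware
    (0x040000, 0x040000),   # DTB
    (0x080000, 0x380000),   # kernel
    (0x400000, 0xb00000),   # JFFS2 root file system
    (0xf00000, 0x100000),   # U-Boot image + environment
]
end = 0
for off, size in parts:
    assert off == end, "gap or overlap before offset 0x%x" % off
    end = off + size
assert end == 0x1000000     # exactly the 16 MiB NOR device
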
diff --git a/arch/powerpc/boot/dts/p1025rdb_32b.dts b/arch/powerpc/boot/dts/p1025rdb_32b.dts
new file mode 100644
index 000000000000..ac5729c14eda
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025rdb_32b.dts
@@ -0,0 +1,135 @@
+/*
+ * P1025 RDB Device Tree Source (32-bit address map)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1021si-pre.dtsi"
+/ {
+ model = "fsl,P1025RDB";
+ compatible = "fsl,P1025RDB";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ /* NOR, NAND Flashes */
+ ranges = <0x0 0x0 0x0 0xef000000 0x01000000
+ 0x1 0x0 0x0 0xff800000 0x00040000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ ranges = <0x2000000 0x0 0xe0000000 0 0xe0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ reg = <0 0xffe09000 0 0x1000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0 0xe0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ qe: qe@ffe80000 {
+ ranges = <0x0 0x0 0xffe80000 0x40000>;
+ reg = <0 0xffe80000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+ status = "disabled"; /* no firmware loaded */
+
+ enet3: ucc@2000 {
+ device_type = "network";
+ compatible = "ucc_geth";
+ rx-clock-name = "clk12";
+ tx-clock-name = "clk9";
+ pio-handle = <&pio1>;
+ phy-handle = <&qe_phy0>;
+ phy-connection-type = "mii";
+ };
+
+ mdio@2120 {
+ qe_phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <4 1 0 0>;
+ reg = <0x6>;
+ device_type = "ethernet-phy";
+ };
+ qe_phy1: ethernet-phy@03 {
+ interrupt-parent = <&mpic>;
+ interrupts = <5 1 0 0>;
+ reg = <0x3>;
+ device_type = "ethernet-phy";
+ };
+ tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet4: ucc@2400 {
+ device_type = "network";
+ compatible = "ucc_geth";
+ rx-clock-name = "none";
+ tx-clock-name = "clk13";
+ pio-handle = <&pio2>;
+ phy-handle = <&qe_phy1>;
+ phy-connection-type = "rmii";
+ };
+ };
+};
+
+/include/ "p1025rdb.dtsi"
+/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1025rdb_36b.dts b/arch/powerpc/boot/dts/p1025rdb_36b.dts
new file mode 100644
index 000000000000..4ce4bfa0eda4
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025rdb_36b.dts
@@ -0,0 +1,88 @@
+/*
+ * P1025 RDB Device Tree Source (36-bit address map)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1021si-pre.dtsi"
+/ {
+ model = "fsl,P1025RDB";
+ compatible = "fsl,P1025RDB";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0 0x1000>;
+
+ /* NOR, NAND Flashes */
+ ranges = <0x0 0x0 0xf 0xef000000 0x01000000
+ 0x1 0x0 0xf 0xff800000 0x00040000>;
+ };
+
+ soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xe 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1025rdb.dtsi"
+/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020ds.dtsi b/arch/powerpc/boot/dts/p2020ds.dtsi
index c1cf6cef4dd6..d3b939c573b0 100644
--- a/arch/powerpc/boot/dts/p2020ds.dtsi
+++ b/arch/powerpc/boot/dts/p2020ds.dtsi
@@ -1,7 +1,7 @@
/*
* P2020DS Device Tree Source stub (no addresses or top-level ranges)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -134,6 +134,7 @@
&board_soc {
usb@22000 {
phy_type = "ulpi";
+ dr_mode = "host";
};
mdio@24520 {
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc.dtsi b/arch/powerpc/boot/dts/p2020rdb-pc.dtsi
new file mode 100644
index 000000000000..c21d1c7d16cd
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb-pc.dtsi
@@ -0,0 +1,241 @@
+/*
+ * P2020 RDB-PC Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 3.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00380000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 11MB for JFFS2 based Root file System */
+ reg = <0x00400000 0x00b00000>;
+ label = "NOR JFFS2 Root File System";
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x00f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p2020-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x40000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 7MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00700000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1100000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x01100000 0x00f00000>;
+ label = "NAND Writable User area";
+ };
+ };
+
+ L2switch@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "vitesse-7385";
+ reg = <0x2 0x0 0x20000>;
+ };
+
+ cpld@3,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cpld";
+ reg = <0x3 0x0 0x20000>;
+ read-only;
+ };
+};
+
+&soc {
+ i2c@3000 {
+ rtc@68 {
+ compatible = "pericom,pt7c4338";
+ reg = <0x68>;
+ };
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,m25p80";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+
+ partition@0 {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "SPI U-Boot Image";
+ read-only;
+ };
+
+ partition@80000 {
+ /* 512KB for DTB Image */
+ reg = <0x00080000 0x00080000>;
+ label = "SPI DTB Image";
+ };
+
+ partition@100000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "SPI Linux Kernel Image";
+ };
+
+ partition@500000 {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "SPI Compressed RFS Image";
+ };
+
+ partition@900000 {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "SPI JFFS2 RFS";
+ };
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@0 {
+ interrupts = <3 1 0 0>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <2 1 0 0>;
+ reg = <0x1>;
+ };
+ };
+
+ mdio@25520 {
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ status = "disabled";
+ };
+
+ ptp_clock@24e00 {
+ fsl,tclk-period = <5>;
+ fsl,tmr-prsc = <200>;
+ fsl,tmr-add = <0xCCCCCCCD>;
+ fsl,tmr-fiper1 = <0x3B9AC9FB>;
+ fsl,tmr-fiper2 = <0x0001869B>;
+ fsl,max-adj = <249999999>;
+ };
+
+ enet0: ethernet@24000 {
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@25000 {
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy0>;
+ phy-connection-type = "sgmii";
+ };
+
+ enet2: ethernet@26000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+};
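
The ptp_clock values above are consistent with the usual eTSEC 1588 timer relations if one assumes a 250 MHz reference clock driving a nominal 200 MHz timer (tclk-period = 5 ns); the reference frequency is an assumption here, not something stated in the patch. A Python check of those relations:

# Sanity-check the ptp_clock properties above, assuming a 250 MHz reference
# clock and the relations described in the eTSEC 1588 timer binding.
ref_hz, tclk_ns = 250_000_000, 5
nominal_hz = 10**9 // tclk_ns                              # 200 MHz timer
tmr_add = (2**32 * nominal_hz + ref_hz // 2) // ref_hz     # frequency compensation
fiper1  = 10**9 - tclk_ns                                  # 1 Hz pulse period
fiper2  = 100_000 - tclk_ns                                # 10 kHz pulse period
max_adj = 10**9 * (ref_hz - nominal_hz) // nominal_hz - 1  # adjustment limit, ppb

assert tmr_add == 0xCCCCCCCD
assert fiper1  == 0x3B9AC9FB
assert fiper2  == 0x0001869B
assert max_adj == 249999999
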
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts b/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
new file mode 100644
index 000000000000..852e5b27485d
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
@@ -0,0 +1,96 @@
+/*
+ * P2020 RDB-PC 32Bit Physical Address Map Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p2020si-pre.dtsi"
+
+/ {
+ model = "fsl,P2020RDB";
+ compatible = "fsl,P2020RDB-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ /* NOR and NAND Flashes */
+ ranges = <0x0 0x0 0x0 0xef000000 0x01000000
+ 0x1 0x0 0x0 0xff800000 0x00040000
+ 0x2 0x0 0x0 0xffb00000 0x00020000
+ 0x3 0x0 0x0 0xffa00000 0x00020000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe08000 {
+ reg = <0 0xffe08000 0 0x1000>;
+ status = "disabled";
+ };
+
+ pci1: pcie@ffe09000 {
+ reg = <0 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci2: pcie@ffe0a000 {
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p2020rdb-pc.dtsi"
+/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts b/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
new file mode 100644
index 000000000000..b5a56ca51cf7
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
@@ -0,0 +1,96 @@
+/*
+ * P2020 RDB-PC 36Bit Physical Address Map Device Tree Source
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p2020si-pre.dtsi"
+
+/ {
+ model = "fsl,P2020RDB";
+ compatible = "fsl,P2020RDB-PC";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0 0x1000>;
+
+ /* NOR and NAND Flashes */
+ ranges = <0x0 0x0 0xf 0xef000000 0x01000000
+ 0x1 0x0 0xf 0xff800000 0x00040000
+ 0x2 0x0 0xf 0xffb00000 0x00020000
+ 0x3 0x0 0xf 0xffa00000 0x00020000>;
+ };
+
+ soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe08000 {
+ reg = <0xf 0xffe08000 0 0x1000>;
+ status = "disabled";
+ };
+
+ pci1: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci2: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p2020rdb-pc.dtsi"
+/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index 26759a591712..153bc76bb48e 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -1,7 +1,7 @@
/*
* P2020 RDB Device Tree Source
*
- * Copyright 2009-2011 Freescale Semiconductor Inc.
+ * Copyright 2009-2012 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -34,7 +34,7 @@
/* NOR and NAND Flashes */
ranges = <0x0 0x0 0x0 0xef000000 0x01000000
- 0x1 0x0 0x0 0xffa00000 0x00040000
+ 0x1 0x0 0x0 0xff800000 0x00040000
0x2 0x0 0x0 0xffb00000 0x00020000>;
nor@0,0 {
@@ -157,7 +157,7 @@
#size-cells = <1>;
compatible = "spansion,s25sl12801";
reg = <0>;
- spi-max-frequency = <50000000>;
+ spi-max-frequency = <40000000>;
partition@0 {
/* 512KB for u-boot Bootloader Image */
@@ -197,6 +197,7 @@
usb@22000 {
phy_type = "ulpi";
+ dr_mode = "host";
};
mdio@24520 {
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index f090e6d2907e..6761c746048d 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -144,6 +144,7 @@ tmp=$tmpdir/zImage.$$.o
ksection=.kernel:vmlinux.strip
isection=.kernel:initrd
link_address='0x400000'
+make_space=y
case "$platform" in
pseries)
@@ -210,6 +211,7 @@ ps3)
ksection=.kernel:vmlinux.bin
isection=.kernel:initrd
link_address=''
+ make_space=n
pie=
;;
ep88xc|ep405|ep8248e)
@@ -278,17 +280,19 @@ else
rm -f $vmz.$$
fi
-# Round the size to next higher MB limit
-round_size=$(((strip_size + 0xfffff) & 0xfff00000))
+if [ "$make_space" = "y" ]; then
+ # Round the size to next higher MB limit
+ round_size=$(((strip_size + 0xfffff) & 0xfff00000))
-round_size=0x$(printf "%x" $round_size)
-link_addr=$(printf "%d" $link_address)
+ round_size=0x$(printf "%x" $round_size)
+ link_addr=$(printf "%d" $link_address)
-if [ $link_addr -lt $strip_size ]; then
- echo "INFO: Uncompressed kernel (size 0x$(printf "%x\n" $strip_size))" \
- "overlaps the address of the wrapper($link_address)"
- echo "INFO: Fixing the link_address of wrapper to ($round_size)"
- link_address=$round_size
+ if [ $link_addr -lt $strip_size ]; then
+ echo "INFO: Uncompressed kernel (size 0x$(printf "%x\n" $strip_size))" \
+ "overlaps the address of the wrapper($link_address)"
+ echo "INFO: Fixing the link_address of wrapper to ($round_size)"
+ link_address=$round_size
+ fi
fi
vmz="$vmz$gzip"
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
new file mode 100644
index 000000000000..f8c51a4ab995
--- /dev/null
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -0,0 +1,257 @@
+CONFIG_PPC_85xx=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_GE_IMP3A=y
+CONFIG_QUICC_ENGINE=y
+CONFIG_QE_GPIO=y
+CONFIG_CPM2=y
+CONFIG_HIGHMEM=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HZ_1000=y
+CONFIG_PREEMPT=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_MATH_EMULATION=y
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_FORCE_MAX_ZONEORDER=17
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_LOAD_CIS is not set
+CONFIG_YENTA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_NET_PKTGEN=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSL_ELBC=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
+CONFIG_DS1682=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_SIL24=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_FS_ENET=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_QE=m
+CONFIG_NVRAM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_CPM=m
+CONFIG_I2C_MPC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GE_FPGA=y
+CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_LM92=y
+CONFIG_WATCHDOG=y
+CONFIG_GEF_WDT=y
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_FSL=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
+CONFIG_USB_STORAGE=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_MPC85XX=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_INTF_PROC is not set
+CONFIG_RTC_DRV_RX8581=y
+CONFIG_DMADEVICES=y
+CONFIG_FSL_DMA=y
+# CONFIG_NET_DMA is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+CONFIG_NTFS_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_T10DIF=y
+CONFIG_LIBCRC32C=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index d41857a5152d..da731c2fe984 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -131,6 +131,7 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GE_FPGA=y
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM92=y
CONFIG_WATCHDOG=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index 38303ec11bcd..2149360a1e62 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -132,6 +132,7 @@ CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GE_FPGA=y
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM92=y
CONFIG_WATCHDOG=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 98533973d20f..af2e8e1edba6 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -183,6 +183,8 @@ CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GE_FPGA=y
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM92=y
CONFIG_WATCHDOG=y
diff --git a/arch/powerpc/configs/iseries_defconfig b/arch/powerpc/configs/iseries_defconfig
deleted file mode 100644
index 27c46d679968..000000000000
--- a/arch/powerpc/configs/iseries_defconfig
+++ /dev/null
@@ -1,236 +0,0 @@
-CONFIG_PPC64=y
-CONFIG_SMP=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_AUDITSYSCALL=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_PPC_PSERIES is not set
-CONFIG_LPARCFG=y
-CONFIG_PPC_ISERIES=y
-CONFIG_VIODASD=y
-CONFIG_VIOCD=m
-CONFIG_VIOTAPE=m
-# CONFIG_PPC_PMAC is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IRQ_ALL_CPUS=y
-# CONFIG_MIGRATION is not set
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_NET_IPIP=y
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_EVENTS=y
-# CONFIG_NF_CT_PROTO_SCTP is not set
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_TPROXY=m
-CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_OWNER=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_PROC_DEVICETREE=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-CONFIG_SCSI_IBMVSCSI=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_MD_RAID10=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_BLK_DEV_DM=y
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_E100=y
-CONFIG_ACENIC=m
-CONFIG_E1000=m
-CONFIG_ISERIES_VETH=y
-CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
-CONFIG_NETCONSOLE=y
-CONFIG_NETPOLL_TRAP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ICOM=m
-# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
-CONFIG_RAW_DRIVER=y
-# CONFIG_HWMON is not set
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_REISERFS_FS=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_GFS2_FS=m
-CONFIG_AUTOFS_FS=m
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_CIFS=m
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_DLM=m
-CONFIG_CRC_T10DIF=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_DEBUG_STACK_USAGE=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 2a1320fb2723..6640a35bebb7 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -1,8 +1,8 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -13,15 +13,12 @@ CONFIG_PPC_EFIKA=y
CONFIG_PPC_LITE5200=y
CONFIG_PPC_MEDIA5200=y
CONFIG_PPC_MPC5200_BUGFIX=y
-CONFIG_PPC_MPC5200_GPIO=y
CONFIG_PPC_MPC5200_LPBFIFO=m
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_BESTCOMM=y
CONFIG_SIMPLE_GPIO=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -36,23 +33,20 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_RAM=y
CONFIG_MTD_ROM=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PLATRAM=y
CONFIG_MTD_UBI=m
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI_TGT=y
CONFIG_BLK_DEV_SD=y
@@ -61,11 +55,10 @@ CONFIG_ATA=y
CONFIG_PATA_MPC52xx=y
CONFIG_PATA_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_LXT_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FEC_MPC52xx=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_AMD_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_FIXED_PHY=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -80,11 +73,17 @@ CONFIG_SPI_GPIO=m
CONFIG_SPI_MPC52xx=m
CONFIG_SPI_MPC52xx_PSC=m
CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_LM80=y
+CONFIG_SENSORS_LM87=m
CONFIG_WATCHDOG=y
+CONFIG_MFD_SM501=m
CONFIG_DRM=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
+CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_RADEON=y
+CONFIG_FB_SM501=m
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -124,10 +123,11 @@ CONFIG_USB_STORAGE=y
CONFIG_NEW_LEDS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_DS1374=y
+CONFIG_RTC_DRV_PCF8563=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
@@ -145,5 +145,4 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index f37a2ab48881..5fb0c8a94811 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -1,4 +1,5 @@
CONFIG_PPC_85xx=y
+CONFIG_PHYS_64BIT=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index abdcd317cda7..fb51bc90edd2 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -1,4 +1,5 @@
CONFIG_PPC_85xx=y
+CONFIG_PHYS_64BIT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8
CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 2156e077859b..1acf65026773 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -24,10 +24,6 @@ CONFIG_PPC_SPLPAR=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_DTL=y
-CONFIG_PPC_ISERIES=y
-CONFIG_VIODASD=y
-CONFIG_VIOCD=m
-CONFIG_VIOTAPE=m
CONFIG_PPC_MAPLE=y
CONFIG_PPC_PASEMI=y
CONFIG_PPC_PASEMI_IOMMU=y
@@ -259,7 +255,6 @@ CONFIG_PASEMI_MAC=y
CONFIG_MLX4_EN=m
CONFIG_QLGE=m
CONFIG_BE2NET=m
-CONFIG_ISERIES_VETH=m
CONFIG_PPP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h
index 5ab0b71531be..9d92ba04b033 100644
--- a/arch/powerpc/include/asm/abs_addr.h
+++ b/arch/powerpc/include/asm/abs_addr.h
@@ -17,7 +17,6 @@
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
-#include <asm/firmware.h>
struct mschunks_map {
unsigned long num_chunks;
@@ -46,30 +45,12 @@ static inline unsigned long addr_to_chunk(unsigned long addr)
static inline unsigned long phys_to_abs(unsigned long pa)
{
- unsigned long chunk;
-
- /* This is a no-op on non-iSeries */
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return pa;
-
- chunk = addr_to_chunk(pa);
-
- if (chunk < mschunks_map.num_chunks)
- chunk = mschunks_map.mapping[chunk];
-
- return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK);
+ return pa;
}
/* Convenience macros */
#define virt_to_abs(va) phys_to_abs(__pa(va))
#define abs_to_virt(aa) __va(aa)
-/*
- * Converts Virtual Address to Real Address for
- * Legacy iSeries Hypervisor calls
- */
-#define iseries_hv_addr(virtaddr) \
- (0x8000000000000000UL | virt_to_abs(virtaddr))
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ABS_ADDR_H */
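
With the iSeries chunk remapping removed, phys_to_abs() is now an identity function and the convenience macros reduce to plain __pa()/__va() conversions. A minimal sketch of the remaining behaviour, using a hypothetical caller that is not part of this patch:

#include <asm/abs_addr.h>

/* Hypothetical round-trip: virt_to_abs() is now just __pa(), abs_to_virt() is __va() */
static void *example_abs_roundtrip(void *va)
{
	unsigned long aa = virt_to_abs(va);	/* no chunk translation any more */

	return abs_to_virt(aa);			/* yields the original pointer */
}
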
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 02e41b53488d..14174e838ad9 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -212,6 +212,36 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return t;
}
+/**
+ * atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+static __inline__ int atomic_inc_not_zero(atomic_t *v)
+{
+ int t1, t2;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
+ cmpwi 0,%0,0\n\
+ beq- 2f\n\
+ addic %1,%0,1\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:"
+ : "=&r" (t1), "=&r" (t2)
+ : "r" (&v->counter)
+ : "cc", "xer", "memory");
+
+ return t1;
+}
+#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
@@ -467,7 +497,34 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
return t != u;
}
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+{
+ long t1, t2;
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
+ cmpdi 0,%0,0\n\
+ beq- 2f\n\
+ addic %1,%0,1\n\
+ stdcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ "\n\
+2:"
+ : "=&r" (t1), "=&r" (t2)
+ : "r" (&v->counter)
+ : "cc", "xer", "memory");
+
+ return t1;
+}
#endif /* __powerpc64__ */
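
The new atomic_inc_not_zero()/atomic64_inc_not_zero() primitives implement the usual "take a reference unless the object is already dying" pattern in a single lwarx/stwcx. loop. A hedged usage sketch with a hypothetical refcounted object (not from this patch):

#include <linux/atomic.h>
#include <linux/types.h>

struct example_obj {
	atomic_t refcount;	/* drops to 0 while the object is torn down */
};

/* Try to take a reference; fails once the count has already reached zero. */
static bool example_obj_get(struct example_obj *obj)
{
	return atomic_inc_not_zero(&obj->refcount) != 0;
}
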
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index ad55a1ccb9fb..b9219e99bd2a 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -390,6 +390,10 @@ extern const char *powerpc_base_platform;
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_DEBUG_LVL_EXC)
+#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DEBUG_LVL_EXC)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
@@ -442,7 +446,7 @@ extern const char *powerpc_base_platform;
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
-#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500 | CPU_FTRS_A2)
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500 | CPU_FTRS_A2)
#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
@@ -483,7 +487,7 @@ enum {
#endif
#ifdef CONFIG_E500
CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
- CPU_FTRS_E5500 |
+ CPU_FTRS_E5500 | CPU_FTRS_E6500 |
#endif
0,
};
@@ -491,7 +495,7 @@ enum {
#ifdef __powerpc64__
#ifdef CONFIG_PPC_BOOK3E
-#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500 & CPU_FTRS_A2)
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500 & CPU_FTRS_A2)
#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
@@ -528,7 +532,7 @@ enum {
#endif
#ifdef CONFIG_E500
CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
- CPU_FTRS_E5500 &
+ CPU_FTRS_E5500 & CPU_FTRS_E6500 &
#endif
CPU_FTRS_POSSIBLE,
};
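
CPU_FTRS_POSSIBLE and CPU_FTRS_ALWAYS are the bitwise OR and AND of every feature mask that can be built into the kernel, so adding CPU_FTRS_E6500 to both keeps compile-time feature tests honest. A simplified sketch of how such masks are typically consumed (illustrative only, not the actual cputable.h helper):

#include <asm/cputable.h>

/* Illustrative only: features in ALWAYS fold to 1, ones outside POSSIBLE to 0 */
static inline int example_cpu_has_feature(unsigned long feature)
{
	if (CPU_FTRS_ALWAYS & feature)
		return 1;
	if (!(CPU_FTRS_POSSIBLE & feature))
		return 0;
	return !!(cur_cpu_spec->cpu_features & feature);
}
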
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index d57c08acedfc..63d5ca49cece 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -31,6 +31,9 @@ struct dev_archdata {
#ifdef CONFIG_SWIOTLB
dma_addr_t max_direct_dma_addr;
#endif
+#ifdef CONFIG_EEH
+ struct eeh_dev *edev;
+#endif
};
struct pdev_archdata {
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index a7e06e25c708..adadb9943610 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -34,8 +34,6 @@
/* Doesn't really apply... */
#define MAX_DMA_ADDRESS (~0UL)
-#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
-
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
@@ -354,7 +352,5 @@ extern int isa_dma_bridge_buggy;
#define isa_dma_bridge_buggy (0)
#endif
-#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DMA_H */
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 66ea9b8b95c5..d60f99814ffb 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -1,6 +1,6 @@
/*
- * eeh.h
* Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
+ * Copyright 2001-2012 IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,44 +31,105 @@ struct device_node;
#ifdef CONFIG_EEH
-extern int eeh_subsystem_enabled;
+/*
+ * This struct is used to trace the EEH state of the associated
+ * PCI device node or PCI device.  In the future it might also
+ * represent a PE, so that EEH devices can form a tree of their
+ * own alongside the existing tree of PCI buses and PCI
+ * devices.
+ */
+#define EEH_MODE_SUPPORTED (1<<0) /* EEH supported on the device */
+#define EEH_MODE_NOCHECK (1<<1) /* EEH check should be skipped */
+#define EEH_MODE_ISOLATED (1<<2) /* The device has been isolated */
+#define EEH_MODE_RECOVERING (1<<3) /* Recovering the device */
+#define EEH_MODE_IRQ_DISABLED (1<<4) /* Interrupt disabled */
+
+struct eeh_dev {
+ int mode; /* EEH mode */
+ int class_code; /* Class code of the device */
+ int config_addr; /* Config address */
+ int pe_config_addr; /* PE config address */
+ int check_count; /* Times the error has been ignored */
+ int freeze_count; /* Times the device has frozen up */
+ int false_positives; /* Times of reported #ff's */
+ u32 config_space[16]; /* Saved PCI config space */
+ struct pci_controller *phb; /* Associated PHB */
+ struct device_node *dn; /* Associated device node */
+ struct pci_dev *pdev; /* Associated PCI device */
+};
+
+static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
+{
+ return edev->dn;
+}
+
+static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
+{
+ return edev->pdev;
+}
-/* Values for eeh_mode bits in device_node */
-#define EEH_MODE_SUPPORTED (1<<0)
-#define EEH_MODE_NOCHECK (1<<1)
-#define EEH_MODE_ISOLATED (1<<2)
-#define EEH_MODE_RECOVERING (1<<3)
-#define EEH_MODE_IRQ_DISABLED (1<<4)
+/*
+ * This struct is used to trace the registered EEH operation
+ * callback functions.  These callbacks are heavily platform
+ * dependent, so each platform must register its own EEH
+ * operation callbacks before performing any further EEH
+ * operations.
+ */
+#define EEH_OPT_DISABLE 0 /* EEH disable */
+#define EEH_OPT_ENABLE 1 /* EEH enable */
+#define EEH_OPT_THAW_MMIO 2 /* MMIO enable */
+#define EEH_OPT_THAW_DMA 3 /* DMA enable */
+#define EEH_STATE_UNAVAILABLE (1 << 0) /* State unavailable */
+#define EEH_STATE_NOT_SUPPORT (1 << 1) /* EEH not supported */
+#define EEH_STATE_RESET_ACTIVE (1 << 2) /* Active reset */
+#define EEH_STATE_MMIO_ACTIVE (1 << 3) /* Active MMIO */
+#define EEH_STATE_DMA_ACTIVE (1 << 4) /* Active DMA */
+#define EEH_STATE_MMIO_ENABLED (1 << 5) /* MMIO enabled */
+#define EEH_STATE_DMA_ENABLED (1 << 6) /* DMA enabled */
+#define EEH_RESET_DEACTIVATE 0 /* Deactivate the PE reset */
+#define EEH_RESET_HOT 1 /* Hot reset */
+#define EEH_RESET_FUNDAMENTAL 3 /* Fundamental reset */
+#define EEH_LOG_TEMP 1 /* EEH temporary error log */
+#define EEH_LOG_PERM 2 /* EEH permanent error log */
+
+struct eeh_ops {
+ char *name;
+ int (*init)(void);
+ int (*set_option)(struct device_node *dn, int option);
+ int (*get_pe_addr)(struct device_node *dn);
+ int (*get_state)(struct device_node *dn, int *state);
+ int (*reset)(struct device_node *dn, int option);
+ int (*wait_state)(struct device_node *dn, int max_wait);
+ int (*get_log)(struct device_node *dn, int severity, char *drv_log, unsigned long len);
+ int (*configure_bridge)(struct device_node *dn);
+ int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
+ int (*write_config)(struct device_node *dn, int where, int size, u32 val);
+};
+
+extern struct eeh_ops *eeh_ops;
+extern int eeh_subsystem_enabled;
-/* Max number of EEH freezes allowed before we consider the device
- * to be permanently disabled. */
+/*
+ * Max number of EEH freezes allowed before we consider the device
+ * to be permanently disabled.
+ */
#define EEH_MAX_ALLOWED_FREEZES 5
+void * __devinit eeh_dev_init(struct device_node *dn, void *data);
+void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb);
+void __init eeh_dev_phb_init(void);
void __init eeh_init(void);
+#ifdef CONFIG_PPC_PSERIES
+int __init eeh_pseries_init(void);
+#endif
+int __init eeh_ops_register(struct eeh_ops *ops);
+int __exit eeh_ops_unregister(const char *name);
unsigned long eeh_check_failure(const volatile void __iomem *token,
unsigned long val);
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
void __init pci_addr_cache_build(void);
-
-/**
- * eeh_add_device_early
- * eeh_add_device_late
- *
- * Perform eeh initialization for devices added after boot.
- * Call eeh_add_device_early before doing any i/o to the
- * device (including config space i/o). Call eeh_add_device_late
- * to finish the eeh setup for this device.
- */
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
-
-/**
- * eeh_remove_device_recursive - undo EEH for device & children.
- * @dev: pci device to be removed
- *
- * As above, this removes the device; it also removes child
- * pci devices as well.
- */
void eeh_remove_bus_device(struct pci_dev *);
/**
@@ -87,8 +148,25 @@ void eeh_remove_bus_device(struct pci_dev *);
#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8))
#else /* !CONFIG_EEH */
+
+static inline void *eeh_dev_init(struct device_node *dn, void *data)
+{
+ return NULL;
+}
+
+static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
+
+static inline void eeh_dev_phb_init(void) { }
+
static inline void eeh_init(void) { }
+#ifdef CONFIG_PPC_PSERIES
+static inline int eeh_pseries_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_PSERIES */
+
static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
return val;
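
The new eeh_ops table is how a platform plugs its EEH backend into the common code, as the comment in this header notes. A hedged sketch of such a registration, with hypothetical names and only a couple of callbacks filled in:

#include <linux/init.h>
#include <asm/eeh.h>

static int example_eeh_init(void)
{
	return 0;
}

static int example_eeh_set_option(struct device_node *dn, int option)
{
	/* honour EEH_OPT_DISABLE/ENABLE/THAW_MMIO/THAW_DMA here */
	return 0;
}

static struct eeh_ops example_eeh_ops = {
	.name		= "example",
	.init		= example_eeh_init,
	.set_option	= example_eeh_set_option,
};

static int __init example_eeh_setup(void)
{
	return eeh_ops_register(&example_eeh_ops);
}
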
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
index cc3cb04539ac..c68b012b7797 100644
--- a/arch/powerpc/include/asm/eeh_event.h
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -1,6 +1,4 @@
/*
- * eeh_event.h
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -22,32 +20,19 @@
#define ASM_POWERPC_EEH_EVENT_H
#ifdef __KERNEL__
-/** EEH event -- structure holding pci controller data that describes
- * a change in the isolation status of a PCI slot. A pointer
- * to this struct is passed as the data pointer in a notify callback.
+/*
+ * Structure holding PCI controller data that describes a
+ * change in the isolation status of a PCI slot.  A pointer
+ * to this struct is passed as the data pointer in a notify
+ * callback.
*/
struct eeh_event {
- struct list_head list;
- struct device_node *dn; /* struct device node */
- struct pci_dev *dev; /* affected device */
+ struct list_head list; /* to form event queue */
+ struct eeh_dev *edev; /* EEH device */
};
-/**
- * eeh_send_failure_event - generate a PCI error event
- * @dev pci device
- *
- * This routine builds a PCI error event which will be delivered
- * to all listeners on the eeh_notifier_chain.
- *
- * This routine can be called within an interrupt context;
- * the actual event will be delivered in a normal context
- * (from a workqueue).
- */
-int eeh_send_failure_event (struct device_node *dn,
- struct pci_dev *dev);
-
-/* Main recovery function */
-struct pci_dn * handle_eeh_events (struct eeh_event *);
+int eeh_send_failure_event(struct eeh_dev *edev);
+struct eeh_dev *handle_eeh_events(struct eeh_event *);
#endif /* __KERNEL__ */
#endif /* ASM_POWERPC_EEH_EVENT_H */
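
eeh_send_failure_event() now takes the eeh_dev directly instead of a device node/pci_dev pair. A minimal sketch of a detection path queueing an event, with a hypothetical caller:

#include <asm/eeh.h>
#include <asm/eeh_event.h>

/*
 * Hypothetical detection path: safe from interrupt context, since the
 * actual event is delivered later from a normal context.
 */
static void example_report_failure(struct eeh_dev *edev)
{
	eeh_send_failure_event(edev);
}
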
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
index a9e1f4f796f6..dc7d48e3ea90 100644
--- a/arch/powerpc/include/asm/ehv_pic.h
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -25,7 +25,7 @@
struct ehv_pic {
/* The remapper for this EHV_PIC */
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8057f4f6980f..548da3aa0a30 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -232,23 +232,30 @@ label##_hv: \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_HV, KVMTEST, vec)
-#define __SOFTEN_TEST(h) \
+/* This associates vector numbers with bits in paca->irq_happened */
+#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
+#define SOFTEN_VALUE_0x502 PACA_IRQ_EE
+#define SOFTEN_VALUE_0x900 PACA_IRQ_DEC
+#define SOFTEN_VALUE_0x982 PACA_IRQ_DEC
+
+#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
cmpwi r10,0; \
+ li r10,SOFTEN_VALUE_##vec; \
beq masked_##h##interrupt
-#define _SOFTEN_TEST(h) __SOFTEN_TEST(h)
+#define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec)
#define SOFTEN_TEST_PR(vec) \
KVMTEST_PR(vec); \
- _SOFTEN_TEST(EXC_STD)
+ _SOFTEN_TEST(EXC_STD, vec)
#define SOFTEN_TEST_HV(vec) \
KVMTEST(vec); \
- _SOFTEN_TEST(EXC_HV)
+ _SOFTEN_TEST(EXC_HV, vec)
#define SOFTEN_TEST_HV_201(vec) \
KVMTEST(vec); \
- _SOFTEN_TEST(EXC_STD)
+ _SOFTEN_TEST(EXC_STD, vec)
#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
HMT_MEDIUM; \
@@ -272,73 +279,55 @@ label##_hv: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_TEST_HV)
-#ifdef CONFIG_PPC_ISERIES
-#define DISABLE_INTS \
- li r11,0; \
- stb r11,PACASOFTIRQEN(r13); \
-BEGIN_FW_FTR_SECTION; \
- stb r11,PACAHARDIRQEN(r13); \
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
- TRACE_DISABLE_INTS; \
-BEGIN_FW_FTR_SECTION; \
- mfmsr r10; \
- ori r10,r10,MSR_EE; \
- mtmsrd r10,1; \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#else
-#define DISABLE_INTS \
- li r11,0; \
- stb r11,PACASOFTIRQEN(r13); \
- stb r11,PACAHARDIRQEN(r13); \
- TRACE_DISABLE_INTS
-#endif /* CONFIG_PPC_ISERIES */
+/*
+ * Our exception common code can be passed various "additions"
+ * to specify the behaviour of interrupts, whether to kick the
+ * runlatch, etc...
+ */
+
+/* Exception addition: Hard disable interrupts */
+#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
+/* Exception addition: Keep interrupt state */
#define ENABLE_INTS \
+ ld r11,PACAKMSR(r13); \
ld r12,_MSR(r1); \
- mfmsr r11; \
rlwimi r11,r12,0,MSR_EE; \
mtmsrd r11,1
-#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- DISABLE_INTS; \
- bl .save_nvgprs; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except
+#define ADD_NVGPRS \
+ bl .save_nvgprs
+
+#define RUNLATCH_ON \
+BEGIN_FTR_SECTION \
+ clrrdi r3,r1,THREAD_SHIFT; \
+ ld r4,TI_LOCAL_FLAGS(r3); \
+ andi. r0,r4,_TLF_RUNLATCH; \
+ beql ppc64_runlatch_on_trampoline; \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
+#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions) \
+ .align 7; \
+ .globl label##_common; \
+label##_common: \
+ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ additions; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr; \
+ b ret
+
+#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
+ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except, \
+ ADD_NVGPRS;DISABLE_INTS)
/*
* Like STD_EXCEPTION_COMMON, but for exceptions that can occur
- * in the idle task and therefore need the special idle handling.
+ * in the idle task and therefore need the special idle handling
+ * (finish nap and runlatch)
*/
-#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- FINISH_NAP; \
- DISABLE_INTS; \
- bl .save_nvgprs; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except
-
-#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
- .align 7; \
- .globl label##_common; \
-label##_common: \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
- FINISH_NAP; \
- DISABLE_INTS; \
-BEGIN_FTR_SECTION \
- bl .ppc64_runlatch_on; \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL) \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b .ret_from_except_lite
+#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
+ EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
+ FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
new file mode 100644
index 000000000000..88dbf9659185
--- /dev/null
+++ b/arch/powerpc/include/asm/fadump.h
@@ -0,0 +1,218 @@
+/*
+ * Firmware Assisted dump header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2011 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __PPC64_FA_DUMP_H__
+#define __PPC64_FA_DUMP_H__
+
+#ifdef CONFIG_FA_DUMP
+
+/*
+ * The RMA region will be saved for later dumping when the kernel crashes.
+ * RMA is the Real Mode Area, the first block of logical memory owned by
+ * the logical partition, containing the storage that may be accessed with
+ * address translation off.
+ */
+#define RMA_START 0x0
+#define RMA_END (ppc64_rma_size)
+
+/*
+ * On some Power systems where the RMO is 128MB, the kernel still requires a
+ * minimum of 256MB to boot successfully.  When the kdump infrastructure is
+ * configured to save the vmcore over the network, we run into OOM issues
+ * while loading the modules related to network setup.  Hence we need an
+ * additional 64MB of memory to avoid them.
+ */
+#define MIN_BOOT_MEM (((RMA_END < (0x1UL << 28)) ? (0x1UL << 28) : RMA_END) \
+ + (0x1UL << 26))
+
+#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
+
+#ifndef ELF_CORE_EFLAGS
+#define ELF_CORE_EFLAGS 0
+#endif
+
+/* Firmware provided dump sections */
+#define FADUMP_CPU_STATE_DATA 0x0001
+#define FADUMP_HPTE_REGION 0x0002
+#define FADUMP_REAL_MODE_REGION 0x0011
+
+/* Dump request flag */
+#define FADUMP_REQUEST_FLAG 0x00000001
+
+/* FAD commands */
+#define FADUMP_REGISTER 1
+#define FADUMP_UNREGISTER 2
+#define FADUMP_INVALIDATE 3
+
+/* Dump status flag */
+#define FADUMP_ERROR_FLAG 0x2000
+
+#define FADUMP_CPU_ID_MASK ((1UL << 32) - 1)
+
+#define CPU_UNKNOWN (~((u32)0))
+
+/* Utility macros */
+#define SKIP_TO_NEXT_CPU(reg_entry) \
+({ \
+ while (reg_entry->reg_id != REG_ID("CPUEND")) \
+ reg_entry++; \
+ reg_entry++; \
+})
+
+/* Kernel Dump section info */
+struct fadump_section {
+ u32 request_flag;
+ u16 source_data_type;
+ u16 error_flags;
+ u64 source_address;
+ u64 source_len;
+ u64 bytes_dumped;
+ u64 destination_address;
+};
+
+/* ibm,configure-kernel-dump header. */
+struct fadump_section_header {
+ u32 dump_format_version;
+ u16 dump_num_sections;
+ u16 dump_status_flag;
+ u32 offset_first_dump_section;
+
+ /* Fields for disk dump option. */
+ u32 dd_block_size;
+ u64 dd_block_offset;
+ u64 dd_num_blocks;
+ u32 dd_offset_disk_path;
+
+ /* Maximum time allowed to prevent an automatic dump-reboot. */
+ u32 max_time_auto;
+};
+
+/*
+ * Firmware-assisted dump memory structure.  This structure is required for
+ * registering a future kernel dump with the Power firmware through an RTAS
+ * call.
+ *
+ * There is no disk dump option, hence the disk dump path string section is
+ * not included.
+ */
+struct fadump_mem_struct {
+ struct fadump_section_header header;
+
+ /* Kernel dump sections */
+ struct fadump_section cpu_state_data;
+ struct fadump_section hpte_region;
+ struct fadump_section rmr_region;
+};
+
+/* Firmware-assisted dump configuration details. */
+struct fw_dump {
+ unsigned long cpu_state_data_size;
+ unsigned long hpte_region_size;
+ unsigned long boot_memory_size;
+ unsigned long reserve_dump_area_start;
+ unsigned long reserve_dump_area_size;
+ /* cmd line option during boot */
+ unsigned long reserve_bootvar;
+
+ unsigned long fadumphdr_addr;
+ unsigned long cpu_notes_buf;
+ unsigned long cpu_notes_buf_size;
+
+ int ibm_configure_kernel_dump;
+
+ unsigned long fadump_enabled:1;
+ unsigned long fadump_supported:1;
+ unsigned long dump_active:1;
+ unsigned long dump_registered:1;
+};
+
+/*
+ * Copy the ASCII values of the first 8 characters of a string into a u64
+ * variable at their respective byte positions.
+ * e.g.
+ * The string "FADMPINF" will be converted into 0x4641444d50494e46
+ */
+static inline u64 str_to_u64(const char *str)
+{
+ u64 val = 0;
+ int i;
+
+ for (i = 0; i < sizeof(val); i++)
+ val = (*str) ? (val << 8) | *str++ : val << 8;
+ return val;
+}
+#define STR_TO_HEX(x) str_to_u64(x)
+#define REG_ID(x) str_to_u64(x)
+
+#define FADUMP_CRASH_INFO_MAGIC STR_TO_HEX("FADMPINF")
+#define REGSAVE_AREA_MAGIC STR_TO_HEX("REGSAVE")
+
+/* The firmware-assisted dump format.
+ *
+ * The register save area is an area in the partition's memory used to preserve
+ * the register contents (CPU state data) for the active CPUs during a firmware
+ * assisted dump.  The dump format contains a register save area header followed
+ * by register entries. Each list of registers for a CPU starts with
+ * "CPUSTRT" and ends with "CPUEND".
+ */
+
+/* Register save area header. */
+struct fadump_reg_save_area_header {
+ u64 magic_number;
+ u32 version;
+ u32 num_cpu_offset;
+};
+
+/* Register entry. */
+struct fadump_reg_entry {
+ u64 reg_id;
+ u64 reg_value;
+};
+
+/* fadump crash info structure */
+struct fadump_crash_info_header {
+ u64 magic_number;
+ u64 elfcorehdr_addr;
+ u32 crashing_cpu;
+ struct pt_regs regs;
+ struct cpumask cpu_online_mask;
+};
+
+/* Crash memory ranges */
+#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
+
+struct fad_crash_memory_ranges {
+ unsigned long long base;
+ unsigned long long size;
+};
+
+extern int early_init_dt_scan_fw_dump(unsigned long node,
+ const char *uname, int depth, void *data);
+extern int fadump_reserve_mem(void);
+extern int setup_fadump(void);
+extern int is_fadump_active(void);
+extern void crash_fadump(struct pt_regs *, const char *);
+extern void fadump_cleanup(void);
+
+extern void vmcore_cleanup(void);
+#else /* CONFIG_FA_DUMP */
+static inline int is_fadump_active(void) { return 0; }
+static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
+#endif
+#endif
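
str_to_u64() packs the first eight ASCII characters of a string into a u64 with the first character in the most significant byte; that is how FADUMP_CRASH_INFO_MAGIC ("FADMPINF" = 0x4641444d50494e46) is formed. A hedged sketch of a header check built on it (the helper name is hypothetical):

#include <asm/fadump.h>

/* 'F'=0x46 'A'=0x41 'D'=0x44 'M'=0x4d 'P'=0x50 'I'=0x49 'N'=0x4e 'F'=0x46 */
static int example_crash_header_valid(const struct fadump_crash_info_header *fdh)
{
	return fdh && fdh->magic_number == FADUMP_CRASH_INFO_MAGIC;
}
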
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 14db29b18d0e..ad0b751b0d78 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -41,7 +41,6 @@
#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000)
#define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000)
#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
-#define FW_FEATURE_ISERIES ASM_CONST(0x0000000000200000)
#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
@@ -65,8 +64,6 @@ enum {
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO,
FW_FEATURE_PSERIES_ALWAYS = 0,
- FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
- FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
@@ -79,9 +76,6 @@ enum {
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_POSSIBLE |
#endif
-#ifdef CONFIG_PPC_ISERIES
- FW_FEATURE_ISERIES_POSSIBLE |
-#endif
#ifdef CONFIG_PPC_POWERNV
FW_FEATURE_POWERNV_POSSIBLE |
#endif
@@ -99,9 +93,6 @@ enum {
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_ALWAYS &
#endif
-#ifdef CONFIG_PPC_ISERIES
- FW_FEATURE_ISERIES_ALWAYS &
-#endif
#ifdef CONFIG_PPC_POWERNV
FW_FEATURE_POWERNV_ALWAYS &
#endif
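
With FW_FEATURE_ISERIES gone, runtime platform checks continue to go through firmware_has_feature(); features outside the POSSIBLE mask of the configured platforms simply constant-fold to false. A small hedged example of the usual call pattern (the caller is hypothetical):

#include <asm/firmware.h>

/* Hypothetical caller: only take the hypervisor path when running in an LPAR */
static int example_use_hv_path(void)
{
	return firmware_has_feature(FW_FEATURE_LPAR);
}
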
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index bebd12463ec9..ce04530d2000 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -4,7 +4,7 @@
* Authors: Jeff Brown
* Timur Tabi <timur@freescale.com>
*
- * Copyright 2004,2007 Freescale Semiconductor, Inc
+ * Copyright 2004,2007,2012 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -114,6 +114,10 @@ struct ccsr_guts_86xx {
__be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */
} __attribute__ ((packed));
+
+/* Alternate function signal multiplex control */
+#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
+
#ifdef CONFIG_PPC_86xx
#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */
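
MPC85xx_PMUXCR_QE(x) selects bit x counted from the most-significant end of a 16-bit field (0x8000 >> x). A small illustration of the values it produces; the combined mask below is only an example, not a real board configuration:

#include <linux/types.h>
#include <asm/fsl_guts.h>

/*
 * MPC85xx_PMUXCR_QE(0)  == 0x8000
 * MPC85xx_PMUXCR_QE(3)  == 0x1000
 * MPC85xx_PMUXCR_QE(12) == 0x0008
 */
static const u32 example_qe_pmuxcr_bits =
	MPC85xx_PMUXCR_QE(0) | MPC85xx_PMUXCR_QE(3) | MPC85xx_PMUXCR_QE(12);
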
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index dbc264010d0b..caaf6e00630d 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -79,7 +79,7 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
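
kmap_atomic() loses its leading underscores here, matching the generic interface that no longer takes a KM_* slot argument. A hedged usage sketch with a hypothetical helper:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: zero a (possibly highmem) page via a temporary mapping */
static void example_zero_page(struct page *page)
{
	void *addr = kmap_atomic(page);		/* no KM_* slot argument */

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr);
}
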
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index bb712c9488b3..51010bfc792e 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -11,6 +11,27 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
+#ifdef CONFIG_PPC64
+
+/*
+ * PACA flags in paca->irq_happened.
+ *
+ * These bits are set when interrupts occur while soft-disabled
+ * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
+ * is set whenever we manually hard disable.
+ */
+#define PACA_IRQ_HARD_DIS 0x01
+#define PACA_IRQ_DBELL 0x02
+#define PACA_IRQ_EE 0x04
+#define PACA_IRQ_DEC 0x08 /* Or FIT */
+#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
+
+#endif /* CONFIG_PPC64 */
+
+#ifndef __ASSEMBLY__
+
+extern void __replay_interrupt(unsigned int vector);
+
extern void timer_interrupt(struct pt_regs *);
#ifdef CONFIG_PPC64
@@ -42,7 +63,6 @@ static inline unsigned long arch_local_irq_disable(void)
}
extern void arch_local_irq_restore(unsigned long);
-extern void iseries_handle_interrupts(void);
static inline void arch_local_irq_enable(void)
{
@@ -68,16 +88,33 @@ static inline bool arch_irqs_disabled(void)
#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
#else
-#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
-#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
+#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
+#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
#endif
-#define hard_irq_disable() \
- do { \
- __hard_irq_disable(); \
- get_paca()->soft_enabled = 0; \
- get_paca()->hard_enabled = 0; \
- } while(0)
+static inline void hard_irq_disable(void)
+{
+ __hard_irq_disable();
+ get_paca()->soft_enabled = 0;
+ get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
+}
+
+/*
+ * This is called by asynchronous interrupts to conditionally
+ * re-enable hard interrupts when soft-disabled, after the
+ * source of the interrupt has been cleared.
+ */
+static inline void may_hard_irq_enable(void)
+{
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+ __hard_irq_enable();
+}
+
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+ return !regs->softe;
+}
#else /* CONFIG_PPC64 */
@@ -139,6 +176,13 @@ static inline bool arch_irqs_disabled(void)
#define hard_irq_disable() arch_local_irq_disable()
+static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
+{
+ return !(regs->msr & MSR_EE);
+}
+
+static inline void may_hard_irq_enable(void) { }
+
#endif /* CONFIG_PPC64 */
#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
@@ -149,5 +193,6 @@ static inline bool arch_irqs_disabled(void)
*/
struct irq_chip;
+#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
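
hard_irq_disable() is now a function that also records PACA_IRQ_HARD_DIS in paca->irq_happened, so the soft-enable path knows the hard disable has to be undone. A hedged sketch of the usual pattern (the critical section itself is hypothetical):

#include <linux/irqflags.h>
#include <asm/hw_irq.h>

static void example_hard_masked_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* soft-disable */
	hard_irq_disable();		/* clear MSR_EE and mark irq_happened */

	/* ... code that must not be interrupted at all ... */

	local_irq_restore(flags);	/* replays whatever arrived meanwhile */
}
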
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
index 105ade297aad..c3fdfbd5a673 100644
--- a/arch/powerpc/include/asm/i8259.h
+++ b/arch/powerpc/include/asm/i8259.h
@@ -6,7 +6,7 @@
extern void i8259_init(struct device_node *node, unsigned long intack_addr);
extern unsigned int i8259_irq(void);
-extern struct irq_host *i8259_get_host(void);
+extern struct irq_domain *i8259_get_host(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index edfc9803ec91..957a83f43646 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -112,7 +112,6 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
struct dma_attrs *attrs);
extern void iommu_init_early_pSeries(void);
-extern void iommu_init_early_iSeries(void);
extern void iommu_init_early_dart(void);
extern void iommu_init_early_pasemi(void);
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index c0e1bc319e35..cf417e510736 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
@@ -26,267 +27,15 @@ extern atomic_t ppc_n_lost_interrupts;
/* This number is used when no interrupt has been assigned */
#define NO_IRQ (0)
-/* This is a special irq number to return from get_irq() to tell that
- * no interrupt happened _and_ ignore it (don't count it as bad). Some
- * platforms like iSeries rely on that.
- */
-#define NO_IRQ_IGNORE ((unsigned int)-1)
-
/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS
-/* Number of irqs reserved for the legacy controller */
-#define NUM_ISA_INTERRUPTS 16
-
/* Same thing, used by the generic IRQ code */
#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
-/* This type is the placeholder for a hardware interrupt number. It has to
- * be big enough to enclose whatever representation is used by a given
- * platform.
- */
-typedef unsigned long irq_hw_number_t;
-
-/* Interrupt controller "host" data structure. This could be defined as a
- * irq domain controller. That is, it handles the mapping between hardware
- * and virtual interrupt numbers for a given interrupt domain. The host
- * structure is generally created by the PIC code for a given PIC instance
- * (though a host can cover more than one PIC if they have a flat number
- * model). It's the host callbacks that are responsible for setting the
- * irq_chip on a given irq_desc after it's been mapped.
- *
- * The host code and data structures are fairly agnostic to the fact that
- * we use an open firmware device-tree. We do have references to struct
- * device_node in two places: in irq_find_host() to find the host matching
- * a given interrupt controller node, and of course as an argument to its
- * counterpart host->ops->match() callback. However, those are treated as
- * generic pointers by the core and the fact that it's actually a device-node
- * pointer is purely a convention between callers and implementation. This
- * code could thus be used on other architectures by replacing those two
- * by some sort of arch-specific void * "token" used to identify interrupt
- * controllers.
- */
-struct irq_host;
-struct radix_tree_root;
-
-/* Functions below are provided by the host and called whenever a new mapping
- * is created or an old mapping is disposed. The host can then proceed to
- * whatever internal data structures management is required. It also needs
- * to setup the irq_desc when returning from map().
- */
-struct irq_host_ops {
- /* Match an interrupt controller device node to a host, returns
- * 1 on a match
- */
- int (*match)(struct irq_host *h, struct device_node *node);
-
- /* Create or update a mapping between a virtual irq number and a hw
- * irq number. This is called only once for a given mapping.
- */
- int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
-
- /* Dispose of such a mapping */
- void (*unmap)(struct irq_host *h, unsigned int virq);
-
- /* Translate device-tree interrupt specifier from raw format coming
- * from the firmware to a irq_hw_number_t (interrupt line number) and
- * type (sense) that can be passed to set_irq_type(). In the absence
- * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
- * will return the hw number in the first cell and IRQ_TYPE_NONE for
- * the type (which amount to keeping whatever default value the
- * interrupt controller has for that line)
- */
- int (*xlate)(struct irq_host *h, struct device_node *ctrler,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
-};
-
-struct irq_host {
- struct list_head link;
-
- /* type of reverse mapping technique */
- unsigned int revmap_type;
-#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
-#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
-#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
-#define IRQ_HOST_MAP_TREE 3 /* radix tree */
- union {
- struct {
- unsigned int size;
- unsigned int *revmap;
- } linear;
- struct radix_tree_root tree;
- } revmap_data;
- struct irq_host_ops *ops;
- void *host_data;
- irq_hw_number_t inval_irq;
-
- /* Optional device node pointer */
- struct device_node *of_node;
-};
-
struct irq_data;
extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
extern irq_hw_number_t virq_to_hw(unsigned int virq);
-extern bool virq_is_host(unsigned int virq, struct irq_host *host);
-
-/**
- * irq_alloc_host - Allocate a new irq_host data structure
- * @of_node: optional device-tree node of the interrupt controller
- * @revmap_type: type of reverse mapping to use
- * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
- * @ops: map/unmap host callbacks
- * @inval_irq: provide a hw number in that host space that is always invalid
- *
- * Allocates and initialize and irq_host structure. Note that in the case of
- * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
- * for all legacy interrupts except 0 (which is always the invalid irq for
- * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by
- * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated
- * later during boot automatically (the reverse mapping will use the slow path
- * until that happens).
- */
-extern struct irq_host *irq_alloc_host(struct device_node *of_node,
- unsigned int revmap_type,
- unsigned int revmap_arg,
- struct irq_host_ops *ops,
- irq_hw_number_t inval_irq);
-
-
-/**
- * irq_find_host - Locates a host for a given device node
- * @node: device-tree node of the interrupt controller
- */
-extern struct irq_host *irq_find_host(struct device_node *node);
-
-
-/**
- * irq_set_default_host - Set a "default" host
- * @host: default host pointer
- *
- * For convenience, it's possible to set a "default" host that will be used
- * whenever NULL is passed to irq_create_mapping(). It makes life easier for
- * platforms that want to manipulate a few hard coded interrupt numbers that
- * aren't properly represented in the device-tree.
- */
-extern void irq_set_default_host(struct irq_host *host);
-
-
-/**
- * irq_set_virq_count - Set the maximum number of virt irqs
- * @count: number of linux virtual irqs, capped with NR_IRQS
- *
- * This is mainly for use by platforms like iSeries who want to program
- * the virtual irq number in the controller to avoid the reverse mapping
- */
-extern void irq_set_virq_count(unsigned int count);
-
-
-/**
- * irq_create_mapping - Map a hardware interrupt into linux virq space
- * @host: host owning this hardware interrupt or NULL for default host
- * @hwirq: hardware irq number in that host space
- *
- * Only one mapping per hardware interrupt is permitted. Returns a linux
- * virq number.
- * If the sense/trigger is to be specified, set_irq_type() should be called
- * on the number returned from that call.
- */
-extern unsigned int irq_create_mapping(struct irq_host *host,
- irq_hw_number_t hwirq);
-
-
-/**
- * irq_dispose_mapping - Unmap an interrupt
- * @virq: linux virq number of the interrupt to unmap
- */
-extern void irq_dispose_mapping(unsigned int virq);
-
-/**
- * irq_find_mapping - Find a linux virq from an hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a slow path, for use by generic code. It's expected that an
- * irq controller implementation directly calls the appropriate low level
- * mapping function.
- */
-extern unsigned int irq_find_mapping(struct irq_host *host,
- irq_hw_number_t hwirq);
-
-/**
- * irq_create_direct_mapping - Allocate a virq for direct mapping
- * @host: host to allocate the virq for or NULL for default host
- *
- * This routine is used for irq controllers which can choose the hardware
- * interrupt numbers they generate. In such a case it's simplest to use
- * the linux virq as the hardware interrupt number.
- */
-extern unsigned int irq_create_direct_mapping(struct irq_host *host);
-
-/**
- * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
- * @host: host owning this hardware interrupt
- * @virq: linux irq number
- * @hwirq: hardware irq number in that host space
- *
- * This is for use by irq controllers that use a radix tree reverse
- * mapping for fast lookup.
- */
-extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
- irq_hw_number_t hwirq);
-
-/**
- * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a fast path, for use by irq controller code that uses radix tree
- * revmaps
- */
-extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
- irq_hw_number_t hwirq);
-
-/**
- * irq_linear_revmap - Find a linux virq from a hw irq number.
- * @host: host owning this hardware interrupt
- * @hwirq: hardware irq number in that host space
- *
- * This is a fast path, for use by irq controller code that uses linear
- * revmaps. It does fallback to the slow path if the revmap doesn't exist
- * yet and will create the revmap entry with appropriate locking
- */
-
-extern unsigned int irq_linear_revmap(struct irq_host *host,
- irq_hw_number_t hwirq);
-
-
-
-/**
- * irq_alloc_virt - Allocate virtual irq numbers
- * @host: host owning these new virtual irqs
- * @count: number of consecutive numbers to allocate
- * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
- *
- * This is a low level function that is used internally by irq_create_mapping()
- * and that can be used by some irq controllers implementations for things
- * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
- */
-extern unsigned int irq_alloc_virt(struct irq_host *host,
- unsigned int count,
- unsigned int hint);
-
-/**
- * irq_free_virt - Free virtual irq numbers
- * @virq: virtual irq number of the first interrupt to free
- * @count: number of interrupts to free
- *
- * This function is the opposite of irq_alloc_virt. It will not clear reverse
- * maps, this should be done previously by unmap'ing the interrupt. In fact,
- * all interrupts covered by the range being freed should have been unmapped
- * prior to calling this.
- */
-extern void irq_free_virt(unsigned int virq, unsigned int count);
/**
* irq_early_init - Init irq remapping subsystem
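
All of the irq_host machinery removed above now lives in the generic irq_domain code pulled in via <linux/irqdomain.h>; the calls keep much the same shape with irq_host replaced by irq_domain. A hedged sketch of a linear domain set up by a hypothetical interrupt controller driver (all names illustrative, not from this patch):

#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_pic_map(struct irq_domain *d, unsigned int virq,
			   irq_hw_number_t hw)
{
	/* set the irq_chip and flow handler for this virq here */
	return 0;
}

static const struct irq_domain_ops example_pic_ops = {
	.map	= example_pic_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

static void __init example_pic_init(struct device_node *np)
{
	struct irq_domain *domain;

	domain = irq_domain_add_linear(np, 64, &example_pic_ops, NULL);
	if (domain)
		irq_create_mapping(domain, 0 /* hwirq */);
}
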
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index b0b06d85788d..6f9b6e23dc5a 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -39,24 +39,31 @@
#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
- cmpdi en,0; \
- bne 95f; \
- stb en,PACASOFTIRQEN(r13); \
- TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) \
- b skip; \
-95: TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) \
- li en,1;
-#define TRACE_AND_RESTORE_IRQ(en) \
- TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \
- stb en,PACASOFTIRQEN(r13); \
-96:
+/*
+ * This is used by assembly code to soft-disable interrupts
+ */
+#define SOFT_DISABLE_INTS(__rA, __rB) \
+ lbz __rA,PACASOFTIRQEN(r13); \
+ lbz __rB,PACAIRQHAPPENED(r13); \
+ cmpwi cr0,__rA,0; \
+ li __rA,0; \
+ ori __rB,__rB,PACA_IRQ_HARD_DIS; \
+ stb __rB,PACAIRQHAPPENED(r13); \
+ beq 44f; \
+ stb __rA,PACASOFTIRQEN(r13); \
+ TRACE_DISABLE_INTS; \
+44:
+
#else
#define TRACE_ENABLE_INTS
#define TRACE_DISABLE_INTS
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)
-#define TRACE_AND_RESTORE_IRQ(en) \
- stb en,PACASOFTIRQEN(r13)
+
+#define SOFT_DISABLE_INTS(__rA, __rB) \
+ lbz __rA,PACAIRQHAPPENED(r13); \
+ li __rB,0; \
+ ori __rA,__rA,PACA_IRQ_HARD_DIS; \
+ stb __rB,PACASOFTIRQEN(r13); \
+ stb __rA,PACAIRQHAPPENED(r13)
#endif
#endif
diff --git a/arch/powerpc/include/asm/iseries/alpaca.h b/arch/powerpc/include/asm/iseries/alpaca.h
deleted file mode 100644
index c0cce6727a69..000000000000
--- a/arch/powerpc/include/asm/iseries/alpaca.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright © 2008 Stephen Rothwell IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_ALPACA_H
-#define _ASM_POWERPC_ISERIES_ALPACA_H
-
-/*
- * This is the part of the paca that the iSeries hypervisor
- * needs to be statically initialised. Immediately after boot
- * we switch to the normal Linux paca.
- */
-struct alpaca {
- struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
- const void *reg_save_ptr; /* Pointer to LpRegSave for PLIC */
-};
-
-#endif /* _ASM_POWERPC_ISERIES_ALPACA_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call.h b/arch/powerpc/include/asm/iseries/hv_call.h
deleted file mode 100644
index 162d653ad51f..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_call.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This file contains the "hypervisor call" interface which is used to
- * drive the hypervisor from the OS.
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H
-#define _ASM_POWERPC_ISERIES_HV_CALL_H
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/paca.h>
-
-/* Type of yield for HvCallBaseYieldProcessor */
-#define HvCall_YieldTimed 0 /* Yield until specified time (tb) */
-#define HvCall_YieldToActive 1 /* Yield until all active procs have run */
-#define HvCall_YieldToProc 2 /* Yield until the specified processor has run */
-
-/* interrupt masks for setEnabledInterrupts */
-#define HvCall_MaskIPI 0x00000001
-#define HvCall_MaskLpEvent 0x00000002
-#define HvCall_MaskLpProd 0x00000004
-#define HvCall_MaskTimeout 0x00000008
-
-/* Log buffer formats */
-#define HvCall_LogBuffer_ASCII 0
-#define HvCall_LogBuffer_EBCDIC 1
-
-#define HvCallBaseAckDeferredInts HvCallBase + 0
-#define HvCallBaseCpmPowerOff HvCallBase + 1
-#define HvCallBaseGetHwPatch HvCallBase + 2
-#define HvCallBaseReIplSpAttn HvCallBase + 3
-#define HvCallBaseSetASR HvCallBase + 4
-#define HvCallBaseSetASRAndRfi HvCallBase + 5
-#define HvCallBaseSetIMR HvCallBase + 6
-#define HvCallBaseSendIPI HvCallBase + 7
-#define HvCallBaseTerminateMachine HvCallBase + 8
-#define HvCallBaseTerminateMachineSrc HvCallBase + 9
-#define HvCallBaseProcessPlicInterrupts HvCallBase + 10
-#define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11
-#define HvCallBaseSetVirtualSIT HvCallBase + 12
-#define HvCallBaseVaryOffThisProcessor HvCallBase + 13
-#define HvCallBaseVaryOffMemoryChunk HvCallBase + 14
-#define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15
-#define HvCallBaseSendLpProd HvCallBase + 16
-#define HvCallBaseSetEnabledInterrupts HvCallBase + 17
-#define HvCallBaseYieldProcessor HvCallBase + 18
-#define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19
-#define HvCallBaseSetVirtualDecr HvCallBase + 20
-#define HvCallBaseClearLogBuffer HvCallBase + 21
-#define HvCallBaseGetLogBufferCodePage HvCallBase + 22
-#define HvCallBaseGetLogBufferFormat HvCallBase + 23
-#define HvCallBaseGetLogBufferLength HvCallBase + 24
-#define HvCallBaseReadLogBuffer HvCallBase + 25
-#define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26
-#define HvCallBaseWriteLogBuffer HvCallBase + 27
-#define HvCallBaseRouter28 HvCallBase + 28
-#define HvCallBaseRouter29 HvCallBase + 29
-#define HvCallBaseRouter30 HvCallBase + 30
-#define HvCallBaseSetDebugBus HvCallBase + 31
-
-#define HvCallCcSetDABR HvCallCc + 7
-
-static inline void HvCall_setVirtualDecr(void)
-{
- /*
- * Ignore any error return codes - most likely means that the
- * target value for the LP has been increased and this vary off
- * would bring us below the new target.
- */
- HvCall0(HvCallBaseSetVirtualDecr);
-}
-
-static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm)
-{
- HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm);
-}
-
-static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts)
-{
- HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts);
-}
-
-static inline void HvCall_setLogBufferFormatAndCodepage(int format,
- u32 codePage)
-{
- HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage);
-}
-
-extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen);
-
-static inline void HvCall_sendIPI(struct paca_struct *targetPaca)
-{
- HvCall1(HvCallBaseSendIPI, targetPaca->paca_index);
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */
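
The deleted header above builds each hypervisor call number by adding a small function index to a per-component base token (HvCallBase, defined in hv_call_sc.h later in this patch as 0x8000000000000000ul). A minimal standalone sketch of that composition follows; the printf-based HvCall1() stub and the sample mask values are illustrative stand-ins, not the real assembly entry point.

#include <stdio.h>
#include <stdint.h>

#define HV_CALL_BASE 0x8000000000000000ull           /* HvCallBase */
#define HV_CALL_SET_ENABLED_INTS (HV_CALL_BASE + 17) /* HvCallBaseSetEnabledInterrupts */

/* Stand-in for the real HvCall1() assembly stub that traps into PLIC. */
static uint64_t HvCall1(uint64_t token, uint64_t arg)
{
	printf("hcall token=%#llx arg=%#llx\n",
	       (unsigned long long)token, (unsigned long long)arg);
	return 0;
}

int main(void)
{
	/* Enable IPI and LP-event interrupts (HvCall_MaskIPI | HvCall_MaskLpEvent). */
	HvCall1(HV_CALL_SET_ENABLED_INTS, 0x00000001 | 0x00000002);
	return 0;
}
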
diff --git a/arch/powerpc/include/asm/iseries/hv_call_event.h b/arch/powerpc/include/asm/iseries/hv_call_event.h
deleted file mode 100644
index cc029d388e11..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_event.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This file contains the "hypervisor call" interface which is used to
- * drive the hypervisor from the OS.
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
-#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
-
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/abs_addr.h>
-
-struct HvLpEvent;
-
-typedef u8 HvLpEvent_Type;
-typedef u8 HvLpEvent_AckInd;
-typedef u8 HvLpEvent_AckType;
-
-typedef u8 HvLpDma_Direction;
-typedef u8 HvLpDma_AddressType;
-
-typedef u64 HvLpEvent_Rc;
-typedef u64 HvLpDma_Rc;
-
-#define HvCallEventAckLpEvent HvCallEvent + 0
-#define HvCallEventCancelLpEvent HvCallEvent + 1
-#define HvCallEventCloseLpEventPath HvCallEvent + 2
-#define HvCallEventDmaBufList HvCallEvent + 3
-#define HvCallEventDmaSingle HvCallEvent + 4
-#define HvCallEventDmaToSp HvCallEvent + 5
-#define HvCallEventGetOverflowLpEvents HvCallEvent + 6
-#define HvCallEventGetSourceLpInstanceId HvCallEvent + 7
-#define HvCallEventGetTargetLpInstanceId HvCallEvent + 8
-#define HvCallEventOpenLpEventPath HvCallEvent + 9
-#define HvCallEventSetLpEventStack HvCallEvent + 10
-#define HvCallEventSignalLpEvent HvCallEvent + 11
-#define HvCallEventSignalLpEventParms HvCallEvent + 12
-#define HvCallEventSetInterLpQueueIndex HvCallEvent + 13
-#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
-#define HvCallEventRouter15 HvCallEvent + 15
-
-static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
-{
- HvCall1(HvCallEventGetOverflowLpEvents, queueIndex);
-}
-
-static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
-{
- HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex);
-}
-
-static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
- char *eventStackAddr, u32 eventStackSize)
-{
- HvCall3(HvCallEventSetLpEventStack, queueIndex,
- virt_to_abs(eventStackAddr), eventStackSize);
-}
-
-static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
- u16 lpLogicalProcIndex)
-{
- HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex,
- lpLogicalProcIndex);
-}
-
-static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
-{
- return HvCall1(HvCallEventSignalLpEvent, virt_to_abs(event));
-}
-
-static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
- HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd,
- HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId,
- HvLpInstanceId targetInstanceId, u64 correlationToken,
- u64 eventData1, u64 eventData2, u64 eventData3,
- u64 eventData4, u64 eventData5)
-{
- /* Pack the misc bits into a single Dword to pass to PLIC */
- union {
- struct {
- u8 ack_and_target;
- u8 type;
- u16 subtype;
- HvLpInstanceId src_inst;
- HvLpInstanceId target_inst;
- } parms;
- u64 dword;
- } packed;
-
- packed.parms.ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp;
- packed.parms.type = type;
- packed.parms.subtype = subtype;
- packed.parms.src_inst = sourceInstanceId;
- packed.parms.target_inst = targetInstanceId;
-
- return HvCall7(HvCallEventSignalLpEventParms, packed.dword,
- correlationToken, eventData1, eventData2,
- eventData3, eventData4, eventData5);
-}
-
-extern void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag);
-extern void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle);
-extern dma_addr_t iseries_hv_map(void *vaddr, size_t size,
- enum dma_data_direction direction);
-extern void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
-
-static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
-{
- return HvCall1(HvCallEventAckLpEvent, virt_to_abs(event));
-}
-
-static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
-{
- return HvCall1(HvCallEventCancelLpEvent, virt_to_abs(event));
-}
-
-static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
- HvLpIndex targetLp, HvLpEvent_Type type)
-{
- return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
-}
-
-static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(
- HvLpIndex targetLp, HvLpEvent_Type type)
-{
- return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
-}
-
-static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
- HvLpEvent_Type type)
-{
- HvCall2(HvCallEventOpenLpEventPath, targetLp, type);
-}
-
-static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
- HvLpEvent_Type type)
-{
- HvCall2(HvCallEventCloseLpEventPath, targetLp, type);
-}
-
-static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
- HvLpIndex remoteLp, HvLpDma_Direction direction,
- HvLpInstanceId localInstanceId,
- HvLpInstanceId remoteInstanceId,
- HvLpDma_AddressType localAddressType,
- HvLpDma_AddressType remoteAddressType,
- /* Do these need to be converted to absolute addresses? */
- u64 localBufList, u64 remoteBufList, u32 transferLength)
-{
- /* Pack the misc bits into a single Dword to pass to PLIC */
- union {
- struct {
- u8 flags;
- HvLpIndex remote;
- u8 type;
- u8 reserved;
- HvLpInstanceId local_inst;
- HvLpInstanceId remote_inst;
- } parms;
- u64 dword;
- } packed;
-
- packed.parms.flags = (direction << 7) |
- (localAddressType << 6) | (remoteAddressType << 5);
- packed.parms.remote = remoteLp;
- packed.parms.type = type;
- packed.parms.reserved = 0;
- packed.parms.local_inst = localInstanceId;
- packed.parms.remote_inst = remoteInstanceId;
-
- return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList,
- remoteBufList, transferLength);
-}
-
-static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
- u32 length, HvLpDma_Direction dir)
-{
- return HvCall4(HvCallEventDmaToSp, virt_to_abs(local), remote,
- length, dir);
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
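
HvCallEvent_signalLpEventFast() and HvCallEvent_dmaBufList() above both squeeze several small parameters into one 64-bit "dword" through a union so that a single register argument reaches PLIC. Below is a standalone sketch of that packing with made-up field values; the printed dword depends on byte order (the original hardware is big-endian), so this only illustrates the layout idea, it is not the kernel structure itself.

#include <stdio.h>
#include <stdint.h>

union packed_parms {
	struct {
		uint8_t  ack_and_target;   /* (ackType << 7) | (ackInd << 6) | targetLp */
		uint8_t  type;
		uint16_t subtype;
		uint16_t src_inst;
		uint16_t target_inst;
	} parms;                           /* 8 bytes, no padding */
	uint64_t dword;
};

int main(void)
{
	union packed_parms p = { .parms = {
		.ack_and_target = (1 << 7) | (1 << 6) | 3,   /* made-up target LP 3 */
		.type           = 8,
		.subtype        = 0x0301,                    /* made-up subtype */
		.src_inst       = 1,
		.target_inst    = 2,
	} };

	printf("packed dword: %#llx\n", (unsigned long long)p.dword);
	return 0;
}
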
diff --git a/arch/powerpc/include/asm/iseries/hv_call_sc.h b/arch/powerpc/include/asm/iseries/hv_call_sc.h
deleted file mode 100644
index f5d210959250..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_sc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H
-#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H
-
-#include <linux/types.h>
-
-#define HvCallBase 0x8000000000000000ul
-#define HvCallCc 0x8001000000000000ul
-#define HvCallCfg 0x8002000000000000ul
-#define HvCallEvent 0x8003000000000000ul
-#define HvCallHpt 0x8004000000000000ul
-#define HvCallPci 0x8005000000000000ul
-#define HvCallSm 0x8007000000000000ul
-#define HvCallXm 0x8009000000000000ul
-
-extern u64 HvCall0(u64);
-extern u64 HvCall1(u64, u64);
-extern u64 HvCall2(u64, u64, u64);
-extern u64 HvCall3(u64, u64, u64, u64);
-extern u64 HvCall4(u64, u64, u64, u64, u64);
-extern u64 HvCall5(u64, u64, u64, u64, u64, u64);
-extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64);
-extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64);
-
-extern u64 HvCall0Ret16(u64, void *);
-extern u64 HvCall1Ret16(u64, void *, u64);
-extern u64 HvCall2Ret16(u64, void *, u64, u64);
-extern u64 HvCall3Ret16(u64, void *, u64, u64, u64);
-extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64);
-extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64);
-extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64);
-extern u64 HvCall7Ret16(u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64);
-
-#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call_xm.h b/arch/powerpc/include/asm/iseries/hv_call_xm.h
deleted file mode 100644
index 392ac3f54df0..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_xm.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * This file contains the "hypervisor call" interface which is used to
- * drive the hypervisor from SLIC.
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H
-#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-
-#define HvCallXmGetTceTableParms HvCallXm + 0
-#define HvCallXmTestBus HvCallXm + 1
-#define HvCallXmConnectBusUnit HvCallXm + 2
-#define HvCallXmLoadTod HvCallXm + 8
-#define HvCallXmTestBusUnit HvCallXm + 9
-#define HvCallXmSetTce HvCallXm + 11
-#define HvCallXmSetTces HvCallXm + 13
-
-static inline void HvCallXm_getTceTableParms(u64 cb)
-{
- HvCall1(HvCallXmGetTceTableParms, cb);
-}
-
-static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce)
-{
- return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce);
-}
-
-static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset,
- u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4)
-{
- return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces,
- tce1, tce2, tce3, tce4);
-}
-
-static inline u64 HvCallXm_testBus(u16 busNumber)
-{
- return HvCall1(HvCallXmTestBus, busNumber);
-}
-
-static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber,
- u8 deviceId)
-{
- return HvCall2(HvCallXmTestBusUnit, busNumber,
- (subBusNumber << 8) | deviceId);
-}
-
-static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber,
- u8 deviceId, u64 interruptToken)
-{
- return HvCall5(HvCallXmConnectBusUnit, busNumber,
- (subBusNumber << 8) | deviceId, interruptToken, 0,
- 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */);
-}
-
-static inline u64 HvCallXm_loadTod(void)
-{
- return HvCall0(HvCallXmLoadTod);
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_lp_config.h b/arch/powerpc/include/asm/iseries/hv_lp_config.h
deleted file mode 100644
index a006fd1e4a2c..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_lp_config.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
-#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
-
-/*
- * This file contains the interface to the LPAR configuration data
- * to determine which resources should be allocated to each partition.
- */
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-
-enum {
- HvCallCfg_Cur = 0,
- HvCallCfg_Init = 1,
- HvCallCfg_Max = 2,
- HvCallCfg_Min = 3
-};
-
-#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6
-#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7
-#define HvCallCfgGetMsChunks HvCallCfg + 9
-#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20
-#define HvCallCfgGetSharedProcUnits HvCallCfg + 21
-#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22
-#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30
-#define HvCallCfgGetHostingLpIndex HvCallCfg + 32
-
-extern HvLpIndex HvLpConfig_getLpIndex_outline(void);
-extern HvLpIndex HvLpConfig_getLpIndex(void);
-extern HvLpIndex HvLpConfig_getPrimaryLpIndex(void);
-
-static inline u64 HvLpConfig_getMsChunks(void)
-{
- return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(),
- HvCallCfg_Cur);
-}
-
-static inline u64 HvLpConfig_getSystemPhysicalProcessors(void)
-{
- return HvCall0(HvCallCfgGetSystemPhysicalProcessors);
-}
-
-static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI)
-{
- return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI);
-}
-
-static inline u64 HvLpConfig_getPhysicalProcessors(void)
-{
- return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
- HvCallCfg_Cur);
-}
-
-static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void)
-{
- return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex());
-}
-
-static inline u64 HvLpConfig_getSharedProcUnits(void)
-{
- return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
- HvCallCfg_Cur);
-}
-
-static inline u64 HvLpConfig_getMaxSharedProcUnits(void)
-{
- return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
- HvCallCfg_Max);
-}
-
-static inline u64 HvLpConfig_getMaxPhysicalProcessors(void)
-{
- return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
- HvCallCfg_Max);
-}
-
-static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp(
- HvLpIndex lp)
-{
- /*
- * This is a new function in V5R1 so calls to this on older
- * hypervisors will return -1
- */
- u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp);
- if (retVal == -1)
- retVal = 0;
- return retVal;
-}
-
-static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void)
-{
- return HvLpConfig_getVirtualLanIndexMapForLp(
- HvLpConfig_getLpIndex_outline());
-}
-
-static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1,
- HvLpIndex lp2)
-{
- HvLpVirtualLanIndexMap virtualLanIndexMap1 =
- HvLpConfig_getVirtualLanIndexMapForLp(lp1);
- HvLpVirtualLanIndexMap virtualLanIndexMap2 =
- HvLpConfig_getVirtualLanIndexMapForLp(lp2);
- return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0);
-}
-
-static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
-{
- return HvCall1(HvCallCfgGetHostingLpIndex, lp);
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */
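
HvLpConfig_doLpsCommunicateOnVirtualLan() above boils down to intersecting two per-partition virtual-LAN membership bitmaps. A tiny standalone sketch with made-up bitmaps:

#include <stdio.h>
#include <stdint.h>

typedef uint16_t HvLpVirtualLanIndexMap;   /* one bit per virtual LAN */

static int lps_share_a_vlan(HvLpVirtualLanIndexMap a, HvLpVirtualLanIndexMap b)
{
	return (a & b) != 0;
}

int main(void)
{
	HvLpVirtualLanIndexMap lp1 = 0x0005;   /* made-up membership bitmaps */
	HvLpVirtualLanIndexMap lp2 = 0x0004;

	printf("can communicate: %d\n", lps_share_a_vlan(lp1, lp2));
	return 0;
}
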
diff --git a/arch/powerpc/include/asm/iseries/hv_lp_event.h b/arch/powerpc/include/asm/iseries/hv_lp_event.h
deleted file mode 100644
index 8f5da7d77202..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_lp_event.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* This file contains the class for HV events in the system. */
-
-#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
-#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
-
-#include <asm/types.h>
-#include <asm/ptrace.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_call_event.h>
-
-/*
- * HvLpEvent is the structure for Lp Event messages passed between
- * partitions through PLIC.
- */
-
-struct HvLpEvent {
- u8 flags; /* Event flags x00-x00 */
- u8 xType; /* Type of message x01-x01 */
- u16 xSubtype; /* Subtype for event x02-x03 */
- u8 xSourceLp; /* Source LP x04-x04 */
- u8 xTargetLp; /* Target LP x05-x05 */
- u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */
- u8 xRc; /* RC for Ack flows x07-x07 */
- u16 xSourceInstanceId; /* Source sides instance id x08-x09 */
- u16 xTargetInstanceId; /* Target sides instance id x0A-x0B */
- union {
- u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */
- u16 xSubtypeDataShort[2]; /* Data as 2 shorts */
- u8 xSubtypeDataChar[4]; /* Data as 4 chars */
- } x;
-
- u64 xCorrelationToken; /* Unique value for source/type x10-x17 */
-};
-
-typedef void (*LpEventHandler)(struct HvLpEvent *);
-
-/* Register a handler for an event type - returns 0 on success */
-extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType,
- LpEventHandler hdlr);
-
-/*
- * Unregister a handler for an event type
- *
- * This call will sleep until the handler being removed is guaranteed to
- * be no longer executing on any CPU. Do not call with locks held.
- *
- * returns 0 on success
- * Unregister will fail if there are any paths open for the type
- */
-extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType);
-
-/*
- * Open an Lp Event Path for an event type
- * returns 0 on success
- * openPath will fail if there is no handler registered for the event type.
- * The lpIndex specified is the partition index for the target partition
- * (for VirtualIo, VirtualLan and SessionMgr; other types specify zero)

- */
-extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
-
-/*
- * Close an Lp Event Path for a type and partition
- * returns 0 on success
- */
-extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
-
-#define HvLpEvent_Type_Hypervisor 0
-#define HvLpEvent_Type_MachineFac 1
-#define HvLpEvent_Type_SessionMgr 2
-#define HvLpEvent_Type_SpdIo 3
-#define HvLpEvent_Type_VirtualBus 4
-#define HvLpEvent_Type_PciIo 5
-#define HvLpEvent_Type_RioIo 6
-#define HvLpEvent_Type_VirtualLan 7
-#define HvLpEvent_Type_VirtualIo 8
-#define HvLpEvent_Type_NumTypes 9
-
-#define HvLpEvent_Rc_Good 0
-#define HvLpEvent_Rc_BufferNotAvailable 1
-#define HvLpEvent_Rc_Cancelled 2
-#define HvLpEvent_Rc_GenericError 3
-#define HvLpEvent_Rc_InvalidAddress 4
-#define HvLpEvent_Rc_InvalidPartition 5
-#define HvLpEvent_Rc_InvalidSize 6
-#define HvLpEvent_Rc_InvalidSubtype 7
-#define HvLpEvent_Rc_InvalidSubtypeData 8
-#define HvLpEvent_Rc_InvalidType 9
-#define HvLpEvent_Rc_PartitionDead 10
-#define HvLpEvent_Rc_PathClosed 11
-#define HvLpEvent_Rc_SubtypeError 12
-
-#define HvLpEvent_Function_Ack 0
-#define HvLpEvent_Function_Int 1
-
-#define HvLpEvent_AckInd_NoAck 0
-#define HvLpEvent_AckInd_DoAck 1
-
-#define HvLpEvent_AckType_ImmediateAck 0
-#define HvLpEvent_AckType_DeferredAck 1
-
-#define HV_LP_EVENT_INT 0x01
-#define HV_LP_EVENT_DO_ACK 0x02
-#define HV_LP_EVENT_DEFERRED_ACK 0x04
-#define HV_LP_EVENT_VALID 0x80
-
-#define HvLpDma_Direction_LocalToRemote 0
-#define HvLpDma_Direction_RemoteToLocal 1
-
-#define HvLpDma_AddressType_TceIndex 0
-#define HvLpDma_AddressType_RealAddress 1
-
-#define HvLpDma_Rc_Good 0
-#define HvLpDma_Rc_Error 1
-#define HvLpDma_Rc_PartitionDead 2
-#define HvLpDma_Rc_PathClosed 3
-#define HvLpDma_Rc_InvalidAddress 4
-#define HvLpDma_Rc_InvalidLength 5
-
-static inline int hvlpevent_is_valid(struct HvLpEvent *h)
-{
- return h->flags & HV_LP_EVENT_VALID;
-}
-
-static inline void hvlpevent_invalidate(struct HvLpEvent *h)
-{
- h->flags &= ~ HV_LP_EVENT_VALID;
-}
-
-static inline int hvlpevent_is_int(struct HvLpEvent *h)
-{
- return h->flags & HV_LP_EVENT_INT;
-}
-
-static inline int hvlpevent_is_ack(struct HvLpEvent *h)
-{
- return !hvlpevent_is_int(h);
-}
-
-static inline int hvlpevent_need_ack(struct HvLpEvent *h)
-{
- return h->flags & HV_LP_EVENT_DO_ACK;
-}
-
-#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_types.h b/arch/powerpc/include/asm/iseries/hv_types.h
deleted file mode 100644
index c3e6d2a1d1c3..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_types.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H
-#define _ASM_POWERPC_ISERIES_HV_TYPES_H
-
-/*
- * General typedefs for the hypervisor.
- */
-
-#include <asm/types.h>
-
-typedef u8 HvLpIndex;
-typedef u16 HvLpInstanceId;
-typedef u64 HvLpTOD;
-typedef u64 HvLpSystemSerialNum;
-typedef u8 HvLpDeviceSerialNum[12];
-typedef u16 HvLpSanHwSet;
-typedef u16 HvLpBus;
-typedef u16 HvLpBoard;
-typedef u16 HvLpCard;
-typedef u8 HvLpDeviceType[4];
-typedef u8 HvLpDeviceModel[3];
-typedef u64 HvIoToken;
-typedef u8 HvLpName[8];
-typedef u32 HvIoId;
-typedef u64 HvRealMemoryIndex;
-typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!! */
-typedef u16 HvLpVrmIndex;
-typedef u32 HvXmGenerationId;
-typedef u8 HvLpBusPool;
-typedef u8 HvLpSharedPoolIndex;
-typedef u16 HvLpSharedProcUnitsX100;
-typedef u8 HvLpVirtualLanIndex;
-typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! */
-typedef u16 HvBusNumber; /* Hypervisor Bus Number */
-typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */
-typedef u8 HvAgentId; /* Hypervisor DevFn */
-
-
-#define HVMAXARCHITECTEDLPS 32
-#define HVMAXARCHITECTEDVIRTUALLANS 16
-#define HVMAXARCHITECTEDVIRTUALDISKS 32
-#define HVMAXARCHITECTEDVIRTUALCDROMS 8
-#define HVMAXARCHITECTEDVIRTUALTAPES 8
-#define HVCHUNKSIZE (256 * 1024)
-#define HVPAGESIZE (4 * 1024)
-#define HVLPMINMEGSPRIMARY 256
-#define HVLPMINMEGSSECONDARY 64
-#define HVCHUNKSPERMEG 4
-#define HVPAGESPERMEG 256
-#define HVPAGESPERCHUNK 64
-
-#define HvLpIndexInvalid ((HvLpIndex)0xff)
-
-/*
- * Enums for the sub-components under PLIC
- * Used in HvCall and HvPrimaryCall
- */
-enum {
- HvCallCompId = 0,
- HvCallCpuCtlsCompId = 1,
- HvCallCfgCompId = 2,
- HvCallEventCompId = 3,
- HvCallHptCompId = 4,
- HvCallPciCompId = 5,
- HvCallSlmCompId = 6,
- HvCallSmCompId = 7,
- HvCallSpdCompId = 8,
- HvCallXmCompId = 9,
- HvCallRioCompId = 10,
- HvCallRsvd3CompId = 11,
- HvCallRsvd2CompId = 12,
- HvCallRsvd1CompId = 13,
- HvCallMaxCompId = 14,
- HvPrimaryCallCompId = 0,
- HvPrimaryCallCfgCompId = 1,
- HvPrimaryCallPciCompId = 2,
- HvPrimaryCallSmCompId = 3,
- HvPrimaryCallSpdCompId = 4,
- HvPrimaryCallXmCompId = 5,
- HvPrimaryCallRioCompId = 6,
- HvPrimaryCallRsvd7CompId = 7,
- HvPrimaryCallRsvd6CompId = 8,
- HvPrimaryCallRsvd5CompId = 9,
- HvPrimaryCallRsvd4CompId = 10,
- HvPrimaryCallRsvd3CompId = 11,
- HvPrimaryCallRsvd2CompId = 12,
- HvPrimaryCallRsvd1CompId = 13,
- HvPrimaryCallMaxCompId = HvCallMaxCompId
-};
-
-struct HvLpBufferList {
- u64 addr;
- u64 len;
-};
-
-#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */
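
The size constants in the deleted hv_types.h are mutually consistent: a 256 KiB chunk holds 64 pages of 4 KiB, and a megabyte is 4 chunks or 256 pages. A quick compile-time check (illustrative only, not part of the header):

#define HVCHUNKSIZE     (256 * 1024)
#define HVPAGESIZE      (4 * 1024)
#define HVPAGESPERCHUNK 64
#define HVCHUNKSPERMEG  4
#define HVPAGESPERMEG   256

_Static_assert(HVCHUNKSIZE / HVPAGESIZE == HVPAGESPERCHUNK, "pages per chunk");
_Static_assert((1024 * 1024) / HVCHUNKSIZE == HVCHUNKSPERMEG, "chunks per meg");
_Static_assert(HVCHUNKSPERMEG * HVPAGESPERCHUNK == HVPAGESPERMEG, "pages per meg");

int main(void) { return 0; }
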
diff --git a/arch/powerpc/include/asm/iseries/iommu.h b/arch/powerpc/include/asm/iseries/iommu.h
deleted file mode 100644
index 1b9692c60899..000000000000
--- a/arch/powerpc/include/asm/iseries/iommu.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef _ASM_POWERPC_ISERIES_IOMMU_H
-#define _ASM_POWERPC_ISERIES_IOMMU_H
-
-/*
- * Copyright (C) 2005 Stephen Rothwell, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the:
- * Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
- */
-
-struct pci_dev;
-struct vio_dev;
-struct device_node;
-struct iommu_table;
-
-/* Get table parameters from HV */
-extern void iommu_table_getparms_iSeries(unsigned long busno,
- unsigned char slotno, unsigned char virtbus,
- struct iommu_table *tbl);
-
-extern struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev);
-extern void iommu_vio_init(void);
-
-#endif /* _ASM_POWERPC_ISERIES_IOMMU_H */
diff --git a/arch/powerpc/include/asm/iseries/it_lp_queue.h b/arch/powerpc/include/asm/iseries/it_lp_queue.h
deleted file mode 100644
index 428278838821..000000000000
--- a/arch/powerpc/include/asm/iseries/it_lp_queue.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
-#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
-
-/*
- * This control block defines the simple LP queue structure that is
- * shared between the hypervisor (PLIC) and the OS in order to send
- * events to an LP.
- */
-
-#include <asm/types.h>
-#include <asm/ptrace.h>
-
-#define IT_LP_MAX_QUEUES 8
-
-#define IT_LP_NOT_USED 0 /* Queue will not be used by PLIC */
-#define IT_LP_DEDICATED_IO 1 /* Queue dedicated to IO processor specified */
-#define IT_LP_DEDICATED_LP 2 /* Queue dedicated to LP specified */
-#define IT_LP_SHARED 3 /* Queue shared for both IO and LP */
-
-#define IT_LP_EVENT_STACK_SIZE 4096
-#define IT_LP_EVENT_MAX_SIZE 256
-#define IT_LP_EVENT_ALIGN 64
-
-struct hvlpevent_queue {
-/*
- * The hq_current_event is the pointer to the next event stack entry
- * that will become valid. The OS must peek at this entry to determine
- * if it is valid. PLIC will set the valid indicator as the very last
- * store into that entry.
- *
- * When the OS has completed processing of the event then it will mark
- * the event as invalid so that PLIC knows it can store into that event
- * location again.
- *
- * If the event stack fills and there are overflow events, then PLIC
- * will set the hq_overflow_pending flag in which case the OS will
- * have to fetch the additional LP events once they have drained the
- * event stack.
- *
- * The first 16-bytes are known by both the OS and PLIC. The remainder
- * of the cache line is for use by the OS.
- */
- u8 hq_overflow_pending; /* 0x00 Overflow events are pending */
- u8 hq_status; /* 0x01 DedicatedIo or DedicatedLp or NotUsed */
- u16 hq_proc_index; /* 0x02 Logical Proc Index for correlation */
- u8 hq_reserved1[12]; /* 0x04 */
- char *hq_current_event; /* 0x10 */
- char *hq_last_event; /* 0x18 */
- char *hq_event_stack; /* 0x20 */
- u8 hq_index; /* 0x28 unique sequential index. */
- u8 hq_reserved2[3]; /* 0x29-2b */
- spinlock_t hq_lock;
-};
-
-extern struct hvlpevent_queue hvlpevent_queue;
-
-extern int hvlpevent_is_pending(void);
-extern void process_hvlpevents(void);
-extern void setup_hvlpevent_queue(void);
-
-#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */
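
The hvlpevent_queue comment above describes a simple producer/consumer protocol: PLIC writes an event and sets the valid flag last, then the OS processes the slot and clears the flag to hand it back. A simplified, standalone consumer loop follows; the types and helpers are stand-ins for struct HvLpEvent and the flag helpers from hv_lp_event.h, and the real code also advances by the event's encoded size and wraps around the event stack.

#include <stdint.h>

#define HV_LP_EVENT_VALID 0x80

struct fake_lp_event {
	uint8_t flags;
	uint8_t type;
	/* ... rest of the event ... */
};

static void handle_one(struct fake_lp_event *ev)
{
	(void)ev;   /* dispatch on ev->type in real code */
}

static void drain_queue(struct fake_lp_event *current_event)
{
	while (current_event->flags & HV_LP_EVENT_VALID) {
		handle_one(current_event);
		current_event->flags &= ~HV_LP_EVENT_VALID;   /* give the slot back to PLIC */
		current_event++;
	}
}

int main(void)
{
	struct fake_lp_event stack[4] = { { .flags = 0 } };

	drain_queue(stack);
	return 0;
}
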
diff --git a/arch/powerpc/include/asm/iseries/lpar_map.h b/arch/powerpc/include/asm/iseries/lpar_map.h
deleted file mode 100644
index 5e9f3e128ee2..000000000000
--- a/arch/powerpc/include/asm/iseries/lpar_map.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
-#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
-
-#ifndef __ASSEMBLY__
-
-#include <asm/types.h>
-
-#endif
-
-/*
- * The iSeries hypervisor will set up mapping for one or more
- * ESID/VSID pairs (in SLB/segment registers) and will set up
- * mappings of one or more ranges of pages to VAs.
- * We will have the hypervisor set up the ESID->VSID mapping
- * for the four kernel segments (C-F). With shared processors,
- * the hypervisor will clear all segment registers and reload
- * these four whenever the processor is switched from one
- * partition to another.
- */
-
-/* The Vsid and Esid identified below will be used by the hypervisor
- * to set up a memory mapping for part of the load area before giving
- * control to the Linux kernel. The load area is 64 MB, but this must
- * not attempt to map the whole load area. The Hashed Page Table may
- * need to be located within the load area (if the total partition size
- * is 64 MB), but cannot be mapped. Typically, this should specify
- * to map half (32 MB) of the load area.
- *
- * The hypervisor will set up page table entries for the number of
- * pages specified.
- *
- * In 32-bit mode, the hypervisor will load all four of the
- * segment registers (identified by the low-order four bits of the
- * Esid field). In 64-bit mode, the hypervisor will load one SLB
- * entry to map the Esid to the Vsid.
-*/
-
-#define HvEsidsToMap 2
-#define HvRangesToMap 1
-
-/* Hypervisor initially maps 32MB of the load area */
-#define HvPagesToMap 8192
-
-#ifndef __ASSEMBLY__
-struct LparMap {
- u64 xNumberEsids; // Number of ESID/VSID pairs
- u64 xNumberRanges; // Number of VA ranges to map
- u64 xSegmentTableOffs; // Page number within load area of seg table
- u64 xRsvd[5];
- struct {
- u64 xKernelEsid; // Esid used to map kernel load
- u64 xKernelVsid; // Vsid used to map kernel load
- } xEsids[HvEsidsToMap];
- struct {
- u64 xPages; // Number of pages to be mapped
- u64 xOffset; // Offset from start of load area
- u64 xVPN; // Virtual Page Number
- } xRanges[HvRangesToMap];
-};
-
-extern const struct LparMap xLparMap;
-
-#endif /* __ASSEMBLY__ */
-
-/* the fixed address where the LparMap exists */
-#define LPARMAP_PHYS 0x7000
-
-#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
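
The "maps 32MB" comment above is just HvPagesToMap worth of 4 KiB hypervisor pages. A one-line arithmetic check (illustrative only):

int main(void)
{
	_Static_assert(8192ULL * 4096 == 32ULL << 20, "HvPagesToMap * 4 KiB == 32 MiB");
	return 0;
}
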
diff --git a/arch/powerpc/include/asm/iseries/mf.h b/arch/powerpc/include/asm/iseries/mf.h
deleted file mode 100644
index eb851a9c9e5c..000000000000
--- a/arch/powerpc/include/asm/iseries/mf.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
- * Copyright (C) 2004 Stephen Rothwell IBM Corporation
- *
- * This module exists as an interface between a Linux secondary partition
- * running on an iSeries and the primary partition's Virtual Service
- * Processor (VSP) object. The VSP has final authority over powering on/off
- * all partitions in the iSeries. It also provides miscellaneous low-level
- * machine facility type operations.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ASM_POWERPC_ISERIES_MF_H
-#define _ASM_POWERPC_ISERIES_MF_H
-
-#include <linux/types.h>
-
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_call_event.h>
-
-struct rtc_time;
-
-typedef void (*MFCompleteHandler)(void *clientToken, int returnCode);
-
-extern void mf_allocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
- unsigned size, unsigned amount, MFCompleteHandler hdlr,
- void *userToken);
-extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
- unsigned count, MFCompleteHandler hdlr, void *userToken);
-
-extern void mf_power_off(void);
-extern void mf_reboot(char *cmd);
-
-extern void mf_display_src(u32 word);
-extern void mf_display_progress(u16 value);
-
-extern void mf_init(void);
-
-#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/arch/powerpc/include/asm/iseries/vio.h b/arch/powerpc/include/asm/iseries/vio.h
deleted file mode 100644
index f9ac0d00b951..000000000000
--- a/arch/powerpc/include/asm/iseries/vio.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* -*- linux-c -*-
- *
- * iSeries Virtual I/O Message Path header
- *
- * Authors: Dave Boutcher <boutcher@us.ibm.com>
- * Ryan Arnold <ryanarn@us.ibm.com>
- * Colin Devilbiss <devilbis@us.ibm.com>
- *
- * (C) Copyright 2000 IBM Corporation
- *
- * This header file is used by the iSeries virtual I/O device
- * drivers. It defines the interfaces to the common functions
- * (implemented in drivers/char/viopath.h) as well as defining
- * common functions and structures. Currently (at the time I
- * wrote this comment) the iSeries virtual I/O device drivers
- * that use this are
- * drivers/block/viodasd.c
- * drivers/char/viocons.c
- * drivers/char/viotape.c
- * drivers/cdrom/viocd.c
- *
- * The iSeries virtual ethernet support (veth.c) uses a whole
- * different set of functions.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#ifndef _ASM_POWERPC_ISERIES_VIO_H
-#define _ASM_POWERPC_ISERIES_VIO_H
-
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-
-/*
- * iSeries virtual I/O events use the subtype field in
- * HvLpEvent to figure out what kind of vio event is coming
- * in. We use a table to route these, and this defines
- * the maximum number of distinct subtypes
- */
-#define VIO_MAX_SUBTYPES 8
-
-#define VIOMAXBLOCKDMA 12
-
-struct open_data {
- u64 disk_size;
- u16 max_disk;
- u16 cylinders;
- u16 tracks;
- u16 sectors;
- u16 bytes_per_sector;
-};
-
-struct rw_data {
- u64 offset;
- struct {
- u32 token;
- u32 reserved;
- u64 len;
- } dma_info[VIOMAXBLOCKDMA];
-};
-
-struct vioblocklpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 sub_result;
- u16 disk;
- u16 flags;
- union {
- struct open_data open_data;
- struct rw_data rw_data;
- u64 changed;
- } u;
-};
-
-#define vioblockflags_ro 0x0001
-
-enum vioblocksubtype {
- vioblockopen = 0x0001,
- vioblockclose = 0x0002,
- vioblockread = 0x0003,
- vioblockwrite = 0x0004,
- vioblockflush = 0x0005,
- vioblockcheck = 0x0007
-};
-
-struct viocdlpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 sub_result;
- u16 disk;
- u16 flags;
- u32 token;
- u64 offset; /* On open, max number of disks */
- u64 len; /* On open, size of the disk */
- u32 block_size; /* Only set on open */
- u32 media_size; /* Only set on open */
-};
-
-enum viocdsubtype {
- viocdopen = 0x0001,
- viocdclose = 0x0002,
- viocdread = 0x0003,
- viocdwrite = 0x0004,
- viocdlockdoor = 0x0005,
- viocdgetinfo = 0x0006,
- viocdcheck = 0x0007
-};
-
-struct viotapelpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 sub_type_result;
- u16 tape;
- u16 flags;
- u32 token;
- u64 len;
- union {
- struct {
- u32 tape_op;
- u32 count;
- } op;
- struct {
- u32 type;
- u32 resid;
- u32 dsreg;
- u32 gstat;
- u32 erreg;
- u32 file_no;
- u32 block_no;
- } get_status;
- struct {
- u32 block_no;
- } get_pos;
- } u;
-};
-
-enum viotapesubtype {
- viotapeopen = 0x0001,
- viotapeclose = 0x0002,
- viotaperead = 0x0003,
- viotapewrite = 0x0004,
- viotapegetinfo = 0x0005,
- viotapeop = 0x0006,
- viotapegetpos = 0x0007,
- viotapesetpos = 0x0008,
- viotapegetstatus = 0x0009
-};
-
-/*
- * Each subtype can register a handler to process their events.
- * The handler must have this interface.
- */
-typedef void (vio_event_handler_t) (struct HvLpEvent * event);
-
-extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq);
-extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq);
-extern int vio_setHandler(int subtype, vio_event_handler_t * beh);
-extern int vio_clearHandler(int subtype);
-extern int viopath_isactive(HvLpIndex lp);
-extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp);
-extern HvLpInstanceId viopath_targetinst(HvLpIndex lp);
-extern void vio_set_hostlp(void);
-extern void *vio_get_event_buffer(int subtype);
-extern void vio_free_event_buffer(int subtype, void *buffer);
-
-extern struct vio_dev *vio_create_viodasd(u32 unit);
-
-extern HvLpIndex viopath_hostLp;
-extern HvLpIndex viopath_ourLp;
-
-#define VIOCHAR_MAX_DATA 200
-
-#define VIOMAJOR_SUBTYPE_MASK 0xff00
-#define VIOMINOR_SUBTYPE_MASK 0x00ff
-#define VIOMAJOR_SUBTYPE_SHIFT 8
-
-#define VIOVERSION 0x0101
-
-/*
- * This is the general structure for VIO errors; each module should have
- * a table of them, and each table should be terminated by an entry of
- * { 0, 0, NULL }. Then, to find a specific error message, a module
- * should pass its local table and the return code.
- */
-struct vio_error_entry {
- u16 rc;
- int errno;
- const char *msg;
-};
-extern const struct vio_error_entry *vio_lookup_rc(
- const struct vio_error_entry *local_table, u16 rc);
-
-enum viosubtypes {
- viomajorsubtype_monitor = 0x0100,
- viomajorsubtype_blockio = 0x0200,
- viomajorsubtype_chario = 0x0300,
- viomajorsubtype_config = 0x0400,
- viomajorsubtype_cdio = 0x0500,
- viomajorsubtype_tape = 0x0600,
- viomajorsubtype_scsi = 0x0700
-};
-
-enum vioconfigsubtype {
- vioconfigget = 0x0001,
-};
-
-enum viorc {
- viorc_good = 0x0000,
- viorc_noConnection = 0x0001,
- viorc_noReceiver = 0x0002,
- viorc_noBufferAvailable = 0x0003,
- viorc_invalidMessageType = 0x0004,
- viorc_invalidRange = 0x0201,
- viorc_invalidToken = 0x0202,
- viorc_DMAError = 0x0203,
- viorc_useError = 0x0204,
- viorc_releaseError = 0x0205,
- viorc_invalidDisk = 0x0206,
- viorc_openRejected = 0x0301
-};
-
-/*
- * The structure of the events that flow between us and OS/400 for chario
- * events. You can't mess with this unless the OS/400 side changes too.
- */
-struct viocharlpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 subtype_result_code;
- u8 virtual_device;
- u8 len;
- u8 data[VIOCHAR_MAX_DATA];
-};
-
-#define VIOCHAR_WINDOW 10
-
-enum viocharsubtype {
- viocharopen = 0x0001,
- viocharclose = 0x0002,
- viochardata = 0x0003,
- viocharack = 0x0004,
- viocharconfig = 0x0005
-};
-
-enum viochar_rc {
- viochar_rc_ebusy = 1
-};
-
-#endif /* _ASM_POWERPC_ISERIES_VIO_H */
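
The deleted vio.h splits every 16-bit VIO subtype into a major part (the device class, mask 0xff00) and a minor part (the operation, mask 0x00ff). A small sketch of that split; the sample value combines viomajorsubtype_chario with viochardata purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define VIOMAJOR_SUBTYPE_MASK  0xff00
#define VIOMINOR_SUBTYPE_MASK  0x00ff
#define VIOMAJOR_SUBTYPE_SHIFT 8

int main(void)
{
	uint16_t subtype = 0x0303;   /* viomajorsubtype_chario | viochardata */

	printf("major %#x minor %#x\n",
	       (subtype & VIOMAJOR_SUBTYPE_MASK) >> VIOMAJOR_SUBTYPE_SHIFT,
	       subtype & VIOMINOR_SUBTYPE_MASK);
	return 0;
}
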
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 938986e412f1..ae098c438f00 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -17,7 +17,7 @@
#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
#define JUMP_LABEL_NOP_SIZE 4
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\n\t"
"nop\n\t"
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h
index fc195d0b3c34..2156315d8a90 100644
--- a/arch/powerpc/include/asm/keylargo.h
+++ b/arch/powerpc/include/asm/keylargo.h
@@ -21,7 +21,7 @@
#define KEYLARGO_FCR4 0x48
#define KEYLARGO_FCR5 0x4c /* Pangea only */
-/* K2 aditional FCRs */
+/* K2 additional FCRs */
#define K2_FCR6 0x34
#define K2_FCR7 0x30
#define K2_FCR8 0x2c
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index f7727d91ac6b..b921c3f48928 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -265,12 +265,9 @@ struct kvm_debug_exit_arch {
struct kvm_guest_debug_arch {
};
-#define KVM_REG_MASK 0x001f
-#define KVM_REG_EXT_MASK 0xffe0
-#define KVM_REG_GPR 0x0000
-#define KVM_REG_FPR 0x0020
-#define KVM_REG_QPR 0x0040
-#define KVM_REG_FQPR 0x0060
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
#define KVM_INTERRUPT_SET -1U
#define KVM_INTERRUPT_UNSET -2U
@@ -292,4 +289,41 @@ struct kvm_allocate_rma {
__u64 rma_size;
};
+struct kvm_book3e_206_tlb_entry {
+ __u32 mas8;
+ __u32 mas1;
+ __u64 mas2;
+ __u64 mas7_3;
+};
+
+struct kvm_book3e_206_tlb_params {
+ /*
+ * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
+ *
+ * - The number of ways of TLB0 must be a power of two between 2 and
+ * 16.
+ * - TLB1 must be fully associative.
+ * - The size of TLB0 must be a multiple of the number of ways, and
+ * the number of sets must be a power of two.
+ * - The size of TLB1 may not exceed 64 entries.
+ * - TLB0 supports 4 KiB pages.
+ * - The page sizes supported by TLB1 are as indicated by
+ * TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1)
+ * as returned by KVM_GET_SREGS.
+ * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[]
+ * and tlb_ways[] must be zero.
+ *
+ * tlb_ways[n] = tlb_sizes[n] means the array is fully associative.
+ *
+ * KVM will adjust TLBnCFG based on the sizes configured here,
+ * though arrays greater than 2048 entries will have TLBnCFG[NENTRY]
+ * set to zero.
+ */
+ __u32 tlb_sizes[4];
+ __u32 tlb_ways[4];
+ __u32 reserved[8];
+};
+
+#define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
+
#endif /* __LINUX_KVM_POWERPC_H */
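
The new kvm_book3e_206_tlb_params comment lays out several geometry constraints that userspace must respect. Below is a hedged sketch of how a caller might validate a (simplified) parameter block before handing it to KVM; this validation helper is hypothetical, not something the header provides.

#include <stdbool.h>
#include <stdint.h>

struct tlb_params { uint32_t sizes[4], ways[4]; };   /* simplified: reserved[] omitted */

static bool is_pow2(uint32_t x) { return x && !(x & (x - 1)); }

static bool tlb_params_ok(const struct tlb_params *p)
{
	if (!is_pow2(p->ways[0]) || p->ways[0] < 2 || p->ways[0] > 16)
		return false;                           /* TLB0 ways: power of two, 2..16 */
	if (p->sizes[0] % p->ways[0] || !is_pow2(p->sizes[0] / p->ways[0]))
		return false;                           /* TLB0 sets: power of two */
	if (p->ways[1] != p->sizes[1] || p->sizes[1] > 64)
		return false;                           /* TLB1 fully associative, <= 64 */
	return !p->sizes[2] && !p->ways[2] && !p->sizes[3] && !p->ways[3];
}

int main(void)
{
	struct tlb_params p = { .sizes = { 512, 64 }, .ways = { 4, 64 } };

	return tlb_params_ok(&p) ? 0 : 1;
}
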
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 69c7377d2071..aa795ccef294 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,6 +90,8 @@ struct kvmppc_vcpu_book3s {
#endif
int context_id[SID_CONTEXTS];
+ bool hior_explicit; /* HIOR is set by ioctl, not PVR */
+
struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
@@ -119,6 +121,11 @@ extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
+ struct kvm_vcpu *vcpu, unsigned long addr,
+ unsigned long status);
+extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
+ unsigned long slb_v, unsigned long valid);
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
@@ -138,6 +145,21 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+ unsigned long *rmap, long pte_index, int realmode);
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+ unsigned long pte_index);
+void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+ unsigned long pte_index);
+extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
+ unsigned long *nb_ret);
+extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
+extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
@@ -183,7 +205,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
if ( num < 14 ) {
- to_svcpu(vcpu)->gpr[num] = val;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ svcpu->gpr[num] = val;
+ svcpu_put(svcpu);
to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
} else
vcpu->arch.gpr[num] = val;
@@ -191,80 +215,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
- if ( num < 14 )
- return to_svcpu(vcpu)->gpr[num];
- else
+ if ( num < 14 ) {
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong r = svcpu->gpr[num];
+ svcpu_put(svcpu);
+ return r;
+ } else
return vcpu->arch.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
- to_svcpu(vcpu)->cr = val;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ svcpu->cr = val;
+ svcpu_put(svcpu);
to_book3s(vcpu)->shadow_vcpu->cr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->cr;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ u32 r;
+ r = svcpu->cr;
+ svcpu_put(svcpu);
+ return r;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
{
- to_svcpu(vcpu)->xer = val;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ svcpu->xer = val;
to_book3s(vcpu)->shadow_vcpu->xer = val;
+ svcpu_put(svcpu);
}
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->xer;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ u32 r;
+ r = svcpu->xer;
+ svcpu_put(svcpu);
+ return r;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
- to_svcpu(vcpu)->ctr = val;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ svcpu->ctr = val;
+ svcpu_put(svcpu);
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->ctr;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong r;
+ r = svcpu->ctr;
+ svcpu_put(svcpu);
+ return r;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
- to_svcpu(vcpu)->lr = val;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ svcpu->lr = val;
+ svcpu_put(svcpu);
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->lr;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong r;
+ r = svcpu->lr;
+ svcpu_put(svcpu);
+ return r;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
- to_svcpu(vcpu)->pc = val;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ svcpu->pc = val;
+ svcpu_put(svcpu);
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->pc;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong r;
+ r = svcpu->pc;
+ svcpu_put(svcpu);
+ return r;
}
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
ulong pc = kvmppc_get_pc(vcpu);
- struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ u32 r;
/* Load the instruction manually if it failed to do so in the
* exit path */
if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
- return svcpu->last_inst;
+ r = svcpu->last_inst;
+ svcpu_put(svcpu);
+ return r;
}
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
- return to_svcpu(vcpu)->fault_dar;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong r;
+ r = svcpu->fault_dar;
+ svcpu_put(svcpu);
+ return r;
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index de604db135f5..38040ff82063 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -20,11 +20,15 @@
#ifndef __ASM_KVM_BOOK3S_32_H__
#define __ASM_KVM_BOOK3S_32_H__
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
return to_book3s(vcpu)->shadow_vcpu;
}
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+}
+
#define PTE_SIZE 12
#define VSID_ALL 0
#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d0ac94f98f9e..b0c08b142770 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -21,14 +21,56 @@
#define __ASM_KVM_BOOK3S_64_H__
#ifdef CONFIG_KVM_BOOK3S_PR
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
+static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
+ preempt_disable();
return &get_paca()->shadow_vcpu;
}
+
+static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
+{
+ preempt_enable();
+}
#endif
#define SPAPR_TCE_SHIFT 12
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER 24
+#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
+#define HPT_NPTE (HPT_NPTEG << 3) /* 8 PTEs per PTEG */
+#define HPT_HASH_MASK (HPT_NPTEG - 1)
+#endif
+
+#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
+
+/*
+ * We use a lock bit in HPTE dword 0 to synchronize updates and
+ * accesses to each HPTE, and another bit to indicate non-present
+ * HPTEs.
+ */
+#define HPTE_V_HVLOCK 0x40UL
+#define HPTE_V_ABSENT 0x20UL
+
+static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
+{
+ unsigned long tmp, old;
+
+ asm volatile(" ldarx %0,0,%2\n"
+ " and. %1,%0,%3\n"
+ " bne 2f\n"
+ " ori %0,%0,%4\n"
+ " stdcx. %0,0,%2\n"
+ " beq+ 2f\n"
+ " li %1,%3\n"
+ "2: isync"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+ : "cc", "memory");
+ return old == 0;
+}
+
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
unsigned long pte_index)
{
@@ -62,4 +104,140 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
return rb;
}
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+ /* only handle 4k, 64k and 16M pages for now */
+ if (!(h & HPTE_V_LARGE))
+ return 1ul << 12; /* 4k page */
+ if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
+ return 1ul << 16; /* 64k page */
+ if ((l & 0xff000) == 0)
+ return 1ul << 24; /* 16M page */
+ return 0; /* error */
+}
+
+static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
+{
+ return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
+}
+
+static inline int hpte_is_writable(unsigned long ptel)
+{
+ unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);
+
+ return pp != PP_RXRX && pp != PP_RXXX;
+}
+
+static inline unsigned long hpte_make_readonly(unsigned long ptel)
+{
+ if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
+ ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
+ else
+ ptel |= PP_RXRX;
+ return ptel;
+}
+
+static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
+{
+ unsigned int wimg = ptel & HPTE_R_WIMG;
+
+ /* Handle SAO */
+ if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
+ cpu_has_feature(CPU_FTR_ARCH_206))
+ wimg = HPTE_R_M;
+
+ if (!io_type)
+ return wimg == HPTE_R_M;
+
+ return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
+}
+
+/*
+ * Lock and read a linux PTE. If it's present and writable, atomically
+ * set dirty and referenced bits and return the PTE, otherwise return 0.
+ */
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
+{
+ pte_t pte, tmp;
+
+ /* wait until _PAGE_BUSY is clear then set it atomically */
+ __asm__ __volatile__ (
+ "1: ldarx %0,0,%3\n"
+ " andi. %1,%0,%4\n"
+ " bne- 1b\n"
+ " ori %1,%0,%4\n"
+ " stdcx. %1,0,%3\n"
+ " bne- 1b"
+ : "=&r" (pte), "=&r" (tmp), "=m" (*p)
+ : "r" (p), "i" (_PAGE_BUSY)
+ : "cc");
+
+ if (pte_present(pte)) {
+ pte = pte_mkyoung(pte);
+ if (writing && pte_write(pte))
+ pte = pte_mkdirty(pte);
+ }
+
+ *p = pte; /* clears _PAGE_BUSY */
+
+ return pte;
+}
+
+/* Return HPTE cache control bits corresponding to Linux pte bits */
+static inline unsigned long hpte_cache_bits(unsigned long pte_val)
+{
+#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
+ return pte_val & (HPTE_R_W | HPTE_R_I);
+#else
+ return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
+ ((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
+#endif
+}
+
+static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
+{
+ if (key)
+ return PP_RWRX <= pp && pp <= PP_RXRX;
+ return 1;
+}
+
+static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
+{
+ if (key)
+ return pp == PP_RWRW;
+ return pp <= PP_RWRW;
+}
+
+static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
+{
+ unsigned long skey;
+
+ skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
+ ((hpte_r & HPTE_R_KEY_LO) >> 9);
+ return (amr >> (62 - 2 * skey)) & 3;
+}
+
+static inline void lock_rmap(unsigned long *rmap)
+{
+ do {
+ while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
+ cpu_relax();
+ } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
+}
+
+static inline void unlock_rmap(unsigned long *rmap)
+{
+ __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
+}
+
+static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
+ unsigned long pagesize)
+{
+ unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
+
+ if (pagesize <= PAGE_SIZE)
+ return 1;
+ return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
+}
+
#endif /* __ASM_KVM_BOOK3S_64_H__ */
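The lock_rmap()/unlock_rmap() helpers above implement a bit-spinlock on KVMPPC_RMAP_LOCK_BIT of a memslot rmap entry (the bit definitions appear in the kvm_host.h hunk further down). A minimal sketch of the intended call pattern, assuming kernel context; example_link_hpte() and the update it performs are purely illustrative, not part of the patch:

static void example_link_hpte(unsigned long *rmap, unsigned long hpte_index)
{
	lock_rmap(rmap);		/* spin until KVMPPC_RMAP_LOCK_BIT is acquired */
	if (!(*rmap & KVMPPC_RMAP_PRESENT))	/* no HPTE recorded for this page yet */
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | hpte_index | KVMPPC_RMAP_PRESENT;
	unlock_rmap(rmap);		/* clear-bit with release semantics */
}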
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index adbfca9dd100..8cd50a514271 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -22,46 +22,55 @@
#define E500_PID_NUM 3
#define E500_TLB_NUM 2
-struct tlbe{
- u32 mas1;
- u32 mas2;
- u32 mas3;
- u32 mas7;
-};
-
#define E500_TLB_VALID 1
#define E500_TLB_DIRTY 2
-struct tlbe_priv {
+struct tlbe_ref {
pfn_t pfn;
unsigned int flags; /* E500_TLB_* */
};
+struct tlbe_priv {
+ struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+};
+
struct vcpu_id_table;
+struct kvmppc_e500_tlb_params {
+ int entries, ways, sets;
+};
+
struct kvmppc_vcpu_e500 {
- /* Unmodified copy of the guest's TLB. */
- struct tlbe *gtlb_arch[E500_TLB_NUM];
+ /* Unmodified copy of the guest's TLB -- shared with host userspace. */
+ struct kvm_book3e_206_tlb_entry *gtlb_arch;
+
+ /* Starting entry number in gtlb_arch[] */
+ int gtlb_offset[E500_TLB_NUM];
/* KVM internal information associated with each guest TLB entry */
struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
- unsigned int gtlb_size[E500_TLB_NUM];
+ struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
+
unsigned int gtlb_nv[E500_TLB_NUM];
+ /*
+ * information associated with each host TLB entry --
+ * TLB1 only for now. If/when guest TLB1 entries can be
+ * mapped with host TLB0, this will be used for that too.
+ *
+ * We don't want to use this for guest TLB0 because then we'd
+ * have the overhead of doing the translation again even if
+ * the entry is still in the guest TLB (e.g. we swapped out
+ * and back, and our host TLB entries got evicted).
+ */
+ struct tlbe_ref *tlb_refs[E500_TLB_NUM];
+ unsigned int host_tlb1_nv;
+
u32 host_pid[E500_PID_NUM];
u32 pid[E500_PID_NUM];
u32 svr;
- u32 mas0;
- u32 mas1;
- u32 mas2;
- u32 mas3;
- u32 mas4;
- u32 mas5;
- u32 mas6;
- u32 mas7;
-
/* vcpu id table */
struct vcpu_id_table *idt;
@@ -73,6 +82,9 @@ struct kvmppc_vcpu_e500 {
u32 tlb1cfg;
u64 mcar;
+ struct page **shared_tlb_pages;
+ int num_shared_tlb_pages;
+
struct kvm_vcpu vcpu;
};
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bf8af5d5d5dc..52eb9c1f4fe0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -32,17 +32,32 @@
#include <linux/atomic.h>
#include <asm/kvm_asm.h>
#include <asm/processor.h>
+#include <asm/page.h>
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+#include <linux/mmu_notifier.h>
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+
+struct kvm;
+extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+#endif
+
/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
#define KVM_NR_PAGE_SIZES 1
@@ -158,34 +173,72 @@ struct kvmppc_spapr_tce_table {
struct page *pages[0];
};
-struct kvmppc_rma_info {
+struct kvmppc_linear_info {
void *base_virt;
unsigned long base_pfn;
unsigned long npages;
struct list_head list;
- atomic_t use_count;
+ atomic_t use_count;
+ int type;
+};
+
+/*
+ * The reverse mapping array has one entry for each HPTE,
+ * which stores the guest's view of the second word of the HPTE
+ * (including the guest physical address of the mapping),
+ * plus forward and backward pointers in a doubly-linked ring
+ * of HPTEs that map the same host page. The pointers in this
+ * ring are 32-bit HPTE indexes, to save space.
+ */
+struct revmap_entry {
+ unsigned long guest_rpte;
+ unsigned int forw, back;
+};
+
+/*
+ * We use the top bit of each memslot->rmap entry as a lock bit,
+ * and bit 32 as a present flag. The bottom 32 bits are the
+ * index in the guest HPT of a HPTE that points to the page.
+ */
+#define KVMPPC_RMAP_LOCK_BIT 63
+#define KVMPPC_RMAP_RC_SHIFT 32
+#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_PRESENT 0x100000000ul
+#define KVMPPC_RMAP_INDEX 0xfffffffful
+
+/* Low-order bits in kvm->arch.slot_phys[][] */
+#define KVMPPC_PAGE_ORDER_MASK 0x1f
+#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
+#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
+#define KVMPPC_GOT_PAGE 0x80
+
+struct kvm_arch_memory_slot {
};
struct kvm_arch {
#ifdef CONFIG_KVM_BOOK3S_64_HV
unsigned long hpt_virt;
- unsigned long ram_npages;
- unsigned long ram_psize;
- unsigned long ram_porder;
- struct kvmppc_pginfo *ram_pginfo;
+ struct revmap_entry *revmap;
unsigned int lpid;
unsigned int host_lpid;
unsigned long host_lpcr;
unsigned long sdr1;
unsigned long host_sdr1;
int tlbie_lock;
- int n_rma_pages;
unsigned long lpcr;
unsigned long rmor;
- struct kvmppc_rma_info *rma;
+ struct kvmppc_linear_info *rma;
+ unsigned long vrma_slb_v;
+ int rma_setup_done;
+ int using_mmu_notifiers;
struct list_head spapr_tce_tables;
+ spinlock_t slot_phys_lock;
+ unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
+ int slot_npages[KVM_MEM_SLOTS_NUM];
unsigned short last_vcpu[NR_CPUS];
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+ struct kvmppc_linear_info *hpt_li;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
};
@@ -318,10 +371,6 @@ struct kvm_vcpu_arch {
u32 vrsave; /* also USPRG0 */
u32 mmucr;
ulong shadow_msr;
- ulong sprg4;
- ulong sprg5;
- ulong sprg6;
- ulong sprg7;
ulong csrr0;
ulong csrr1;
ulong dsrr0;
@@ -329,16 +378,14 @@ struct kvm_vcpu_arch {
ulong mcsrr0;
ulong mcsrr1;
ulong mcsr;
- ulong esr;
u32 dec;
u32 decar;
u32 tbl;
u32 tbu;
u32 tcr;
- u32 tsr;
+ ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
u32 ivor[64];
ulong ivpr;
- u32 pir;
u32 pvr;
u32 shadow_pid;
@@ -427,9 +474,14 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_KVM_BOOK3S_64_HV
struct kvm_vcpu_arch_shared shregs;
+ unsigned long pgfault_addr;
+ long pgfault_index;
+ unsigned long pgfault_hpte[2];
+
struct list_head run_list;
struct task_struct *run_task;
struct kvm_run *kvm_run;
+ pgd_t *pgdir;
#endif
};
@@ -438,4 +490,12 @@ struct kvm_vcpu_arch {
#define KVMPPC_VCPU_BUSY_IN_HOST 1
#define KVMPPC_VCPU_RUNNABLE 2
+/* Values for vcpu->arch.io_gpr */
+#define KVM_MMIO_REG_MASK 0x001f
+#define KVM_MMIO_REG_EXT_MASK 0xffe0
+#define KVM_MMIO_REG_GPR 0x0000
+#define KVM_MMIO_REG_FPR 0x0020
+#define KVM_MMIO_REG_QPR 0x0040
+#define KVM_MMIO_REG_FQPR 0x0060
+
#endif /* __POWERPC_KVM_HOST_H__ */
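The KVM_MMIO_REG_* values above pack a register class into the high bits and a register number into the low five bits of vcpu->arch.io_gpr. Hypothetical decode helpers, shown only to illustrate the split implied by the two masks; neither function exists in the patch:

static inline unsigned int example_mmio_reg_num(unsigned int io_gpr)
{
	return io_gpr & KVM_MMIO_REG_MASK;	/* low bits: register number */
}

static inline unsigned int example_mmio_reg_class(unsigned int io_gpr)
{
	return io_gpr & KVM_MMIO_REG_EXT_MASK;	/* e.g. KVM_MMIO_REG_FPR or KVM_MMIO_REG_QPR */
}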
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 50533f9adf40..7b754e743003 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -22,6 +22,16 @@
#include <linux/types.h>
+/*
+ * Additions to this struct must only occur at the end, and should be
+ * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
+ * (albeit not necessarily relevant to the current target hardware platform).
+ *
+ * Struct fields are always 32 or 64 bit aligned, depending on them being 32
+ * or 64 bit wide respectively.
+ *
+ * See Documentation/virtual/kvm/ppc-pv.txt
+ */
struct kvm_vcpu_arch_shared {
__u64 scratch1;
__u64 scratch2;
@@ -33,11 +43,35 @@ struct kvm_vcpu_arch_shared {
__u64 sprg3;
__u64 srr0;
__u64 srr1;
- __u64 dar;
+ __u64 dar; /* dear on BookE */
__u64 msr;
__u32 dsisr;
__u32 int_pending; /* Tells the guest if we have an interrupt */
__u32 sr[16];
+ __u32 mas0;
+ __u32 mas1;
+ __u64 mas7_3;
+ __u64 mas2;
+ __u32 mas4;
+ __u32 mas6;
+ __u32 esr;
+ __u32 pir;
+
+ /*
+ * SPRG4-7 are user-readable, so we can only keep these consistent
+ * between the shared area and the real registers when there's an
+ * intervening exit to KVM. This also applies to SPRG3 on some
+ * chips.
+ *
+ * This suffices for access by guest userspace, since in PR-mode
+ * KVM, an exit must occur when changing the guest's MSR[PR].
+ * If the guest kernel writes to SPRG3-7 via the shared area, it
+ * must also use the shared area for reading while in kernel space.
+ */
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
};
#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
@@ -47,7 +81,10 @@ struct kvm_vcpu_arch_shared {
#define KVM_FEATURE_MAGIC_PAGE 1
-#define KVM_MAGIC_FEAT_SR (1 << 0)
+#define KVM_MAGIC_FEAT_SR (1 << 0)
+
+/* MASn, ESR, PIR, and high SPRGs */
+#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1)
#ifdef __KERNEL__
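Per the comment above, once a guest kernel writes SPRG4-7 through the shared area it must also read them back from there rather than with mfspr. A rough guest-side sketch, assuming the magic page has been mapped and KVM_MAGIC_FEAT_MAS0_TO_SPRG7 advertised; kvm_shared is a hypothetical pointer to that mapping, not anything defined by the patch:

static struct kvm_vcpu_arch_shared *kvm_shared;	/* assumed: points at the mapped magic page */

static inline void guest_set_sprg4(u64 val)
{
	kvm_shared->sprg4 = val;	/* the real SPRG4 only syncs on the next exit to KVM */
}

static inline u64 guest_get_sprg4(void)
{
	return kvm_shared->sprg4;	/* must use the shared copy, not mfspr */
}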
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 46efd1a265c9..9d6dee0f7d48 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -66,6 +66,7 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
+extern void kvmppc_decrementer_func(unsigned long data);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
/* Core-specific hooks */
@@ -94,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -120,15 +121,17 @@ extern long kvmppc_alloc_hpt(struct kvm *kvm);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
-extern void kvmppc_map_vrma(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
+extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
struct kvm_allocate_rma *rma);
-extern struct kvmppc_rma_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvmppc_rma_info *ri);
+extern struct kvmppc_linear_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvmppc_linear_info *ri);
+extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
+extern void kvm_release_hpt(struct kvmppc_linear_info *li);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
@@ -175,6 +178,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
#ifdef CONFIG_KVM_BOOK3S_64_HV
@@ -183,14 +189,19 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
paca[cpu].kvm_hstate.xics_phys = addr;
}
-extern void kvm_rma_init(void);
+extern void kvm_linear_init(void);
#else
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}
-static inline void kvm_rma_init(void)
+static inline void kvm_linear_init(void)
{}
#endif
+int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_config_tlb *cfg);
+int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_dirty_tlb *cfg);
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index e0298d26ce5d..a76254af0aaa 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -41,15 +41,7 @@
* We only have to have statically allocated lppaca structs on
* legacy iSeries, which supports at most 64 cpus.
*/
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS NR_CPUS
-#else
-#define NR_LPPACAS 64
-#endif
-#else /* not iSeries */
#define NR_LPPACAS 1
-#endif
/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index bf37931d1ad6..42ce570812c1 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -99,9 +99,7 @@ struct machdep_calls {
void (*init_IRQ)(void);
- /* Return an irq, or NO_IRQ to indicate there are none pending.
- * If for some reason there is no irq, but the interrupt
- * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */
+ /* Return an irq, or NO_IRQ to indicate there are none pending. */
unsigned int (*get_irq)(void);
/* PCI stuff */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index f5f89cafebd0..cdb5421877e2 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -41,9 +41,10 @@
/* MAS registers bit definitions */
#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
-#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
-#define MAS0_NV(x) ((x) & 0x00000FFF)
#define MAS0_ESEL_MASK 0x0FFF0000
+#define MAS0_ESEL_SHIFT 16
+#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
+#define MAS0_NV(x) ((x) & 0x00000FFF)
#define MAS0_HES 0x00004000
#define MAS0_WQ_ALLWAYS 0x00000000
#define MAS0_WQ_COND 0x00001000
@@ -167,6 +168,7 @@
#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */
#define TLBnCFG_MAXSIZE_SHIFT 16
#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
+#define TLBnCFG_ASSOC_SHIFT 24
/* TLBnPS encoding */
#define TLBnPS_4K 0x00000004
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 412ba493cb98..1c65a59881ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -108,11 +108,11 @@ extern char initial_stab[];
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
/* Values for PP (assumes Ks=0, Kp=1) */
-/* pp0 will always be 0 for linux */
#define PP_RWXX 0 /* Supervisor read/write, User none */
#define PP_RWRX 1 /* Supervisor read/write, User read */
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
#define PP_RXRX 3 /* Supervisor read, User read */
+#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */
#ifndef __ASSEMBLY__
@@ -267,7 +267,6 @@ extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);
@@ -325,9 +324,6 @@ extern void slb_set_size(u16 size);
* WARNING - If you change these you must make sure the asm
* implementations in slb_allocate (slb_low.S), do_stab_bolted
* (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
- *
- * You'll also need to change the precomputed VSID values in head.S
- * which are used by the iSeries firmware.
*/
#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */
@@ -484,14 +480,6 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
| (ea >> SID_SHIFT_1T), 1T);
}
-/*
- * This is only used on legacy iSeries in lparmap.c,
- * hence the 256MB segment assumption.
- */
-#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER_256M) % \
- VSID_MODULUS_256M)
-#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 67b4d9837236..c65b9294376e 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -255,7 +255,7 @@ struct mpic
struct device_node *node;
/* The remapper for this MPIC */
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
@@ -273,7 +273,6 @@ struct mpic
unsigned int isu_size;
unsigned int isu_shift;
unsigned int isu_mask;
- unsigned int irq_count;
/* Number of sources */
unsigned int num_sources;
/* default senses array */
@@ -349,8 +348,6 @@ struct mpic
#define MPIC_U3_HT_IRQS 0x00000004
/* Broken IPI registers (autodetected) */
#define MPIC_BROKEN_IPI 0x00000008
-/* MPIC wants a reset */
-#define MPIC_WANTS_RESET 0x00000010
/* Spurious vector requires EOI */
#define MPIC_SPV_EOI 0x00000020
/* No passthrough disable */
@@ -363,15 +360,11 @@ struct mpic
#define MPIC_ENABLE_MCK 0x00000200
/* Disable bias among target selection, spread interrupts evenly */
#define MPIC_NO_BIAS 0x00000400
-/* Ignore NIRQS as reported by FRR */
-#define MPIC_BROKEN_FRR_NIRQS 0x00000800
/* Destination only supports a single CPU at a time */
#define MPIC_SINGLE_DEST_CPU 0x00001000
/* Enable CoreInt delivery of interrupts */
#define MPIC_ENABLE_COREINT 0x00002000
-/* Disable resetting of the MPIC.
- * NOTE: This flag trumps MPIC_WANTS_RESET.
- */
+/* Do not reset the MPIC during initialization */
#define MPIC_NO_RESET 0x00004000
/* Freescale MPIC (compatible includes "fsl,mpic") */
#define MPIC_FSL 0x00008000
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
new file mode 100644
index 000000000000..3ec37dc9003e
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2011-2012, Meador Inge, Mentor Graphics Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#ifndef _ASM_MPIC_MSGR_H
+#define _ASM_MPIC_MSGR_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+struct mpic_msgr {
+ u32 __iomem *base;
+ u32 __iomem *mer;
+ int irq;
+ unsigned char in_use;
+ raw_spinlock_t lock;
+ int num;
+};
+
+/* Get a message register
+ *
+ * @reg_num: the MPIC message register to get
+ *
+ * A pointer to the message register is returned. If
+ * the message register asked for is already in use, then
+ * EBUSY is returned. If the number given is not associated
+ * with an actual message register, then ENODEV is returned.
+ * Successfully getting the register marks it as in use.
+ */
+extern struct mpic_msgr *mpic_msgr_get(unsigned int reg_num);
+
+/* Relinquish a message register
+ *
+ * @msgr: the message register to return
+ *
+ * Disables the given message register and marks it as free.
+ * After this call has completed successfully the message
+ * register is available to be acquired by a call to
+ * mpic_msgr_get.
+ */
+extern void mpic_msgr_put(struct mpic_msgr *msgr);
+
+/* Enable a message register
+ *
+ * @msgr: the message register to enable
+ *
+ * The given message register is enabled for sending
+ * messages.
+ */
+extern void mpic_msgr_enable(struct mpic_msgr *msgr);
+
+/* Disable a message register
+ *
+ * @msgr: the message register to disable
+ *
+ * The given message register is disabled for sending
+ * messages.
+ */
+extern void mpic_msgr_disable(struct mpic_msgr *msgr);
+
+/* Write a message to a message register
+ *
+ * @msgr: the message register to write to
+ * @message: the message to write
+ *
+ * The given 32-bit message is written to the given message
+ * register. Writing to an enabled message register fires
+ * an interrupt.
+ */
+static inline void mpic_msgr_write(struct mpic_msgr *msgr, u32 message)
+{
+ out_be32(msgr->base, message);
+}
+
+/* Read a message from a message register
+ *
+ * @msgr: the message register to read from
+ *
+ * Returns the 32-bit value currently in the given message register.
+ * Upon reading the register any interrupts for that register are
+ * cleared.
+ */
+static inline u32 mpic_msgr_read(struct mpic_msgr *msgr)
+{
+ return in_be32(msgr->base);
+}
+
+/* Clear a message register
+ *
+ * @msgr: the message register to clear
+ *
+ * Clears any interrupts associated with the given message register.
+ */
+static inline void mpic_msgr_clear(struct mpic_msgr *msgr)
+{
+ (void) mpic_msgr_read(msgr);
+}
+
+/* Set the destination CPU for the message register
+ *
+ * @msgr: the message register whose destination is to be set
+ * @cpu_num: the Linux CPU number to bind the message register to
+ *
+ * Note that the CPU number given is the CPU number used by the kernel
+ * and *not* the actual hardware CPU number.
+ */
+static inline void mpic_msgr_set_destination(struct mpic_msgr *msgr,
+ u32 cpu_num)
+{
+ out_be32(msgr->base, 1 << get_hard_smp_processor_id(cpu_num));
+}
+
+/* Get the IRQ number for the message register
+ * @msgr: the message register whose IRQ is to be returned
+ *
+ * Returns the IRQ number associated with the given message register.
+ * NO_IRQ is returned if this message register is not capable of
+ * receiving interrupts. Which message registers can and cannot receive
+ * interrupts is specified in the device tree for the system.
+ */
+static inline int mpic_msgr_get_irq(struct mpic_msgr *msgr)
+{
+ return msgr->irq;
+}
+
+#endif
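Taken together, the declarations above suggest a get/enable/write/put lifecycle for a message register. A hedged sketch of a sender, assuming kernel context and that mpic_msgr_get() reports the EBUSY/ENODEV cases via ERR_PTR(); the register number and payload are arbitrary:

#include <linux/err.h>
#include <asm/mpic_msgr.h>

static int example_send_message(unsigned int reg_num, u32 payload, u32 dest_cpu)
{
	struct mpic_msgr *msgr;

	msgr = mpic_msgr_get(reg_num);
	if (IS_ERR(msgr))
		return PTR_ERR(msgr);		/* -EBUSY or -ENODEV, per the comments above */

	mpic_msgr_set_destination(msgr, dest_cpu);
	mpic_msgr_enable(msgr);
	mpic_msgr_write(msgr, payload);		/* writing an enabled register fires the interrupt */

	mpic_msgr_put(msgr);			/* disables the register and marks it free */
	return 0;
}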
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 269c05a36d91..daf813fea91f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -132,7 +132,7 @@ struct paca_struct {
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 soft_enabled; /* irq soft-enable flag */
- u8 hard_enabled; /* set if irqs are enabled in MSR */
+ u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
u8 nap_state_lost; /* NV GPR values lost in power7_idle */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 5d487657322e..ac39e6a3b25a 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -155,14 +155,7 @@ struct pci_dn {
struct pci_dev *pcidev; /* back-pointer to the pci device */
#ifdef CONFIG_EEH
- int class_code; /* pci device class */
- int eeh_mode; /* See eeh.h for possible EEH_MODEs */
- int eeh_config_addr;
- int eeh_pe_config_addr; /* new-style partition endpoint address */
- int eeh_check_count; /* # times driver ignored error */
- int eeh_freeze_count; /* # times this device froze up. */
- int eeh_false_positives; /* # times this device reported #ff's */
- u32 config_space[16]; /* saved PCI config space */
+ struct eeh_dev *edev; /* eeh device */
#endif
#define IODA_INVALID_PE (-1)
#ifdef CONFIG_PPC_POWERNV
@@ -185,6 +178,13 @@ static inline int pci_device_from_OF_node(struct device_node *np,
return 0;
}
+#if defined(CONFIG_EEH)
+static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
+{
+ return PCI_DN(dn)->edev;
+}
+#endif
+
/** Find the bus corresponding to the indicated device node */
extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index f54b3d26ce9d..6653f2743c4e 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -154,14 +154,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
#endif /* CONFIG_PPC64 */
-extern void pcibios_resource_to_bus(struct pci_dev *dev,
- struct pci_bus_region *region,
- struct resource *res);
-
-extern void pcibios_bus_to_resource(struct pci_dev *dev,
- struct resource *res,
- struct pci_bus_region *region);
-
extern void pcibios_claim_one_bus(struct pci_bus *b);
extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
@@ -190,6 +182,7 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end);
+extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);
extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 8f1df1208d23..078019b5b353 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -47,6 +47,8 @@ struct power_pmu {
*/
#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
+#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */
+#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */
/*
* Values for flags to get_alternatives()
@@ -61,8 +63,6 @@ struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-#define PERF_EVENT_INDEX_OFFSET 1
-
/*
* Only override the default definitions in include/linux/perf_event.h
* if we have hardware PMU support.
diff --git a/arch/powerpc/include/asm/phyp_dump.h b/arch/powerpc/include/asm/phyp_dump.h
deleted file mode 100644
index fa74c6c3e106..000000000000
--- a/arch/powerpc/include/asm/phyp_dump.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Hypervisor-assisted dump
- *
- * Linas Vepstas, Manish Ahuja 2008
- * Copyright 2008 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _PPC64_PHYP_DUMP_H
-#define _PPC64_PHYP_DUMP_H
-
-#ifdef CONFIG_PHYP_DUMP
-
-/* The RMR region will be saved for later dumping
- * whenever the kernel crashes. Set this to 256MB. */
-#define PHYP_DUMP_RMR_START 0x0
-#define PHYP_DUMP_RMR_END (1UL<<28)
-
-struct phyp_dump {
- /* Memory that is reserved during very early boot. */
- unsigned long init_reserve_start;
- unsigned long init_reserve_size;
- /* cmd line options during boot */
- unsigned long reserve_bootvar;
- unsigned long phyp_dump_at_boot;
- /* Check status during boot if dump supported, active & present*/
- unsigned long phyp_dump_configured;
- unsigned long phyp_dump_is_active;
- /* store cpu & hpte size */
- unsigned long cpu_state_size;
- unsigned long hpte_region_size;
- /* previous scratch area values */
- unsigned long reserved_scratch_addr;
- unsigned long reserved_scratch_size;
-};
-
-extern struct phyp_dump *phyp_dump_info;
-
-int early_init_dt_scan_phyp_dump(unsigned long node,
- const char *uname, int depth, void *data);
-
-#endif /* CONFIG_PHYP_DUMP */
-#endif /* _PPC64_PHYP_DUMP_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e980faae4225..d81f99430fe7 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -45,6 +45,7 @@
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_SLBFEE 0x7c0007a7
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -183,7 +184,8 @@
__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \
__PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
-
+#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
+ __PPC_RT(t) | __PPC_RB(b))
/*
* Define what the VSX XX1 form instructions will look like, then add
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 43268f15004e..80fa704d410f 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -45,94 +45,21 @@ extern void init_pci_config_tokens (void);
extern unsigned long get_phb_buid (struct device_node *);
extern int rtas_setup_phb(struct pci_controller *phb);
-extern unsigned long pci_probe_only;
-
-/* ---- EEH internal-use-only related routines ---- */
#ifdef CONFIG_EEH
+void pci_addr_cache_build(void);
void pci_addr_cache_insert_device(struct pci_dev *dev);
void pci_addr_cache_remove_device(struct pci_dev *dev);
-void pci_addr_cache_build(void);
-struct pci_dev *pci_get_device_by_addr(unsigned long addr);
-
-/**
- * eeh_slot_error_detail -- record and EEH error condition to the log
- * @pdn: pci device node
- * @severity: EEH_LOG_TEMP_FAILURE or EEH_LOG_PERM_FAILURE
- *
- * Obtains the EEH error details from the RTAS subsystem,
- * and then logs these details with the RTAS error log system.
- */
-#define EEH_LOG_TEMP_FAILURE 1
-#define EEH_LOG_PERM_FAILURE 2
-void eeh_slot_error_detail (struct pci_dn *pdn, int severity);
-
-/**
- * rtas_pci_enable - enable IO transfers for this slot
- * @pdn: pci device node
- * @function: either EEH_THAW_MMIO or EEH_THAW_DMA
- *
- * Enable I/O transfers to this slot
- */
-#define EEH_THAW_MMIO 2
-#define EEH_THAW_DMA 3
-int rtas_pci_enable(struct pci_dn *pdn, int function);
-
-/**
- * rtas_set_slot_reset -- unfreeze a frozen slot
- * @pdn: pci device node
- *
- * Clear the EEH-frozen condition on a slot. This routine
- * does this by asserting the PCI #RST line for 1/8th of
- * a second; this routine will sleep while the adapter is
- * being reset.
- *
- * Returns a non-zero value if the reset failed.
- */
-int rtas_set_slot_reset (struct pci_dn *);
-int eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs);
-
-/**
- * eeh_restore_bars - Restore device configuration info.
- * @pdn: pci device node
- *
- * A reset of a PCI device will clear out its config space.
- * This routines will restore the config space for this
- * device, and is children, to values previously obtained
- * from the firmware.
- */
-void eeh_restore_bars(struct pci_dn *);
-
-/**
- * rtas_configure_bridge -- firmware initialization of pci bridge
- * @pdn: pci device node
- *
- * Ask the firmware to configure all PCI bridges devices
- * located behind the indicated node. Required after a
- * pci device reset. Does essentially the same hing as
- * eeh_restore_bars, but for brdges, and lets firmware
- * do the work.
- */
-void rtas_configure_bridge(struct pci_dn *);
-
+struct pci_dev *pci_addr_cache_get_device(unsigned long addr);
+void eeh_slot_error_detail(struct eeh_dev *edev, int severity);
+int eeh_pci_enable(struct eeh_dev *edev, int function);
+int eeh_reset_pe(struct eeh_dev *);
+void eeh_restore_bars(struct eeh_dev *);
int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
-
-/**
- * eeh_mark_slot -- set mode flags for pertition endpoint
- * @pdn: pci device node
- *
- * mark and clear slots: find "partition endpoint" PE and set or
- * clear the flags for each subnode of the PE.
- */
-void eeh_mark_slot (struct device_node *dn, int mode_flag);
-void eeh_clear_slot (struct device_node *dn, int mode_flag);
-
-/**
- * find_device_pe -- Find the associated "Partiationable Endpoint" PE
- * @pdn: pci device node
- */
-struct device_node * find_device_pe(struct device_node *dn);
+void eeh_mark_slot(struct device_node *dn, int mode_flag);
+void eeh_clear_slot(struct device_node *dn, int mode_flag);
+struct device_node *eeh_find_device_pe(struct device_node *dn);
void eeh_sysfs_add_device(struct pci_dev *pdev);
void eeh_sysfs_remove_device(struct pci_dev *pdev);
@@ -142,6 +69,11 @@ static inline const char *eeh_pci_name(struct pci_dev *pdev)
return pdev ? pci_name(pdev) : "<null>";
}
+static inline const char *eeh_driver_name(struct pci_dev *pdev)
+{
+ return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
+}
+
#endif /* CONFIG_EEH */
#else /* CONFIG_PCI */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 368f72f79808..50f73aa2ba21 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -60,6 +60,8 @@ BEGIN_FW_FTR_SECTION; \
cmpd cr1,r11,r10; \
beq+ cr1,33f; \
bl .accumulate_stolen_time; \
+ ld r12,_MSR(r1); \
+ andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
33: \
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 78a205162fd7..84cc7840cd18 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -83,8 +83,18 @@ struct pt_regs {
#ifndef __ASSEMBLY__
-#define instruction_pointer(regs) ((regs)->nip)
-#define user_stack_pointer(regs) ((regs)->gpr[1])
+#define GET_IP(regs) ((regs)->nip)
+#define GET_USP(regs) ((regs)->gpr[1])
+#define GET_FP(regs) (0)
+#define SET_FP(regs, val)
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#define profile_pc profile_pc
+#endif
+
+#include <asm-generic/ptrace.h>
+
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
@@ -99,12 +109,6 @@ static inline long regs_return_value(struct pt_regs *regs)
return -regs->gpr[3];
}
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7fdc2c0b7fa0..9d7f0fb69028 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -216,6 +216,7 @@
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
+#define DSISR_KEYFAULT 0x00200000 /* Key fault */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
@@ -237,6 +238,7 @@
#define LPCR_ISL (1ul << (63-2))
#define LPCR_VC_SH (63-2)
#define LPCR_DPFD_SH (63-11)
+#define LPCR_VRMASD (0x1ful << (63-16))
#define LPCR_VRMA_L (1ul << (63-12))
#define LPCR_VRMA_LP0 (1ul << (63-15))
#define LPCR_VRMA_LP1 (1ul << (63-16))
@@ -493,6 +495,9 @@
#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
+#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
+#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
@@ -1079,30 +1084,12 @@
#define proc_trap() asm volatile("trap")
-#ifdef CONFIG_PPC64
-
-extern void ppc64_runlatch_on(void);
-extern void __ppc64_runlatch_off(void);
-
-#define ppc64_runlatch_off() \
- do { \
- if (cpu_has_feature(CPU_FTR_CTRL) && \
- test_thread_flag(TIF_RUNLATCH)) \
- __ppc64_runlatch_off(); \
- } while (0)
+#define __get_SP() ({unsigned long sp; \
+ asm volatile("mr %0,1": "=r" (sp)); sp;})
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);
-#else
-#define ppc64_runlatch_on()
-#define ppc64_runlatch_off()
-
-#endif /* CONFIG_PPC64 */
-
-#define __get_SP() ({unsigned long sp; \
- asm volatile("mr %0,1": "=r" (sp)); sp;})
-
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 500fe1dc43e6..8a97aa7289d3 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -62,6 +62,7 @@
#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
+#define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */
#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c143bb77ae..f0a4db31ecb6 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -74,7 +74,6 @@ struct rtas_suspend_me_data {
/* RTAS event classes */
#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */
#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */
-#define RTAS_POWERMGM_EVENTS 0x20000000 /* set bit 2 */
#define RTAS_HOTPLUG_EVENTS 0x10000000 /* set bit 3 */
#define RTAS_IO_EVENTS 0x08000000 /* set bit 4 */
#define RTAS_EVENT_SCAN_ALL_EVENTS 0xffffffff
@@ -204,6 +203,39 @@ struct rtas_ext_event_log_v6 {
/* Variable length. */
};
+/* pSeries event log format */
+
+/* Two bytes ASCII section IDs */
+#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
+#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
+#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
+#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
+#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
+#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
+#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
+#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
+#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
+
+/* Vendor specific Platform Event Log Format, Version 6, section header */
+struct pseries_errorlog {
+ uint16_t id; /* 0x00 2-byte ASCII section ID */
+ uint16_t length; /* 0x02 Section length in bytes */
+ uint8_t version; /* 0x04 Section version */
+ uint8_t subtype; /* 0x05 Section subtype */
+ uint16_t creator_component; /* 0x06 Creator component ID */
+ uint8_t data[]; /* 0x08 Start of section data */
+};
+
+struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
+ uint16_t section_id);
+
/*
* This can be set by the rtas_flash module so that it can get called
* as the absolutely last thing before the kernel terminates.
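get_pseries_errorlog() above evidently looks up a section of the v6 extended log by its two-byte ASCII section ID. A hypothetical caller, sketched only to show the lookup; the assumption that a missing section yields NULL, and the pr_info() reporting, are illustrative:

static void example_handle_epow(struct rtas_error_log *log)
{
	struct pseries_errorlog *sect;

	sect = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EPOW);
	if (!sect)
		return;		/* assumed: no EPOW section in this log */

	pr_info("EPOW section v%u subtype %u, %u bytes\n",
		sect->version, sect->subtype, sect->length);
}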
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index adba970ce918..ebc24dc5b1a1 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -122,7 +122,6 @@ extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
extern irqreturn_t smp_ipi_demux(void);
-void smp_init_iSeries(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_init_celleb(void);
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 2fc2af8fbf59..3d5179bb122f 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -71,5 +71,9 @@
#define SO_WIFI_STATUS 41
#define SCM_WIFI_STATUS SO_WIFI_STATUS
+#define SO_PEEK_OFF 42
+
+/* Instruct lower device to use last 4 bytes of skb data as FCS */
+#define SO_NOFCS 43
#endif /* _ASM_POWERPC_SOCKET_H */
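SO_NOFCS above is a socket-level option, so from userspace it would be set with setsockopt() at SOL_SOCKET level. A small illustrative example; whether the kernel honours it depends on the underlying device, and the socket is assumed to exist already:

#include <sys/socket.h>

static int example_enable_nofcs(int sock)
{
	int one = 1;

	/* Ask the lower device to treat the last 4 bytes of skb data as FCS. */
	return setsockopt(sock, SOL_SOCKET, SO_NOFCS, &one, sizeof(one));
}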
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f9611bd69ed2..7124fc06ad47 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -23,7 +23,6 @@
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
-#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
@@ -95,12 +94,12 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* value.
*/
-#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR || ISERIES */
+#else /* SPLPAR */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
#define SHARED_PROCESSOR 0
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index c377457d1b89..a02883d5af43 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -550,5 +550,43 @@ extern void reloc_got2(unsigned long);
extern struct dentry *powerpc_debugfs_root;
+#ifdef CONFIG_PPC64
+
+extern void __ppc64_runlatch_on(void);
+extern void __ppc64_runlatch_off(void);
+
+/*
+ * We manually hard enable and disable; this is called
+ * in the idle loop and we don't want to interfere
+ * with soft-disable/enable & interrupt replay.
+ */
+#define ppc64_runlatch_off() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ test_thread_local_flags(_TLF_RUNLATCH)) { \
+ unsigned long msr = mfmsr(); \
+ __hard_irq_disable(); \
+ __ppc64_runlatch_off(); \
+ if (msr & MSR_EE) \
+ __hard_irq_enable(); \
+ } \
+ } while (0)
+
+#define ppc64_runlatch_on() \
+ do { \
+ if (cpu_has_feature(CPU_FTR_CTRL) && \
+ !test_thread_local_flags(_TLF_RUNLATCH)) { \
+ unsigned long msr = mfmsr(); \
+ __hard_irq_disable(); \
+ __ppc64_runlatch_on(); \
+ if (msr & MSR_EE) \
+ __hard_irq_enable(); \
+ } \
+ } while (0)
+#else
+#define ppc64_runlatch_on()
+#define ppc64_runlatch_off()
+#endif /* CONFIG_PPC64 */
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */
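As the comment notes, the runlatch macros are meant for the idle loop, where the hard-enable state is juggled by hand. A minimal sketch of the expected call pattern, assuming kernel context; the busy-wait body stands in for the real nap/sleep entry:

static void example_idle(void)
{
	ppc64_runlatch_off();		/* tell the core this thread is idle */
	while (!need_resched())
		cpu_relax();		/* stand-in for the real low-power entry */
	ppc64_runlatch_on();		/* mark the thread as running again */
}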
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 964714940961..4a741c7efd02 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -110,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOERROR 12 /* Force successful syscall return */
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
-#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -141,11 +140,13 @@ static inline struct thread_info *current_thread_info(void)
#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
#define TLF_LAZY_MMU 3 /* tlb_batch is active */
+#define TLF_RUNLATCH 4 /* Is the runlatch enabled? */
#define _TLF_NAPPING (1 << TLF_NAPPING)
#define _TLF_SLEEPING (1 << TLF_SLEEPING)
#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
+#define _TLF_RUNLATCH (1 << TLF_RUNLATCH)
#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK 1
@@ -156,6 +157,12 @@ static inline void set_restore_sigmask(void)
set_bit(TIF_SIGPENDING, &ti->flags);
}
+static inline bool test_thread_local_flags(unsigned int flags)
+{
+ struct thread_info *ti = current_thread_info();
+ return (ti->local_flags & flags) != 0;
+}
+
#ifdef CONFIG_PPC64
#define is_32bit_task() (test_thread_flag(TIF_32BIT))
#else
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 7eb10fb96cd0..2136f58a54e8 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -18,11 +18,6 @@
#include <linux/percpu.h>
#include <asm/processor.h>
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/paca.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_call.h>
-#endif
/* time.c */
extern unsigned long tb_ticks_per_jiffy;
@@ -167,15 +162,6 @@ static inline void set_dec(int val)
#ifndef CONFIG_BOOKE
--val;
#endif
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES) &&
- get_lppaca()->shared_proc) {
- get_lppaca()->virtual_decr = val;
- if (get_dec() > val)
- HvCall_setVirtualDecr();
- return;
- }
-#endif
mtspr(SPRN_DEC, val);
#endif /* not 40x or 8xx_CPU6 */
}
@@ -217,7 +203,6 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
extern void secondary_cpu_time_init(void);
-extern void iSeries_time_init_early(void);
DECLARE_PER_CPU(u64, decrementers_next_tb);
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 8338aef5a4d3..b3038817b8dc 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -44,7 +44,6 @@ extern void __init udbg_init_debug_lpar_hvsi(void);
extern void __init udbg_init_pmac_realmode(void);
extern void __init udbg_init_maple_realmode(void);
extern void __init udbg_init_pas_realmode(void);
-extern void __init udbg_init_iseries(void);
extern void __init udbg_init_rtas_panel(void);
extern void __init udbg_init_rtas_console(void);
extern void __init udbg_init_debug_beat(void);
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 0a290a195946..6bfd5ffe1d4f 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -69,6 +69,7 @@ struct vio_dev {
};
struct vio_driver {
+ const char *name;
const struct vio_device_id *id_table;
int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
int (*remove)(struct vio_dev *dev);
@@ -76,10 +77,17 @@ struct vio_driver {
* be loaded in a CMO environment if it uses DMA.
*/
unsigned long (*get_desired_dma)(struct vio_dev *dev);
+ const struct dev_pm_ops *pm;
struct device_driver driver;
};
-extern int vio_register_driver(struct vio_driver *drv);
+extern int __vio_register_driver(struct vio_driver *drv, struct module *owner,
+ const char *mod_name);
+/*
+ * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded
+ */
+#define vio_register_driver(driver) \
+ __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
extern void vio_unregister_driver(struct vio_driver *drv);
extern int vio_cmo_entitlement_update(size_t);
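With the driver name, pm ops and module owner now carried by struct vio_driver and the registration macro, a driver only fills in the table and calls vio_register_driver(). A hedged sketch; the IDs, callbacks and name below are made up:

static const struct vio_device_id example_ids[] = {
	{ "example", "IBM,example" },	/* hypothetical type/compat strings */
	{ "", "" },
};

static int example_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	return 0;			/* a real driver would set up its resources here */
}

static int example_remove(struct vio_dev *dev)
{
	return 0;
}

static struct vio_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* module init would simply do: return vio_register_driver(&example_driver); */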
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index c48de98ba94e..4ae9a09c3b89 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -86,7 +86,7 @@ struct ics {
extern unsigned int xics_default_server;
extern unsigned int xics_default_distrib_server;
extern unsigned int xics_interrupt_server_size;
-extern struct irq_host *xics_host;
+extern struct irq_domain *xics_host;
struct xics_cppr {
unsigned char stack[MAX_NUM_PRIORITIES];
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ee728e433aa2..f5808a35688c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_FA_DUMP) += fadump.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_E500) += idle_e500.o
endif
@@ -113,15 +114,6 @@ obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
-obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
-
-obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o
-obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
- power5+-pmu.o power6-pmu.o power7-pmu.o
-obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
-
-obj-$(CONFIG_FSL_EMB_PERF_EVENT) += perf_event_fsl_emb.o
-obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 04caee7d9bc1..34b8afe94a50 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -46,9 +46,6 @@
#include <asm/hvcall.h>
#include <asm/xics.h>
#endif
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/iseries/alpaca.h>
-#endif
#ifdef CONFIG_PPC_POWERNV
#include <asm/opal.h>
#endif
@@ -147,7 +144,7 @@ int main(void)
DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
- DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+ DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
@@ -384,17 +381,6 @@ int main(void)
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
-#ifdef CONFIG_PPC_ISERIES
- /* the assembler miscalculates the VSID values */
- DEFINE(PAGE_OFFSET_ESID, GET_ESID(PAGE_OFFSET));
- DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET));
- DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START));
- DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START));
-
- /* alpaca */
- DEFINE(ALPACA_SIZE, sizeof(struct alpaca));
-#endif
-
DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
DEFINE(PTE_SIZE, sizeof(pte_t));
@@ -426,16 +412,23 @@ int main(void)
DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
#endif
- DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
- DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
- DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
- DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
+ DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
+ DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
+ DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
+ DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7));
DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+ DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
+ DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
+ DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
+ DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
+ DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
+ DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
+
/* book3s */
#ifdef CONFIG_KVM_BOOK3S_64_HV
DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
@@ -448,6 +441,7 @@ int main(void)
DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
+ DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
#endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 81db9e2a8a20..138ae183c440 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1816,7 +1816,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.platform = "ppc440",
},
{ /* 464 in APM821xx */
- .pvr_mask = 0xffffff00,
+ .pvr_mask = 0xfffffff0,
.pvr_value = 0x12C41C80,
.cpu_name = "APM821XX",
.cpu_features = CPU_FTRS_44X,
@@ -2019,6 +2019,24 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_e500mc,
.platform = "ppce5500",
},
+ { /* e6500 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80400000,
+ .cpu_name = "e6500",
+ .cpu_features = CPU_FTRS_E6500,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
+ MMU_FTR_USE_TLBILX,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 4,
+ .oprofile_cpu_type = "ppc/e6500",
+ .oprofile_type = PPC_OPROFILE_FSL_EMB,
+ .cpu_setup = __setup_cpu_e5500,
+ .cpu_restore = __restore_cpu_e5500,
+ .machine_check = machine_check_e500mc,
+ .platform = "ppce6500",
+ },
#ifdef CONFIG_PPC32
{ /* default match */
.pvr_mask = 0x00000000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 28be3452e67a..abef75176c07 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -46,7 +46,6 @@
/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
-static atomic_t cpus_in_crash;
static int time_to_dump;
#define CRASH_HANDLER_MAX 3
@@ -66,6 +65,7 @@ static int handle_fault(struct pt_regs *regs)
#ifdef CONFIG_SMP
+static atomic_t cpus_in_crash;
void crash_ipi_callback(struct pt_regs *regs)
{
static cpumask_t cpus_state_saved = CPU_MASK_NONE;
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 2cc451aaaca7..5b25c8060fd6 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -37,6 +37,8 @@ void doorbell_exception(struct pt_regs *regs)
irq_enter();
+ may_hard_irq_enable();
+
smp_ipi_demux();
irq_exit();
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4f80cf1ce77b..3e57a00b8cba 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1213,7 +1213,7 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
stw r3,_TRAP(r1)
2: addi r3,r1,STACK_FRAME_OVERHEAD
mr r4,r9
- bl do_signal
+ bl do_notify_resume
REST_NVGPRS(r1)
b recheck
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d834425186ae..f8a7a1a1a9f4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,6 +32,7 @@
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
+#include <asm/hw_irq.h>
/*
* System calls.
@@ -115,39 +116,33 @@ BEGIN_FW_FTR_SECTION
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_on
- REST_GPR(0,r1)
- REST_4GPRS(3,r1)
- REST_2GPRS(7,r1)
- addi r9,r1,STACK_FRAME_OVERHEAD
- ld r12,_MSR(r1)
-#endif /* CONFIG_TRACE_IRQFLAGS */
- li r10,1
- stb r10,PACASOFTIRQEN(r13)
- stb r10,PACAHARDIRQEN(r13)
- std r10,SOFTE(r1)
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- /* Hack for handling interrupts when soft-enabling on iSeries */
- cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
- andi. r10,r12,MSR_PR /* from kernel */
- crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
- bne 2f
- b hardware_interrupt_entry
-2:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
+ /*
+	 * A syscall should always be called with interrupts enabled,
+	 * so we just unconditionally hard-enable here. When some kind
+	 * of irq tracing is used, we additionally check that this
+	 * condition is met.
+ */
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
+ lbz r10,PACASOFTIRQEN(r13)
+ xori r10,r10,1
+1: tdnei r10,0
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
- /* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- mfmsr r11
+ ld r11,PACAKMSR(r13)
ori r11,r11,MSR_EE
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
+ /* We do need to set SOFTE in the stack frame or the return
+ * from interrupt will be painful
+ */
+ li r10,1
+ std r10,SOFTE(r1)
+
#ifdef SHOW_SYSCALLS
bl .do_show_syscall
REST_GPR(0,r1)
@@ -198,16 +193,14 @@ syscall_exit:
andi. r10,r8,MSR_RI
beq- unrecov_restore
#endif
-
- /* Disable interrupts so current_thread_info()->flags can't change,
+ /*
+ * Disable interrupts so current_thread_info()->flags can't change,
* and so that we don't get interrupted after loading SRR0/1.
*/
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
+ ld r10,PACAKMSR(r13)
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -319,7 +312,7 @@ syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
- mfmsr r10
+ ld r10,PACAKMSR(r13)
ori r10,r10,MSR_EE
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
@@ -565,10 +558,8 @@ _GLOBAL(ret_from_except_lite)
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10 /* Get current interrupt state */
- rldicl r9,r10,48,1 /* clear MSR_EE */
- rotldi r9,r9,16
- mtmsrd r9,1 /* Update machine state */
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+ mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PREEMPT
@@ -591,25 +582,74 @@ _GLOBAL(ret_from_except_lite)
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_USER_WORK_MASK
bne do_work
-#endif
+#endif /* !CONFIG_PREEMPT */
+ .globl fast_exc_return_irq
+fast_exc_return_irq:
restore:
-BEGIN_FW_FTR_SECTION
+ /*
+ * This is the main kernel exit path, we first check if we
+ * have to change our interrupt state.
+ */
ld r5,SOFTE(r1)
-FW_FTR_SECTION_ELSE
- b .Liseries_check_pending_irqs
-ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
-2:
- TRACE_AND_RESTORE_IRQ(r5);
+ lbz r6,PACASOFTIRQEN(r13)
+ cmpwi cr1,r5,0
+ cmpw cr0,r5,r6
+ beq cr0,4f
+
+ /* We do, handle disable first, which is easy */
+ bne cr1,3f;
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13);
+ TRACE_DISABLE_INTS
+ b 4f
- /* extract EE bit and use it to restore paca->hard_enabled */
- ld r3,_MSR(r1)
- rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
- stb r4,PACAHARDIRQEN(r13)
+3: /*
+ * We are about to soft-enable interrupts (we are hard disabled
+ * at this point). We check if there's anything that needs to
+ * be replayed first.
+ */
+ lbz r0,PACAIRQHAPPENED(r13)
+ cmpwi cr0,r0,0
+ bne- restore_check_irq_replay
+ /*
+ * Get here when nothing happened while soft-disabled, just
+ * soft-enable and move on. We will hard-enable as a side
+ * effect of rfi.
+ */
+restore_no_replay:
+ TRACE_ENABLE_INTS
+ li r0,1
+ stb r0,PACASOFTIRQEN(r13);
+
+ /*
+ * Final return path. BookE is handled in a different file
+ */
+4:
#ifdef CONFIG_PPC_BOOK3E
b .exception_return_book3e
#else
+ /*
+ * Clear the reservation. If we know the CPU tracks the address of
+ * the reservation then we can potentially save some cycles and use
+ * a larx. On POWER6 and POWER7 this is significantly faster.
+ */
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r4,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ /*
+ * Some code paths, such as load_up_fpu or altivec, return directly
+ * here. They run entirely hard disabled and do not alter the
+ * interrupt state. They also don't use lwarx/stwcx. and thus
+ * are known not to leave dangling reservations.
+ */
+ .globl fast_exception_return
+fast_exception_return:
+ ld r3,_MSR(r1)
ld r4,_CTR(r1)
ld r0,_LINK(r1)
mtctr r4
@@ -623,28 +663,18 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
beq- unrecov_restore
/*
- * Clear the reservation. If we know the CPU tracks the address of
- * the reservation then we can potentially save some cycles and use
- * a larx. On POWER6 and POWER7 this is significantly faster.
- */
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r4,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- /*
* Clear RI before restoring r13. If we are returning to
* userspace and we take an exception after restoring r13,
* we end up corrupting the userspace r13 value.
*/
- mfmsr r4
- andc r4,r4,r0 /* r0 contains MSR_RI here */
+ ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
+ andc r4,r4,r0 /* r0 contains MSR_RI here */
mtmsrd r4,1
/*
* r13 is our per cpu area, only restore it if we are returning to
- * userspace
+ * userspace as the value stored in the stack frame may belong to
+ * another CPU.
*/
andi. r0,r3,MSR_PR
beq 1f
@@ -669,30 +699,55 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
#endif /* CONFIG_PPC_BOOK3E */
-.Liseries_check_pending_irqs:
-#ifdef CONFIG_PPC_ISERIES
- ld r5,SOFTE(r1)
- cmpdi 0,r5,0
- beq 2b
- /* Check for pending interrupts (iSeries) */
- ld r3,PACALPPACAPTR(r13)
- ld r3,LPPACAANYINT(r3)
- cmpdi r3,0
- beq+ 2b /* skip do_IRQ if no interrupts */
-
- li r3,0
- stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_off
- mfmsr r10
-#endif
- ori r10,r10,MSR_EE
- mtmsrd r10 /* hard-enable again */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_IRQ
- b .ret_from_except_lite /* loop back and handle more */
-#endif
+ /*
+ * Something did happen, check if a re-emit is needed
+ * (this also clears paca->irq_happened)
+ */
+restore_check_irq_replay:
+ /* XXX: We could implement a fast path here where we check
+ * for irq_happened being just 0x01, in which case we can
+ * clear it and return. That means that we would potentially
+ * miss a decrementer having wrapped all the way around.
+ *
+ * Still, this might be useful for things like hash_page
+ */
+ bl .__check_irq_replay
+ cmpwi cr0,r3,0
+ beq restore_no_replay
+
+ /*
+ * We need to re-emit an interrupt. We do so by re-using our
+ * existing exception frame. We first change the trap value,
+ * but we need to ensure we preserve the low nibble of it
+ */
+ ld r4,_TRAP(r1)
+ clrldi r4,r4,60
+ or r4,r4,r3
+ std r4,_TRAP(r1)
+ /*
+ * Then find the right handler and call it. Interrupts are
+ * still soft-disabled and we keep them that way.
+ */
+ cmpwi cr0,r3,0x500
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .do_IRQ
+ b .ret_from_except
+1: cmpwi cr0,r3,0x900
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .timer_interrupt
+ b .ret_from_except
+#ifdef CONFIG_PPC_BOOK3E
+1: cmpwi cr0,r3,0x280
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl .doorbell_exception
+ b .ret_from_except
+#endif /* CONFIG_PPC_BOOK3E */
+1: b .ret_from_except /* What else to do here ? */
+
do_work:
#ifdef CONFIG_PREEMPT
andi. r0,r3,MSR_PR /* Returning to user mode? */
@@ -705,31 +760,22 @@ do_work:
crandc eq,cr1*4+eq,eq
bne restore
- /* Here we are preempting the current task.
- *
- * Ensure interrupts are soft-disabled. We also properly mark
- * the PACA to reflect the fact that they are hard-disabled
- * and trace the change
+ /*
+ * Here we are preempting the current task. We want to make
+ * sure we are soft-disabled first
*/
- li r0,0
- stb r0,PACASOFTIRQEN(r13)
- stb r0,PACAHARDIRQEN(r13)
- TRACE_DISABLE_INTS
-
- /* Call the scheduler with soft IRQs off */
+ SOFT_DISABLE_INTS(r3,r4)
1: bl .preempt_schedule_irq
/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
- mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
- li r0,0
- stb r0,PACAHARDIRQEN(r13)
+ li r0,PACA_IRQ_HARD_DIS
+ stb r0,PACAIRQHAPPENED(r13)
/* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
@@ -751,12 +797,14 @@ user_work:
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
+ bl .restore_interrupts
bl .schedule
b .ret_from_except_lite
1: bl .save_nvgprs
+ bl .restore_interrupts
addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_signal
+ bl .do_notify_resume
b .ret_from_except
unrecov_restore:
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 429983c06f91..7215cc2495df 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -24,6 +24,7 @@
#include <asm/ptrace.h>
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
+#include <asm/hw_irq.h>
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -77,59 +78,55 @@
#define SPRN_MC_SRR1 SPRN_MCSRR1
#define NORMAL_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, GEN, addition##_GEN)
+ EXCEPTION_PROLOG(n, GEN, addition##_GEN(n))
#define CRIT_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, CRIT, addition##_CRIT)
+ EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n))
#define DBG_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, DBG, addition##_DBG)
+ EXCEPTION_PROLOG(n, DBG, addition##_DBG(n))
#define MC_EXCEPTION_PROLOG(n, addition) \
- EXCEPTION_PROLOG(n, MC, addition##_MC)
+ EXCEPTION_PROLOG(n, MC, addition##_MC(n))
/* Variants of the "addition" argument for the prolog
*/
-#define PROLOG_ADDITION_NONE_GEN
-#define PROLOG_ADDITION_NONE_CRIT
-#define PROLOG_ADDITION_NONE_DBG
-#define PROLOG_ADDITION_NONE_MC
+#define PROLOG_ADDITION_NONE_GEN(n)
+#define PROLOG_ADDITION_NONE_CRIT(n)
+#define PROLOG_ADDITION_NONE_DBG(n)
+#define PROLOG_ADDITION_NONE_MC(n)
-#define PROLOG_ADDITION_MASKABLE_GEN \
+#define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
cmpwi cr0,r11,0; /* yes -> go out of line */ \
- beq masked_interrupt_book3e;
+ beq masked_interrupt_book3e_##n
-#define PROLOG_ADDITION_2REGS_GEN \
+#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
std r15,PACA_EXGEN+EX_R15(r13)
-#define PROLOG_ADDITION_1REG_GEN \
+#define PROLOG_ADDITION_1REG_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13);
-#define PROLOG_ADDITION_2REGS_CRIT \
+#define PROLOG_ADDITION_2REGS_CRIT(n) \
std r14,PACA_EXCRIT+EX_R14(r13); \
std r15,PACA_EXCRIT+EX_R15(r13)
-#define PROLOG_ADDITION_2REGS_DBG \
+#define PROLOG_ADDITION_2REGS_DBG(n) \
std r14,PACA_EXDBG+EX_R14(r13); \
std r15,PACA_EXDBG+EX_R15(r13)
-#define PROLOG_ADDITION_2REGS_MC \
+#define PROLOG_ADDITION_2REGS_MC(n) \
std r14,PACA_EXMC+EX_R14(r13); \
std r15,PACA_EXMC+EX_R15(r13)
-#define PROLOG_ADDITION_DOORBELL_GEN \
- lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
- cmpwi cr0,r11,0; /* yes -> go out of line */ \
- beq masked_doorbell_book3e
-
/* Core exception code for all exceptions except TLB misses.
* XXX: Needs to make SPRN_SPRG_GEN depend on exception type
*/
#define EXCEPTION_COMMON(n, excf, ints) \
+exc_##n##_common: \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r2,GPR2(r1); /* save r2 in stackframe */ \
SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
@@ -167,20 +164,21 @@
std r0,RESULT(r1); /* clear regs->result */ \
ints;
-/* Variants for the "ints" argument */
+/* Variants for the "ints" argument. This one does nothing when we want
+ * to keep interrupts in their original state
+ */
#define INTS_KEEP
-#define INTS_DISABLE_SOFT \
- stb r0,PACASOFTIRQEN(r13); /* mark interrupts soft-disabled */ \
- TRACE_DISABLE_INTS;
-#define INTS_DISABLE_HARD \
- stb r0,PACAHARDIRQEN(r13); /* and hard disabled */
-#define INTS_DISABLE_ALL \
- INTS_DISABLE_SOFT \
- INTS_DISABLE_HARD
-
-/* This is called by exceptions that used INTS_KEEP (that is did not clear
- * neither soft nor hard IRQ indicators in the PACA. This will restore MSR:EE
- * to it's previous value
+
+/* This second version is meant for exceptions that don't immediately
+ * hard-enable. We set a bit in paca->irq_happened to ensure that
+ * a subsequent call to arch_local_irq_restore() will properly
+ * hard-enable and avoid the fast-path
+ */
+#define INTS_DISABLE SOFT_DISABLE_INTS(r3,r4)
+
+/* This is called by exceptions that used INTS_KEEP (that did not touch
+ * irq indicators in the PACA). This will restore MSR:EE to its previous
+ * value
*
* XXX In the long run, we may want to open-code it in order to separate the
* load from the wrtee, thus limiting the latency caused by the dependency
@@ -238,7 +236,7 @@ exc_##n##_bad_stack: \
#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \
START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \
- EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL) \
+ EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \
ack(r8); \
CHECK_NAPPING(); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -289,7 +287,7 @@ interrupt_end_book3e:
/* Critical Input Interrupt */
START_EXCEPTION(critical_input);
CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE)
-// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL)
+// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -300,7 +298,7 @@ interrupt_end_book3e:
/* Machine Check Interrupt */
START_EXCEPTION(machine_check);
CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE)
-// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL)
+// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
// bl special_reg_save_mc
// addi r3,r1,STACK_FRAME_OVERHEAD
// CHECK_NAPPING();
@@ -313,7 +311,7 @@ interrupt_end_book3e:
NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
- EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_KEEP)
+ EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
b storage_fault_common
/* Instruction Storage Interrupt */
@@ -321,7 +319,7 @@ interrupt_end_book3e:
NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS)
li r15,0
mr r14,r10
- EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_KEEP)
+ EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
b storage_fault_common
/* External Input Interrupt */
@@ -339,12 +337,11 @@ interrupt_end_book3e:
START_EXCEPTION(program);
NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG)
mfspr r14,SPRN_ESR
- EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE_SOFT)
+ EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
std r14,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXGEN+EX_R14(r13)
bl .save_nvgprs
- INTS_RESTORE_HARD
bl .program_check_exception
b .ret_from_except
@@ -353,15 +350,16 @@ interrupt_end_book3e:
NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE)
/* we can probably do a shorter exception entry for that one... */
EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
- bne 1f /* if from user, just load it up */
+ ld r12,_MSR(r1)
+ andi. r0,r12,MSR_PR;
+ beq- 1f
+ bl .load_up_fpu
+ b fast_exception_return
+1: INTS_DISABLE
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
- INTS_RESTORE_HARD
bl .kernel_fp_unavailable_exception
- BUG_OPCODE
-1: ld r12,_MSR(r1)
- bl .load_up_fpu
- b fast_exception_return
+ b .ret_from_except
/* Decrementer Interrupt */
MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC)
@@ -372,7 +370,7 @@ interrupt_end_book3e:
/* Watchdog Timer Interrupt */
START_EXCEPTION(watchdog);
CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE)
-// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL)
+// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -391,10 +389,9 @@ interrupt_end_book3e:
/* Auxiliary Processor Unavailable Interrupt */
START_EXCEPTION(ap_unavailable);
NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
- EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
bl .save_nvgprs
- INTS_RESTORE_HARD
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl .unknown_exception
b .ret_from_except
@@ -450,7 +447,7 @@ interrupt_end_book3e:
mfspr r15,SPRN_SPRG_CRIT_SCRATCH
mtspr SPRN_SPRG_GEN_SCRATCH,r15
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE_ALL)
+ EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
std r14,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
mr r4,r14
@@ -465,7 +462,7 @@ kernel_dbg_exc:
/* Debug exception as a debug interrupt*/
START_EXCEPTION(debug_debug);
- DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)
+ DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS)
/*
* If there is a single step or branch-taken exception in an
@@ -515,7 +512,7 @@ kernel_dbg_exc:
mfspr r15,SPRN_SPRG_DBG_SCRATCH
mtspr SPRN_SPRG_GEN_SCRATCH,r15
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL)
+ EXCEPTION_COMMON(0xd08, PACA_EXDBG, INTS_DISABLE)
std r14,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
mr r4,r14
@@ -525,21 +522,20 @@ kernel_dbg_exc:
bl .DebugException
b .ret_from_except
- MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE)
-
-/* Doorbell interrupt */
- START_EXCEPTION(doorbell)
- NORMAL_EXCEPTION_PROLOG(0x2070, PROLOG_ADDITION_DOORBELL)
- EXCEPTION_COMMON(0x2070, PACA_EXGEN, INTS_DISABLE_ALL)
- CHECK_NAPPING()
+ START_EXCEPTION(perfmon);
+ NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE)
+ EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl .doorbell_exception
+ bl .performance_monitor_exception
b .ret_from_except_lite
+/* Doorbell interrupt */
+ MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE)
+
/* Doorbell critical Interrupt */
START_EXCEPTION(doorbell_crit);
- CRIT_EXCEPTION_PROLOG(0x2080, PROLOG_ADDITION_NONE)
-// EXCEPTION_COMMON(0x2080, PACA_EXCRIT, INTS_DISABLE_ALL)
+ CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE)
+// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
// bl special_reg_save_crit
// CHECK_NAPPING();
// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -547,36 +543,114 @@ kernel_dbg_exc:
// b ret_from_crit_except
b .
+/* Guest Doorbell */
MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
- MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE)
- MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE)
- MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE)
+/* Guest Doorbell critical Interrupt */
+ START_EXCEPTION(guest_doorbell_crit);
+ CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE)
+// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
+// bl special_reg_save_crit
+// CHECK_NAPPING();
+// addi r3,r1,STACK_FRAME_OVERHEAD
+// bl .guest_doorbell_critical_exception
+// b ret_from_crit_except
+ b .
+
+/* Hypervisor call */
+ START_EXCEPTION(hypercall);
+ NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE)
+ EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .save_nvgprs
+ INTS_RESTORE_HARD
+ bl .unknown_exception
+ b .ret_from_except
+
+/* Embedded Hypervisor privileged */
+ START_EXCEPTION(ehpriv);
+ NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE)
+ EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .save_nvgprs
+ INTS_RESTORE_HARD
+ bl .unknown_exception
+ b .ret_from_except
/*
- * An interrupt came in while soft-disabled; clear EE in SRR1,
- * clear paca->hard_enabled and return.
+ * An interrupt came in while soft-disabled; we mark paca->irq_happened
+ * accordingly and, if the interrupt is level sensitive, we hard disable
*/
-masked_doorbell_book3e:
- mtcr r10
- /* Resend the doorbell to fire again when ints enabled */
- mfspr r10,SPRN_PIR
- PPC_MSGSND(r10)
- b masked_interrupt_book3e_common
-masked_interrupt_book3e:
+masked_interrupt_book3e_0x500:
+ /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */
+ li r11,PACA_IRQ_EE
+ b masked_interrupt_book3e_full_mask
+
+masked_interrupt_book3e_0x900:
+ ACK_DEC(r11);
+ li r11,PACA_IRQ_DEC
+ b masked_interrupt_book3e_no_mask
+masked_interrupt_book3e_0x980:
+ ACK_FIT(r11);
+ li r11,PACA_IRQ_DEC
+ b masked_interrupt_book3e_no_mask
+masked_interrupt_book3e_0x280:
+masked_interrupt_book3e_0x2c0:
+ li r11,PACA_IRQ_DBELL
+ b masked_interrupt_book3e_no_mask
+
+masked_interrupt_book3e_no_mask:
+ mtcr r10
+ lbz r10,PACAIRQHAPPENED(r13)
+ or r10,r10,r11
+ stb r10,PACAIRQHAPPENED(r13)
+ b 1f
+masked_interrupt_book3e_full_mask:
mtcr r10
-masked_interrupt_book3e_common:
- stb r11,PACAHARDIRQEN(r13)
+ lbz r10,PACAIRQHAPPENED(r13)
+ or r10,r10,r11
+ stb r10,PACAIRQHAPPENED(r13)
mfspr r10,SPRN_SRR1
rldicl r11,r10,48,1 /* clear MSR_EE */
rotldi r10,r11,16
mtspr SPRN_SRR1,r10
- ld r10,PACA_EXGEN+EX_R10(r13); /* restore registers */
+1: ld r10,PACA_EXGEN+EX_R10(r13);
ld r11,PACA_EXGEN+EX_R11(r13);
mfspr r13,SPRN_SPRG_GEN_SCRATCH;
rfi
b .
+/*
+ * Called from arch_local_irq_enable when an interrupt needs
+ * to be resent. r3 contains either 0x500, 0x900, 0x260 or 0x280
+ * to indicate the kind of interrupt. MSR:EE is already off.
+ * We generate a stackframe as if a real interrupt had happened.
+ *
+ * Note: While MSR:EE is off, we need to make sure that _MSR
+ * in the generated frame has EE set to 1 or the exception
+ * handler will not properly re-enable them.
+ */
+_GLOBAL(__replay_interrupt)
+ /* We are going to jump to the exception common code which
+ * will retrieve various register values from the PACA which
+ * we don't give a damn about.
+ */
+ mflr r10
+ mfmsr r11
+ mfcr r4
+ mtspr SPRN_SPRG_GEN_SCRATCH,r13;
+ std r1,PACA_EXGEN+EX_R1(r13);
+ stw r4,PACA_EXGEN+EX_CR(r13);
+ ori r11,r11,MSR_EE
+ subi r1,r1,INT_FRAME_SIZE;
+ cmpwi cr0,r3,0x500
+ beq exc_0x500_common
+ cmpwi cr0,r3,0x900
+ beq exc_0x900_common
+ cmpwi cr0,r3,0x280
+ beq exc_0x280_common
+ blr
+
/*
* This is called from 0x300 and 0x400 handlers after the prologs with
@@ -591,7 +665,6 @@ storage_fault_common:
mr r5,r15
ld r14,PACA_EXGEN+EX_R14(r13)
ld r15,PACA_EXGEN+EX_R15(r13)
- INTS_RESTORE_HARD
bl .do_page_fault
cmpdi r3,0
bne- 1f
@@ -680,6 +753,8 @@ BAD_STACK_TRAMPOLINE(0x000)
BAD_STACK_TRAMPOLINE(0x100)
BAD_STACK_TRAMPOLINE(0x200)
BAD_STACK_TRAMPOLINE(0x260)
+BAD_STACK_TRAMPOLINE(0x280)
+BAD_STACK_TRAMPOLINE(0x2a0)
BAD_STACK_TRAMPOLINE(0x2c0)
BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
@@ -697,11 +772,10 @@ BAD_STACK_TRAMPOLINE(0xa00)
BAD_STACK_TRAMPOLINE(0xb00)
BAD_STACK_TRAMPOLINE(0xc00)
BAD_STACK_TRAMPOLINE(0xd00)
+BAD_STACK_TRAMPOLINE(0xd08)
BAD_STACK_TRAMPOLINE(0xe00)
BAD_STACK_TRAMPOLINE(0xf00)
BAD_STACK_TRAMPOLINE(0xf20)
-BAD_STACK_TRAMPOLINE(0x2070)
-BAD_STACK_TRAMPOLINE(0x2080)
.globl bad_stack_book3e
bad_stack_book3e:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index d4be7bb3dbdf..cb705fdbb458 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -12,6 +12,7 @@
*
*/
+#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
@@ -19,7 +20,7 @@
* We layout physical memory as follows:
* 0x0000 - 0x00ff : Secondary processor spin code
* 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support, common interrupt prologs
* 0x6000 - 0x6fff : Initial (CPU0) segment table
* 0x7000 - 0x7fff : FWNMI data area
* 0x8000 - : Early init and support code
@@ -100,14 +101,14 @@ data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
#endif
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
- KVMTEST_PR, 0x300)
+ KVMTEST, 0x300)
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
#ifdef __DISABLED__
@@ -329,8 +330,8 @@ do_stab_bolted_pSeries:
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
#endif /* CONFIG_POWER4_ONLY */
- KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
- KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
+ KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
@@ -356,34 +357,60 @@ do_stab_bolted_pSeries:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
/*
- * An interrupt came in while soft-disabled; clear EE in SRR1,
- * clear paca->hard_enabled and return.
+ * An interrupt came in while soft-disabled. We set paca->irq_happened,
+ * then, if it was a decrementer interrupt, we bump the dec to max
+ * and return, else we hard disable and return. This is called with
+ * r10 containing the value to OR into the paca field.
*/
-masked_interrupt:
- stb r10,PACAHARDIRQEN(r13)
- mtcrf 0x80,r9
- ld r9,PACA_EXGEN+EX_R9(r13)
- mfspr r10,SPRN_SRR1
- rldicl r10,r10,48,1 /* clear MSR_EE */
- rotldi r10,r10,16
- mtspr SPRN_SRR1,r10
- ld r10,PACA_EXGEN+EX_R10(r13)
- GET_SCRATCH0(r13)
- rfid
+#define MASKED_INTERRUPT(_H) \
+masked_##_H##interrupt: \
+ std r11,PACA_EXGEN+EX_R11(r13); \
+ lbz r11,PACAIRQHAPPENED(r13); \
+ or r11,r11,r10; \
+ stb r11,PACAIRQHAPPENED(r13); \
+ andi. r10,r10,PACA_IRQ_DEC; \
+ beq 1f; \
+ lis r10,0x7fff; \
+ ori r10,r10,0xffff; \
+ mtspr SPRN_DEC,r10; \
+ b 2f; \
+1: mfspr r10,SPRN_##_H##SRR1; \
+ rldicl r10,r10,48,1; /* clear MSR_EE */ \
+ rotldi r10,r10,16; \
+ mtspr SPRN_##_H##SRR1,r10; \
+2: mtcrf 0x80,r9; \
+ ld r9,PACA_EXGEN+EX_R9(r13); \
+ ld r10,PACA_EXGEN+EX_R10(r13); \
+ ld r11,PACA_EXGEN+EX_R11(r13); \
+ GET_SCRATCH0(r13); \
+ ##_H##rfid; \
b .
+
+ MASKED_INTERRUPT()
+ MASKED_INTERRUPT(H)
-masked_Hinterrupt:
- stb r10,PACAHARDIRQEN(r13)
- mtcrf 0x80,r9
- ld r9,PACA_EXGEN+EX_R9(r13)
- mfspr r10,SPRN_HSRR1
- rldicl r10,r10,48,1 /* clear MSR_EE */
- rotldi r10,r10,16
- mtspr SPRN_HSRR1,r10
- ld r10,PACA_EXGEN+EX_R10(r13)
- GET_SCRATCH0(r13)
- hrfid
- b .
+/*
+ * Called from arch_local_irq_enable when an interrupt needs
+ * to be resent. r3 contains 0x500 or 0x900 to indicate which
+ * kind of interrupt. MSR:EE is already off. We generate a
+ * stackframe as if a real interrupt had happened.
+ *
+ * Note: While MSR:EE is off, we need to make sure that _MSR
+ * in the generated frame has EE set to 1 or the exception
+ * handler will not properly re-enable them.
+ */
+_GLOBAL(__replay_interrupt)
+ /* We are going to jump to the exception common code which
+ * will retrieve various register values from the PACA which
+ * we don't give a damn about, so we don't bother storing them.
+ */
+ mfmsr r12
+ mflr r11
+ mfcr r9
+ ori r12,r12,MSR_EE
+ andi. r3,r3,0x0800
+ bne decrementer_common
+ b hardware_interrupt_common
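Taken together, the masked handlers and __replay_interrupt() implement the lazy-disable scheme: an interrupt that arrives while soft-disabled is only latched in paca->irq_happened, and the latched event is turned back into a real exception once interrupts are soft-enabled again. A toy, self-contained userspace model of that bookkeeping follows; it is a sketch only, the flag values are invented here and the kernel's real logic lives in irq.c and asm/hw_irq.h.

#include <stdio.h>

/* Toy model of the lazy-disable bookkeeping, not the kernel's actual
 * control flow. Flag values are invented for this sketch. */
#define LATCHED_EE      0x01    /* external interrupt (0x500) was masked */
#define LATCHED_DEC     0x02    /* decrementer (0x900) was masked        */

static int soft_enabled = 1;
static unsigned int irq_happened;

/* What a masked handler does: latch the event and return immediately. */
static void take_interrupt(unsigned int kind)
{
        if (soft_enabled)
                printf("interrupt handled immediately\n");
        else
                irq_happened |= kind;
}

/* What soft-enabling does: replay whatever was latched while disabled. */
static void soft_enable(void)
{
        soft_enabled = 1;
        if (irq_happened & LATCHED_EE)
                printf("replaying external interrupt (0x500)\n");
        if (irq_happened & LATCHED_DEC)
                printf("replaying decrementer interrupt (0x900)\n");
        irq_happened = 0;
}

int main(void)
{
        soft_enabled = 0;               /* local_irq_disable() */
        take_interrupt(LATCHED_DEC);    /* arrives while soft-disabled */
        soft_enable();                  /* local_irq_enable() replays it */
        return 0;
}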
#ifdef CONFIG_PPC_PSERIES
/*
@@ -458,14 +485,15 @@ machine_check_common:
bl .machine_check_exception
b .ret_from_except
- STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+ STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
+ STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
- STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
+ STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
@@ -482,6 +510,9 @@ machine_check_common:
system_call_entry:
b system_call_common
+ppc64_runlatch_on_trampoline:
+ b .__ppc64_runlatch_on
+
/*
* Here we have detected that the kernel stack pointer is bad.
* R9 contains the saved CR, r13 points to the paca,
@@ -555,6 +586,8 @@ data_access_common:
mfspr r10,SPRN_DSISR
stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+ DISABLE_INTS
+ ld r12,_MSR(r1)
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
li r5,0x300
@@ -569,6 +602,7 @@ h_data_storage_common:
stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
bl .save_nvgprs
+ DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
bl .unknown_exception
b .ret_from_except
@@ -577,6 +611,8 @@ h_data_storage_common:
.globl instruction_access_common
instruction_access_common:
EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+ DISABLE_INTS
+ ld r12,_MSR(r1)
ld r3,_NIP(r1)
andis. r4,r12,0x5820
li r5,0x400
@@ -672,12 +708,6 @@ _GLOBAL(slb_miss_realmode)
ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- ld r11,PACALPPACAPTR(r13)
- ld r11,LPPACASRR0(r11) /* get SRR0 value */
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
mtlr r10
@@ -690,12 +720,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
@@ -704,13 +728,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
rfid
b . /* prevent speculative execution */
-2:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- b unrecov_slb
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
- mfspr r11,SPRN_SRR0
+2: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
@@ -727,20 +745,6 @@ unrecov_slb:
bl .unrecoverable_exception
b 1b
- .align 7
- .globl hardware_interrupt_common
- .globl hardware_interrupt_entry
-hardware_interrupt_common:
- EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
- FINISH_NAP
-hardware_interrupt_entry:
- DISABLE_INTS
-BEGIN_FTR_SECTION
- bl .ppc64_runlatch_on
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_IRQ
- b .ret_from_except_lite
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
@@ -774,8 +778,8 @@ alignment_common:
program_check_common:
EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
bl .save_nvgprs
+ DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
bl .program_check_exception
b .ret_from_except
@@ -785,8 +789,8 @@ fp_unavailable_common:
EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
bne 1f /* if from user, just load it up */
bl .save_nvgprs
+ DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
bl .kernel_fp_unavailable_exception
BUG_OPCODE
1: bl .load_up_fpu
@@ -805,8 +809,8 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
bl .save_nvgprs
+ DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
bl .altivec_unavailable_exception
b .ret_from_except
@@ -816,13 +820,14 @@ vsx_unavailable_common:
EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
- bne .load_up_vsx
+ beq 1f
+ b .load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
bl .save_nvgprs
+ DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
bl .vsx_unavailable_exception
b .ret_from_except
@@ -831,66 +836,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
__end_handlers:
/*
- * Return from an exception with minimal checks.
- * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
- * If interrupts have been enabled, or anything has been
- * done that might have changed the scheduling status of
- * any task or sent any task a signal, you should use
- * ret_from_except or ret_from_except_lite instead of this.
- */
-fast_exc_return_irq: /* restores irq state too */
- ld r3,SOFTE(r1)
- TRACE_AND_RESTORE_IRQ(r3);
- ld r12,_MSR(r1)
- rldicl r4,r12,49,63 /* get MSR_EE to LSB */
- stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
- b 1f
-
- .globl fast_exception_return
-fast_exception_return:
- ld r12,_MSR(r1)
-1: ld r11,_NIP(r1)
- andi. r3,r12,MSR_RI /* check if RI is set */
- beq- unrecov_fer
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- andi. r3,r12,MSR_PR
- beq 2f
- ACCOUNT_CPU_USER_EXIT(r3, r4)
-2:
-#endif
-
- ld r3,_CCR(r1)
- ld r4,_LINK(r1)
- ld r5,_CTR(r1)
- ld r6,_XER(r1)
- mtcr r3
- mtlr r4
- mtctr r5
- mtxer r6
- REST_GPR(0, r1)
- REST_8GPRS(2, r1)
-
- mfmsr r10
- rldicl r10,r10,48,1 /* clear EE */
- rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
- mtmsrd r10,1
-
- mtspr SPRN_SRR1,r12
- mtspr SPRN_SRR0,r11
- REST_4GPRS(10, r1)
- ld r1,GPR1(r1)
- rfid
- b . /* prevent speculative execution */
-
-unrecov_fer:
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-
-/*
* Hash table stuff
*/
.align 7
@@ -912,28 +857,6 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
-
- /*
- * On iSeries, we soft-disable interrupts here, then
- * hard-enable interrupts so that the hash_page code can spin on
- * the hash_table_lock without problems on a shared processor.
- */
- DISABLE_INTS
-
- /*
- * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
- * and will clobber volatile registers when irq tracing is enabled
- * so we need to reload them. It may be possible to be smarter here
- * and move the irq tracing elsewhere but let's keep it simple for
- * now
- */
-#ifdef CONFIG_TRACE_IRQFLAGS
- ld r3,_DAR(r1)
- ld r4,_DSISR(r1)
- ld r5,_TRAP(r1)
- ld r12,_MSR(r1)
- clrrdi r5,r5,4
-#endif /* CONFIG_TRACE_IRQFLAGS */
/*
* We need to set the _PAGE_USER bit if MSR_PR is set or if we are
* accessing a userspace segment (even from the kernel). We assume
@@ -951,62 +874,25 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
* r4 contains the required access permissions
* r5 contains the trap number
*
- * at return r3 = 0 for success
+ * at return r3 = 0 for success, 1 for page fault, negative for error
*/
bl .hash_page /* build HPTE if possible */
cmpdi r3,0 /* see if hash_page succeeded */
-BEGIN_FW_FTR_SECTION
- /*
- * If we had interrupts soft-enabled at the point where the
- * DSI/ISI occurred, and an interrupt came in during hash_page,
- * handle it now.
- * We jump to ret_from_except_lite rather than fast_exception_return
- * because ret_from_except_lite will check for and handle pending
- * interrupts if necessary.
- */
- beq 13f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-
-BEGIN_FW_FTR_SECTION
- /*
- * Here we have interrupts hard-disabled, so it is sufficient
- * to restore paca->{soft,hard}_enable and get out.
- */
+ /* Success */
beq fast_exc_return_irq /* Return from exception on success */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
- /* For a hash failure, we don't bother re-enabling interrupts */
- ble- 12f
-
- /*
- * hash_page couldn't handle it, set soft interrupt enable back
- * to what it was before the trap. Note that .arch_local_irq_restore
- * handles any interrupts pending at this point.
- */
- ld r3,SOFTE(r1)
- TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
- bl .arch_local_irq_restore
- b 11f
-
-/* We have a data breakpoint exception - handle it */
-handle_dabr_fault:
- bl .save_nvgprs
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_dabr
- b .ret_from_except_lite
+ /* Error */
+ blt- 13f
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
- ENABLE_INTS
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_page_fault
cmpdi r3,0
- beq+ 13f
+ beq+ 12f
bl .save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1014,12 +900,20 @@ handle_page_fault:
bl .bad_page_fault
b .ret_from_except
-13: b .ret_from_except_lite
+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+ bl .save_nvgprs
+ ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_dabr
+12: b .ret_from_except_lite
+
/* We have a page fault that hash_page could handle but HV refused
* the PTE insertion
*/
-12: bl .save_nvgprs
+13: bl .save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
ld r4,_DAR(r1)
@@ -1141,51 +1035,19 @@ _GLOBAL(do_stab_bolted)
.= 0x7000
.globl fwnmi_data_area
fwnmi_data_area:
-#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
- /* iSeries does not use the FWNMI stuff, so it is safe to put
- * this here, even if we later allow kernels that will boot on
- * both pSeries and iSeries */
-#ifdef CONFIG_PPC_ISERIES
- . = LPARMAP_PHYS
- .globl xLparMap
-xLparMap:
- .quad HvEsidsToMap /* xNumberEsids */
- .quad HvRangesToMap /* xNumberRanges */
- .quad STAB0_PAGE /* xSegmentTableOffs */
- .zero 40 /* xRsvd */
- /* xEsids (HvEsidsToMap entries of 2 quads) */
- .quad PAGE_OFFSET_ESID /* xKernelEsid */
- .quad PAGE_OFFSET_VSID /* xKernelVsid */
- .quad VMALLOC_START_ESID /* xKernelEsid */
- .quad VMALLOC_START_VSID /* xKernelVsid */
- /* xRanges (HvRangesToMap entries of 3 quads) */
- .quad HvPagesToMap /* xPages */
- .quad 0 /* xOffset */
- .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
-
-#endif /* CONFIG_PPC_ISERIES */
-
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/* pseries and powernv need to keep the whole page from
* 0x7000 to 0x8000 free for use by the firmware
*/
. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is given to the hv
- * as a page number (see xLparMap above), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
- . = STAB0_OFFSET /* 0x8000 */
+/* Space for CPU0's segment table */
+ .balign 4096
.globl initial_stab
initial_stab:
.space 4096
+
#ifdef CONFIG_PPC_POWERNV
_GLOBAL(opal_mc_secondary_handler)
HMT_MEDIUM
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
new file mode 100644
index 000000000000..cfe7a38708c3
--- /dev/null
+++ b/arch/powerpc/kernel/fadump.c
@@ -0,0 +1,1315 @@
+/*
+ * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
+ * dump with assistance from firmware. This approach does not use kexec;
+ * instead, firmware assists in booting the kdump kernel while preserving
+ * memory contents. Most of the code implementation has been adapted
+ * from the phyp assisted dump implementation written by Linas Vepstas and
+ * Manish Ahuja
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2011 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+#define pr_fmt(fmt) "fadump: " fmt
+
+#include <linux/string.h>
+#include <linux/memblock.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/crash_dump.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include <asm/page.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/fadump.h>
+
+static struct fw_dump fw_dump;
+static struct fadump_mem_struct fdm;
+static const struct fadump_mem_struct *fdm_active;
+
+static DEFINE_MUTEX(fadump_mutex);
+struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+int crash_mem_ranges;
+
+/* Scan the Firmware Assisted dump configuration details. */
+int __init early_init_dt_scan_fw_dump(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ __be32 *sections;
+ int i, num_sections;
+ unsigned long size;
+ const int *token;
+
+ if (depth != 1 || strcmp(uname, "rtas") != 0)
+ return 0;
+
+ /*
+ * Check if Firmware Assisted dump is supported. If yes, check
+ * whether a dump was initiated on the last reboot.
+ */
+ token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL);
+ if (!token)
+ return 0;
+
+ fw_dump.fadump_supported = 1;
+ fw_dump.ibm_configure_kernel_dump = *token;
+
+ /*
+ * The 'ibm,kernel-dump' rtas node is present only if there is
+ * dump data waiting for us.
+ */
+ fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL);
+ if (fdm_active)
+ fw_dump.dump_active = 1;
+
+ /* Get the sizes required to store dump data for the firmware provided
+ * dump sections.
+ * For each supported dump section type there is a 32 bit cell which
+ * defines the ID of the section, followed by two 32 bit cells which
+ * give the size of the section in bytes.
+ */
+ sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
+ &size);
+
+ if (!sections)
+ return 0;
+
+ num_sections = size / (3 * sizeof(u32));
+
+ for (i = 0; i < num_sections; i++, sections += 3) {
+ u32 type = (u32)of_read_number(sections, 1);
+
+ switch (type) {
+ case FADUMP_CPU_STATE_DATA:
+ fw_dump.cpu_state_data_size =
+ of_read_ulong(&sections[1], 2);
+ break;
+ case FADUMP_HPTE_REGION:
+ fw_dump.hpte_region_size =
+ of_read_ulong(&sections[1], 2);
+ break;
+ }
+ }
+ return 1;
+}
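The property parsed above is a flat array of three-cell entries: one 32 bit cell of section ID followed by two cells forming a 64 bit size, which is why the scanner divides the property length by 3 * sizeof(u32). A standalone sketch of that decoding; the section IDs and sizes below are made up for the example (the real FADUMP_* IDs come from asm/fadump.h), and the cells are shown in host order, whereas in the flattened device tree they are big-endian and of_read_number()/of_read_ulong() do the conversion.

#include <stdint.h>
#include <stdio.h>

static uint64_t two_cells(const uint32_t *c)
{
        /* high cell first, as of_read_ulong(..., 2) does */
        return ((uint64_t)c[0] << 32) | c[1];
}

int main(void)
{
        const uint32_t prop[] = {
                0x0001, 0x00000000, 0x00100000, /* hypothetical CPU state section, 1MB */
                0x0002, 0x00000000, 0x01000000, /* hypothetical HPTE section, 16MB     */
        };
        int n = sizeof(prop) / (3 * sizeof(uint32_t));

        for (int i = 0; i < n; i++)
                printf("section id %u: %llu bytes\n", (unsigned)prop[3 * i],
                       (unsigned long long)two_cells(&prop[3 * i + 1]));
        return 0;
}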
+
+int is_fadump_active(void)
+{
+ return fw_dump.dump_active;
+}
+
+/* Print firmware assisted dump configuration for debugging purposes. */
+static void fadump_show_config(void)
+{
+ pr_debug("Support for firmware-assisted dump (fadump): %s\n",
+ (fw_dump.fadump_supported ? "present" : "no support"));
+
+ if (!fw_dump.fadump_supported)
+ return;
+
+ pr_debug("Fadump enabled : %s\n",
+ (fw_dump.fadump_enabled ? "yes" : "no"));
+ pr_debug("Dump Active : %s\n",
+ (fw_dump.dump_active ? "yes" : "no"));
+ pr_debug("Dump section sizes:\n");
+ pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
+ pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size);
+ pr_debug("Boot memory size : %lx\n", fw_dump.boot_memory_size);
+}
+
+static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
+ unsigned long addr)
+{
+ if (!fdm)
+ return 0;
+
+ memset(fdm, 0, sizeof(struct fadump_mem_struct));
+ addr = addr & PAGE_MASK;
+
+ fdm->header.dump_format_version = 0x00000001;
+ fdm->header.dump_num_sections = 3;
+ fdm->header.dump_status_flag = 0;
+ fdm->header.offset_first_dump_section =
+ (u32)offsetof(struct fadump_mem_struct, cpu_state_data);
+
+ /*
+ * Fields for disk dump option.
+ * We are not using disk dump option, hence set these fields to 0.
+ */
+ fdm->header.dd_block_size = 0;
+ fdm->header.dd_block_offset = 0;
+ fdm->header.dd_num_blocks = 0;
+ fdm->header.dd_offset_disk_path = 0;
+
+ /* set 0 to disable an automatic dump-reboot. */
+ fdm->header.max_time_auto = 0;
+
+ /* Kernel dump sections */
+ /* cpu state data section. */
+ fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG;
+ fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA;
+ fdm->cpu_state_data.source_address = 0;
+ fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size;
+ fdm->cpu_state_data.destination_address = addr;
+ addr += fw_dump.cpu_state_data_size;
+
+ /* hpte region section */
+ fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG;
+ fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION;
+ fdm->hpte_region.source_address = 0;
+ fdm->hpte_region.source_len = fw_dump.hpte_region_size;
+ fdm->hpte_region.destination_address = addr;
+ addr += fw_dump.hpte_region_size;
+
+ /* RMA region section */
+ fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG;
+ fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION;
+ fdm->rmr_region.source_address = RMA_START;
+ fdm->rmr_region.source_len = fw_dump.boot_memory_size;
+ fdm->rmr_region.destination_address = addr;
+ addr += fw_dump.boot_memory_size;
+
+ return addr;
+}
+
+/**
+ * fadump_calculate_reserve_size(): reserve a variable boot area of 5% of system RAM
+ *
+ * Function to find the largest memory size we need to reserve during early
+ * boot process. This will be the size of the memory that is required for a
+ * kernel to boot successfully.
+ *
+ * This function has been taken from phyp-assisted dump feature implementation.
+ *
+ * Returns the larger of 256MB or 5% of system RAM, rounded down to a multiple of 256MB.
+ *
+ * TODO: Come up with a better approach to find out a more accurate memory
+ * size that is required for a kernel to boot successfully.
+ *
+ */
+static inline unsigned long fadump_calculate_reserve_size(void)
+{
+ unsigned long size;
+
+ /*
+ * Check if the size is specified through fadump_reserve_mem= cmdline
+ * option. If yes, then use that.
+ */
+ if (fw_dump.reserve_bootvar)
+ return fw_dump.reserve_bootvar;
+
+ /* divide by 20 to get 5% of value */
+ size = memblock_end_of_DRAM() / 20;
+
+ /* round it down to a multiple of 256MB */
+ size = size & ~0x0FFFFFFFUL;
+
+ /* Truncate to memory_limit. We don't want to over reserve the memory.*/
+ if (memory_limit && size > memory_limit)
+ size = memory_limit;
+
+ return (size > MIN_BOOT_MEM ? size : MIN_BOOT_MEM);
+}
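As a worked example of the heuristic above: the mask ~0x0FFFFFFF rounds the 5% figure down to a multiple of 256MB, and MIN_BOOT_MEM provides the floor (assumed to be 256MB in this sketch; the real value is defined in asm/fadump.h).

#include <stdio.h>

#define MIN_BOOT_MEM_SKETCH (256ULL << 20)      /* assumed floor for this sketch */

static unsigned long long reserve_size(unsigned long long dram_bytes)
{
        unsigned long long size = dram_bytes / 20;      /* 5% of RAM */

        size &= ~0x0FFFFFFFULL;                         /* round down to 256MB multiple */
        return size > MIN_BOOT_MEM_SKETCH ? size : MIN_BOOT_MEM_SKETCH;
}

int main(void)
{
        /* 64GB of RAM: 5% is ~3.2GB, rounded down to 3GB (12 x 256MB) */
        printf("%llu MB\n", reserve_size(64ULL << 30) >> 20);
        /* 4GB of RAM: 5% is ~205MB, so the 256MB floor applies */
        printf("%llu MB\n", reserve_size(4ULL << 30) >> 20);
        return 0;
}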
+
+/*
+ * Calculate the total memory size required to be reserved for
+ * firmware-assisted dump registration.
+ */
+static unsigned long get_fadump_area_size(void)
+{
+ unsigned long size = 0;
+
+ size += fw_dump.cpu_state_data_size;
+ size += fw_dump.hpte_region_size;
+ size += fw_dump.boot_memory_size;
+ size += sizeof(struct fadump_crash_info_header);
+ size += sizeof(struct elfhdr); /* ELF core header.*/
+ size += sizeof(struct elf_phdr); /* place holder for cpu notes */
+ /* Program headers for crash memory regions. */
+ size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);
+
+ size = PAGE_ALIGN(size);
+ return size;
+}
+
+int __init fadump_reserve_mem(void)
+{
+ unsigned long base, size, memory_boundary;
+
+ if (!fw_dump.fadump_enabled)
+ return 0;
+
+ if (!fw_dump.fadump_supported) {
+ printk(KERN_INFO "Firmware-assisted dump is not supported on"
+ " this hardware\n");
+ fw_dump.fadump_enabled = 0;
+ return 0;
+ }
+ /*
+ * Initialize boot memory size.
+ * If a dump is active then we have already calculated the size in the
+ * first kernel.
+ */
+ if (fdm_active)
+ fw_dump.boot_memory_size = fdm_active->rmr_region.source_len;
+ else
+ fw_dump.boot_memory_size = fadump_calculate_reserve_size();
+
+ /*
+ * Calculate the memory boundary.
+ * If memory_limit is less than actual memory boundary then reserve
+ * the memory for fadump beyond the memory_limit and adjust the
+ * memory_limit accordingly, so that the running kernel can run with
+ * specified memory_limit.
+ */
+ if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
+ size = get_fadump_area_size();
+ if ((memory_limit + size) < memblock_end_of_DRAM())
+ memory_limit += size;
+ else
+ memory_limit = memblock_end_of_DRAM();
+ printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
+ " dump, now %#016llx\n",
+ (unsigned long long)memory_limit);
+ }
+ if (memory_limit)
+ memory_boundary = memory_limit;
+ else
+ memory_boundary = memblock_end_of_DRAM();
+
+ if (fw_dump.dump_active) {
+ printk(KERN_INFO "Firmware-assisted dump is active.\n");
+ /*
+ * If the last boot crashed then reserve all the memory
+ * above boot_memory_size so that we don't touch it until the
+ * dump is written to disk by the userspace tool. This memory
+ * will be released for general use once the dump is saved.
+ */
+ base = fw_dump.boot_memory_size;
+ size = memory_boundary - base;
+ memblock_reserve(base, size);
+ printk(KERN_INFO "Reserved %ldMB of memory at %ldMB "
+ "for saving crash dump\n",
+ (unsigned long)(size >> 20),
+ (unsigned long)(base >> 20));
+
+ fw_dump.fadumphdr_addr =
+ fdm_active->rmr_region.destination_address +
+ fdm_active->rmr_region.source_len;
+ pr_debug("fadumphdr_addr = %p\n",
+ (void *) fw_dump.fadumphdr_addr);
+ } else {
+ /* Reserve the memory at the top of memory. */
+ size = get_fadump_area_size();
+ base = memory_boundary - size;
+ memblock_reserve(base, size);
+ printk(KERN_INFO "Reserved %ldMB of memory at %ldMB "
+ "for firmware-assisted dump\n",
+ (unsigned long)(size >> 20),
+ (unsigned long)(base >> 20));
+ }
+ fw_dump.reserve_dump_area_start = base;
+ fw_dump.reserve_dump_area_size = size;
+ return 1;
+}
+
+/* Look for fadump= cmdline option. */
+static int __init early_fadump_param(char *p)
+{
+ if (!p)
+ return 1;
+
+ if (strncmp(p, "on", 2) == 0)
+ fw_dump.fadump_enabled = 1;
+ else if (strncmp(p, "off", 3) == 0)
+ fw_dump.fadump_enabled = 0;
+
+ return 0;
+}
+early_param("fadump", early_fadump_param);
+
+/* Look for fadump_reserve_mem= cmdline option */
+static int __init early_fadump_reserve_mem(char *p)
+{
+ if (p)
+ fw_dump.reserve_bootvar = memparse(p, &p);
+ return 0;
+}
+early_param("fadump_reserve_mem", early_fadump_reserve_mem);
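Both options are plain early_param() handlers, and memparse() accepts the usual K/M/G suffixes, so an illustrative kernel command line (the values are not from this patch) would be:

    fadump=on fadump_reserve_mem=1G

which enables firmware-assisted dump and overrides the 5% reservation heuristic with an explicit 1GB boot memory size.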
+
+static void register_fw_dump(struct fadump_mem_struct *fdm)
+{
+ int rc;
+ unsigned int wait_time;
+
+ pr_debug("Registering for firmware-assisted kernel dump...\n");
+
+ /* TODO: Add upper time limit for the delay */
+ do {
+ rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
+ FADUMP_REGISTER, fdm,
+ sizeof(struct fadump_mem_struct));
+
+ wait_time = rtas_busy_delay_time(rc);
+ if (wait_time)
+ mdelay(wait_time);
+
+ } while (wait_time);
+
+ switch (rc) {
+ case -1:
+ printk(KERN_ERR "Failed to register firmware-assisted kernel"
+ " dump. Hardware Error(%d).\n", rc);
+ break;
+ case -3:
+ printk(KERN_ERR "Failed to register firmware-assisted kernel"
+ " dump. Parameter Error(%d).\n", rc);
+ break;
+ case -9:
+ printk(KERN_ERR "firmware-assisted kernel dump is already "
+ " registered.");
+ fw_dump.dump_registered = 1;
+ break;
+ case 0:
+ printk(KERN_INFO "firmware-assisted kernel dump registration"
+ " is successful\n");
+ fw_dump.dump_registered = 1;
+ break;
+ }
+}
+
+void crash_fadump(struct pt_regs *regs, const char *str)
+{
+ struct fadump_crash_info_header *fdh = NULL;
+
+ if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
+ return;
+
+ fdh = __va(fw_dump.fadumphdr_addr);
+ crashing_cpu = smp_processor_id();
+ fdh->crashing_cpu = crashing_cpu;
+ crash_save_vmcoreinfo();
+
+ if (regs)
+ fdh->regs = *regs;
+ else
+ ppc_save_regs(&fdh->regs);
+
+ fdh->cpu_online_mask = *cpu_online_mask;
+
+ /* Call ibm,os-term rtas call to trigger firmware assisted dump */
+ rtas_os_term((char *)str);
+}
+
+#define GPR_MASK 0xffffff0000000000
+static inline int fadump_gpr_index(u64 id)
+{
+ int i = -1;
+ char str[3];
+
+ if ((id & GPR_MASK) == REG_ID("GPR")) {
+ /* get the digits at the end */
+ id &= ~GPR_MASK;
+ id >>= 24;
+ str[2] = '\0';
+ str[1] = id & 0xff;
+ str[0] = (id >> 8) & 0xff;
+ sscanf(str, "%d", &i);
+ if (i > 31)
+ i = -1;
+ }
+ return i;
+}
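To make the tag decoding above concrete: the identifier is an 8 byte ASCII string reinterpreted as a u64, so on big-endian ppc64 the mask 0xffffff0000000000 selects the leading "GPR" and the following bytes carry the register number. A standalone sketch that builds the big-endian value explicitly and applies the same decode; the space padding after the digits is an assumption of the example and does not affect the result.

#include <stdint.h>
#include <stdio.h>

#define GPR_MASK_SKETCH 0xffffff0000000000ULL

static uint64_t tag_to_u64(const char *tag)
{
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)
                v = (v << 8) | (uint8_t)tag[i];         /* big-endian packing */
        return v;
}

static int gpr_index(uint64_t id)
{
        char str[3];
        int i = -1;

        if ((id & GPR_MASK_SKETCH) == (tag_to_u64("GPR     ") & GPR_MASK_SKETCH)) {
                id &= ~GPR_MASK_SKETCH;
                id >>= 24;                              /* keep the two digit bytes */
                str[2] = '\0';
                str[1] = id & 0xff;
                str[0] = (id >> 8) & 0xff;
                sscanf(str, "%d", &i);
                if (i > 31)
                        i = -1;
        }
        return i;
}

int main(void)
{
        printf("%d\n", gpr_index(tag_to_u64("GPR28   ")));      /* prints 28 */
        return 0;
}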
+
+static inline void fadump_set_regval(struct pt_regs *regs, u64 reg_id,
+ u64 reg_val)
+{
+ int i;
+
+ i = fadump_gpr_index(reg_id);
+ if (i >= 0)
+ regs->gpr[i] = (unsigned long)reg_val;
+ else if (reg_id == REG_ID("NIA"))
+ regs->nip = (unsigned long)reg_val;
+ else if (reg_id == REG_ID("MSR"))
+ regs->msr = (unsigned long)reg_val;
+ else if (reg_id == REG_ID("CTR"))
+ regs->ctr = (unsigned long)reg_val;
+ else if (reg_id == REG_ID("LR"))
+ regs->link = (unsigned long)reg_val;
+ else if (reg_id == REG_ID("XER"))
+ regs->xer = (unsigned long)reg_val;
+ else if (reg_id == REG_ID("CR"))
+ regs->ccr = (unsigned long)reg_val;
+ else if (reg_id == REG_ID("DAR"))
+ regs->dar = (unsigned long)reg_val;
+ else if (reg_id == REG_ID("DSISR"))
+ regs->dsisr = (unsigned long)reg_val;
+}
+
+static struct fadump_reg_entry*
+fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
+{
+ memset(regs, 0, sizeof(struct pt_regs));
+
+ while (reg_entry->reg_id != REG_ID("CPUEND")) {
+ fadump_set_regval(regs, reg_entry->reg_id,
+ reg_entry->reg_value);
+ reg_entry++;
+ }
+ reg_entry++;
+ return reg_entry;
+}
+
+static u32 *fadump_append_elf_note(u32 *buf, char *name, unsigned type,
+ void *data, size_t data_len)
+{
+ struct elf_note note;
+
+ note.n_namesz = strlen(name) + 1;
+ note.n_descsz = data_len;
+ note.n_type = type;
+ memcpy(buf, &note, sizeof(note));
+ buf += (sizeof(note) + 3)/4;
+ memcpy(buf, name, note.n_namesz);
+ buf += (note.n_namesz + 3)/4;
+ memcpy(buf, data, note.n_descsz);
+ buf += (note.n_descsz + 3)/4;
+
+ return buf;
+}
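Each note written by the helper above occupies the note header plus the name and the descriptor, each rounded up to 4 bytes (the code advances a u32 pointer by (len + 3) / 4 words). A quick standalone check of that arithmetic; the 1264 byte payload is illustrative, not the real sizeof(struct elf_prstatus).

#include <elf.h>
#include <stdio.h>
#include <string.h>

static size_t note_size(const char *name, size_t desc_len)
{
        size_t namesz = strlen(name) + 1;

        return sizeof(Elf64_Nhdr)                       /* 12 byte header  */
               + ((namesz + 3) & ~(size_t)3)            /* name, 4-aligned */
               + ((desc_len + 3) & ~(size_t)3);         /* desc, 4-aligned */
}

int main(void)
{
        /* e.g. a "CORE"/NT_PRSTATUS note with an illustrative 1264 byte payload */
        printf("%zu bytes\n", note_size("CORE", 1264)); /* 12 + 8 + 1264 = 1284 */
        return 0;
}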
+
+static void fadump_final_note(u32 *buf)
+{
+ struct elf_note note;
+
+ note.n_namesz = 0;
+ note.n_descsz = 0;
+ note.n_type = 0;
+ memcpy(buf, &note, sizeof(note));
+}
+
+static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
+{
+ struct elf_prstatus prstatus;
+
+ memset(&prstatus, 0, sizeof(prstatus));
+ /*
+	 * FIXME: How do I get the PID? Do I really need it?
+ * prstatus.pr_pid = ????
+ */
+ elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
+ buf = fadump_append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
+ &prstatus, sizeof(prstatus));
+ return buf;
+}
+
+static void fadump_update_elfcore_header(char *bufp)
+{
+ struct elfhdr *elf;
+ struct elf_phdr *phdr;
+
+ elf = (struct elfhdr *)bufp;
+ bufp += sizeof(struct elfhdr);
+
+ /* First note is a place holder for cpu notes info. */
+ phdr = (struct elf_phdr *)bufp;
+
+ if (phdr->p_type == PT_NOTE) {
+ phdr->p_paddr = fw_dump.cpu_notes_buf;
+ phdr->p_offset = phdr->p_paddr;
+ phdr->p_filesz = fw_dump.cpu_notes_buf_size;
+ phdr->p_memsz = fw_dump.cpu_notes_buf_size;
+ }
+ return;
+}
+
+static void *fadump_cpu_notes_buf_alloc(unsigned long size)
+{
+ void *vaddr;
+ struct page *page;
+ unsigned long order, count, i;
+
+ order = get_order(size);
+ vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
+ if (!vaddr)
+ return NULL;
+
+ count = 1 << order;
+ page = virt_to_page(vaddr);
+ for (i = 0; i < count; i++)
+ SetPageReserved(page + i);
+ return vaddr;
+}
+
+static void fadump_cpu_notes_buf_free(unsigned long vaddr, unsigned long size)
+{
+ struct page *page;
+ unsigned long order, count, i;
+
+ order = get_order(size);
+ count = 1 << order;
+ page = virt_to_page(vaddr);
+ for (i = 0; i < count; i++)
+ ClearPageReserved(page + i);
+ __free_pages(page, order);
+}
+
+/*
+ * Read CPU state dump data and convert it into ELF notes.
+ * The CPU dump starts with magic number "REGSAVE". NumCpusOffset should be
+ * used to access the data to allow for additional fields to be added without
+ * affecting compatibility. Each list of registers for a CPU starts with
+ * "CPUSTRT" and ends with "CPUEND". Each register entry is of 16 bytes,
+ * an 8 byte ASCII identifier and an 8 byte register value. The register
+ * entries with identifiers "CPUSTRT" and "CPUEND" contain a 4 byte cpu id
+ * as part of the register value. For more details refer to the PAPR document.
+ *
+ * Only for the crashing cpu do we ignore the CPU dump data and get the exact
+ * state from the fadump crash info structure populated by the first kernel at
+ * the time of crash.
+ */
+static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
+{
+ struct fadump_reg_save_area_header *reg_header;
+ struct fadump_reg_entry *reg_entry;
+ struct fadump_crash_info_header *fdh = NULL;
+ void *vaddr;
+ unsigned long addr;
+ u32 num_cpus, *note_buf;
+ struct pt_regs regs;
+ int i, rc = 0, cpu = 0;
+
+ if (!fdm->cpu_state_data.bytes_dumped)
+ return -EINVAL;
+
+ addr = fdm->cpu_state_data.destination_address;
+ vaddr = __va(addr);
+
+ reg_header = vaddr;
+ if (reg_header->magic_number != REGSAVE_AREA_MAGIC) {
+ printk(KERN_ERR "Unable to read register save area.\n");
+ return -ENOENT;
+ }
+ pr_debug("--------CPU State Data------------\n");
+ pr_debug("Magic Number: %llx\n", reg_header->magic_number);
+ pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset);
+
+ vaddr += reg_header->num_cpu_offset;
+ num_cpus = *((u32 *)(vaddr));
+ pr_debug("NumCpus : %u\n", num_cpus);
+ vaddr += sizeof(u32);
+ reg_entry = (struct fadump_reg_entry *)vaddr;
+
+ /* Allocate buffer to hold cpu crash notes. */
+ fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
+ fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
+ note_buf = fadump_cpu_notes_buf_alloc(fw_dump.cpu_notes_buf_size);
+ if (!note_buf) {
+ printk(KERN_ERR "Failed to allocate 0x%lx bytes for "
+ "cpu notes buffer\n", fw_dump.cpu_notes_buf_size);
+ return -ENOMEM;
+ }
+ fw_dump.cpu_notes_buf = __pa(note_buf);
+
+ pr_debug("Allocated buffer for cpu notes of size %ld at %p\n",
+ (num_cpus * sizeof(note_buf_t)), note_buf);
+
+ if (fw_dump.fadumphdr_addr)
+ fdh = __va(fw_dump.fadumphdr_addr);
+
+ for (i = 0; i < num_cpus; i++) {
+ if (reg_entry->reg_id != REG_ID("CPUSTRT")) {
+ printk(KERN_ERR "Unable to read CPU state data\n");
+ rc = -ENOENT;
+ goto error_out;
+ }
+ /* Lower 4 bytes of reg_value contains logical cpu id */
+ cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK;
+ if (!cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
+ SKIP_TO_NEXT_CPU(reg_entry);
+ continue;
+ }
+ pr_debug("Reading register data for cpu %d...\n", cpu);
+ if (fdh && fdh->crashing_cpu == cpu) {
+ regs = fdh->regs;
+ note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
+ SKIP_TO_NEXT_CPU(reg_entry);
+ } else {
+ reg_entry++;
+ reg_entry = fadump_read_registers(reg_entry, &regs);
+ note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
+ }
+ }
+ fadump_final_note(note_buf);
+
+ pr_debug("Updating elfcore header (%llx) with cpu notes\n",
+ fdh->elfcorehdr_addr);
+ fadump_update_elfcore_header((char *)__va(fdh->elfcorehdr_addr));
+ return 0;
+
+error_out:
+ fadump_cpu_notes_buf_free((unsigned long)__va(fw_dump.cpu_notes_buf),
+ fw_dump.cpu_notes_buf_size);
+ fw_dump.cpu_notes_buf = 0;
+ fw_dump.cpu_notes_buf_size = 0;
+ return rc;
+
+}
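The walk above only ever dereferences reg_id and reg_value of each fixed 16 byte record, so one record can be pictured as below. This is a sketch for illustration only; the authoritative layout is struct fadump_reg_entry in asm/fadump.h.

#include <stdint.h>

struct fadump_reg_entry_sketch {
        uint64_t reg_id;        /* 8 byte ASCII tag: "CPUSTRT", "GPRnn", ..., "CPUEND" */
        uint64_t reg_value;     /* register contents (cpu id for CPUSTRT/CPUEND)       */
};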
+
+/*
+ * Validate and process the dump data stored by firmware before exporting
+ * it through '/proc/vmcore'.
+ */
+static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
+{
+ struct fadump_crash_info_header *fdh;
+ int rc = 0;
+
+ if (!fdm_active || !fw_dump.fadumphdr_addr)
+ return -EINVAL;
+
+ /* Check if the dump data is valid. */
+ if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) ||
+ (fdm_active->cpu_state_data.error_flags != 0) ||
+ (fdm_active->rmr_region.error_flags != 0)) {
+ printk(KERN_ERR "Dump taken by platform is not valid\n");
+ return -EINVAL;
+ }
+ if ((fdm_active->rmr_region.bytes_dumped !=
+ fdm_active->rmr_region.source_len) ||
+ !fdm_active->cpu_state_data.bytes_dumped) {
+ printk(KERN_ERR "Dump taken by platform is incomplete\n");
+ return -EINVAL;
+ }
+
+ /* Validate the fadump crash info header */
+ fdh = __va(fw_dump.fadumphdr_addr);
+ if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
+ printk(KERN_ERR "Crash info header is not valid.\n");
+ return -EINVAL;
+ }
+
+ rc = fadump_build_cpu_notes(fdm_active);
+ if (rc)
+ return rc;
+
+ /*
+ * We are done validating dump info and the elfcore header is now
+ * ready to be exported. Set elfcorehdr_addr so that the vmcore module
+ * will export the elfcore header through '/proc/vmcore'.
+ */
+ elfcorehdr_addr = fdh->elfcorehdr_addr;
+
+ return 0;
+}
+
+static inline void fadump_add_crash_memory(unsigned long long base,
+ unsigned long long end)
+{
+ if (base == end)
+ return;
+
+ pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
+ crash_mem_ranges, base, end - 1, (end - base));
+ crash_memory_ranges[crash_mem_ranges].base = base;
+ crash_memory_ranges[crash_mem_ranges].size = end - base;
+ crash_mem_ranges++;
+}
+
+static void fadump_exclude_reserved_area(unsigned long long start,
+ unsigned long long end)
+{
+ unsigned long long ra_start, ra_end;
+
+ ra_start = fw_dump.reserve_dump_area_start;
+ ra_end = ra_start + fw_dump.reserve_dump_area_size;
+
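+ /* If the reserved dump area overlaps this range, carve it out; otherwise add the range as-is. */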
+ if ((ra_start < end) && (ra_end > start)) {
+ if ((start < ra_start) && (end > ra_end)) {
+ fadump_add_crash_memory(start, ra_start);
+ fadump_add_crash_memory(ra_end, end);
+ } else if (start < ra_start) {
+ fadump_add_crash_memory(start, ra_start);
+ } else if (ra_end < end) {
+ fadump_add_crash_memory(ra_end, end);
+ }
+ } else
+ fadump_add_crash_memory(start, end);
+}
+
+static int fadump_init_elfcore_header(char *bufp)
+{
+ struct elfhdr *elf;
+
+ elf = (struct elfhdr *) bufp;
+ bufp += sizeof(struct elfhdr);
+ memcpy(elf->e_ident, ELFMAG, SELFMAG);
+ elf->e_ident[EI_CLASS] = ELF_CLASS;
+ elf->e_ident[EI_DATA] = ELF_DATA;
+ elf->e_ident[EI_VERSION] = EV_CURRENT;
+ elf->e_ident[EI_OSABI] = ELF_OSABI;
+ memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
+ elf->e_type = ET_CORE;
+ elf->e_machine = ELF_ARCH;
+ elf->e_version = EV_CURRENT;
+ elf->e_entry = 0;
+ elf->e_phoff = sizeof(struct elfhdr);
+ elf->e_shoff = 0;
+ elf->e_flags = ELF_CORE_EFLAGS;
+ elf->e_ehsize = sizeof(struct elfhdr);
+ elf->e_phentsize = sizeof(struct elf_phdr);
+ elf->e_phnum = 0;
+ elf->e_shentsize = 0;
+ elf->e_shnum = 0;
+ elf->e_shstrndx = 0;
+
+ return 0;
+}
+
+/*
+ * Traverse through the memblock structure and set up crash memory ranges.
+ * These ranges will be used to create PT_LOAD program headers in the
+ * elfcore header.
+ */
+static void fadump_setup_crash_memory_ranges(void)
+{
+ struct memblock_region *reg;
+ unsigned long long start, end;
+
+ pr_debug("Setup crash memory ranges.\n");
+ crash_mem_ranges = 0;
+ /*
+ * Add the first memory chunk (RMA_START through boot_memory_size) as
+ * a separate memory chunk. The reason is that, at the time of crash,
+ * firmware will move the contents of this memory chunk to a different
+ * location specified during fadump registration. We need to create a
+ * separate program header for this chunk with the correct offset.
+ */
+ fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+
+ for_each_memblock(memory, reg) {
+ start = (unsigned long long)reg->base;
+ end = start + (unsigned long long)reg->size;
+ if (start == RMA_START && end >= fw_dump.boot_memory_size)
+ start = fw_dump.boot_memory_size;
+
+ /* add this range excluding the reserved dump area. */
+ fadump_exclude_reserved_area(start, end);
+ }
+}
+
+/*
+ * If the given physical address falls within the boot memory region then
+ * return the relocated address that points to the dump region reserved
+ * for saving initial boot memory contents.
+ */
+static inline unsigned long fadump_relocate(unsigned long paddr)
+{
+ if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
+ return fdm.rmr_region.destination_address + paddr;
+ else
+ return paddr;
+}
+
+static int fadump_create_elfcore_headers(char *bufp)
+{
+ struct elfhdr *elf;
+ struct elf_phdr *phdr;
+ int i;
+
+ fadump_init_elfcore_header(bufp);
+ elf = (struct elfhdr *)bufp;
+ bufp += sizeof(struct elfhdr);
+
+ /*
+ * Set up an ELF PT_NOTE as a placeholder for cpu notes info. The notes
+ * info will be populated during the second kernel boot after crash.
+ * Hence this PT_NOTE will always be the first ELF note.
+ *
+ * NOTE: Any new ELF note addition should be placed after this note.
+ */
+ phdr = (struct elf_phdr *)bufp;
+ bufp += sizeof(struct elf_phdr);
+ phdr->p_type = PT_NOTE;
+ phdr->p_flags = 0;
+ phdr->p_vaddr = 0;
+ phdr->p_align = 0;
+
+ phdr->p_offset = 0;
+ phdr->p_paddr = 0;
+ phdr->p_filesz = 0;
+ phdr->p_memsz = 0;
+
+ (elf->e_phnum)++;
+
+ /* setup ELF PT_NOTE for vmcoreinfo */
+ phdr = (struct elf_phdr *)bufp;
+ bufp += sizeof(struct elf_phdr);
+ phdr->p_type = PT_NOTE;
+ phdr->p_flags = 0;
+ phdr->p_vaddr = 0;
+ phdr->p_align = 0;
+
+ phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
+ phdr->p_offset = phdr->p_paddr;
+ phdr->p_memsz = vmcoreinfo_max_size;
+ phdr->p_filesz = vmcoreinfo_max_size;
+
+ /* Increment number of program headers. */
+ (elf->e_phnum)++;
+
+ /* Set up PT_LOAD program headers. */
+
+ for (i = 0; i < crash_mem_ranges; i++) {
+ unsigned long long mbase, msize;
+ mbase = crash_memory_ranges[i].base;
+ msize = crash_memory_ranges[i].size;
+
+ if (!msize)
+ continue;
+
+ phdr = (struct elf_phdr *)bufp;
+ bufp += sizeof(struct elf_phdr);
+ phdr->p_type = PT_LOAD;
+ phdr->p_flags = PF_R|PF_W|PF_X;
+ phdr->p_offset = mbase;
+
+ if (mbase == RMA_START) {
+ /*
+ * The entire RMA region will be moved by firmware
+ * to the specified destination_address. Hence set
+ * the correct offset.
+ */
+ phdr->p_offset = fdm.rmr_region.destination_address;
+ }
+
+ phdr->p_paddr = mbase;
+ phdr->p_vaddr = (unsigned long)__va(mbase);
+ phdr->p_filesz = msize;
+ phdr->p_memsz = msize;
+ phdr->p_align = 0;
+
+ /* Increment number of program headers. */
+ (elf->e_phnum)++;
+ }
+ return 0;
+}
+
+static unsigned long init_fadump_header(unsigned long addr)
+{
+ struct fadump_crash_info_header *fdh;
+
+ if (!addr)
+ return 0;
+
+ fw_dump.fadumphdr_addr = addr;
+ fdh = __va(addr);
+ addr += sizeof(struct fadump_crash_info_header);
+
+ memset(fdh, 0, sizeof(struct fadump_crash_info_header));
+ fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
+ fdh->elfcorehdr_addr = addr;
+ /* We will set the crashing cpu id in crash_fadump() during crash. */
+ fdh->crashing_cpu = CPU_UNKNOWN;
+
+ return addr;
+}
+
+static void register_fadump(void)
+{
+ unsigned long addr;
+ void *vaddr;
+
+ /*
+ * If no memory is reserved then we cannot register for firmware-
+ * assisted dump.
+ */
+ if (!fw_dump.reserve_dump_area_size)
+ return;
+
+ fadump_setup_crash_memory_ranges();
+
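+ /*
+ * Place the crash info header (and the ELF core header that follows
+ * it) right after the area reserved for the dumped boot memory
+ * contents.
+ */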
+ addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len;
+ /* Initialize fadump crash info header. */
+ addr = init_fadump_header(addr);
+ vaddr = __va(addr);
+
+ pr_debug("Creating ELF core headers at %#016lx\n", addr);
+ fadump_create_elfcore_headers(vaddr);
+
+ /* register the future kernel dump with firmware. */
+ register_fw_dump(&fdm);
+}
+
+static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
+{
+ int rc = 0;
+ unsigned int wait_time;
+
+ pr_debug("Un-register firmware-assisted dump\n");
+
+ /* TODO: Add upper time limit for the delay */
+ do {
+ rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
+ FADUMP_UNREGISTER, fdm,
+ sizeof(struct fadump_mem_struct));
+
+ wait_time = rtas_busy_delay_time(rc);
+ if (wait_time)
+ mdelay(wait_time);
+ } while (wait_time);
+
+ if (rc) {
+ printk(KERN_ERR "Failed to un-register firmware-assisted dump."
+ " unexpected error(%d).\n", rc);
+ return rc;
+ }
+ fw_dump.dump_registered = 0;
+ return 0;
+}
+
+static int fadump_invalidate_dump(struct fadump_mem_struct *fdm)
+{
+ int rc = 0;
+ unsigned int wait_time;
+
+ pr_debug("Invalidating firmware-assisted dump registration\n");
+
+ /* TODO: Add upper time limit for the delay */
+ do {
+ rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
+ FADUMP_INVALIDATE, fdm,
+ sizeof(struct fadump_mem_struct));
+
+ wait_time = rtas_busy_delay_time(rc);
+ if (wait_time)
+ mdelay(wait_time);
+ } while (wait_time);
+
+ if (rc) {
+ printk(KERN_ERR "Failed to invalidate firmware-assisted dump "
+ "rgistration. unexpected error(%d).\n", rc);
+ return rc;
+ }
+ fw_dump.dump_active = 0;
+ fdm_active = NULL;
+ return 0;
+}
+
+void fadump_cleanup(void)
+{
+ /* Invalidate the registration only if dump is active. */
+ if (fw_dump.dump_active) {
+ init_fadump_mem_struct(&fdm,
+ fdm_active->cpu_state_data.destination_address);
+ fadump_invalidate_dump(&fdm);
+ }
+}
+
+/*
+ * Release the memory that was reserved in early boot to preserve the memory
+ * contents. The released memory will be available for general use.
+ */
+static void fadump_release_memory(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+ unsigned long ra_start, ra_end;
+
+ ra_start = fw_dump.reserve_dump_area_start;
+ ra_end = ra_start + fw_dump.reserve_dump_area_size;
+
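+ /* Hand every page outside the reserved dump area back to the page allocator. */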
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ /*
+ * Exclude the dump reserve area. It will be reused for the next
+ * fadump registration.
+ */
+ if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start))
+ continue;
+
+ ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+ init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+ free_page((unsigned long)__va(addr));
+ totalram_pages++;
+ }
+}
+
+static void fadump_invalidate_release_mem(void)
+{
+ unsigned long reserved_area_start, reserved_area_end;
+ unsigned long destination_address;
+
+ mutex_lock(&fadump_mutex);
+ if (!fw_dump.dump_active) {
+ mutex_unlock(&fadump_mutex);
+ return;
+ }
+
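+ /* Save the destination address before fadump_cleanup() invalidates fdm_active. */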
+ destination_address = fdm_active->cpu_state_data.destination_address;
+ fadump_cleanup();
+ mutex_unlock(&fadump_mutex);
+
+ /*
+ * Save the current reserved memory bounds; we will require them
+ * later when releasing the memory for general use.
+ */
+ reserved_area_start = fw_dump.reserve_dump_area_start;
+ reserved_area_end = reserved_area_start +
+ fw_dump.reserve_dump_area_size;
+ /*
+ * Set up reserve_dump_area_start and its size so that we can
+ * reuse this reserved memory for re-registration.
+ */
+ fw_dump.reserve_dump_area_start = destination_address;
+ fw_dump.reserve_dump_area_size = get_fadump_area_size();
+
+ fadump_release_memory(reserved_area_start, reserved_area_end);
+ if (fw_dump.cpu_notes_buf) {
+ fadump_cpu_notes_buf_free(
+ (unsigned long)__va(fw_dump.cpu_notes_buf),
+ fw_dump.cpu_notes_buf_size);
+ fw_dump.cpu_notes_buf = 0;
+ fw_dump.cpu_notes_buf_size = 0;
+ }
+ /* Initialize the kernel dump memory structure for FAD registration. */
+ init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
+}
+
+static ssize_t fadump_release_memory_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (!fw_dump.dump_active)
+ return -EPERM;
+
+ if (buf[0] == '1') {
+ /*
+ * Take away '/proc/vmcore'. We are releasing the dump
+ * memory, hence it will not be valid anymore.
+ */
+ vmcore_cleanup();
+ fadump_invalidate_release_mem();
+
+ } else
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t fadump_enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
+}
+
+static ssize_t fadump_register_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", fw_dump.dump_registered);
+}
+
+static ssize_t fadump_register_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = 0;
+
+ if (!fw_dump.fadump_enabled || fdm_active)
+ return -EPERM;
+
+ mutex_lock(&fadump_mutex);
+
+ switch (buf[0]) {
+ case '0':
+ if (fw_dump.dump_registered == 0) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ /* Un-register Firmware-assisted dump */
+ fadump_unregister_dump(&fdm);
+ break;
+ case '1':
+ if (fw_dump.dump_registered == 1) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+ /* Register Firmware-assisted dump */
+ register_fadump();
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+unlock_out:
+ mutex_unlock(&fadump_mutex);
+ return ret < 0 ? ret : count;
+}
+
+static int fadump_region_show(struct seq_file *m, void *private)
+{
+ const struct fadump_mem_struct *fdm_ptr;
+
+ if (!fw_dump.fadump_enabled)
+ return 0;
+
+ mutex_lock(&fadump_mutex);
+ if (fdm_active)
+ fdm_ptr = fdm_active;
+ else {
+ mutex_unlock(&fadump_mutex);
+ fdm_ptr = &fdm;
+ }
+
+ seq_printf(m,
+ "CPU : [%#016llx-%#016llx] %#llx bytes, "
+ "Dumped: %#llx\n",
+ fdm_ptr->cpu_state_data.destination_address,
+ fdm_ptr->cpu_state_data.destination_address +
+ fdm_ptr->cpu_state_data.source_len - 1,
+ fdm_ptr->cpu_state_data.source_len,
+ fdm_ptr->cpu_state_data.bytes_dumped);
+ seq_printf(m,
+ "HPTE: [%#016llx-%#016llx] %#llx bytes, "
+ "Dumped: %#llx\n",
+ fdm_ptr->hpte_region.destination_address,
+ fdm_ptr->hpte_region.destination_address +
+ fdm_ptr->hpte_region.source_len - 1,
+ fdm_ptr->hpte_region.source_len,
+ fdm_ptr->hpte_region.bytes_dumped);
+ seq_printf(m,
+ "DUMP: [%#016llx-%#016llx] %#llx bytes, "
+ "Dumped: %#llx\n",
+ fdm_ptr->rmr_region.destination_address,
+ fdm_ptr->rmr_region.destination_address +
+ fdm_ptr->rmr_region.source_len - 1,
+ fdm_ptr->rmr_region.source_len,
+ fdm_ptr->rmr_region.bytes_dumped);
+
+ if (!fdm_active ||
+ (fw_dump.reserve_dump_area_start ==
+ fdm_ptr->cpu_state_data.destination_address))
+ goto out;
+
+ /* Dump is active. Show reserved memory region. */
+ seq_printf(m,
+ " : [%#016llx-%#016llx] %#llx bytes, "
+ "Dumped: %#llx\n",
+ (unsigned long long)fw_dump.reserve_dump_area_start,
+ fdm_ptr->cpu_state_data.destination_address - 1,
+ fdm_ptr->cpu_state_data.destination_address -
+ fw_dump.reserve_dump_area_start,
+ fdm_ptr->cpu_state_data.destination_address -
+ fw_dump.reserve_dump_area_start);
+out:
+ if (fdm_active)
+ mutex_unlock(&fadump_mutex);
+ return 0;
+}
+
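+ /*
+ * These attributes appear under /sys/kernel/: writing '1' to
+ * fadump_registered registers for a dump ('0' un-registers), and
+ * writing '1' to fadump_release_mem releases the dump memory once
+ * the dump has been captured.
+ */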
+static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem,
+ 0200, NULL,
+ fadump_release_memory_store);
+static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled,
+ 0444, fadump_enabled_show,
+ NULL);
+static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
+ 0644, fadump_register_show,
+ fadump_register_store);
+
+static int fadump_region_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fadump_region_show, inode->i_private);
+}
+
+static const struct file_operations fadump_region_fops = {
+ .open = fadump_region_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void fadump_init_files(void)
+{
+ struct dentry *debugfs_file;
+ int rc = 0;
+
+ rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr);
+ if (rc)
+ printk(KERN_ERR "fadump: unable to create sysfs file"
+ " fadump_enabled (%d)\n", rc);
+
+ rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr);
+ if (rc)
+ printk(KERN_ERR "fadump: unable to create sysfs file"
+ " fadump_registered (%d)\n", rc);
+
+ debugfs_file = debugfs_create_file("fadump_region", 0444,
+ powerpc_debugfs_root, NULL,
+ &fadump_region_fops);
+ if (!debugfs_file)
+ printk(KERN_ERR "fadump: unable to create debugfs file"
+ " fadump_region\n");
+
+ if (fw_dump.dump_active) {
+ rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr);
+ if (rc)
+ printk(KERN_ERR "fadump: unable to create sysfs file"
+ " fadump_release_mem (%d)\n", rc);
+ }
+ return;
+}
+
+/*
+ * Prepare for firmware-assisted dump.
+ */
+int __init setup_fadump(void)
+{
+ if (!fw_dump.fadump_enabled)
+ return 0;
+
+ if (!fw_dump.fadump_supported) {
+ printk(KERN_ERR "Firmware-assisted dump is not supported on"
+ " this hardware\n");
+ return 0;
+ }
+
+ fadump_show_config();
+ /*
+ * If dump data is available then see if it is valid and prepare for
+ * saving it to disk.
+ */
+ if (fw_dump.dump_active) {
+ /*
+ * If the dump process fails then invalidate the registration
+ * and release memory before proceeding with re-registration.
+ */
+ if (process_fadump(fdm_active) < 0)
+ fadump_invalidate_release_mem();
+ }
+ /* Initialize the kernel dump memory structure for FAD registration. */
+ else if (fw_dump.reserve_dump_area_size)
+ init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
+ fadump_init_files();
+
+ return 1;
+}
+subsys_initcall(setup_fadump);
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 0654dba2c1f1..dc0488b6f6e1 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -395,7 +395,7 @@ DataAccess:
bl hash_page
1: lwz r5,_DSISR(r11) /* get DSISR value */
mfspr r4,SPRN_DAR
- EXC_XFER_EE_LITE(0x300, handle_page_fault)
+ EXC_XFER_LITE(0x300, handle_page_fault)
/* Instruction access exception. */
@@ -410,7 +410,7 @@ InstructionAccess:
bl hash_page
1: mr r4,r12
mr r5,r9
- EXC_XFER_EE_LITE(0x400, handle_page_fault)
+ EXC_XFER_LITE(0x400, handle_page_fault)
/* External interrupt */
EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 872a6af83bad..4989661b710b 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -394,7 +394,7 @@ label:
NORMAL_EXCEPTION_PROLOG
mr r4,r12 /* Pass SRR0 as arg2 */
li r5,0 /* Pass zero as arg3 */
- EXC_XFER_EE_LITE(0x400, handle_page_fault)
+ EXC_XFER_LITE(0x400, handle_page_fault)
/* 0x0500 - External Interrupt Exception */
EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
@@ -747,7 +747,7 @@ DataAccess:
mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
stw r5,_ESR(r11)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
- EXC_XFER_EE_LITE(0x300, handle_page_fault)
+ EXC_XFER_LITE(0x300, handle_page_fault)
/* Other PowerPC processors, namely those derived from the 6xx-series
* have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 06c7251c1bf7..58bddee8e1e8 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -32,13 +32,13 @@
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
-#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
+#include <asm/hw_irq.h>
/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -57,10 +57,6 @@
* entry in r9 for debugging purposes
* 2. Secondary processors enter at 0x60 with PIR in gpr3
*
- * For iSeries:
- * 1. The MMU is on (as it always is for iSeries)
- * 2. The kernel is entered at system_reset_iSeries
- *
* For Book3E processors:
* 1. The MMU is on running in AS0 in a state defined in ePAPR
* 2. The kernel is entered at __start
@@ -93,15 +89,6 @@ __secondary_hold_spinloop:
__secondary_hold_acknowledge:
.llong 0x0
-#ifdef CONFIG_PPC_ISERIES
- /*
- * At offset 0x20, there is a pointer to iSeries LPAR data.
- * This is required by the hypervisor
- */
- . = 0x20
- .llong hvReleaseData-KERNELBASE
-#endif /* CONFIG_PPC_ISERIES */
-
#ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
* at the loaded address instead of the linked address. This
@@ -564,7 +551,8 @@ _GLOBAL(pmac_secondary_start)
*/
li r0,0
stb r0,PACASOFTIRQEN(r13)
- stb r0,PACAHARDIRQEN(r13)
+ li r0,PACA_IRQ_HARD_DIS
+ stb r0,PACAIRQHAPPENED(r13)
/* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
@@ -582,7 +570,7 @@ _GLOBAL(pmac_secondary_start)
* 1. Processor number
* 2. Segment table pointer (virtual address)
* On entry the following are set:
- * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
+ * r1 = stack pointer (real addr of temp stack)
* r24 = cpu# (in Linux terms)
* r13 = paca virtual address
* SPRG_PACA = paca virtual address
@@ -595,7 +583,7 @@ __secondary_start:
/* Set thread priority to MEDIUM */
HMT_MEDIUM
- /* Initialize the kernel stack. Just a repeat for iSeries. */
+ /* Initialize the kernel stack */
LOAD_REG_ADDR(r3, current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
ldx r14,r3,r28
@@ -615,20 +603,16 @@ __secondary_start:
li r7,0
mtlr r7
+ /* Mark interrupts soft and hard disabled (they might be enabled
+ * in the PACA when doing hotplug)
+ */
+ stb r7,PACASOFTIRQEN(r13)
+ li r0,PACA_IRQ_HARD_DIS
+ stb r0,PACAIRQHAPPENED(r13)
+
/* enable MMU and jump to start_secondary */
LOAD_REG_ADDR(r3, .start_secondary_prolog)
LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- ori r4,r4,MSR_EE
- li r8,1
- stb r8,PACAHARDIRQEN(r13)
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
- stb r7,PACAHARDIRQEN(r13)
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
- stb r7,PACASOFTIRQEN(r13)
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
@@ -771,22 +755,18 @@ _INIT_GLOBAL(start_here_common)
/* Load the TOC (virtual address) */
ld r2,PACATOC(r13)
+ /* Do more system initializations in virtual mode */
bl .setup_system
- /* Load up the kernel context */
-5:
- li r5,0
- stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- mfmsr r5
- ori r5,r5,MSR_EE /* Hard Enabled on iSeries*/
- mtmsrd r5
- li r5,1
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
- stb r5,PACAHARDIRQEN(r13) /* Hard Disabled on others */
+ /* Mark interrupts soft and hard disabled (they might be enabled
+ * in the PACA when doing hotplug)
+ */
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13)
+ li r0,PACA_IRQ_HARD_DIS
+ stb r0,PACAIRQHAPPENED(r13)
+ /* Generic kernel entry */
bl .start_kernel
/* Not reached */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index b68cb173ba2c..b2a5860accfb 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -220,7 +220,7 @@ DataAccess:
mfspr r4,SPRN_DAR
li r10,0x00f0
mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
- EXC_XFER_EE_LITE(0x300, handle_page_fault)
+ EXC_XFER_LITE(0x300, handle_page_fault)
/* Instruction access exception.
* This is "never generated" by the MPC8xx. We jump to it for other
@@ -231,7 +231,7 @@ InstructionAccess:
EXCEPTION_PROLOG
mr r4,r12
mr r5,r9
- EXC_XFER_EE_LITE(0x400, handle_page_fault)
+ EXC_XFER_LITE(0x400, handle_page_fault)
/* External interrupt */
EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index fc921bf62e15..0e4175388f47 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -359,7 +359,7 @@ label:
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
- EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+ EXC_XFER_LITE(0x0300, handle_page_fault)
#define INSTRUCTION_STORAGE_EXCEPTION \
START_EXCEPTION(InstructionStorage) \
@@ -368,7 +368,7 @@ label:
stw r5,_ESR(r11); \
mr r4,r12; /* Pass SRR0 as arg2 */ \
li r5,0; /* Pass zero as arg3 */ \
- EXC_XFER_EE_LITE(0x0400, handle_page_fault)
+ EXC_XFER_LITE(0x0400, handle_page_fault)
#define ALIGNMENT_EXCEPTION \
START_EXCEPTION(Alignment) \
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index d5d78c4ceef6..28e62598d0e8 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -319,7 +319,7 @@ interrupt_base:
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
andis. r10,r5,(ESR_ILK|ESR_DLK)@h
bne 1f
- EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+ EXC_XFER_LITE(0x0300, handle_page_fault)
1:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x0300, CacheLockingException)
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index d39ae606ff8d..79bb282e6501 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -713,7 +713,7 @@ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
struct bus_type ibmebus_bus_type = {
.name = "ibmebus",
- .uevent = of_device_uevent,
+ .uevent = of_device_uevent_modalias,
.bus_attrs = ibmebus_bus_attrs,
.match = ibmebus_bus_bus_match,
.probe = ibmebus_bus_device_probe,
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 0a48bf5db6c8..e8e821146f38 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -84,7 +84,11 @@ void cpu_idle(void)
start_critical_timings();
- local_irq_enable();
+ /* Some power_save functions return with
+ * interrupts enabled, some don't.
+ */
+ if (irqs_disabled())
+ local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
} else {
@@ -101,11 +105,11 @@ void cpu_idle(void)
ppc64_runlatch_on();
rcu_idle_exit();
tick_nohz_idle_exit();
- preempt_enable_no_resched();
- if (cpu_should_die())
+ if (cpu_should_die()) {
+ sched_preempt_enable_no_resched();
cpu_die();
- schedule();
- preempt_disable();
+ }
+ schedule_preempt_disabled();
}
}
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index 16c002d6bdf1..ff007b59448d 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -29,43 +29,30 @@ _GLOBAL(book3e_idle)
wrteei 0
/* Now check if an interrupt came in while we were soft disabled
- * since we may otherwise lose it (doorbells etc...). We know
- * that since PACAHARDIRQEN will have been cleared in that case.
+ * since we may otherwise lose it (doorbells etc...).
*/
- lbz r3,PACAHARDIRQEN(r13)
+ lbz r3,PACAIRQHAPPENED(r13)
cmpwi cr0,r3,0
- beqlr
+ bnelr
- /* Now we are going to mark ourselves as soft and hard enables in
+ /* Now we are going to mark ourselves as soft and hard enabled in
* order to be able to take interrupts while asleep. We inform lockdep
* of that. We don't actually turn interrupts on just yet tho.
*/
#ifdef CONFIG_TRACE_IRQFLAGS
stdu r1,-128(r1)
bl .trace_hardirqs_on
+ addi r1,r1,128
#endif
li r0,1
stb r0,PACASOFTIRQEN(r13)
- stb r0,PACAHARDIRQEN(r13)
/* Interrupts will make use return to LR, so get something we want
* in there
*/
bl 1f
- /* Hard disable interrupts again */
- wrteei 0
-
- /* Mark them off again in the PACA as well */
- li r0,0
- stb r0,PACASOFTIRQEN(r13)
- stb r0,PACAHARDIRQEN(r13)
-
- /* Tell lockdep about it */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_off
- addi r1,r1,128
-#endif
+ /* And return (interrupts are on) */
ld r0,16(r1)
mtlr r0
blr
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index ba3195478600..2c71b0fc9f91 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -14,6 +14,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/irqflags.h>
#undef DEBUG
@@ -29,14 +30,31 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
cmpwi 0,r4,0
beqlr
- /* Go to NAP now */
+ /* Hard disable interrupts */
mfmsr r7
rldicl r0,r7,48,1
rotldi r0,r0,16
- mtmsrd r0,1 /* hard-disable interrupts */
+ mtmsrd r0,1
+
+ /* Check if something happened while soft-disabled */
+ lbz r0,PACAIRQHAPPENED(r13)
+ cmpwi cr0,r0,0
+ bnelr
+
+ /* Soft-enable interrupts */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-128(r1)
+ bl .trace_hardirqs_on
+ addi r1,r1,128
+ ld r0,16(r1)
+ mtlr r0
+ mfmsr r7
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
li r0,1
stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
- stb r0,PACAHARDIRQEN(r13)
BEGIN_FTR_SECTION
DSSALL
sync
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index fcdff198da4b..0cdc9a392839 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -1,5 +1,5 @@
/*
- * This file contains the power_save function for 970-family CPUs.
+ * This file contains the power_save function for Power7 CPUs.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -15,6 +15,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
+#include <asm/hw_irq.h>
#undef DEBUG
@@ -51,9 +52,25 @@ _GLOBAL(power7_idle)
rldicl r9,r9,48,1
rotldi r9,r9,16
mtmsrd r9,1 /* hard-disable interrupts */
+
+ /* Check if something happened while soft-disabled */
+ lbz r0,PACAIRQHAPPENED(r13)
+ cmpwi cr0,r0,0
+ beq 1f
+ addi r1,r1,INT_FRAME_SIZE
+ ld r0,16(r1)
+ mtlr r0
+ blr
+
+1: /* We mark irqs hard disabled as this is the state we'll
+ * be in when returning and we need to tell arch_local_irq_restore()
+ * about it
+ */
+ li r0,PACA_IRQ_HARD_DIS
+ stb r0,PACAIRQHAPPENED(r13)
+
+ /* We haven't lost state ... yet */
li r0,0
- stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
- stb r0,PACAHARDIRQEN(r13)
stb r0,PACA_NAPSTATELOST(r13)
/* Continue saving state */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0cfcf98aafca..359f078571c7 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -39,6 +39,7 @@
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
+#include <asm/fadump.h>
#define DBG(...)
@@ -445,7 +446,12 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
static void iommu_table_clear(struct iommu_table *tbl)
{
- if (!is_kdump_kernel()) {
+ /*
+ * In the case of firmware-assisted dump, the system goes through a
+ * clean reboot at the time of system crash. Hence it's safe to
+ * clear the TCE entries if firmware-assisted dump is active.
+ */
+ if (!is_kdump_kernel() || is_fadump_active()) {
/* Clear the table in case firmware left allocations in it */
ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
return;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 701d4aceb4f4..cea2d9f3ae4e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -93,20 +93,16 @@ extern int tau_interrupts(int);
#ifdef CONFIG_PPC64
-#ifndef CONFIG_SPARSE_IRQ
-EXPORT_SYMBOL(irq_desc);
-#endif
-
int distribute_irqs = 1;
-static inline notrace unsigned long get_hard_enabled(void)
+static inline notrace unsigned long get_irq_happened(void)
{
- unsigned long enabled;
+ unsigned long happened;
__asm__ __volatile__("lbz %0,%1(13)"
- : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
+ : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));
- return enabled;
+ return happened;
}
static inline notrace void set_soft_enabled(unsigned long enable)
@@ -115,84 +111,162 @@ static inline notrace void set_soft_enabled(unsigned long enable)
: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
-static inline notrace void decrementer_check_overflow(void)
+static inline notrace int decrementer_check_overflow(void)
{
- u64 now = get_tb_or_rtc();
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
-
+ u64 now = get_tb_or_rtc();
+ u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+
if (now >= *next_tb)
set_dec(1);
+ return now >= *next_tb;
}
-notrace void arch_local_irq_restore(unsigned long en)
+/* This is called whenever we are re-enabling interrupts
+ * and returns either 0 (nothing to do) or 500/900 if there's
+ * either an EE or a DEC to generate.
+ *
+ * This is called in two contexts: From arch_local_irq_restore()
+ * before soft-enabling interrupts, and from the exception exit
+ * path when returning from an interrupt from a soft-disabled to
+ * a soft-enabled context. In both cases we have interrupts hard
+ * disabled.
+ *
+ * We take care of only clearing the bits we handled in the
+ * PACA irq_happened field since we can only re-emit one at a
+ * time and we don't want to "lose" one.
+ */
+notrace unsigned int __check_irq_replay(void)
{
/*
- * get_paca()->soft_enabled = en;
- * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
- * That was allowed before, and in such a case we do need to take care
- * that gcc will set soft_enabled directly via r13, not choose to use
- * an intermediate register, lest we're preempted to a different cpu.
+ * We use local_paca rather than get_paca() to avoid all
+ * the debug_smp_processor_id() business in this low level
+ * function
*/
- set_soft_enabled(en);
- if (!en)
- return;
+ unsigned char happened = local_paca->irq_happened;
-#ifdef CONFIG_PPC_STD_MMU_64
- if (firmware_has_feature(FW_FEATURE_ISERIES)) {
- /*
- * Do we need to disable preemption here? Not really: in the
- * unlikely event that we're preempted to a different cpu in
- * between getting r13, loading its lppaca_ptr, and loading
- * its any_int, we might call iseries_handle_interrupts without
- * an interrupt pending on the new cpu, but that's no disaster,
- * is it? And the business of preempting us off the old cpu
- * would itself involve a local_irq_restore which handles the
- * interrupt to that cpu.
- *
- * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
- * to avoid any preemption checking added into get_paca().
- */
- if (local_paca->lppaca_ptr->int_dword.any_int)
- iseries_handle_interrupts();
+ /* Clear bit 0 which we wouldn't clear otherwise */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+
+ /*
+ * Force the delivery of pending soft-disabled interrupts on PS3.
+ * Any HV call will have this side effect.
+ */
+ if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
+ u64 tmp, tmp2;
+ lv1_get_version_info(&tmp, &tmp2);
}
-#endif /* CONFIG_PPC_STD_MMU_64 */
/*
- * if (get_paca()->hard_enabled) return;
- * But again we need to take care that gcc gets hard_enabled directly
- * via r13, not choose to use an intermediate register, lest we're
- * preempted to a different cpu in between the two instructions.
+ * We may have missed a decrementer interrupt. We check the
+ * decrementer itself rather than the paca irq_happened field
+ * in case we also had a rollover while hard disabled.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_DEC;
+ if (decrementer_check_overflow())
+ return 0x900;
+
+ /* Finally check if an external interrupt happened */
+ local_paca->irq_happened &= ~PACA_IRQ_EE;
+ if (happened & PACA_IRQ_EE)
+ return 0x500;
+
+#ifdef CONFIG_PPC_BOOK3E
+ /* Finally check if an EPR external interrupt happened.
+ * This bit is typically set if we need to handle another
+ * "edge" interrupt from within the MPIC "EPR" handler.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
+ if (happened & PACA_IRQ_EE_EDGE)
+ return 0x500;
+
+ local_paca->irq_happened &= ~PACA_IRQ_DBELL;
+ if (happened & PACA_IRQ_DBELL)
+ return 0x280;
+#endif /* CONFIG_PPC_BOOK3E */
+
+ /* There should be nothing left ! */
+ BUG_ON(local_paca->irq_happened != 0);
+
+ return 0;
+}
+
+notrace void arch_local_irq_restore(unsigned long en)
+{
+ unsigned char irq_happened;
+ unsigned int replay;
+
+ /* Write the new soft-enabled value */
+ set_soft_enabled(en);
+ if (!en)
+ return;
+ /*
+ * From this point onward, we can take interrupts, preempt,
+ * etc... unless we got hard-disabled. We check if an event
+ * happened. If none happened, we know we can just return.
+ *
+ * We may have been preempted before the check below, in which case
+ * we are checking the "new" CPU instead of the old one. This
+ * is only a problem if an event happened on the "old" CPU.
+ *
+ * External interrupt events will have caused interrupts to
+ * be hard-disabled, so there is no problem; we
+ * cannot have been preempted.
*/
- if (get_hard_enabled())
+ irq_happened = get_irq_happened();
+ if (!irq_happened)
return;
/*
- * Need to hard-enable interrupts here. Since currently disabled,
- * no need to take further asm precautions against preemption; but
- * use local_paca instead of get_paca() to avoid preemption checking.
+ * We need to hard disable to get a trusted value from
+ * __check_irq_replay(). We also need to soft-disable
+ * again to avoid warnings in there due to the use of
+ * per-cpu variables.
+ *
+ * We know that if the value in irq_happened is exactly 0x01
+ * then we are already hard disabled (there are other less
+ * common cases that we'll ignore for now), so we skip the
+ * (expensive) mtmsrd.
*/
- local_paca->hard_enabled = en;
+ if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
+ __hard_irq_disable();
+ set_soft_enabled(0);
/*
- * Trigger the decrementer if we have a pending event. Some processors
- * only trigger on edge transitions of the sign bit. We might also
- * have disabled interrupts long enough that the decrementer wrapped
- * to positive.
+ * Check if anything needs to be re-emitted. We haven't
+ * soft-enabled yet to avoid warnings in decrementer_check_overflow
+ * accessing per-cpu variables
*/
- decrementer_check_overflow();
+ replay = __check_irq_replay();
+
+ /* We can soft-enable now */
+ set_soft_enabled(1);
/*
- * Force the delivery of pending soft-disabled interrupts on PS3.
- * Any HV call will have this side effect.
+ * And replay if we have to. This will return with interrupts
+ * hard-enabled.
*/
- if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
- u64 tmp, tmp2;
- lv1_get_version_info(&tmp, &tmp2);
+ if (replay) {
+ __replay_interrupt(replay);
+ return;
}
+ /* Finally, let's ensure we are hard enabled */
__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
+
+/*
+ * This is specifically called by assembly code to re-enable interrupts
+ * if they are currently disabled. This is typically called before
+ * schedule() or do_signal() when returning to userspace. We do it
+ * in C to avoid the burden of dealing with lockdep etc...
+ */
+void restore_interrupts(void)
+{
+ if (irqs_disabled())
+ local_irq_enable();
+}
+
#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
@@ -360,25 +434,25 @@ void do_IRQ(struct pt_regs *regs)
check_stack_overflow();
+ /*
+ * Query the platform PIC for the interrupt & ack it.
+ *
+ * This will typically lower the interrupt line to the CPU
+ */
irq = ppc_md.get_irq();
- if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
+ /* We can hard enable interrupts now */
+ may_hard_irq_enable();
+
+ /* And finally process it */
+ if (irq != NO_IRQ)
handle_one_irq(irq);
- else if (irq != NO_IRQ_IGNORE)
+ else
__get_cpu_var(irq_stat).spurious_irqs++;
irq_exit();
set_irq_regs(old_regs);
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES) &&
- get_lppaca()->int_dword.fields.decr_int) {
- get_lppaca()->int_dword.fields.decr_int = 0;
- /* Signal a fake decrementer interrupt */
- timer_interrupt(regs);
- }
-#endif
-
trace_irq_exit(regs);
}
@@ -486,409 +560,19 @@ void do_softirq(void)
local_irq_restore(flags);
}
-
-/*
- * IRQ controller and virtual interrupts
- */
-
-/* The main irq map itself is an array of NR_IRQ entries containing the
- * associate host and irq number. An entry with a host of NULL is free.
- * An entry can be allocated if it's free, the allocator always then sets
- * hwirq first to the host's invalid irq number and then fills ops.
- */
-struct irq_map_entry {
- irq_hw_number_t hwirq;
- struct irq_host *host;
-};
-
-static LIST_HEAD(irq_hosts);
-static DEFINE_RAW_SPINLOCK(irq_big_lock);
-static DEFINE_MUTEX(revmap_trees_mutex);
-static struct irq_map_entry irq_map[NR_IRQS];
-static unsigned int irq_virq_count = NR_IRQS;
-static struct irq_host *irq_default_host;
-
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
- return irq_map[d->irq].hwirq;
+ return d->hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);
irq_hw_number_t virq_to_hw(unsigned int virq)
{
- return irq_map[virq].hwirq;
+ struct irq_data *irq_data = irq_get_irq_data(virq);
+ return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
-bool virq_is_host(unsigned int virq, struct irq_host *host)
-{
- return irq_map[virq].host == host;
-}
-EXPORT_SYMBOL_GPL(virq_is_host);
-
-static int default_irq_host_match(struct irq_host *h, struct device_node *np)
-{
- return h->of_node != NULL && h->of_node == np;
-}
-
-struct irq_host *irq_alloc_host(struct device_node *of_node,
- unsigned int revmap_type,
- unsigned int revmap_arg,
- struct irq_host_ops *ops,
- irq_hw_number_t inval_irq)
-{
- struct irq_host *host;
- unsigned int size = sizeof(struct irq_host);
- unsigned int i;
- unsigned int *rmap;
- unsigned long flags;
-
- /* Allocate structure and revmap table if using linear mapping */
- if (revmap_type == IRQ_HOST_MAP_LINEAR)
- size += revmap_arg * sizeof(unsigned int);
- host = kzalloc(size, GFP_KERNEL);
- if (host == NULL)
- return NULL;
-
- /* Fill structure */
- host->revmap_type = revmap_type;
- host->inval_irq = inval_irq;
- host->ops = ops;
- host->of_node = of_node_get(of_node);
-
- if (host->ops->match == NULL)
- host->ops->match = default_irq_host_match;
-
- raw_spin_lock_irqsave(&irq_big_lock, flags);
-
- /* If it's a legacy controller, check for duplicates and
- * mark it as allocated (we use irq 0 host pointer for that
- */
- if (revmap_type == IRQ_HOST_MAP_LEGACY) {
- if (irq_map[0].host != NULL) {
- raw_spin_unlock_irqrestore(&irq_big_lock, flags);
- of_node_put(host->of_node);
- kfree(host);
- return NULL;
- }
- irq_map[0].host = host;
- }
-
- list_add(&host->link, &irq_hosts);
- raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-
- /* Additional setups per revmap type */
- switch(revmap_type) {
- case IRQ_HOST_MAP_LEGACY:
- /* 0 is always the invalid number for legacy */
- host->inval_irq = 0;
- /* setup us as the host for all legacy interrupts */
- for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
- irq_map[i].hwirq = i;
- smp_wmb();
- irq_map[i].host = host;
- smp_wmb();
-
- /* Legacy flags are left to default at this point,
- * one can then use irq_create_mapping() to
- * explicitly change them
- */
- ops->map(host, i, i);
-
- /* Clear norequest flags */
- irq_clear_status_flags(i, IRQ_NOREQUEST);
- }
- break;
- case IRQ_HOST_MAP_LINEAR:
- rmap = (unsigned int *)(host + 1);
- for (i = 0; i < revmap_arg; i++)
- rmap[i] = NO_IRQ;
- host->revmap_data.linear.size = revmap_arg;
- smp_wmb();
- host->revmap_data.linear.revmap = rmap;
- break;
- case IRQ_HOST_MAP_TREE:
- INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
- break;
- default:
- break;
- }
-
- pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
-
- return host;
-}
-
-struct irq_host *irq_find_host(struct device_node *node)
-{
- struct irq_host *h, *found = NULL;
- unsigned long flags;
-
- /* We might want to match the legacy controller last since
- * it might potentially be set to match all interrupts in
- * the absence of a device node. This isn't a problem so far
- * yet though...
- */
- raw_spin_lock_irqsave(&irq_big_lock, flags);
- list_for_each_entry(h, &irq_hosts, link)
- if (h->ops->match(h, node)) {
- found = h;
- break;
- }
- raw_spin_unlock_irqrestore(&irq_big_lock, flags);
- return found;
-}
-EXPORT_SYMBOL_GPL(irq_find_host);
-
-void irq_set_default_host(struct irq_host *host)
-{
- pr_debug("irq: Default host set to @0x%p\n", host);
-
- irq_default_host = host;
-}
-
-void irq_set_virq_count(unsigned int count)
-{
- pr_debug("irq: Trying to set virq count to %d\n", count);
-
- BUG_ON(count < NUM_ISA_INTERRUPTS);
- if (count < NR_IRQS)
- irq_virq_count = count;
-}
-
-static int irq_setup_virq(struct irq_host *host, unsigned int virq,
- irq_hw_number_t hwirq)
-{
- int res;
-
- res = irq_alloc_desc_at(virq, 0);
- if (res != virq) {
- pr_debug("irq: -> allocating desc failed\n");
- goto error;
- }
-
- /* map it */
- smp_wmb();
- irq_map[virq].hwirq = hwirq;
- smp_mb();
-
- if (host->ops->map(host, virq, hwirq)) {
- pr_debug("irq: -> mapping failed, freeing\n");
- goto errdesc;
- }
-
- irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
- return 0;
-
-errdesc:
- irq_free_descs(virq, 1);
-error:
- irq_free_virt(virq, 1);
- return -1;
-}
-
-unsigned int irq_create_direct_mapping(struct irq_host *host)
-{
- unsigned int virq;
-
- if (host == NULL)
- host = irq_default_host;
-
- BUG_ON(host == NULL);
- WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
-
- virq = irq_alloc_virt(host, 1, 0);
- if (virq == NO_IRQ) {
- pr_debug("irq: create_direct virq allocation failed\n");
- return NO_IRQ;
- }
-
- pr_debug("irq: create_direct obtained virq %d\n", virq);
-
- if (irq_setup_virq(host, virq, virq))
- return NO_IRQ;
-
- return virq;
-}
-
-unsigned int irq_create_mapping(struct irq_host *host,
- irq_hw_number_t hwirq)
-{
- unsigned int virq, hint;
-
- pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
-
- /* Look for default host if nececssary */
- if (host == NULL)
- host = irq_default_host;
- if (host == NULL) {
- printk(KERN_WARNING "irq_create_mapping called for"
- " NULL host, hwirq=%lx\n", hwirq);
- WARN_ON(1);
- return NO_IRQ;
- }
- pr_debug("irq: -> using host @%p\n", host);
-
- /* Check if mapping already exists */
- virq = irq_find_mapping(host, hwirq);
- if (virq != NO_IRQ) {
- pr_debug("irq: -> existing mapping on virq %d\n", virq);
- return virq;
- }
-
- /* Get a virtual interrupt number */
- if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
- /* Handle legacy */
- virq = (unsigned int)hwirq;
- if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
- return NO_IRQ;
- return virq;
- } else {
- /* Allocate a virtual interrupt number */
- hint = hwirq % irq_virq_count;
- virq = irq_alloc_virt(host, 1, hint);
- if (virq == NO_IRQ) {
- pr_debug("irq: -> virq allocation failed\n");
- return NO_IRQ;
- }
- }
-
- if (irq_setup_virq(host, virq, hwirq))
- return NO_IRQ;
-
- pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
- hwirq, host->of_node ? host->of_node->full_name : "null", virq);
-
- return virq;
-}
-EXPORT_SYMBOL_GPL(irq_create_mapping);
-
-unsigned int irq_create_of_mapping(struct device_node *controller,
- const u32 *intspec, unsigned int intsize)
-{
- struct irq_host *host;
- irq_hw_number_t hwirq;
- unsigned int type = IRQ_TYPE_NONE;
- unsigned int virq;
-
- if (controller == NULL)
- host = irq_default_host;
- else
- host = irq_find_host(controller);
- if (host == NULL) {
- printk(KERN_WARNING "irq: no irq host found for %s !\n",
- controller->full_name);
- return NO_IRQ;
- }
-
- /* If host has no translation, then we assume interrupt line */
- if (host->ops->xlate == NULL)
- hwirq = intspec[0];
- else {
- if (host->ops->xlate(host, controller, intspec, intsize,
- &hwirq, &type))
- return NO_IRQ;
- }
-
- /* Create mapping */
- virq = irq_create_mapping(host, hwirq);
- if (virq == NO_IRQ)
- return virq;
-
- /* Set type if specified and different than the current one */
- if (type != IRQ_TYPE_NONE &&
- type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
- irq_set_irq_type(virq, type);
- return virq;
-}
-EXPORT_SYMBOL_GPL(irq_create_of_mapping);
-
-void irq_dispose_mapping(unsigned int virq)
-{
- struct irq_host *host;
- irq_hw_number_t hwirq;
-
- if (virq == NO_IRQ)
- return;
-
- host = irq_map[virq].host;
- if (WARN_ON(host == NULL))
- return;
-
- /* Never unmap legacy interrupts */
- if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
- return;
-
- irq_set_status_flags(virq, IRQ_NOREQUEST);
-
- /* remove chip and handler */
- irq_set_chip_and_handler(virq, NULL, NULL);
-
- /* Make sure it's completed */
- synchronize_irq(virq);
-
- /* Tell the PIC about it */
- if (host->ops->unmap)
- host->ops->unmap(host, virq);
- smp_mb();
-
- /* Clear reverse map */
- hwirq = irq_map[virq].hwirq;
- switch(host->revmap_type) {
- case IRQ_HOST_MAP_LINEAR:
- if (hwirq < host->revmap_data.linear.size)
- host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
- break;
- case IRQ_HOST_MAP_TREE:
- mutex_lock(&revmap_trees_mutex);
- radix_tree_delete(&host->revmap_data.tree, hwirq);
- mutex_unlock(&revmap_trees_mutex);
- break;
- }
-
- /* Destroy map */
- smp_mb();
- irq_map[virq].hwirq = host->inval_irq;
-
- irq_free_descs(virq, 1);
- /* Free it */
- irq_free_virt(virq, 1);
-}
-EXPORT_SYMBOL_GPL(irq_dispose_mapping);
-
-unsigned int irq_find_mapping(struct irq_host *host,
- irq_hw_number_t hwirq)
-{
- unsigned int i;
- unsigned int hint = hwirq % irq_virq_count;
-
- /* Look for default host if nececssary */
- if (host == NULL)
- host = irq_default_host;
- if (host == NULL)
- return NO_IRQ;
-
- /* legacy -> bail early */
- if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
- return hwirq;
-
- /* Slow path does a linear search of the map */
- if (hint < NUM_ISA_INTERRUPTS)
- hint = NUM_ISA_INTERRUPTS;
- i = hint;
- do {
- if (irq_map[i].host == host &&
- irq_map[i].hwirq == hwirq)
- return i;
- i++;
- if (i >= irq_virq_count)
- i = NUM_ISA_INTERRUPTS;
- } while(i != hint);
- return NO_IRQ;
-}
-EXPORT_SYMBOL_GPL(irq_find_mapping);
-
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
@@ -925,232 +609,11 @@ int irq_choose_cpu(const struct cpumask *mask)
}
#endif
-unsigned int irq_radix_revmap_lookup(struct irq_host *host,
- irq_hw_number_t hwirq)
-{
- struct irq_map_entry *ptr;
- unsigned int virq;
-
- if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
- return irq_find_mapping(host, hwirq);
-
- /*
- * The ptr returned references the static global irq_map.
- * but freeing an irq can delete nodes along the path to
- * do the lookup via call_rcu.
- */
- rcu_read_lock();
- ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
- rcu_read_unlock();
-
- /*
- * If found in radix tree, then fine.
- * Else fallback to linear lookup - this should not happen in practice
- * as it means that we failed to insert the node in the radix tree.
- */
- if (ptr)
- virq = ptr - irq_map;
- else
- virq = irq_find_mapping(host, hwirq);
-
- return virq;
-}
-
-void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
- irq_hw_number_t hwirq)
-{
- if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
- return;
-
- if (virq != NO_IRQ) {
- mutex_lock(&revmap_trees_mutex);
- radix_tree_insert(&host->revmap_data.tree, hwirq,
- &irq_map[virq]);
- mutex_unlock(&revmap_trees_mutex);
- }
-}
-
-unsigned int irq_linear_revmap(struct irq_host *host,
- irq_hw_number_t hwirq)
-{
- unsigned int *revmap;
-
- if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
- return irq_find_mapping(host, hwirq);
-
- /* Check revmap bounds */
- if (unlikely(hwirq >= host->revmap_data.linear.size))
- return irq_find_mapping(host, hwirq);
-
- /* Check if revmap was allocated */
- revmap = host->revmap_data.linear.revmap;
- if (unlikely(revmap == NULL))
- return irq_find_mapping(host, hwirq);
-
- /* Fill up revmap with slow path if no mapping found */
- if (unlikely(revmap[hwirq] == NO_IRQ))
- revmap[hwirq] = irq_find_mapping(host, hwirq);
-
- return revmap[hwirq];
-}
-
-unsigned int irq_alloc_virt(struct irq_host *host,
- unsigned int count,
- unsigned int hint)
-{
- unsigned long flags;
- unsigned int i, j, found = NO_IRQ;
-
- if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
- return NO_IRQ;
-
- raw_spin_lock_irqsave(&irq_big_lock, flags);
-
- /* Use hint for 1 interrupt if any */
- if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
- hint < irq_virq_count && irq_map[hint].host == NULL) {
- found = hint;
- goto hint_found;
- }
-
- /* Look for count consecutive numbers in the allocatable
- * (non-legacy) space
- */
- for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
- if (irq_map[i].host != NULL)
- j = 0;
- else
- j++;
-
- if (j == count) {
- found = i - count + 1;
- break;
- }
- }
- if (found == NO_IRQ) {
- raw_spin_unlock_irqrestore(&irq_big_lock, flags);
- return NO_IRQ;
- }
- hint_found:
- for (i = found; i < (found + count); i++) {
- irq_map[i].hwirq = host->inval_irq;
- smp_wmb();
- irq_map[i].host = host;
- }
- raw_spin_unlock_irqrestore(&irq_big_lock, flags);
- return found;
-}
-
-void irq_free_virt(unsigned int virq, unsigned int count)
-{
- unsigned long flags;
- unsigned int i;
-
- WARN_ON (virq < NUM_ISA_INTERRUPTS);
- WARN_ON (count == 0 || (virq + count) > irq_virq_count);
-
- if (virq < NUM_ISA_INTERRUPTS) {
- if (virq + count < NUM_ISA_INTERRUPTS)
- return;
- count =- NUM_ISA_INTERRUPTS - virq;
- virq = NUM_ISA_INTERRUPTS;
- }
-
- if (count > irq_virq_count || virq > irq_virq_count - count) {
- if (virq > irq_virq_count)
- return;
- count = irq_virq_count - virq;
- }
-
- raw_spin_lock_irqsave(&irq_big_lock, flags);
- for (i = virq; i < (virq + count); i++) {
- struct irq_host *host;
-
- host = irq_map[i].host;
- irq_map[i].hwirq = host->inval_irq;
- smp_wmb();
- irq_map[i].host = NULL;
- }
- raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-}
-
int arch_early_irq_init(void)
{
return 0;
}
-#ifdef CONFIG_VIRQ_DEBUG
-static int virq_debug_show(struct seq_file *m, void *private)
-{
- unsigned long flags;
- struct irq_desc *desc;
- const char *p;
- static const char none[] = "none";
- void *data;
- int i;
-
- seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
- "chip name", "chip data", "host name");
-
- for (i = 1; i < nr_irqs; i++) {
- desc = irq_to_desc(i);
- if (!desc)
- continue;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
-
- if (desc->action && desc->action->handler) {
- struct irq_chip *chip;
-
- seq_printf(m, "%5d ", i);
- seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
-
- chip = irq_desc_get_chip(desc);
- if (chip && chip->name)
- p = chip->name;
- else
- p = none;
- seq_printf(m, "%-15s ", p);
-
- data = irq_desc_get_chip_data(desc);
- seq_printf(m, "0x%16p ", data);
-
- if (irq_map[i].host && irq_map[i].host->of_node)
- p = irq_map[i].host->of_node->full_name;
- else
- p = none;
- seq_printf(m, "%s\n", p);
- }
-
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-
- return 0;
-}
-
-static int virq_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, virq_debug_show, inode->i_private);
-}
-
-static const struct file_operations virq_debug_fops = {
- .open = virq_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init irq_debugfs_init(void)
-{
- if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
- NULL, &virq_debug_fops) == NULL)
- return -ENOMEM;
-
- return 0;
-}
-__initcall(irq_debugfs_init);
-#endif /* CONFIG_VIRQ_DEBUG */
-
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 479752901ec6..d45ec58703ce 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -29,7 +29,6 @@
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
-#include <asm/firmware.h>
unsigned long isa_io_base; /* NULL if no ISA bus */
EXPORT_SYMBOL(isa_io_base);
@@ -261,8 +260,6 @@ static struct notifier_block isa_bridge_notifier = {
*/
static int __init isa_bridge_init(void)
{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
bus_register_notifier(&pci_bus_type, &isa_bridge_notifier);
return 0;
}
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 2985338d0e10..62bdf2389669 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Authors:
* Alexander Graf <agraf@suse.de>
@@ -29,6 +30,7 @@
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
#define KVM_MAGIC_PAGE (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
@@ -41,34 +43,30 @@
#define KVM_INST_B 0x48000000
#define KVM_INST_B_MASK 0x03ffffff
#define KVM_INST_B_MAX 0x01ffffff
+#define KVM_INST_LI 0x38000000
#define KVM_MASK_RT 0x03e00000
#define KVM_RT_30 0x03c00000
#define KVM_MASK_RB 0x0000f800
#define KVM_INST_MFMSR 0x7c0000a6
-#define KVM_INST_MFSPR_SPRG0 0x7c1042a6
-#define KVM_INST_MFSPR_SPRG1 0x7c1142a6
-#define KVM_INST_MFSPR_SPRG2 0x7c1242a6
-#define KVM_INST_MFSPR_SPRG3 0x7c1342a6
-#define KVM_INST_MFSPR_SRR0 0x7c1a02a6
-#define KVM_INST_MFSPR_SRR1 0x7c1b02a6
-#define KVM_INST_MFSPR_DAR 0x7c1302a6
-#define KVM_INST_MFSPR_DSISR 0x7c1202a6
-
-#define KVM_INST_MTSPR_SPRG0 0x7c1043a6
-#define KVM_INST_MTSPR_SPRG1 0x7c1143a6
-#define KVM_INST_MTSPR_SPRG2 0x7c1243a6
-#define KVM_INST_MTSPR_SPRG3 0x7c1343a6
-#define KVM_INST_MTSPR_SRR0 0x7c1a03a6
-#define KVM_INST_MTSPR_SRR1 0x7c1b03a6
-#define KVM_INST_MTSPR_DAR 0x7c1303a6
-#define KVM_INST_MTSPR_DSISR 0x7c1203a6
+
+#define SPR_FROM 0
+#define SPR_TO 0x100
+
+#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
+ (((sprn) & 0x1f) << 16) | \
+ (((sprn) & 0x3e0) << 6) | \
+ (moveto))
+
+#define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM)
+#define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO)
#define KVM_INST_TLBSYNC 0x7c00046c
#define KVM_INST_MTMSRD_L0 0x7c000164
#define KVM_INST_MTMSRD_L1 0x7c010164
#define KVM_INST_MTMSR 0x7c000124
+#define KVM_INST_WRTEE 0x7c000106
#define KVM_INST_WRTEEI_0 0x7c000146
#define KVM_INST_WRTEEI_1 0x7c008146
@@ -270,26 +268,27 @@ static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
#ifdef CONFIG_BOOKE
-extern u32 kvm_emulate_wrteei_branch_offs;
-extern u32 kvm_emulate_wrteei_ee_offs;
-extern u32 kvm_emulate_wrteei_len;
-extern u32 kvm_emulate_wrteei[];
+extern u32 kvm_emulate_wrtee_branch_offs;
+extern u32 kvm_emulate_wrtee_reg_offs;
+extern u32 kvm_emulate_wrtee_orig_ins_offs;
+extern u32 kvm_emulate_wrtee_len;
+extern u32 kvm_emulate_wrtee[];
-static void kvm_patch_ins_wrteei(u32 *inst)
+static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
u32 *p;
int distance_start;
int distance_end;
ulong next_inst;
- p = kvm_alloc(kvm_emulate_wrteei_len * 4);
+ p = kvm_alloc(kvm_emulate_wrtee_len * 4);
if (!p)
return;
/* Find out where we are and put everything there */
distance_start = (ulong)p - (ulong)inst;
next_inst = ((ulong)inst + 4);
- distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];
+ distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
/* Make sure we only write valid b instructions */
if (distance_start > KVM_INST_B_MAX) {
@@ -298,10 +297,65 @@ static void kvm_patch_ins_wrteei(u32 *inst)
}
/* Modify the chunk to fit the invocation */
- memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
- p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
- p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
- flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);
+ memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
+ p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
+
+ if (imm_one) {
+ p[kvm_emulate_wrtee_reg_offs] =
+ KVM_INST_LI | __PPC_RT(30) | MSR_EE;
+ } else {
+ /* Make clobbered registers work too */
+ switch (get_rt(rt)) {
+ case 30:
+ kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
+ magic_var(scratch2), KVM_RT_30);
+ break;
+ case 31:
+ kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
+ magic_var(scratch1), KVM_RT_30);
+ break;
+ default:
+ p[kvm_emulate_wrtee_reg_offs] |= rt;
+ break;
+ }
+ }
+
+ p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
+ flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
+
+ /* Patch the invocation */
+ kvm_patch_ins_b(inst, distance_start);
+}
+
+extern u32 kvm_emulate_wrteei_0_branch_offs;
+extern u32 kvm_emulate_wrteei_0_len;
+extern u32 kvm_emulate_wrteei_0[];
+
+static void kvm_patch_ins_wrteei_0(u32 *inst)
+{
+ u32 *p;
+ int distance_start;
+ int distance_end;
+ ulong next_inst;
+
+ p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
+ if (!p)
+ return;
+
+ /* Find out where we are and put everything there */
+ distance_start = (ulong)p - (ulong)inst;
+ next_inst = ((ulong)inst + 4);
+ distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
+
+ /* Make sure we only write valid b instructions */
+ if (distance_start > KVM_INST_B_MAX) {
+ kvm_patching_worked = false;
+ return;
+ }
+
+ memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
+ p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
+ flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
/* Patch the invocation */
kvm_patch_ins_b(inst, distance_start);
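
Both patch helpers above finish by planting a relative branch from the original call site into the template they just built, which is why the distance is checked against KVM_INST_B_MAX before anything is committed. kvm_patch_ins_b() itself is outside this hunk, so the following is only a hedged sketch of how such a branch would be formed from the KVM_INST_B/KVM_INST_B_MASK definitions earlier in the file:

#include <stdint.h>

#define KVM_INST_B	0x48000000	/* I-form unconditional branch */
#define KVM_INST_B_MASK	0x03ffffff	/* displacement bits of that form */
#define KVM_INST_B_MAX	0x01ffffff	/* reachability limit checked above */

/* Sketch only: overwrite the trapping instruction with "b <template>",
 * assuming the byte distance already passed the range check. */
static void patch_branch_sketch(uint32_t *inst, long distance)
{
	if (distance > KVM_INST_B_MAX)
		return;				/* same bail-out as above */
	*inst = KVM_INST_B | ((uint32_t)distance & KVM_INST_B_MASK);
}
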
@@ -380,56 +434,191 @@ static void kvm_check_ins(u32 *inst, u32 features)
case KVM_INST_MFMSR:
kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
break;
- case KVM_INST_MFSPR_SPRG0:
+ case KVM_INST_MFSPR(SPRN_SPRG0):
kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
break;
- case KVM_INST_MFSPR_SPRG1:
+ case KVM_INST_MFSPR(SPRN_SPRG1):
kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
break;
- case KVM_INST_MFSPR_SPRG2:
+ case KVM_INST_MFSPR(SPRN_SPRG2):
kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
break;
- case KVM_INST_MFSPR_SPRG3:
+ case KVM_INST_MFSPR(SPRN_SPRG3):
kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
break;
- case KVM_INST_MFSPR_SRR0:
+ case KVM_INST_MFSPR(SPRN_SRR0):
kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
break;
- case KVM_INST_MFSPR_SRR1:
+ case KVM_INST_MFSPR(SPRN_SRR1):
kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
break;
- case KVM_INST_MFSPR_DAR:
+#ifdef CONFIG_BOOKE
+ case KVM_INST_MFSPR(SPRN_DEAR):
+#else
+ case KVM_INST_MFSPR(SPRN_DAR):
+#endif
kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
break;
- case KVM_INST_MFSPR_DSISR:
+ case KVM_INST_MFSPR(SPRN_DSISR):
kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
break;
+#ifdef CONFIG_PPC_BOOK3E_MMU
+ case KVM_INST_MFSPR(SPRN_MAS0):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_MAS1):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_MAS2):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_MAS3):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_MAS4):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_MAS6):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_MAS7):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
+ break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+ case KVM_INST_MFSPR(SPRN_SPRG4):
+#ifdef CONFIG_BOOKE
+ case KVM_INST_MFSPR(SPRN_SPRG4R):
+#endif
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_SPRG5):
+#ifdef CONFIG_BOOKE
+ case KVM_INST_MFSPR(SPRN_SPRG5R):
+#endif
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_SPRG6):
+#ifdef CONFIG_BOOKE
+ case KVM_INST_MFSPR(SPRN_SPRG6R):
+#endif
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
+ break;
+ case KVM_INST_MFSPR(SPRN_SPRG7):
+#ifdef CONFIG_BOOKE
+ case KVM_INST_MFSPR(SPRN_SPRG7R):
+#endif
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
+ break;
+
+#ifdef CONFIG_BOOKE
+ case KVM_INST_MFSPR(SPRN_ESR):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
+ break;
+#endif
+
+ case KVM_INST_MFSPR(SPRN_PIR):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
+ break;
+
+
/* Stores */
- case KVM_INST_MTSPR_SPRG0:
+ case KVM_INST_MTSPR(SPRN_SPRG0):
kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
break;
- case KVM_INST_MTSPR_SPRG1:
+ case KVM_INST_MTSPR(SPRN_SPRG1):
kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
break;
- case KVM_INST_MTSPR_SPRG2:
+ case KVM_INST_MTSPR(SPRN_SPRG2):
kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
break;
- case KVM_INST_MTSPR_SPRG3:
+ case KVM_INST_MTSPR(SPRN_SPRG3):
kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
break;
- case KVM_INST_MTSPR_SRR0:
+ case KVM_INST_MTSPR(SPRN_SRR0):
kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
break;
- case KVM_INST_MTSPR_SRR1:
+ case KVM_INST_MTSPR(SPRN_SRR1):
kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
break;
- case KVM_INST_MTSPR_DAR:
+#ifdef CONFIG_BOOKE
+ case KVM_INST_MTSPR(SPRN_DEAR):
+#else
+ case KVM_INST_MTSPR(SPRN_DAR):
+#endif
kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
break;
- case KVM_INST_MTSPR_DSISR:
+ case KVM_INST_MTSPR(SPRN_DSISR):
kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
break;
+#ifdef CONFIG_PPC_BOOK3E_MMU
+ case KVM_INST_MTSPR(SPRN_MAS0):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_MAS1):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_MAS2):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_MAS3):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_MAS4):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_MAS6):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_MAS7):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
+ break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+ case KVM_INST_MTSPR(SPRN_SPRG4):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_SPRG5):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_SPRG6):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
+ break;
+ case KVM_INST_MTSPR(SPRN_SPRG7):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
+ break;
+
+#ifdef CONFIG_BOOKE
+ case KVM_INST_MTSPR(SPRN_ESR):
+ if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+ kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
+ break;
+#endif
/* Nops */
case KVM_INST_TLBSYNC:
@@ -444,6 +633,11 @@ static void kvm_check_ins(u32 *inst, u32 features)
case KVM_INST_MTMSRD_L0:
kvm_patch_ins_mtmsr(inst, inst_rt);
break;
+#ifdef CONFIG_BOOKE
+ case KVM_INST_WRTEE:
+ kvm_patch_ins_wrtee(inst, inst_rt, 0);
+ break;
+#endif
}
switch (inst_no_rt & ~KVM_MASK_RB) {
@@ -461,13 +655,19 @@ static void kvm_check_ins(u32 *inst, u32 features)
switch (_inst) {
#ifdef CONFIG_BOOKE
case KVM_INST_WRTEEI_0:
+ kvm_patch_ins_wrteei_0(inst);
+ break;
+
case KVM_INST_WRTEEI_1:
- kvm_patch_ins_wrteei(inst);
+ kvm_patch_ins_wrtee(inst, 0, 1);
break;
#endif
}
}
+extern u32 kvm_template_start[];
+extern u32 kvm_template_end[];
+
static void kvm_use_magic_page(void)
{
u32 *p;
@@ -488,8 +688,23 @@ static void kvm_use_magic_page(void)
start = (void*)_stext;
end = (void*)_etext;
- for (p = start; p < end; p++)
+ /*
+ * Being interrupted in the middle of patching would
+ * be bad for SPRG4-7, which KVM can't keep in sync
+ * with emulated accesses because reads don't trap.
+ */
+ local_irq_disable();
+
+ for (p = start; p < end; p++) {
+ /* Avoid patching the template code */
+ if (p >= kvm_template_start && p < kvm_template_end) {
+ p = kvm_template_end - 1;
+ continue;
+ }
kvm_check_ins(p, features);
+ }
+
+ local_irq_enable();
printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
kvm_patching_worked ? "worked" : "failed");
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index f2b1b2523e61..e291cf3cf954 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright SUSE Linux Products GmbH 2010
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Authors: Alexander Graf <agraf@suse.de>
*/
@@ -65,6 +66,9 @@ kvm_hypercall_start:
shared->critical == r1 and r2 is always != r1 */ \
STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
+.global kvm_template_start
+kvm_template_start:
+
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:
@@ -167,6 +171,9 @@ maybe_stay_in_guest:
kvm_emulate_mtmsr_reg2:
ori r30, r0, 0
+ /* Put MSR into magic page because we don't call mtmsr */
+ STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
/* Check if we have to fetch an interrupt */
lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
cmpwi r31, 0
@@ -174,15 +181,10 @@ kvm_emulate_mtmsr_reg2:
/* Check if we may trigger an interrupt */
andi. r31, r30, MSR_EE
- beq no_mtmsr
-
- b do_mtmsr
+ bne do_mtmsr
no_mtmsr:
- /* Put MSR into magic page because we don't call mtmsr */
- STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
-
SCRATCH_RESTORE
/* Go back to caller */
@@ -210,24 +212,80 @@ kvm_emulate_mtmsr_orig_ins_offs:
kvm_emulate_mtmsr_len:
.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
+/* also used for wrteei 1 */
+.global kvm_emulate_wrtee
+kvm_emulate_wrtee:
+
+ SCRATCH_SAVE
+
+ /* Fetch old MSR in r31 */
+ LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+ /* Insert new MSR[EE] */
+kvm_emulate_wrtee_reg:
+ ori r30, r0, 0
+ rlwimi r31, r30, 0, MSR_EE
+
+ /*
+ * If MSR[EE] is now set, check for a pending interrupt.
+ * We could skip this if MSR[EE] was already on, but that
+ * should be rare, so don't bother.
+ */
+ andi. r30, r30, MSR_EE
+
+ /* Put MSR into magic page because we don't call wrtee */
+ STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+ beq no_wrtee
+
+ /* Check if we have to fetch an interrupt */
+ lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
+ cmpwi r30, 0
+ bne do_wrtee
+
+no_wrtee:
+ SCRATCH_RESTORE
+
+ /* Go back to caller */
+kvm_emulate_wrtee_branch:
+ b .
+
+do_wrtee:
+ SCRATCH_RESTORE
+ /* Just fire off the wrtee if it's critical */
+kvm_emulate_wrtee_orig_ins:
+ wrtee r0
-.global kvm_emulate_wrteei
-kvm_emulate_wrteei:
+ b kvm_emulate_wrtee_branch
+kvm_emulate_wrtee_end:
+
+.global kvm_emulate_wrtee_branch_offs
+kvm_emulate_wrtee_branch_offs:
+ .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4
+
+.global kvm_emulate_wrtee_reg_offs
+kvm_emulate_wrtee_reg_offs:
+ .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4
+
+.global kvm_emulate_wrtee_orig_ins_offs
+kvm_emulate_wrtee_orig_ins_offs:
+ .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4
+
+.global kvm_emulate_wrtee_len
+kvm_emulate_wrtee_len:
+ .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
+
+.global kvm_emulate_wrteei_0
+kvm_emulate_wrteei_0:
SCRATCH_SAVE
/* Fetch old MSR in r31 */
LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
/* Remove MSR_EE from old MSR */
- li r30, 0
- ori r30, r30, MSR_EE
- andc r31, r31, r30
-
- /* OR new MSR_EE onto the old MSR */
-kvm_emulate_wrteei_ee:
- ori r31, r31, 0
+ rlwinm r31, r31, 0, ~MSR_EE
/* Write new MSR value back */
STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
@@ -235,22 +293,17 @@ kvm_emulate_wrteei_ee:
SCRATCH_RESTORE
/* Go back to caller */
-kvm_emulate_wrteei_branch:
+kvm_emulate_wrteei_0_branch:
b .
-kvm_emulate_wrteei_end:
-
-.global kvm_emulate_wrteei_branch_offs
-kvm_emulate_wrteei_branch_offs:
- .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4
+kvm_emulate_wrteei_0_end:
-.global kvm_emulate_wrteei_ee_offs
-kvm_emulate_wrteei_ee_offs:
- .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4
-
-.global kvm_emulate_wrteei_len
-kvm_emulate_wrteei_len:
- .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4
+.global kvm_emulate_wrteei_0_branch_offs
+kvm_emulate_wrteei_0_branch_offs:
+ .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4
+.global kvm_emulate_wrteei_0_len
+kvm_emulate_wrteei_0_len:
+ .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:
@@ -300,3 +353,6 @@ kvm_emulate_mtsrin_orig_ins_offs:
.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
+
+.global kvm_template_end
+kvm_template_end:
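
Read back in C, the kvm_emulate_wrtee template added above boils down to the following: update the cached MSR[EE] in the magic page instead of the real MSR, and only fall back to the original (trapping) wrtee when interrupts were just enabled and one is already pending. This is a standalone model for illustration; the struct and field names are stand-ins for the shared-page fields, not the kernel definitions.

#include <stdbool.h>
#include <stdint.h>

#define MSR_EE	(1u << 15)	/* external interrupt enable bit */

/* Minimal model of the two magic-page fields the template touches. */
struct magic_page_model {
	uint64_t msr;		/* KVM_MAGIC_MSR slot */
	uint32_t int_pending;	/* KVM_MAGIC_INT slot */
};

static bool emulate_wrtee_model(struct magic_page_model *shared,
				uint64_t reg_value)
{
	/* rlwimi r31, r30, 0, MSR_EE: splice the new EE bit into old MSR */
	shared->msr = (shared->msr & ~(uint64_t)MSR_EE) | (reg_value & MSR_EE);

	/* Only trap out if EE is being turned on and an interrupt waits */
	if ((reg_value & MSR_EE) && shared->int_pending)
		return true;	/* caller must run the original wrtee */
	return false;		/* handled entirely without trapping */
}
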
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 3fea3689527e..bedd12e1cfbc 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -442,8 +442,10 @@ static void __init fixup_port_irq(int index,
port->irq = virq;
+#ifdef CONFIG_SERIAL_8250_FSL
if (of_device_is_compatible(np, "fsl,ns16550"))
port->handle_irq = fsl8250_handle_irq;
+#endif
}
static void __init fixup_port_pio(int index,
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 578f35f18723..ac12bd80ad95 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -26,7 +26,6 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/iseries/hv_lp_config.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
@@ -55,80 +54,14 @@ static unsigned long get_purr(void)
int cpu;
for_each_possible_cpu(cpu) {
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- sum_purr += lppaca_of(cpu).emulated_time_base;
- else {
- struct cpu_usage *cu;
+ struct cpu_usage *cu;
- cu = &per_cpu(cpu_usage_array, cpu);
- sum_purr += cu->current_tb;
- }
+ cu = &per_cpu(cpu_usage_array, cpu);
+ sum_purr += cu->current_tb;
}
return sum_purr;
}
-#ifdef CONFIG_PPC_ISERIES
-
-/*
- * Methods used to fetch LPAR data when running on an iSeries platform.
- */
-static int iseries_lparcfg_data(struct seq_file *m, void *v)
-{
- unsigned long pool_id;
- int shared, entitled_capacity, max_entitled_capacity;
- int processors, max_processors;
- unsigned long purr = get_purr();
-
- shared = (int)(local_paca->lppaca_ptr->shared_proc);
-
- seq_printf(m, "system_active_processors=%d\n",
- (int)HvLpConfig_getSystemPhysicalProcessors());
-
- seq_printf(m, "system_potential_processors=%d\n",
- (int)HvLpConfig_getSystemPhysicalProcessors());
-
- processors = (int)HvLpConfig_getPhysicalProcessors();
- seq_printf(m, "partition_active_processors=%d\n", processors);
-
- max_processors = (int)HvLpConfig_getMaxPhysicalProcessors();
- seq_printf(m, "partition_potential_processors=%d\n", max_processors);
-
- if (shared) {
- entitled_capacity = HvLpConfig_getSharedProcUnits();
- max_entitled_capacity = HvLpConfig_getMaxSharedProcUnits();
- } else {
- entitled_capacity = processors * 100;
- max_entitled_capacity = max_processors * 100;
- }
- seq_printf(m, "partition_entitled_capacity=%d\n", entitled_capacity);
-
- seq_printf(m, "partition_max_entitled_capacity=%d\n",
- max_entitled_capacity);
-
- if (shared) {
- pool_id = HvLpConfig_getSharedPoolIndex();
- seq_printf(m, "pool=%d\n", (int)pool_id);
- seq_printf(m, "pool_capacity=%d\n",
- (int)(HvLpConfig_getNumProcsInSharedPool(pool_id) *
- 100));
- seq_printf(m, "purr=%ld\n", purr);
- }
-
- seq_printf(m, "shared_processor_mode=%d\n", shared);
-
- return 0;
-}
-
-#else /* CONFIG_PPC_ISERIES */
-
-static int iseries_lparcfg_data(struct seq_file *m, void *v)
-{
- return 0;
-}
-
-#endif /* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_PPC_PSERIES
/*
* Methods used to fetch LPAR data when running on a pSeries platform.
*/
@@ -648,8 +581,7 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
u8 new_weight, *new_weight_ptr = &new_weight;
ssize_t retval;
- if (!firmware_has_feature(FW_FEATURE_SPLPAR) ||
- firmware_has_feature(FW_FEATURE_ISERIES))
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return -EINVAL;
if (count > kbuf_sz)
@@ -709,21 +641,6 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
return retval;
}
-#else /* CONFIG_PPC_PSERIES */
-
-static int pseries_lparcfg_data(struct seq_file *m, void *v)
-{
- return 0;
-}
-
-static ssize_t lparcfg_write(struct file *file, const char __user * buf,
- size_t count, loff_t * off)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_PPC_PSERIES */
-
static int lparcfg_data(struct seq_file *m, void *v)
{
struct device_node *rootdn;
@@ -738,19 +655,11 @@ static int lparcfg_data(struct seq_file *m, void *v)
rootdn = of_find_node_by_path("/");
if (rootdn) {
tmp = of_get_property(rootdn, "model", NULL);
- if (tmp) {
+ if (tmp)
model = tmp;
- /* Skip "IBM," - see platforms/iseries/dt.c */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- model += 4;
- }
tmp = of_get_property(rootdn, "system-id", NULL);
- if (tmp) {
+ if (tmp)
system_id = tmp;
- /* Skip "IBM," - see platforms/iseries/dt.c */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- system_id += 4;
- }
lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
NULL);
if (lp_index_ptr)
@@ -761,8 +670,6 @@ static int lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "system_type=%s\n", model);
seq_printf(m, "partition_id=%d\n", (int)lp_index);
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return iseries_lparcfg_data(m, v);
return pseries_lparcfg_data(m, v);
}
@@ -786,8 +693,7 @@ static int __init lparcfg_init(void)
umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
/* Allow writing if we have FW_FEATURE_SPLPAR */
- if (firmware_has_feature(FW_FEATURE_SPLPAR) &&
- !firmware_has_feature(FW_FEATURE_ISERIES))
+ if (firmware_has_feature(FW_FEATURE_SPLPAR))
mode |= S_IWUSR;
ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index b69463ec2010..ba16874fe294 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -5,7 +5,6 @@
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
*
- * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
* setjmp/longjmp code by Paul Mackerras.
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index e1612dfb4a93..2049f2d00ffe 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -21,12 +21,13 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/atomic.h>
#include <asm/errno.h>
#include <asm/topology.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
-#include <linux/atomic.h>
+#include <asm/eeh.h>
#ifdef CONFIG_PPC_OF_PLATFORM_PCI
@@ -66,6 +67,9 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev)
/* Init pci_dn data structures */
pci_devs_phb_init_dynamic(phb);
+ /* Create EEH devices for the PHB */
+ eeh_dev_phb_init_dynamic(phb);
+
/* Register devices with EEH */
#ifdef CONFIG_EEH
if (dev->dev.of_node->child)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 41456ff55e14..0bb1f98613ba 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -11,13 +11,10 @@
#include <linux/export.h>
#include <linux/memblock.h>
-#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
-#include <asm/iseries/lpar_map.h>
-#include <asm/iseries/hv_types.h>
#include <asm/kexec.h>
/* This symbol is provided by the linker - let it fill in the paca
@@ -30,8 +27,8 @@ extern unsigned long __toc_start;
* The structure which the hypervisor knows about - this structure
* should not cross a page boundary. The vpa_init/register_vpa call
* is now known to fail if the lppaca structure crosses a page
- * boundary. The lppaca is also used on legacy iSeries and POWER5
- * pSeries boxes. The lppaca is 640 bytes long, and cannot readily
+ * boundary. The lppaca is also used on POWER5 pSeries boxes.
+ * The lppaca is 640 bytes long, and cannot readily
* change since the hypervisor knows its layout, so a 1kB alignment
* will suffice to ensure that it doesn't cross a page boundary.
*/
@@ -183,12 +180,9 @@ void __init allocate_pacas(void)
/*
* We can't take SLB misses on the paca, and we want to access them
* in real mode, so allocate them within the RMA and also within
- * the first segment. On iSeries they must be within the area mapped
- * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
+ * the first segment.
*/
limit = min(0x10000000ULL, ppc64_rma_size);
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- limit = min(limit, HvPagesToMap * HVPAGESIZE);
paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index cce98d76e905..8e78e93c8185 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -38,7 +38,6 @@
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
-#include <asm/firmware.h>
#include <asm/eeh.h>
static DEFINE_SPINLOCK(hose_spinlock);
@@ -50,9 +49,6 @@ static int global_phb_number; /* Global phb counter */
/* ISA Memory physical address */
resource_size_t isa_mem_base;
-/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
-unsigned int pci_flags = 0;
-
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
@@ -219,20 +215,6 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
struct of_irq oirq;
unsigned int virq;
- /* The current device-tree that iSeries generates from the HV
- * PCI informations doesn't contain proper interrupt routing,
- * and all the fallback would do is print out crap, so we
- * don't attempt to resolve the interrupts here at all, some
- * iSeries specific fixup does it.
- *
- * In the long run, we will hopefully fix the generated device-tree
- * instead.
- */
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return -1;
-#endif
-
pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
#ifdef DEBUG
@@ -849,60 +831,6 @@ int pci_proc_domain(struct pci_bus *bus)
return 1;
}
-void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
- struct resource *res)
-{
- resource_size_t offset = 0, mask = (resource_size_t)-1;
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- if (!hose)
- return;
- if (res->flags & IORESOURCE_IO) {
- offset = (unsigned long)hose->io_base_virt - _IO_BASE;
- mask = 0xffffffffu;
- } else if (res->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
-
- region->start = (res->start - offset) & mask;
- region->end = (res->end - offset) & mask;
-}
-EXPORT_SYMBOL(pcibios_resource_to_bus);
-
-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region)
-{
- resource_size_t offset = 0, mask = (resource_size_t)-1;
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- if (!hose)
- return;
- if (res->flags & IORESOURCE_IO) {
- offset = (unsigned long)hose->io_base_virt - _IO_BASE;
- mask = 0xffffffffu;
- } else if (res->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
- res->start = (region->start + offset) & mask;
- res->end = (region->end + offset) & mask;
-}
-EXPORT_SYMBOL(pcibios_bus_to_resource);
-
-/* Fixup a bus resource into a linux resource */
-static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- resource_size_t offset = 0, mask = (resource_size_t)-1;
-
- if (res->flags & IORESOURCE_IO) {
- offset = (unsigned long)hose->io_base_virt - _IO_BASE;
- mask = 0xffffffffu;
- } else if (res->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
-
- res->start = (res->start + offset) & mask;
- res->end = (res->end + offset) & mask;
-}
-
-
/* This header fixup will do the resource fixup for all devices as they are
* probed, but not for bridge ranges
*/
@@ -942,18 +870,11 @@ static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
continue;
}
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
+ pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
pci_name(dev), i,
(unsigned long long)res->start,\
(unsigned long long)res->end,
(unsigned int)res->flags);
-
- fixup_resource(res, dev);
-
- pr_debug("PCI:%s %016llx-%016llx\n",
- pci_name(dev),
- (unsigned long long)res->start,
- (unsigned long long)res->end);
}
/* Call machine specific resource fixup */
@@ -1055,27 +976,18 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
continue;
}
- pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
+ pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
pci_name(dev), i,
(unsigned long long)res->start,\
(unsigned long long)res->end,
(unsigned int)res->flags);
- /* Perform fixup */
- fixup_resource(res, dev);
-
/* Try to detect uninitialized P2P bridge resources,
* and clear them out so they get re-assigned later
*/
if (pcibios_uninitialized_bridge_resource(bus, res)) {
res->flags = 0;
pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
- } else {
-
- pr_debug("PCI:%s %016llx-%016llx\n",
- pci_name(dev),
- (unsigned long long)res->start,
- (unsigned long long)res->end);
}
}
}
@@ -1565,6 +1477,11 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return pci_enable_resources(dev, mask);
}
+resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
+{
+ return (unsigned long) hose->io_base_virt - _IO_BASE;
+}
+
static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources)
{
struct resource *res;
@@ -1589,7 +1506,7 @@ static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, s
(unsigned long long)res->start,
(unsigned long long)res->end,
(unsigned long)res->flags);
- pci_add_resource(resources, res);
+ pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose));
/* Hookup PHB Memory resources */
for (i = 0; i < 3; ++i) {
@@ -1612,7 +1529,7 @@ static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, s
(unsigned long long)res->start,
(unsigned long long)res->end,
(unsigned long)res->flags);
- pci_add_resource(resources, res);
+ pci_add_resource_offset(resources, res, hose->pci_mem_offset);
}
pr_debug("PCI: PHB MEM offset = %016llx\n",
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index fdd1a3d951dc..4b06ec5a502e 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -219,9 +219,9 @@ void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose)
struct resource *res = &hose->io_resource;
/* Fixup IO space offset */
- io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
- res->start = (res->start + io_offset) & 0xffffffffu;
- res->end = (res->end + io_offset) & 0xffffffffu;
+ io_offset = pcibios_io_space_offset(hose);
+ res->start += io_offset;
+ res->end += io_offset;
}
static int __init pcibios_init(void)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3318d39b7d4c..94a54f61d341 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -33,8 +33,6 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
-unsigned long pci_probe_only = 1;
-
/* pci_io_base -- the base address from which io bars are offsets.
* This is the lowest I/O base address (so bar values are always positive),
* and it *must* be the start of ISA space if an ISA bus exists because
@@ -55,9 +53,6 @@ static int __init pcibios_init(void)
*/
ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
- if (pci_probe_only)
- pci_add_flags(PCI_PROBE_ONLY);
-
/* On ppc64, we always enable PCI domains and we keep domain 0
* backward compatible in /proc for video cards
*/
@@ -173,7 +168,7 @@ static int __devinit pcibios_map_phb_io_space(struct pci_controller *hose)
return -ENOMEM;
/* Fixup hose IO resource */
- io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ io_virt_offset = pcibios_io_space_offset(hose);
hose->io_resource.start += io_virt_offset;
hose->io_resource.end += io_virt_offset;
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index b37d0b5a796e..89dde171a6fa 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -75,6 +75,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
{
u64 base, size;
unsigned int flags;
+ struct pci_bus_region region;
struct resource *res;
const u32 *addrs;
u32 i;
@@ -106,10 +107,11 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
continue;
}
- res->start = base;
- res->end = base + size - 1;
res->flags = flags;
res->name = pci_name(dev);
+ region.start = base;
+ region.end = base + size - 1;
+ pcibios_bus_to_resource(dev, res, &region);
}
}
@@ -209,6 +211,7 @@ void __devinit of_scan_pci_bridge(struct pci_dev *dev)
struct pci_bus *bus;
const u32 *busrange, *ranges;
int len, i, mode;
+ struct pci_bus_region region;
struct resource *res;
unsigned int flags;
u64 size;
@@ -270,9 +273,10 @@ void __devinit of_scan_pci_bridge(struct pci_dev *dev)
res = bus->resource[i];
++i;
}
- res->start = of_read_number(&ranges[1], 2);
- res->end = res->start + size - 1;
res->flags = flags;
+ region.start = of_read_number(&ranges[1], 2);
+ region.end = region.start + size - 1;
+ pcibios_bus_to_resource(dev, res, &region);
}
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index a841a9d136a2..58eaa3ddf7b9 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -13,6 +13,7 @@
*/
#include <linux/errno.h>
+#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/export.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ebe5766781aa..e40707032ac3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -566,12 +566,12 @@ static void show_instructions(struct pt_regs *regs)
*/
if (!__kernel_text_address(pc) ||
__get_user(instr, (unsigned int __user *)pc)) {
- printk("XXXXXXXX ");
+ printk(KERN_CONT "XXXXXXXX ");
} else {
if (regs->nip == pc)
- printk("<%08x> ", instr);
+ printk(KERN_CONT "<%08x> ", instr);
else
- printk("%08x ", instr);
+ printk(KERN_CONT "%08x ", instr);
}
pc += sizeof(int);
@@ -647,6 +647,9 @@ void show_regs(struct pt_regs * regs)
printk("MSR: "REG" ", regs->msr);
printbits(regs->msr, msr_bits);
printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
+#ifdef CONFIG_PPC64
+ printk("SOFTE: %ld\n", regs->softe);
+#endif
trap = TRAP(regs);
if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
printk("CFAR: "REG"\n", regs->orig_gpr3);
@@ -1220,34 +1223,32 @@ void dump_stack(void)
EXPORT_SYMBOL(dump_stack);
#ifdef CONFIG_PPC64
-void ppc64_runlatch_on(void)
+/* Called with hard IRQs off */
+void __ppc64_runlatch_on(void)
{
+ struct thread_info *ti = current_thread_info();
unsigned long ctrl;
- if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
- HMT_medium();
-
- ctrl = mfspr(SPRN_CTRLF);
- ctrl |= CTRL_RUNLATCH;
- mtspr(SPRN_CTRLT, ctrl);
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl |= CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
- set_thread_flag(TIF_RUNLATCH);
- }
+ ti->local_flags |= TLF_RUNLATCH;
}
+/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
+ struct thread_info *ti = current_thread_info();
unsigned long ctrl;
- HMT_medium();
-
- clear_thread_flag(TIF_RUNLATCH);
+ ti->local_flags &= ~TLF_RUNLATCH;
ctrl = mfspr(SPRN_CTRLF);
ctrl &= ~CTRL_RUNLATCH;
mtspr(SPRN_CTRLT, ctrl);
}
-#endif
+#endif /* CONFIG_PPC64 */
#if THREAD_SHIFT < PAGE_SHIFT
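
With the cheap checks stripped out of __ppc64_runlatch_on()/__ppc64_runlatch_off(), callers are expected to perform them inline before touching the SPR. The wrapper is not part of this hunk, so the sketch below is an assumption built only from identifiers visible here (CPU_FTR_CTRL from the removed code, TLF_RUNLATCH and current_thread_info() from the added code):

/* Sketch only: the real wrappers would live in a header, not this file. */
#define ppc64_runlatch_on()						\
	do {								\
		if (cpu_has_feature(CPU_FTR_CTRL) &&			\
		    !(current_thread_info()->local_flags & TLF_RUNLATCH)) \
			__ppc64_runlatch_on();				\
	} while (0)

#define ppc64_runlatch_off()						\
	do {								\
		if (cpu_has_feature(CPU_FTR_CTRL) &&			\
		    (current_thread_info()->local_flags & TLF_RUNLATCH)) \
			__ppc64_runlatch_off();				\
	} while (0)
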
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index abe405dab34d..89e850af3dd6 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -52,9 +52,9 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
-#include <asm/phyp_dump.h>
#include <asm/kexec.h>
#include <asm/opal.h>
+#include <asm/fadump.h>
#include <mm/mmu_decl.h>
@@ -615,86 +615,6 @@ static void __init early_reserve_mem(void)
}
}
-#ifdef CONFIG_PHYP_DUMP
-/**
- * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
- *
- * Function to find the largest size we need to reserve
- * during early boot process.
- *
- * It either looks for boot param and returns that OR
- * returns larger of 256 or 5% rounded down to multiples of 256MB.
- *
- */
-static inline unsigned long phyp_dump_calculate_reserve_size(void)
-{
- unsigned long tmp;
-
- if (phyp_dump_info->reserve_bootvar)
- return phyp_dump_info->reserve_bootvar;
-
- /* divide by 20 to get 5% of value */
- tmp = memblock_end_of_DRAM();
- do_div(tmp, 20);
-
- /* round it down in multiples of 256 */
- tmp = tmp & ~0x0FFFFFFFUL;
-
- return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
-}
-
-/**
- * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory
- *
- * This routine may reserve memory regions in the kernel only
- * if the system is supported and a dump was taken in last
- * boot instance or if the hardware is supported and the
- * scratch area needs to be setup. In other instances it returns
- * without reserving anything. The memory in case of dump being
- * active is freed when the dump is collected (by userland tools).
- */
-static void __init phyp_dump_reserve_mem(void)
-{
- unsigned long base, size;
- unsigned long variable_reserve_size;
-
- if (!phyp_dump_info->phyp_dump_configured) {
- printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
- return;
- }
-
- if (!phyp_dump_info->phyp_dump_at_boot) {
- printk(KERN_INFO "Phyp-dump disabled at boot time\n");
- return;
- }
-
- variable_reserve_size = phyp_dump_calculate_reserve_size();
-
- if (phyp_dump_info->phyp_dump_is_active) {
- /* Reserve *everything* above RMR.Area freed by userland tools*/
- base = variable_reserve_size;
- size = memblock_end_of_DRAM() - base;
-
- /* XXX crashed_ram_end is wrong, since it may be beyond
- * the memory_limit, it will need to be adjusted. */
- memblock_reserve(base, size);
-
- phyp_dump_info->init_reserve_start = base;
- phyp_dump_info->init_reserve_size = size;
- } else {
- size = phyp_dump_info->cpu_state_size +
- phyp_dump_info->hpte_region_size +
- variable_reserve_size;
- base = memblock_end_of_DRAM() - size;
- memblock_reserve(base, size);
- phyp_dump_info->init_reserve_start = base;
- phyp_dump_info->init_reserve_size = size;
- }
-}
-#else
-static inline void __init phyp_dump_reserve_mem(void) {}
-#endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
-
void __init early_init_devtree(void *params)
{
phys_addr_t limit;
@@ -714,9 +634,9 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_opal, NULL);
#endif
-#ifdef CONFIG_PHYP_DUMP
- /* scan tree to see if dump occurred during last boot */
- of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
+#ifdef CONFIG_FA_DUMP
+ /* scan tree to see if dump is active during last boot */
+ of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
#endif
/* Pre-initialize the cmd_line with the content of boot_commmand_line,
@@ -750,9 +670,15 @@ void __init early_init_devtree(void *params)
if (PHYSICAL_START > MEMORY_START)
memblock_reserve(MEMORY_START, 0x8000);
reserve_kdump_trampoline();
- reserve_crashkernel();
+#ifdef CONFIG_FA_DUMP
+ /*
+ * If we fail to reserve memory for firmware-assisted dump then
+ * fallback to kexec based kdump.
+ */
+ if (fadump_reserve_mem() == 0)
+#endif
+ reserve_crashkernel();
early_reserve_mem();
- phyp_dump_reserve_mem();
/*
* Ensure that total memory size is page-aligned, because otherwise
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index eca626ea3f23..ea4e311e09d2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -48,14 +48,6 @@
#include <linux/linux_logo.h>
/*
- * Properties whose value is longer than this get excluded from our
- * copy of the device tree. This value does need to be big enough to
- * ensure that we don't lose things like the interrupt-map property
- * on a PCI-PCI bridge.
- */
-#define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
-
-/*
* Eventually bump that one up
*/
#define DEVTREE_CHUNK_SIZE 0x100000
@@ -455,7 +447,7 @@ static void __init __attribute__((noreturn)) prom_panic(const char *reason)
if (RELOC(of_platform) == PLATFORM_POWERMAC)
asm("trap\n");
- /* ToDo: should put up an SRC here on p/iSeries */
+ /* ToDo: should put up an SRC here on pSeries */
call_prom("exit", 0, 0);
for (;;) /* should never get here */
@@ -2273,13 +2265,6 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
/* sanity checks */
if (l == PROM_ERROR)
continue;
- if (l > MAX_PROPERTY_LENGTH) {
- prom_printf("WARNING: ignoring large property ");
- /* It seems OF doesn't null-terminate the path :-( */
- prom_printf("[%s] ", path);
- prom_printf("%s length 0x%x\n", RELOC(pname), l);
- continue;
- }
/* push property head */
dt_push_token(OF_DT_PROP, mem_start, mem_end);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 517b1d8f455b..4d1a7babe342 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -716,7 +716,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
int cpu;
slb_set_size(SLB_MIN_SIZE);
- stop_topology_update();
printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -732,7 +731,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
rc = atomic_read(&data->error);
atomic_set(&data->error, rc);
- start_topology_update();
pSeries_coalesce_init();
if (wake_when_done) {
@@ -846,6 +844,7 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
atomic_set(&data.error, 0);
data.token = rtas_token("ibm,suspend-me");
data.complete = &done;
+ stop_topology_update();
/* Call function on all CPUs. One of us will make the
* rtas call
@@ -858,6 +857,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
if (atomic_read(&data.error) != 0)
printk(KERN_ERR "Error doing global join\n");
+ start_topology_update();
+
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
@@ -867,6 +868,40 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
}
#endif
+/**
+ * Find a specific pseries error log in an RTAS extended event log.
+ * @log: RTAS error/event log
+ * @section_id: two character section identifier
+ *
+ * Returns a pointer to the specified errorlog or NULL if not found.
+ */
+struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
+ uint16_t section_id)
+{
+ struct rtas_ext_event_log_v6 *ext_log =
+ (struct rtas_ext_event_log_v6 *)log->buffer;
+ struct pseries_errorlog *sect;
+ unsigned char *p, *log_end;
+
+ /* Check that we understand the format */
+ if (log->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
+ ext_log->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
+ ext_log->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
+ return NULL;
+
+ log_end = log->buffer + log->extended_log_length;
+ p = ext_log->vendor_log;
+
+ while (p < log_end) {
+ sect = (struct pseries_errorlog *)p;
+ if (sect->id == section_id)
+ return sect;
+ p += sect->length;
+ }
+
+ return NULL;
+}
+
asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
{
struct rtas_args args;
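
A caller of the new get_pseries_errorlog() needs only the log pointer and the two-character section id it is looking for. The id constant below is a made-up placeholder, purely to show the shape of the call:

/* Illustrative only -- not a real section id. */
#define PSERIES_ELOG_SECT_ID_EXAMPLE	(('X' << 8) | 'Y')

static void report_example_section(struct rtas_error_log *log)
{
	struct pseries_errorlog *sect;

	sect = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EXAMPLE);
	if (!sect)
		return;		/* the log carries no such section */

	/* sect->length covers the whole section as stored in the log. */
	pr_debug("example section present, %u bytes\n",
		 (unsigned int)sect->length);
}
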
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 6cd8f0196b6d..179af906dcda 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -275,8 +275,11 @@ void __init find_and_init_phbs(void)
of_node_put(root);
pci_devs_phb_init();
+ /* Create EEH devices for all PHBs */
+ eeh_dev_phb_init();
+
/*
- * pci_probe_only and pci_assign_all_buses can be set via properties
+ * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
* in chosen.
*/
if (of_chosen) {
@@ -284,8 +287,12 @@ void __init find_and_init_phbs(void)
prop = of_get_property(of_chosen,
"linux,pci-probe-only", NULL);
- if (prop)
- pci_probe_only = *prop;
+ if (prop) {
+ if (*prop)
+ pci_add_flags(PCI_PROBE_ONLY);
+ else
+ pci_clear_flags(PCI_PROBE_ONLY);
+ }
#ifdef CONFIG_PPC32 /* Will be made generic soon */
prop = of_get_property(of_chosen,
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 77bb77da05c1..b0ebdeab9494 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -61,6 +61,7 @@
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
+#include <asm/fadump.h>
#include "setup.h"
@@ -109,6 +110,14 @@ EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
/* also used by kexec */
void machine_shutdown(void)
{
+#ifdef CONFIG_FA_DUMP
+ /*
+ * if fadump is active, cleanup the fadump registration before we
+ * shutdown.
+ */
+ fadump_cleanup();
+#endif
+
if (ppc_md.machine_shutdown)
ppc_md.machine_shutdown();
}
@@ -639,6 +648,11 @@ EXPORT_SYMBOL(check_legacy_ioport);
static int ppc_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ /*
+ * If firmware-assisted dump has been registered then trigger
+ * firmware-assisted dump and let firmware handle everything else.
+ */
+ crash_fadump(NULL, ptr);
ppc_md.panic(ptr); /* May not return */
return NOTIFY_DONE;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4cb8f1e9d044..4721b0c8d7b7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -598,7 +598,7 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff */
mmu_context_init();
- kvm_rma_init();
+ kvm_linear_init();
ppc64_boot_msg(0x15, "Setup Done");
}
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 2300426e531a..7006b7f4267a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
#include <linux/tracehook.h>
#include <linux/signal.h>
+#include <linux/key.h>
#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -56,10 +57,7 @@ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
void restore_sigmask(sigset_t *set)
{
sigdelsetmask(set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = *set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(set);
}
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
@@ -113,8 +111,9 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
}
}
-static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
+static int do_signal(struct pt_regs *regs)
{
+ sigset_t *oldset;
siginfo_t info;
int signr;
struct k_sigaction ka;
@@ -123,7 +122,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK)
oldset = &current->saved_sigmask;
- else if (!oldset)
+ else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -167,13 +166,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
regs->trap = 0;
if (ret) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked,
- &ka.sa.sa_mask);
- if (!(ka.sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ block_sigmask(&ka, signr);
/*
* A signal was successfully delivered; the saved sigmask is in
@@ -191,14 +184,16 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
return ret;
}
-void do_signal(struct pt_regs *regs, unsigned long thread_info_flags)
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
- do_signal_pending(NULL, regs);
+ do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
}
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 6c0ddfc0603e..8dde973aaaf5 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -12,7 +12,7 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags);
+extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size, int is_32);
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 836a5a19eb2c..e061ef5dd449 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -242,12 +242,13 @@ static inline int restore_general_regs(struct pt_regs *regs,
*/
long sys_sigsuspend(old_sigset_t mask)
{
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 883e74c0d1b3..0c683d376b1c 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -12,7 +12,6 @@
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
-#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/machdep.h>
@@ -341,8 +340,7 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
int i, nattrs;
#ifdef CONFIG_PPC64
- if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
- cpu_has_feature(CPU_FTR_SMT))
+ if (cpu_has_feature(CPU_FTR_SMT))
device_create_file(s, &dev_attr_smt_snooze_delay);
#endif
@@ -414,8 +412,7 @@ static void unregister_cpu_online(unsigned int cpu)
BUG_ON(!c->hotpluggable);
#ifdef CONFIG_PPC64
- if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
- cpu_has_feature(CPU_FTR_SMT))
+ if (cpu_has_feature(CPU_FTR_SMT))
device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 567dd7c3ac2a..2c42cd72d0f5 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -17,8 +17,7 @@
*
* TODO (not necessarily in this file):
* - improve precision and reproducibility of timebase frequency
- * measurement at boot time. (for iSeries, we calibrate the timebase
- * against the Titan chip's clock.)
+ * measurement at boot time.
* - for astronomical applications: add a new function to get
* non ambiguous timestamps even around leap seconds. This needs
* a new timestamp format and a good name.
@@ -70,10 +69,6 @@
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/iseries/it_lp_queue.h>
-#include <asm/iseries/hv_call_xm.h>
-#endif
/* powerpc clocksource/clockevent code */
@@ -117,14 +112,6 @@ static struct clock_event_device decrementer_clockevent = {
DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
-#ifdef CONFIG_PPC_ISERIES
-static unsigned long __initdata iSeries_recal_titan;
-static signed long __initdata iSeries_recal_tb;
-
-/* Forward declaration is only needed for iSereis compiles */
-static void __init clocksource_init(void);
-#endif
-
#define XSEC_PER_SEC (1024*1024)
#ifdef CONFIG_PPC64
@@ -259,7 +246,6 @@ void accumulate_stolen_time(void)
u64 sst, ust;
u8 save_soft_enabled = local_paca->soft_enabled;
- u8 save_hard_enabled = local_paca->hard_enabled;
/* We are called early in the exception entry, before
* soft/hard_enabled are sync'ed to the expected state
@@ -268,7 +254,6 @@ void accumulate_stolen_time(void)
* complain
*/
local_paca->soft_enabled = 0;
- local_paca->hard_enabled = 0;
sst = scan_dispatch_log(local_paca->starttime_user);
ust = scan_dispatch_log(local_paca->starttime);
@@ -277,7 +262,6 @@ void accumulate_stolen_time(void)
local_paca->stolen_time += ust + sst;
local_paca->soft_enabled = save_soft_enabled;
- local_paca->hard_enabled = save_hard_enabled;
}
static inline u64 calculate_stolen_time(u64 stop_tb)
@@ -426,74 +410,6 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-#ifdef CONFIG_PPC_ISERIES
-
-/*
- * This function recalibrates the timebase based on the 49-bit time-of-day
- * value in the Titan chip. The Titan is much more accurate than the value
- * returned by the service processor for the timebase frequency.
- */
-
-static int __init iSeries_tb_recal(void)
-{
- unsigned long titan, tb;
-
- /* Make sure we only run on iSeries */
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return -ENODEV;
-
- tb = get_tb();
- titan = HvCallXm_loadTod();
- if ( iSeries_recal_titan ) {
- unsigned long tb_ticks = tb - iSeries_recal_tb;
- unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
- unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
- unsigned long new_tb_ticks_per_jiffy =
- DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
- long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
- char sign = '+';
- /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
- new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
-
- if ( tick_diff < 0 ) {
- tick_diff = -tick_diff;
- sign = '-';
- }
- if ( tick_diff ) {
- if ( tick_diff < tb_ticks_per_jiffy/25 ) {
- printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
- new_tb_ticks_per_jiffy, sign, tick_diff );
- tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
- tb_ticks_per_sec = new_tb_ticks_per_sec;
- calc_cputime_factors();
- vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
- setup_cputime_one_jiffy();
- }
- else {
- printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
- " new tb_ticks_per_jiffy = %lu\n"
- " old tb_ticks_per_jiffy = %lu\n",
- new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
- }
- }
- }
- iSeries_recal_titan = titan;
- iSeries_recal_tb = tb;
-
- /* Called here as now we know accurate values for the timebase */
- clocksource_init();
- return 0;
-}
-late_initcall(iSeries_tb_recal);
-
-/* Called from platform early init */
-void __init iSeries_time_init_early(void)
-{
- iSeries_recal_tb = get_tb();
- iSeries_recal_titan = HvCallXm_loadTod();
-}
-#endif /* CONFIG_PPC_ISERIES */
-
#ifdef CONFIG_IRQ_WORK
/*
@@ -550,16 +466,6 @@ void arch_irq_work_raise(void)
#endif /* CONFIG_IRQ_WORK */
/*
- * For iSeries shared processors, we have to let the hypervisor
- * set the hardware decrementer. We set a virtual decrementer
- * in the lppaca and call the hypervisor if the virtual
- * decrementer is less than the current value in the hardware
- * decrementer. (almost always the new decrementer value will
- * be greater than the current hardware decementer so the hypervisor
- * call will not be needed)
- */
-
-/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
*/
@@ -580,6 +486,11 @@ void timer_interrupt(struct pt_regs * regs)
if (!cpu_online(smp_processor_id()))
return;
+ /* Conditionally hard-enable interrupts now that the DEC has been
+ * bumped to its maximum value
+ */
+ may_hard_irq_enable();
+
trace_timer_interrupt_entry(regs);
__get_cpu_var(irq_stat).timer_irqs++;
@@ -597,20 +508,10 @@ void timer_interrupt(struct pt_regs * regs)
irq_work_run();
}
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- get_lppaca()->int_dword.fields.decr_int = 0;
-#endif
-
*next_tb = ~(u64)0;
if (evt->event_handler)
evt->event_handler(evt);
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
- process_hvlpevents();
-#endif
-
#ifdef CONFIG_PPC64
/* collect purr register values often, for accurate calculations */
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
@@ -982,9 +883,8 @@ void __init time_init(void)
*/
start_cpu_decrementer();
- /* Register the clocksource, if we're not running on iSeries */
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- clocksource_init();
+ /* Register the clocksource */
+ clocksource_init();
init_decrementer_clockevent();
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c091527efd89..a750409ccc4e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -57,6 +57,7 @@
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
+#include <asm/fadump.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -145,6 +146,8 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
arch_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
+ crash_fadump(regs, "die oops");
+
/*
* A system reset (0x100) is a request to dump, so we always send
* it through the crashdump code.
@@ -244,6 +247,9 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
addr, regs->nip, regs->link, code);
}
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
memset(&info, 0, sizeof(info));
info.si_signo = signr;
info.si_code = code;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 57fa2c0a531c..c39c1ca77f46 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -46,9 +46,6 @@ void __init udbg_early_init(void)
#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
/* Maple real mode debug */
udbg_init_maple_realmode();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_ISERIES)
- /* For iSeries - hit Ctrl-x Ctrl-x to see the output */
- udbg_init_iseries();
#elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT)
udbg_init_debug_beat();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 7d14bb697d40..972cca278f98 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -263,17 +263,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* the "data" page of the vDSO or you'll stop getting kernel updates
* and your nice userland gettimeofday will be totally dead.
* It's fine to use that for setting breakpoints in the vDSO code
- * pages though
- *
- * Make sure the vDSO gets into every core dump.
- * Dumping its contents makes post-mortem fully interpretable later
- * without matching up the same kernel and hardware config to see
- * what PC values meant.
+ * pages though.
*/
rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
- VM_ALWAYSDUMP,
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_pagelist);
if (rc) {
current->mm->context.vdso_base = 0;
@@ -727,10 +721,10 @@ static int __init vdso_init(void)
vdso_data->version.minor = SYSTEMCFG_MINOR;
vdso_data->processor = mfspr(SPRN_PVR);
/*
- * Fake the old platform number for pSeries and iSeries and add
+ * Fake the old platform number for pSeries and add
* in LPAR bit if necessary
*/
- vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100;
+ vdso_data->platform = 0x100;
if (firmware_has_feature(FW_FEATURE_LPAR))
vdso_data->platform |= 1;
vdso_data->physicalMemorySize = memblock_phys_mem_size();
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 8b086299ba25..b2f7c8480bf6 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -34,11 +34,6 @@
#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/hvcall.h>
-#include <asm/iseries/vio.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/hv_call_xm.h>
-#include <asm/iseries/iommu.h>
static struct bus_type vio_bus_type;
@@ -1042,7 +1037,6 @@ static void vio_cmo_sysfs_init(void)
vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
#else /* CONFIG_PPC_SMLPAR */
-/* Dummy functions for iSeries platform */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
@@ -1060,9 +1054,6 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
struct iommu_table *tbl;
unsigned long offset, size;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return vio_build_iommu_table_iseries(dev);
-
dma_window = of_get_property(dev->dev.of_node,
"ibm,my-dma-window", NULL);
if (!dma_window)
@@ -1168,17 +1159,21 @@ static int vio_bus_remove(struct device *dev)
* vio_register_driver: - Register a new vio driver
* @drv: The vio_driver structure to be registered.
*/
-int vio_register_driver(struct vio_driver *viodrv)
+int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
+ const char *mod_name)
{
- printk(KERN_DEBUG "%s: driver %s registering\n", __func__,
- viodrv->driver.name);
+ pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
/* fill in 'struct driver' fields */
+ viodrv->driver.name = viodrv->name;
+ viodrv->driver.pm = viodrv->pm;
viodrv->driver.bus = &vio_bus_type;
+ viodrv->driver.owner = owner;
+ viodrv->driver.mod_name = mod_name;
return driver_register(&viodrv->driver);
}
-EXPORT_SYMBOL(vio_register_driver);
+EXPORT_SYMBOL(__vio_register_driver);
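/*
 * Callers are expected to keep using vio_register_driver(); a minimal
 * sketch of the wrapper this change implies (assumed to live in vio.h,
 * not shown by this hunk):
 */
#define vio_register_driver(driver) \
	__vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)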
/**
* vio_unregister_driver - Remove registration of vio driver.
@@ -1195,8 +1190,7 @@ static void __devinit vio_dev_release(struct device *dev)
{
struct iommu_table *tbl = get_iommu_table_base(dev);
- /* iSeries uses a common table for all vio devices */
- if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl)
+ if (tbl)
iommu_free_table(tbl, dev->of_node ?
dev->of_node->full_name : dev_name(dev));
of_node_put(dev->of_node);
@@ -1244,12 +1238,6 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
viodev->name = of_node->name;
viodev->type = of_node->type;
viodev->unit_address = *unit_address;
- if (firmware_has_feature(FW_FEATURE_ISERIES)) {
- unit_address = of_get_property(of_node,
- "linux,unit_address", NULL);
- if (unit_address != NULL)
- viodev->unit_address = *unit_address;
- }
viodev->dev.of_node = of_node_get(of_node);
if (firmware_has_feature(FW_FEATURE_CMO))
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 710a54005dfb..65d1c08cf09e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -109,11 +109,6 @@ SECTIONS
__ptov_table_begin = .;
*(.ptov_fixup);
__ptov_table_end = .;
-#ifdef CONFIG_PPC_ISERIES
- __dt_strings_start = .;
- *(.dt_strings);
- __dt_strings_end = .;
-#endif
}
.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 78133deb4b64..8f64709ae331 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -69,6 +69,7 @@ config KVM_BOOK3S_64
config KVM_BOOK3S_64_HV
bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
depends on KVM_BOOK3S_64
+ select MMU_NOTIFIER
---help---
Support running unmodified book3s_64 guest kernels in
virtual machines on POWER7 and PPC970 processors that have
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index e41ac6f7dcf1..7d54f4ed6d96 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
return true;
}
-void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -423,10 +423,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg1 = vcpu->arch.shared->sprg1;
regs->sprg2 = vcpu->arch.shared->sprg2;
regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg4 = vcpu->arch.sprg4;
- regs->sprg5 = vcpu->arch.sprg5;
- regs->sprg6 = vcpu->arch.sprg6;
- regs->sprg7 = vcpu->arch.sprg7;
+ regs->sprg4 = vcpu->arch.shared->sprg4;
+ regs->sprg5 = vcpu->arch.shared->sprg5;
+ regs->sprg6 = vcpu->arch.shared->sprg6;
+ regs->sprg7 = vcpu->arch.shared->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -450,10 +450,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.shared->sprg1 = regs->sprg1;
vcpu->arch.shared->sprg2 = regs->sprg2;
vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.sprg4 = regs->sprg4;
- vcpu->arch.sprg5 = regs->sprg5;
- vcpu->arch.sprg6 = regs->sprg6;
- vcpu->arch.sprg7 = regs->sprg7;
+ vcpu->arch.shared->sprg4 = regs->sprg4;
+ vcpu->arch.shared->sprg5 = regs->sprg5;
+ vcpu->arch.shared->sprg6 = regs->sprg6;
+ vcpu->arch.shared->sprg7 = regs->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -477,41 +477,10 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return 0;
}
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+void kvmppc_decrementer_func(unsigned long data)
{
- struct kvm_memory_slot *memslot;
- struct kvm_vcpu *vcpu;
- ulong ga, ga_end;
- int is_dirty = 0;
- int r;
- unsigned long n;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
- if (r)
- goto out;
-
- /* If nothing is dirty, don't bother messing with page tables. */
- if (is_dirty) {
- memslot = id_to_memslot(kvm->memslots, log->slot);
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
- ga = memslot->base_gfn << PAGE_SHIFT;
- ga_end = ga + (memslot->npages << PAGE_SHIFT);
-
- kvm_for_each_vcpu(n, vcpu, kvm)
- kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
-
- n = kvm_dirty_bitmap_bytes(memslot);
- memset(memslot->dirty_bitmap, 0, n);
- }
-
- r = 0;
-out:
- mutex_unlock(&kvm->slots_lock);
- return r;
+ kvmppc_core_queue_dec(vcpu);
+ kvm_vcpu_kick(vcpu);
}
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 9fecbfbce773..f922c29bb234 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -151,13 +151,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
bool primary = false;
bool evict = false;
struct hpte_cache *pte;
+ int r = 0;
/* Get host physical address for gpa */
hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
if (is_error_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
orig_pte->eaddr);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
hpaddr <<= PAGE_SHIFT;
@@ -249,7 +251,8 @@ next_pteg:
kvmppc_mmu_hpte_cache_map(vcpu, pte);
- return 0;
+out:
+ return r;
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -297,12 +300,14 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
u64 gvsid;
u32 sr;
struct kvmppc_sid_map *map;
- struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ int r = 0;
if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
/* Invalidate an entry */
svcpu->sr[esid] = SR_INVALID;
- return -ENOENT;
+ r = -ENOENT;
+ goto out;
}
map = find_sid_vsid(vcpu, gvsid);
@@ -315,17 +320,21 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
- return 0;
+out:
+ svcpu_put(svcpu);
+ return r;
}
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
int i;
- struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
svcpu->sr[i] = SR_INVALID;
+
+ svcpu_put(svcpu);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index fa2f08434ba5..6f87f39a1ac2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -88,12 +88,14 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
int vflags = 0;
int attempt = 0;
struct kvmppc_sid_map *map;
+ int r = 0;
/* Get host physical address for gpa */
hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
if (is_error_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
hpaddr <<= PAGE_SHIFT;
hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
@@ -110,7 +112,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
vsid, orig_pte->eaddr);
WARN_ON(true);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
vsid = map->host_vsid;
@@ -131,8 +134,10 @@ map_again:
/* In case we tried normal mapping already, let's nuke old entries */
if (attempt > 1)
- if (ppc_md.hpte_remove(hpteg) < 0)
- return -1;
+ if (ppc_md.hpte_remove(hpteg) < 0) {
+ r = -1;
+ goto out;
+ }
ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
@@ -162,7 +167,8 @@ map_again:
kvmppc_mmu_hpte_cache_map(vcpu, pte);
}
- return 0;
+out:
+ return r;
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -207,25 +213,30 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
int i;
int max_slb_size = 64;
int found_inval = -1;
int r;
- if (!to_svcpu(vcpu)->slb_max)
- to_svcpu(vcpu)->slb_max = 1;
+ if (!svcpu->slb_max)
+ svcpu->slb_max = 1;
/* Are we overwriting? */
- for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
- if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
+ for (i = 1; i < svcpu->slb_max; i++) {
+ if (!(svcpu->slb[i].esid & SLB_ESID_V))
found_inval = i;
- else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
- return i;
+ else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
+ r = i;
+ goto out;
+ }
}
/* Found a spare entry that was invalidated before */
- if (found_inval > 0)
- return found_inval;
+ if (found_inval > 0) {
+ r = found_inval;
+ goto out;
+ }
/* No spare invalid entry, so create one */
@@ -233,30 +244,35 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
max_slb_size = mmu_slb_size;
/* Overflowing -> purge */
- if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
+ if ((svcpu->slb_max) == max_slb_size)
kvmppc_mmu_flush_segments(vcpu);
- r = to_svcpu(vcpu)->slb_max;
- to_svcpu(vcpu)->slb_max++;
+ r = svcpu->slb_max;
+ svcpu->slb_max++;
+out:
+ svcpu_put(svcpu);
return r;
}
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u64 esid = eaddr >> SID_SHIFT;
u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
u64 slb_vsid = SLB_VSID_USER;
u64 gvsid;
int slb_index;
struct kvmppc_sid_map *map;
+ int r = 0;
slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
/* Invalidate an entry */
- to_svcpu(vcpu)->slb[slb_index].esid = 0;
- return -ENOENT;
+ svcpu->slb[slb_index].esid = 0;
+ r = -ENOENT;
+ goto out;
}
map = find_sid_vsid(vcpu, gvsid);
@@ -269,18 +285,22 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
slb_vsid &= ~SLB_VSID_KP;
slb_esid |= slb_index;
- to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
- to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
+ svcpu->slb[slb_index].esid = slb_esid;
+ svcpu->slb[slb_index].vsid = slb_vsid;
trace_kvm_book3s_slbmte(slb_vsid, slb_esid);
- return 0;
+out:
+ svcpu_put(svcpu);
+ return r;
}
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
- to_svcpu(vcpu)->slb_max = 1;
- to_svcpu(vcpu)->slb[0].esid = 0;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ svcpu->slb_max = 1;
+ svcpu->slb[0].esid = 0;
+ svcpu_put(svcpu);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index bc3a2ea94217..ddc485a529f2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -23,6 +23,7 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
@@ -33,15 +34,6 @@
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
-/* For now use fixed-size 16MB page table */
-#define HPT_ORDER 24
-#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
-#define HPT_HASH_MASK (HPT_NPTEG - 1)
-
-/* Pages in the VRMA are 16MB pages */
-#define VRMA_PAGE_ORDER 24
-#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
-
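/*
 * Worked numbers for the fixed-size table these constants described (the
 * macros are assumed to come from a header now): a 2^24-byte (16MB) HPT
 * holds 2^(24 - 7) = 131072 PTEGs of 128 bytes, i.e. 8 HPTEs of 16 bytes
 * each, so 2^20 HPTEs in total -- the HPT_NPTE count used below to size
 * the reverse map.
 */
enum {
	example_hpt_order = 24,                           /* 16MB table */
	example_hpt_npteg = 1 << (example_hpt_order - 7), /* 131072 PTEGs */
	example_hpt_npte  = example_hpt_npteg * 8,        /* 1048576 HPTEs */
};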
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
#define NR_LPIDS (LPID_RSVD + 1)
@@ -51,21 +43,41 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
{
unsigned long hpt;
unsigned long lpid;
+ struct revmap_entry *rev;
+ struct kvmppc_linear_info *li;
+
+ /* Allocate guest's hashed page table */
+ li = kvm_alloc_hpt();
+ if (li) {
+ /* using preallocated memory */
+ hpt = (ulong)li->base_virt;
+ kvm->arch.hpt_li = li;
+ } else {
+ /* using dynamic memory */
+ hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
+ __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
+ }
- hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
- HPT_ORDER - PAGE_SHIFT);
if (!hpt) {
pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
return -ENOMEM;
}
kvm->arch.hpt_virt = hpt;
+ /* Allocate reverse map array */
+ rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
+ if (!rev) {
+ pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
+ goto out_freehpt;
+ }
+ kvm->arch.revmap = rev;
+
+ /* Allocate the guest's logical partition ID */
do {
lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
if (lpid >= NR_LPIDS) {
pr_err("kvm_alloc_hpt: No LPIDs free\n");
- free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
- return -ENOMEM;
+ goto out_freeboth;
}
} while (test_and_set_bit(lpid, lpid_inuse));
@@ -74,37 +86,64 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
return 0;
+
+ out_freeboth:
+ vfree(rev);
+ out_freehpt:
+ free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+ return -ENOMEM;
}
void kvmppc_free_hpt(struct kvm *kvm)
{
clear_bit(kvm->arch.lpid, lpid_inuse);
- free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+ vfree(kvm->arch.revmap);
+ if (kvm->arch.hpt_li)
+ kvm_release_hpt(kvm->arch.hpt_li);
+ else
+ free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+}
+
+/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
+static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
+{
+ return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
+}
+
+/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
+static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
+{
+ return (pgsize == 0x10000) ? 0x1000 : 0;
}
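/*
 * For the three page sizes the VRMA code below cares about, the two
 * helpers above combine as follows (read straight off the code):
 *
 *   psize            hpte0_pgsize_encoding()   hpte1_pgsize_encoding()
 *   0x1000    (4k)   0                         0
 *   0x10000   (64k)  HPTE_V_LARGE              0x1000
 *   0x1000000 (16M)  HPTE_V_LARGE              0
 */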
-void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
+void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
+ unsigned long porder)
{
unsigned long i;
- unsigned long npages = kvm->arch.ram_npages;
- unsigned long pfn;
- unsigned long *hpte;
- unsigned long hash;
- struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;
+ unsigned long npages;
+ unsigned long hp_v, hp_r;
+ unsigned long addr, hash;
+ unsigned long psize;
+ unsigned long hp0, hp1;
+ long ret;
- if (!pginfo)
- return;
+ psize = 1ul << porder;
+ npages = memslot->npages >> (porder - PAGE_SHIFT);
/* VRMA can't be > 1TB */
- if (npages > 1ul << (40 - kvm->arch.ram_porder))
- npages = 1ul << (40 - kvm->arch.ram_porder);
+ if (npages > 1ul << (40 - porder))
+ npages = 1ul << (40 - porder);
/* Can't use more than 1 HPTE per HPTEG */
if (npages > HPT_NPTEG)
npages = HPT_NPTEG;
+ hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
+ HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
+ hp1 = hpte1_pgsize_encoding(psize) |
+ HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;
+
for (i = 0; i < npages; ++i) {
- pfn = pginfo[i].pfn;
- if (!pfn)
- break;
+ addr = i << porder;
/* can't use hpt_hash since va > 64 bits */
hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
/*
@@ -113,15 +152,15 @@ void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
* at most one HPTE per HPTEG, we just assume entry 7
* is available and use it.
*/
- hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
- hpte += 7 * 2;
- /* HPTE low word - RPN, protection, etc. */
- hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
- HPTE_R_M | PP_RWXX;
- wmb();
- hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
- (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
- HPTE_V_LARGE | HPTE_V_VALID;
+ hash = (hash << 3) + 7;
+ hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
+ hp_r = hp1 | addr;
+ ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
+ if (ret != H_SUCCESS) {
+ pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
+ addr, ret);
+ break;
+ }
}
}
@@ -158,10 +197,814 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}
+/*
+ * This is called to get a reference to a guest page if there isn't
+ * one already in the kvm->arch.slot_phys[][] arrays.
+ */
+static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
+ struct kvm_memory_slot *memslot,
+ unsigned long psize)
+{
+ unsigned long start;
+ long np, err;
+ struct page *page, *hpage, *pages[1];
+ unsigned long s, pgsize;
+ unsigned long *physp;
+ unsigned int is_io, got, pgorder;
+ struct vm_area_struct *vma;
+ unsigned long pfn, i, npages;
+
+ physp = kvm->arch.slot_phys[memslot->id];
+ if (!physp)
+ return -EINVAL;
+ if (physp[gfn - memslot->base_gfn])
+ return 0;
+
+ is_io = 0;
+ got = 0;
+ page = NULL;
+ pgsize = psize;
+ err = -EINVAL;
+ start = gfn_to_hva_memslot(memslot, gfn);
+
+ /* Instantiate and get the page we want access to */
+ np = get_user_pages_fast(start, 1, 1, pages);
+ if (np != 1) {
+ /* Look up the vma for the page */
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, start);
+ if (!vma || vma->vm_start > start ||
+ start + psize > vma->vm_end ||
+ !(vma->vm_flags & VM_PFNMAP))
+ goto up_err;
+ is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
+ pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+ /* check alignment of pfn vs. requested page size */
+ if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
+ goto up_err;
+ up_read(&current->mm->mmap_sem);
+
+ } else {
+ page = pages[0];
+ got = KVMPPC_GOT_PAGE;
+
+ /* See if this is a large page */
+ s = PAGE_SIZE;
+ if (PageHuge(page)) {
+ hpage = compound_head(page);
+ s <<= compound_order(hpage);
+ /* Get the whole large page if slot alignment is ok */
+ if (s > psize && slot_is_aligned(memslot, s) &&
+ !(memslot->userspace_addr & (s - 1))) {
+ start &= ~(s - 1);
+ pgsize = s;
+ page = hpage;
+ }
+ }
+ if (s < psize)
+ goto out;
+ pfn = page_to_pfn(page);
+ }
+
+ npages = pgsize >> PAGE_SHIFT;
+ pgorder = __ilog2(npages);
+ physp += (gfn - memslot->base_gfn) & ~(npages - 1);
+ spin_lock(&kvm->arch.slot_phys_lock);
+ for (i = 0; i < npages; ++i) {
+ if (!physp[i]) {
+ physp[i] = ((pfn + i) << PAGE_SHIFT) +
+ got + is_io + pgorder;
+ got = 0;
+ }
+ }
+ spin_unlock(&kvm->arch.slot_phys_lock);
+ err = 0;
+
+ out:
+ if (got) {
+ if (PageHuge(page))
+ page = compound_head(page);
+ put_page(page);
+ }
+ return err;
+
+ up_err:
+ up_read(&current->mm->mmap_sem);
+ return err;
+}
+
+/*
+ * We come here on a H_ENTER call from the guest when we are not
+ * using mmu notifiers and we don't have the requested page pinned
+ * already.
+ */
+long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long psize, gpa, gfn;
+ struct kvm_memory_slot *memslot;
+ long ret;
+
+ if (kvm->arch.using_mmu_notifiers)
+ goto do_insert;
+
+ psize = hpte_page_size(pteh, ptel);
+ if (!psize)
+ return H_PARAMETER;
+
+ pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+
+ /* Find the memslot (if any) for this address */
+ gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
+ gfn = gpa >> PAGE_SHIFT;
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
+ if (!slot_is_aligned(memslot, psize))
+ return H_PARAMETER;
+ if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
+ return H_PARAMETER;
+ }
+
+ do_insert:
+ /* Protect linux PTE lookup from page table destruction */
+ rcu_read_lock_sched(); /* this disables preemption too */
+ vcpu->arch.pgdir = current->mm->pgd;
+ ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
+ rcu_read_unlock_sched();
+ if (ret == H_TOO_HARD) {
+ /* this can't happen */
+ pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
+ ret = H_RESOURCE; /* or something */
+ }
+ return ret;
+
+}
+
+static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
+ gva_t eaddr)
+{
+ u64 mask;
+ int i;
+
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
+ continue;
+
+ if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
+ mask = ESID_MASK_1T;
+ else
+ mask = ESID_MASK;
+
+ if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
+ return &vcpu->arch.slb[i];
+ }
+ return NULL;
+}
+
+static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
+ unsigned long ea)
+{
+ unsigned long ra_mask;
+
+ ra_mask = hpte_page_size(v, r) - 1;
+ return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
+}
+
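/*
 * The composition above is plain masking. As a worked example, with a 64k
 * page (ra_mask = 0xffff), ea = 0x12345678 and an HPTE whose RPN bits
 * resolve to real page 0xdead0000, the result is 0xdead5678.
 */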
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *gpte, bool data)
+ struct kvmppc_pte *gpte, bool data)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_slb *slbe;
+ unsigned long slb_v;
+ unsigned long pp, key;
+ unsigned long v, gr;
+ unsigned long *hptep;
+ int index;
+ int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+
+ /* Get SLB entry */
+ if (virtmode) {
+ slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
+ if (!slbe)
+ return -EINVAL;
+ slb_v = slbe->origv;
+ } else {
+ /* real mode access */
+ slb_v = vcpu->kvm->arch.vrma_slb_v;
+ }
+
+ /* Find the HPTE in the hash table */
+ index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
+ HPTE_V_VALID | HPTE_V_ABSENT);
+ if (index < 0)
+ return -ENOENT;
+ hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+ v = hptep[0] & ~HPTE_V_HVLOCK;
+ gr = kvm->arch.revmap[index].guest_rpte;
+
+ /* Unlock the HPTE */
+ asm volatile("lwsync" : : : "memory");
+ hptep[0] = v;
+
+ gpte->eaddr = eaddr;
+ gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
+
+ /* Get PP bits and key for permission check */
+ pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
+ key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+ key &= slb_v;
+
+ /* Calculate permissions */
+ gpte->may_read = hpte_read_permission(pp, key);
+ gpte->may_write = hpte_write_permission(pp, key);
+ gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
+
+ /* Storage key permission check for POWER7 */
+ if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
+ int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
+ if (amrfield & 1)
+ gpte->may_read = 0;
+ if (amrfield & 2)
+ gpte->may_write = 0;
+ }
+
+ /* Get the guest physical address */
+ gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
+ return 0;
+}
+
+/*
+ * Quick test for whether an instruction is a load or a store.
+ * If the instruction is a load or a store, then this will indicate
+ * which it is, at least on server processors. (Embedded processors
+ * have some external PID instructions that don't follow the rule
+ * embodied here.) If the instruction isn't a load or store, then
+ * this doesn't return anything useful.
+ */
+static int instruction_is_store(unsigned int instr)
+{
+ unsigned int mask;
+
+ mask = 0x10000000;
+ if ((instr & 0xfc000000) == 0x7c000000)
+ mask = 0x100; /* major opcode 31 */
+ return (instr & mask) != 0;
+}
+
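/*
 * A standalone sketch of the decode rule above, checked against two easy
 * D-form encodings (lwz r3,0(r4) and stw r3,0(r4)); the helper is restated
 * so the snippet compiles on its own.
 */
#include <stdio.h>

static int example_is_store(unsigned int instr)
{
	unsigned int mask = 0x10000000;

	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;	/* major opcode 31: X-form loads/stores */
	return (instr & mask) != 0;
}

int main(void)
{
	printf("%d\n", example_is_store(0x80640000)); /* lwz r3,0(r4) -> 0 */
	printf("%d\n", example_is_store(0x90640000)); /* stw r3,0(r4) -> 1 */
	return 0;
}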
+static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned long gpa, int is_store)
+{
+ int ret;
+ u32 last_inst;
+ unsigned long srr0 = kvmppc_get_pc(vcpu);
+
+ /* We try to load the last instruction. We don't let
+ * emulate_instruction do it as it doesn't check what
+ * kvmppc_ld returns.
+ * If we fail, we just return to the guest and try executing it again.
+ */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
+ ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+ if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
+ return RESUME_GUEST;
+ vcpu->arch.last_inst = last_inst;
+ }
+
+ /*
+ * WARNING: We do not know for sure whether the instruction we just
+ * read from memory is the same that caused the fault in the first
+ * place. If the instruction we read is neither a load nor a store,
+ * then it can't access memory, so we don't need to worry about
+ * enforcing access permissions. So, assuming it is a load or
+ * store, we just check that its direction (load or store) is
+ * consistent with the original fault, since that's what we
+ * checked the access permissions against. If there is a mismatch
+ * we just return and retry the instruction.
+ */
+
+ if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
+ return RESUME_GUEST;
+
+ /*
+ * Emulated accesses are emulated by looking at the hash for
+ * translation once, then performing the access later. The
+ * translation could be invalidated in the meantime, at which
+ * point performing the subsequent memory access on the old
+ * physical address could possibly be a security hole for the
+ * guest (but not the host).
+ *
+ * This is less of an issue for MMIO stores since they aren't
+ * globally visible. It could be an issue for MMIO loads to
+ * a certain extent but we'll ignore it for now.
+ */
+
+ vcpu->arch.paddr_accessed = gpa;
+ return kvmppc_emulate_mmio(run, vcpu);
+}
+
+int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned long ea, unsigned long dsisr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hptep, hpte[3], r;
+ unsigned long mmu_seq, psize, pte_size;
+ unsigned long gfn, hva, pfn;
+ struct kvm_memory_slot *memslot;
+ unsigned long *rmap;
+ struct revmap_entry *rev;
+ struct page *page, *pages[1];
+ long index, ret, npages;
+ unsigned long is_io;
+ unsigned int writing, write_ok;
+ struct vm_area_struct *vma;
+ unsigned long rcbits;
+
+ /*
+ * Real-mode code has already searched the HPT and found the
+ * entry we're interested in. Lock the entry and check that
+ * it hasn't changed. If it has, just return and re-execute the
+ * instruction.
+ */
+ if (ea != vcpu->arch.pgfault_addr)
+ return RESUME_GUEST;
+ index = vcpu->arch.pgfault_index;
+ hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+ rev = &kvm->arch.revmap[index];
+ preempt_disable();
+ while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
+ cpu_relax();
+ hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
+ hpte[1] = hptep[1];
+ hpte[2] = r = rev->guest_rpte;
+ asm volatile("lwsync" : : : "memory");
+ hptep[0] = hpte[0];
+ preempt_enable();
+
+ if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
+ hpte[1] != vcpu->arch.pgfault_hpte[1])
+ return RESUME_GUEST;
+
+ /* Translate the logical address and get the page */
+ psize = hpte_page_size(hpte[0], r);
+ gfn = hpte_rpn(r, psize);
+ memslot = gfn_to_memslot(kvm, gfn);
+
+ /* No memslot means it's an emulated MMIO region */
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
+ unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
+ dsisr & DSISR_ISSTORE);
+ }
+
+ if (!kvm->arch.using_mmu_notifiers)
+ return -EFAULT; /* should never get here */
+
+ /* used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ is_io = 0;
+ pfn = 0;
+ page = NULL;
+ pte_size = PAGE_SIZE;
+ writing = (dsisr & DSISR_ISSTORE) != 0;
+ /* If writing != 0, then the HPTE must allow writing, if we get here */
+ write_ok = writing;
+ hva = gfn_to_hva_memslot(memslot, gfn);
+ npages = get_user_pages_fast(hva, 1, writing, pages);
+ if (npages < 1) {
+ /* Check if it's an I/O mapping */
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, hva);
+ if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
+ (vma->vm_flags & VM_PFNMAP)) {
+ pfn = vma->vm_pgoff +
+ ((hva - vma->vm_start) >> PAGE_SHIFT);
+ pte_size = psize;
+ is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
+ write_ok = vma->vm_flags & VM_WRITE;
+ }
+ up_read(&current->mm->mmap_sem);
+ if (!pfn)
+ return -EFAULT;
+ } else {
+ page = pages[0];
+ if (PageHuge(page)) {
+ page = compound_head(page);
+ pte_size <<= compound_order(page);
+ }
+ /* if the guest wants write access, see if that is OK */
+ if (!writing && hpte_is_writable(r)) {
+ pte_t *ptep, pte;
+
+ /*
+ * We need to protect against page table destruction
+ * while looking up and updating the pte.
+ */
+ rcu_read_lock_sched();
+ ptep = find_linux_pte_or_hugepte(current->mm->pgd,
+ hva, NULL);
+ if (ptep && pte_present(*ptep)) {
+ pte = kvmppc_read_update_linux_pte(ptep, 1);
+ if (pte_write(pte))
+ write_ok = 1;
+ }
+ rcu_read_unlock_sched();
+ }
+ pfn = page_to_pfn(page);
+ }
+
+ ret = -EFAULT;
+ if (psize > pte_size)
+ goto out_put;
+
+ /* Check WIMG vs. the actual page we're accessing */
+ if (!hpte_cache_flags_ok(r, is_io)) {
+ if (is_io)
+ return -EFAULT;
+ /*
+ * Allow guest to map emulated device memory as
+ * uncacheable, but actually make it cacheable.
+ */
+ r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
+ }
+
+ /* Set the HPTE to point to pfn */
+ r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+ if (hpte_is_writable(r) && !write_ok)
+ r = hpte_make_readonly(r);
+ ret = RESUME_GUEST;
+ preempt_disable();
+ while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
+ cpu_relax();
+ if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
+ rev->guest_rpte != hpte[2])
+ /* HPTE has been changed under us; let the guest retry */
+ goto out_unlock;
+ hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
+
+ rmap = &memslot->rmap[gfn - memslot->base_gfn];
+ lock_rmap(rmap);
+
+ /* Check if we might have been invalidated; let the guest retry if so */
+ ret = RESUME_GUEST;
+ if (mmu_notifier_retry(vcpu, mmu_seq)) {
+ unlock_rmap(rmap);
+ goto out_unlock;
+ }
+
+ /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
+ rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
+ r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
+
+ if (hptep[0] & HPTE_V_VALID) {
+ /* HPTE was previously valid, so we need to invalidate it */
+ unlock_rmap(rmap);
+ hptep[0] |= HPTE_V_ABSENT;
+ kvmppc_invalidate_hpte(kvm, hptep, index);
+ /* don't lose previous R and C bits */
+ r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
+ } else {
+ kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
+ }
+
+ hptep[1] = r;
+ eieio();
+ hptep[0] = hpte[0];
+ asm volatile("ptesync" : : : "memory");
+ preempt_enable();
+ if (page && hpte_is_writable(r))
+ SetPageDirty(page);
+
+ out_put:
+ if (page)
+ put_page(page);
+ return ret;
+
+ out_unlock:
+ hptep[0] &= ~HPTE_V_HVLOCK;
+ preempt_enable();
+ goto out_put;
+}
+
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+ int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long gfn))
+{
+ int ret;
+ int retval = 0;
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+
+ slots = kvm_memslots(kvm);
+ kvm_for_each_memslot(memslot, slots) {
+ unsigned long start = memslot->userspace_addr;
+ unsigned long end;
+
+ end = start + (memslot->npages << PAGE_SHIFT);
+ if (hva >= start && hva < end) {
+ gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+
+ ret = handler(kvm, &memslot->rmap[gfn_offset],
+ memslot->base_gfn + gfn_offset);
+ retval |= ret;
+ }
+ }
+
+ return retval;
+}
+
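/*
 * The hva-to-gfn arithmetic above is a straight offset conversion; e.g.
 * assuming 4k pages (PAGE_SHIFT = 12; ppc64 configs may instead use 64k),
 * userspace_addr = 0x3fff80000000 and hva = 0x3fff80042000 give
 * gfn_offset = 0x42000 >> 12 = 0x42, so the handler runs on
 * base_gfn + 0x42.
 */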
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long gfn)
+{
+ struct revmap_entry *rev = kvm->arch.revmap;
+ unsigned long h, i, j;
+ unsigned long *hptep;
+ unsigned long ptel, psize, rcbits;
+
+ for (;;) {
+ lock_rmap(rmapp);
+ if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
+ unlock_rmap(rmapp);
+ break;
+ }
+
+ /*
+ * To avoid an ABBA deadlock with the HPTE lock bit,
+ * we can't spin on the HPTE lock while holding the
+ * rmap chain lock.
+ */
+ i = *rmapp & KVMPPC_RMAP_INDEX;
+ hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+ if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+ /* unlock rmap before spinning on the HPTE lock */
+ unlock_rmap(rmapp);
+ while (hptep[0] & HPTE_V_HVLOCK)
+ cpu_relax();
+ continue;
+ }
+ j = rev[i].forw;
+ if (j == i) {
+ /* chain is now empty */
+ *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
+ } else {
+ /* remove i from chain */
+ h = rev[i].back;
+ rev[h].forw = j;
+ rev[j].back = h;
+ rev[i].forw = rev[i].back = i;
+ *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
+ }
+
+ /* Now check and modify the HPTE */
+ ptel = rev[i].guest_rpte;
+ psize = hpte_page_size(hptep[0], ptel);
+ if ((hptep[0] & HPTE_V_VALID) &&
+ hpte_rpn(ptel, psize) == gfn) {
+ hptep[0] |= HPTE_V_ABSENT;
+ kvmppc_invalidate_hpte(kvm, hptep, i);
+ /* Harvest R and C */
+ rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
+ *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
+ rev[i].guest_rpte = ptel | rcbits;
+ }
+ unlock_rmap(rmapp);
+ hptep[0] &= ~HPTE_V_HVLOCK;
+ }
+ return 0;
+}
+
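/*
 * The rmap chain walked above is a circular doubly linked list threaded
 * through the reverse-map array by HPTE index. A minimal standalone sketch
 * of the unlink step (mirroring the code, with a local stand-in type):
 */
struct example_rev {
	unsigned long forw, back;
};

/* Detach entry i from its chain; returns i's old successor. */
static unsigned long example_rmap_unlink(struct example_rev *rev,
					 unsigned long i)
{
	unsigned long j = rev[i].forw;
	unsigned long h = rev[i].back;

	rev[h].forw = j;
	rev[j].back = h;
	rev[i].forw = rev[i].back = i;	/* self-linked: not on any chain */
	return j;
}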
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ if (kvm->arch.using_mmu_notifiers)
+ kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+ return 0;
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long gfn)
+{
+ struct revmap_entry *rev = kvm->arch.revmap;
+ unsigned long head, i, j;
+ unsigned long *hptep;
+ int ret = 0;
+
+ retry:
+ lock_rmap(rmapp);
+ if (*rmapp & KVMPPC_RMAP_REFERENCED) {
+ *rmapp &= ~KVMPPC_RMAP_REFERENCED;
+ ret = 1;
+ }
+ if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
+ unlock_rmap(rmapp);
+ return ret;
+ }
+
+ i = head = *rmapp & KVMPPC_RMAP_INDEX;
+ do {
+ hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+ j = rev[i].forw;
+
+ /* If this HPTE isn't referenced, ignore it */
+ if (!(hptep[1] & HPTE_R_R))
+ continue;
+
+ if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+ /* unlock rmap before spinning on the HPTE lock */
+ unlock_rmap(rmapp);
+ while (hptep[0] & HPTE_V_HVLOCK)
+ cpu_relax();
+ goto retry;
+ }
+
+ /* Now check and modify the HPTE */
+ if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
+ kvmppc_clear_ref_hpte(kvm, hptep, i);
+ rev[i].guest_rpte |= HPTE_R_R;
+ ret = 1;
+ }
+ hptep[0] &= ~HPTE_V_HVLOCK;
+ } while ((i = j) != head);
+
+ unlock_rmap(rmapp);
+ return ret;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ if (!kvm->arch.using_mmu_notifiers)
+ return 0;
+ return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+}
+
+static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long gfn)
+{
+ struct revmap_entry *rev = kvm->arch.revmap;
+ unsigned long head, i, j;
+ unsigned long *hp;
+ int ret = 1;
+
+ if (*rmapp & KVMPPC_RMAP_REFERENCED)
+ return 1;
+
+ lock_rmap(rmapp);
+ if (*rmapp & KVMPPC_RMAP_REFERENCED)
+ goto out;
+
+ if (*rmapp & KVMPPC_RMAP_PRESENT) {
+ i = head = *rmapp & KVMPPC_RMAP_INDEX;
+ do {
+ hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
+ j = rev[i].forw;
+ if (hp[1] & HPTE_R_R)
+ goto out;
+ } while ((i = j) != head);
+ }
+ ret = 0;
+
+ out:
+ unlock_rmap(rmapp);
+ return ret;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ if (!kvm->arch.using_mmu_notifiers)
+ return 0;
+ return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
- return -ENOENT;
+ if (!kvm->arch.using_mmu_notifiers)
+ return;
+ kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+}
+
+static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+ struct revmap_entry *rev = kvm->arch.revmap;
+ unsigned long head, i, j;
+ unsigned long *hptep;
+ int ret = 0;
+
+ retry:
+ lock_rmap(rmapp);
+ if (*rmapp & KVMPPC_RMAP_CHANGED) {
+ *rmapp &= ~KVMPPC_RMAP_CHANGED;
+ ret = 1;
+ }
+ if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
+ unlock_rmap(rmapp);
+ return ret;
+ }
+
+ i = head = *rmapp & KVMPPC_RMAP_INDEX;
+ do {
+ hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+ j = rev[i].forw;
+
+ if (!(hptep[1] & HPTE_R_C))
+ continue;
+
+ if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+ /* unlock rmap before spinning on the HPTE lock */
+ unlock_rmap(rmapp);
+ while (hptep[0] & HPTE_V_HVLOCK)
+ cpu_relax();
+ goto retry;
+ }
+
+ /* Now check and modify the HPTE */
+ if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
+ /* need to make it temporarily absent to clear C */
+ hptep[0] |= HPTE_V_ABSENT;
+ kvmppc_invalidate_hpte(kvm, hptep, i);
+ hptep[1] &= ~HPTE_R_C;
+ eieio();
+ hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
+ rev[i].guest_rpte |= HPTE_R_C;
+ ret = 1;
+ }
+ hptep[0] &= ~HPTE_V_HVLOCK;
+ } while ((i = j) != head);
+
+ unlock_rmap(rmapp);
+ return ret;
+}
+
+long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+ unsigned long i;
+ unsigned long *rmapp, *map;
+
+ preempt_disable();
+ rmapp = memslot->rmap;
+ map = memslot->dirty_bitmap;
+ for (i = 0; i < memslot->npages; ++i) {
+ if (kvm_test_clear_dirty(kvm, rmapp))
+ __set_bit_le(i, map);
+ ++rmapp;
+ }
+ preempt_enable();
+ return 0;
+}
+
+void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
+ unsigned long *nb_ret)
+{
+ struct kvm_memory_slot *memslot;
+ unsigned long gfn = gpa >> PAGE_SHIFT;
+ struct page *page, *pages[1];
+ int npages;
+ unsigned long hva, psize, offset;
+ unsigned long pa;
+ unsigned long *physp;
+
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+ return NULL;
+ if (!kvm->arch.using_mmu_notifiers) {
+ physp = kvm->arch.slot_phys[memslot->id];
+ if (!physp)
+ return NULL;
+ physp += gfn - memslot->base_gfn;
+ pa = *physp;
+ if (!pa) {
+ if (kvmppc_get_guest_page(kvm, gfn, memslot,
+ PAGE_SIZE) < 0)
+ return NULL;
+ pa = *physp;
+ }
+ page = pfn_to_page(pa >> PAGE_SHIFT);
+ } else {
+ hva = gfn_to_hva_memslot(memslot, gfn);
+ npages = get_user_pages_fast(hva, 1, 1, pages);
+ if (npages < 1)
+ return NULL;
+ page = pages[0];
+ }
+ psize = PAGE_SIZE;
+ if (PageHuge(page)) {
+ page = compound_head(page);
+ psize <<= compound_order(page);
+ }
+ if (!kvm->arch.using_mmu_notifiers)
+ get_page(page);
+ offset = gpa & (psize - 1);
+ if (nb_ret)
+ *nb_ret = psize - offset;
+ return page_address(page) + offset;
+}
+
+void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
+{
+ struct page *page = virt_to_page(va);
+
+ page = compound_head(page);
+ put_page(page);
}
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 0c9dc62532d0..f1950d131827 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -230,9 +230,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_st(vcpu, &addr, 32, zeros, true);
if ((r == -ENOENT) || (r == -EPERM)) {
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
+
+ svcpu = svcpu_get(vcpu);
*advance = 0;
vcpu->arch.shared->dar = vaddr;
- to_svcpu(vcpu)->fault_dar = vaddr;
+ svcpu->fault_dar = vaddr;
dsisr = DSISR_ISSTORE;
if (r == -ENOENT)
@@ -241,7 +244,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->dsisr = dsisr;
- to_svcpu(vcpu)->fault_dsisr = dsisr;
+ svcpu->fault_dsisr = dsisr;
+ svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_STORAGE);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 336983da9e72..d386b6198bc7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -46,25 +46,16 @@
#include <asm/page.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
-#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
-
-/*
- * For now, limit memory to 64GB and require it to be large pages.
- * This value is chosen because it makes the ram_pginfo array be
- * 64kB in size, which is about as large as we want to be trying
- * to allocate with kmalloc.
- */
-#define MAX_MEM_ORDER 36
-
-#define LARGE_PAGE_ORDER 24 /* 16MB pages */
+#include <linux/hugetlb.h>
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
+static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
@@ -147,10 +138,10 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
unsigned long vcpuid, unsigned long vpa)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long pg_index, ra, len;
- unsigned long pg_offset;
+ unsigned long len, nb;
void *va;
struct kvm_vcpu *tvcpu;
+ int err = H_PARAMETER;
tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
if (!tvcpu)
@@ -163,45 +154,41 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
if (flags < 4) {
if (vpa & 0x7f)
return H_PARAMETER;
+ if (flags >= 2 && !tvcpu->arch.vpa)
+ return H_RESOURCE;
/* registering new area; convert logical addr to real */
- pg_index = vpa >> kvm->arch.ram_porder;
- pg_offset = vpa & (kvm->arch.ram_psize - 1);
- if (pg_index >= kvm->arch.ram_npages)
- return H_PARAMETER;
- if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
+ va = kvmppc_pin_guest_page(kvm, vpa, &nb);
+ if (va == NULL)
return H_PARAMETER;
- ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
- ra |= pg_offset;
- va = __va(ra);
if (flags <= 1)
len = *(unsigned short *)(va + 4);
else
len = *(unsigned int *)(va + 4);
- if (pg_offset + len > kvm->arch.ram_psize)
- return H_PARAMETER;
+ if (len > nb)
+ goto out_unpin;
switch (flags) {
case 1: /* register VPA */
if (len < 640)
- return H_PARAMETER;
+ goto out_unpin;
+ if (tvcpu->arch.vpa)
+ kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
tvcpu->arch.vpa = va;
init_vpa(vcpu, va);
break;
case 2: /* register DTL */
if (len < 48)
- return H_PARAMETER;
- if (!tvcpu->arch.vpa)
- return H_RESOURCE;
+ goto out_unpin;
len -= len % 48;
+ if (tvcpu->arch.dtl)
+ kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
tvcpu->arch.dtl = va;
tvcpu->arch.dtl_end = va + len;
break;
case 3: /* register SLB shadow buffer */
- if (len < 8)
- return H_PARAMETER;
- if (!tvcpu->arch.vpa)
- return H_RESOURCE;
- tvcpu->arch.slb_shadow = va;
- len = (len - 16) / 16;
+ if (len < 16)
+ goto out_unpin;
+ if (tvcpu->arch.slb_shadow)
+ kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
tvcpu->arch.slb_shadow = va;
break;
}
@@ -210,17 +197,30 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
case 5: /* unregister VPA */
if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
return H_RESOURCE;
+ if (!tvcpu->arch.vpa)
+ break;
+ kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
tvcpu->arch.vpa = NULL;
break;
case 6: /* unregister DTL */
+ if (!tvcpu->arch.dtl)
+ break;
+ kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
tvcpu->arch.dtl = NULL;
break;
case 7: /* unregister SLB shadow buffer */
+ if (!tvcpu->arch.slb_shadow)
+ break;
+ kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
tvcpu->arch.slb_shadow = NULL;
break;
}
}
return H_SUCCESS;
+
+ out_unpin:
+ kvmppc_unpin_guest_page(kvm, va);
+ return err;
}
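/*
 * Summary of the H_REGISTER_VPA flag values as handled above (lengths are
 * the minimums enforced by this code):
 *
 *   flags  action                        constraint
 *   1      register VPA                  len >= 640
 *   2      register DTL                  len >= 48, VPA already registered
 *   3      register SLB shadow buffer    len >= 16, VPA already registered
 *   5      unregister VPA                DTL and SLB shadow must be gone
 *   6      unregister DTL
 *   7      unregister SLB shadow buffer
 */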
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
@@ -230,6 +230,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
struct kvm_vcpu *tvcpu;
switch (req) {
+ case H_ENTER:
+ ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7));
+ break;
case H_CEDE:
break;
case H_PROD:
@@ -319,20 +325,19 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
/*
- * We get these next two if the guest does a bad real-mode access,
- * as we have enabled VRMA (virtualized real mode area) mode in the
- * LPCR. We just generate an appropriate DSI/ISI to the guest.
+ * We get these next two if the guest accesses a page which it thinks
+ * it has mapped but which is not actually present, either because
+ * it is for an emulated I/O device or because the corresponding
+ * host page has been paged out. Any other HDSI/HISI interrupts
+ * have been handled already.
*/
case BOOK3S_INTERRUPT_H_DATA_STORAGE:
- vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
- vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
- kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
- r = RESUME_GUEST;
+ r = kvmppc_book3s_hv_page_fault(run, vcpu,
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
- kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
- 0x08000000);
- r = RESUME_GUEST;
+ r = kvmppc_book3s_hv_page_fault(run, vcpu,
+ kvmppc_get_pc(vcpu), 0);
break;
/*
* This occurs if the guest executes an illegal instruction.
@@ -392,6 +397,42 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return 0;
}
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_PPC_HIOR:
+ r = put_user(0, (u64 __user *)reg->addr);
+ break;
+ default:
+ break;
+ }
+
+ return r;
+}
+
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_PPC_HIOR:
+ {
+ u64 hior;
+ /* Only allow this to be set to zero */
+ r = get_user(hior, (u64 __user *)reg->addr);
+ if (!r && (hior != 0))
+ r = -EINVAL;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return r;
+}
+
int kvmppc_core_check_processor_compat(void)
{
if (cpu_has_feature(CPU_FTR_HVMODE))
@@ -411,7 +452,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
goto out;
err = -ENOMEM;
- vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu)
goto out;
@@ -463,15 +504,21 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
return vcpu;
free_vcpu:
- kfree(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
+ if (vcpu->arch.dtl)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
+ if (vcpu->arch.slb_shadow)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
+ if (vcpu->arch.vpa)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
kvm_vcpu_uninit(vcpu);
- kfree(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
@@ -482,7 +529,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
if (now > vcpu->arch.dec_expires) {
/* decrementer has already gone negative */
kvmppc_core_queue_dec(vcpu);
- kvmppc_core_deliver_interrupts(vcpu);
+ kvmppc_core_prepare_to_enter(vcpu);
return;
}
dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
@@ -797,7 +844,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
list_for_each_entry_safe(v, vn, &vc->runnable_threads,
arch.run_list) {
- kvmppc_core_deliver_interrupts(v);
+ kvmppc_core_prepare_to_enter(v);
if (signal_pending(v->arch.run_task)) {
kvmppc_remove_runnable(vc, v);
v->stat.signal_exits++;
@@ -836,20 +883,26 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
return -EINVAL;
}
+ kvmppc_core_prepare_to_enter(vcpu);
+
/* No need to go into the guest when all we'll do is come back out */
if (signal_pending(current)) {
run->exit_reason = KVM_EXIT_INTR;
return -EINTR;
}
- /* On PPC970, check that we have an RMA region */
- if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
- return -EPERM;
+ /* On the first time here, set up VRMA or RMA */
+ if (!vcpu->kvm->arch.rma_setup_done) {
+ r = kvmppc_hv_setup_rma(vcpu);
+ if (r)
+ return r;
+ }
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
flush_vsx_to_thread(current);
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
+ vcpu->arch.pgdir = current->mm->pgd;
do {
r = kvmppc_run_vcpu(run, vcpu);
@@ -857,7 +910,7 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
r = kvmppc_pseries_do_hcall(vcpu);
- kvmppc_core_deliver_interrupts(vcpu);
+ kvmppc_core_prepare_to_enter(vcpu);
}
} while (r == RESUME_GUEST);
return r;
@@ -1001,7 +1054,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct kvmppc_rma_info *ri = vma->vm_file->private_data;
+ struct kvmppc_linear_info *ri = vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff >= ri->npages)
@@ -1026,7 +1079,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
static int kvm_rma_release(struct inode *inode, struct file *filp)
{
- struct kvmppc_rma_info *ri = filp->private_data;
+ struct kvmppc_linear_info *ri = filp->private_data;
kvm_release_rma(ri);
return 0;
@@ -1039,7 +1092,7 @@ static struct file_operations kvm_rma_fops = {
long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
- struct kvmppc_rma_info *ri;
+ struct kvmppc_linear_info *ri;
long fd;
ri = kvm_alloc_rma();
@@ -1054,89 +1107,189 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
return fd;
}
-static struct page *hva_to_page(unsigned long addr)
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
- struct page *page[1];
- int npages;
+ struct kvm_memory_slot *memslot;
+ int r;
+ unsigned long n;
- might_sleep();
+ mutex_lock(&kvm->slots_lock);
- npages = get_user_pages_fast(addr, 1, 1, page);
+ r = -EINVAL;
+ if (log->slot >= KVM_MEMORY_SLOTS)
+ goto out;
- if (unlikely(npages != 1))
- return 0;
+ memslot = id_to_memslot(kvm->memslots, log->slot);
+ r = -ENOENT;
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+
+ r = kvmppc_hv_get_dirty_log(kvm, memslot);
+ if (r)
+ goto out;
+
+ r = -EFAULT;
+ if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
+ goto out;
+
+ r = 0;
+out:
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
- return page[0];
+static unsigned long slb_pgsize_encoding(unsigned long psize)
+{
+ unsigned long senc = 0;
+
+ if (psize > 0x1000) {
+ senc = SLB_VSID_L;
+ if (psize == 0x10000)
+ senc |= SLB_VSID_LP_01;
+ }
+ return senc;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
{
- unsigned long psize, porder;
- unsigned long i, npages, totalpages;
- unsigned long pg_ix;
- struct kvmppc_pginfo *pginfo;
- unsigned long hva;
- struct kvmppc_rma_info *ri = NULL;
+ unsigned long npages;
+ unsigned long *phys;
+
+ /* Allocate a slot_phys array */
+ phys = kvm->arch.slot_phys[mem->slot];
+ if (!kvm->arch.using_mmu_notifiers && !phys) {
+ npages = mem->memory_size >> PAGE_SHIFT;
+ phys = vzalloc(npages * sizeof(unsigned long));
+ if (!phys)
+ return -ENOMEM;
+ kvm->arch.slot_phys[mem->slot] = phys;
+ kvm->arch.slot_npages[mem->slot] = npages;
+ }
+
+ return 0;
+}
+
+static void unpin_slot(struct kvm *kvm, int slot_id)
+{
+ unsigned long *physp;
+ unsigned long j, npages, pfn;
struct page *page;
- /* For now, only allow 16MB pages */
- porder = LARGE_PAGE_ORDER;
- psize = 1ul << porder;
- if ((mem->memory_size & (psize - 1)) ||
- (mem->guest_phys_addr & (psize - 1))) {
- pr_err("bad memory_size=%llx @ %llx\n",
- mem->memory_size, mem->guest_phys_addr);
- return -EINVAL;
+ physp = kvm->arch.slot_phys[slot_id];
+ npages = kvm->arch.slot_npages[slot_id];
+ if (physp) {
+ spin_lock(&kvm->arch.slot_phys_lock);
+ for (j = 0; j < npages; j++) {
+ if (!(physp[j] & KVMPPC_GOT_PAGE))
+ continue;
+ pfn = physp[j] >> PAGE_SHIFT;
+ page = pfn_to_page(pfn);
+ if (PageHuge(page))
+ page = compound_head(page);
+ SetPageDirty(page);
+ put_page(page);
+ }
+ kvm->arch.slot_phys[slot_id] = NULL;
+ spin_unlock(&kvm->arch.slot_phys_lock);
+ vfree(physp);
}
+}
- npages = mem->memory_size >> porder;
- totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder;
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+}
- /* More memory than we have space to track? */
- if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER)))
- return -EINVAL;
+static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
+{
+ int err = 0;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_linear_info *ri = NULL;
+ unsigned long hva;
+ struct kvm_memory_slot *memslot;
+ struct vm_area_struct *vma;
+ unsigned long lpcr, senc;
+ unsigned long psize, porder;
+ unsigned long rma_size;
+ unsigned long rmls;
+ unsigned long *physp;
+ unsigned long i, npages;
- /* Do we already have an RMA registered? */
- if (mem->guest_phys_addr == 0 && kvm->arch.rma)
- return -EINVAL;
+ mutex_lock(&kvm->lock);
+ if (kvm->arch.rma_setup_done)
+ goto out; /* another vcpu beat us to it */
+
+ /* Look up the memslot for guest physical address 0 */
+ memslot = gfn_to_memslot(kvm, 0);
+
+ /* We must have some memory at 0 by now */
+ err = -EINVAL;
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+ goto out;
+
+ /* Look up the VMA for the start of this memory slot */
+ hva = memslot->userspace_addr;
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, hva);
+ if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
+ goto up_out;
- if (totalpages > kvm->arch.ram_npages)
- kvm->arch.ram_npages = totalpages;
+ psize = vma_kernel_pagesize(vma);
+ porder = __ilog2(psize);
/* Is this one of our preallocated RMAs? */
- if (mem->guest_phys_addr == 0) {
- struct vm_area_struct *vma;
-
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, mem->userspace_addr);
- if (vma && vma->vm_file &&
- vma->vm_file->f_op == &kvm_rma_fops &&
- mem->userspace_addr == vma->vm_start)
- ri = vma->vm_file->private_data;
- up_read(&current->mm->mmap_sem);
- if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
- pr_err("CPU requires an RMO\n");
- return -EINVAL;
+ if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
+ hva == vma->vm_start)
+ ri = vma->vm_file->private_data;
+
+ up_read(&current->mm->mmap_sem);
+
+ if (!ri) {
+ /* On POWER7, use VRMA; on PPC970, give up */
+ err = -EPERM;
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ pr_err("KVM: CPU requires an RMO\n");
+ goto out;
}
- }
- if (ri) {
- unsigned long rma_size;
- unsigned long lpcr;
- long rmls;
+ /* We can handle 4k, 64k or 16M pages in the VRMA */
+ err = -EINVAL;
+ if (!(psize == 0x1000 || psize == 0x10000 ||
+ psize == 0x1000000))
+ goto out;
+
+ /* Update VRMASD field in the LPCR */
+ senc = slb_pgsize_encoding(psize);
+ kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
+ lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
+ lpcr |= senc << (LPCR_VRMASD_SH - 4);
+ kvm->arch.lpcr = lpcr;
+
+ /* Create HPTEs in the hash page table for the VRMA */
+ kvmppc_map_vrma(vcpu, memslot, porder);
- rma_size = ri->npages << PAGE_SHIFT;
- if (rma_size > mem->memory_size)
- rma_size = mem->memory_size;
+ } else {
+ /* Set up to use an RMO region */
+ rma_size = ri->npages;
+ if (rma_size > memslot->npages)
+ rma_size = memslot->npages;
+ rma_size <<= PAGE_SHIFT;
rmls = lpcr_rmls(rma_size);
+ err = -EINVAL;
if (rmls < 0) {
- pr_err("Can't use RMA of 0x%lx bytes\n", rma_size);
- return -EINVAL;
+ pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
+ goto out;
}
atomic_inc(&ri->use_count);
kvm->arch.rma = ri;
- kvm->arch.n_rma_pages = rma_size >> porder;
/* Update LPCR and RMOR */
lpcr = kvm->arch.lpcr;
@@ -1156,53 +1309,35 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
}
kvm->arch.lpcr = lpcr;
- pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
+ pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
- }
- pg_ix = mem->guest_phys_addr >> porder;
- pginfo = kvm->arch.ram_pginfo + pg_ix;
- for (i = 0; i < npages; ++i, ++pg_ix) {
- if (ri && pg_ix < kvm->arch.n_rma_pages) {
- pginfo[i].pfn = ri->base_pfn +
- (pg_ix << (porder - PAGE_SHIFT));
- continue;
- }
- hva = mem->userspace_addr + (i << porder);
- page = hva_to_page(hva);
- if (!page) {
- pr_err("oops, no pfn for hva %lx\n", hva);
- goto err;
- }
- /* Check it's a 16MB page */
- if (!PageHead(page) ||
- compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) {
- pr_err("page at %lx isn't 16MB (o=%d)\n",
- hva, compound_order(page));
- goto err;
- }
- pginfo[i].pfn = page_to_pfn(page);
+ /* Initialize phys addrs of pages in RMO */
+ npages = ri->npages;
+ porder = __ilog2(npages);
+ physp = kvm->arch.slot_phys[memslot->id];
+ spin_lock(&kvm->arch.slot_phys_lock);
+ for (i = 0; i < npages; ++i)
+ physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
+ spin_unlock(&kvm->arch.slot_phys_lock);
}
- return 0;
-
- err:
- return -EINVAL;
-}
+ /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
+ smp_wmb();
+ kvm->arch.rma_setup_done = 1;
+ err = 0;
+ out:
+ mutex_unlock(&kvm->lock);
+ return err;
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
-{
- if (mem->guest_phys_addr == 0 && mem->memory_size != 0 &&
- !kvm->arch.rma)
- kvmppc_map_vrma(kvm, mem);
+ up_out:
+ up_read(&current->mm->mmap_sem);
+ goto out;
}
int kvmppc_core_init_vm(struct kvm *kvm)
{
long r;
- unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER);
- long err = -ENOMEM;
unsigned long lpcr;
/* Allocate hashed page table */
@@ -1212,19 +1347,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
- kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo),
- GFP_KERNEL);
- if (!kvm->arch.ram_pginfo) {
- pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n",
- npages * sizeof(struct kvmppc_pginfo));
- goto out_free;
- }
-
- kvm->arch.ram_npages = 0;
- kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER;
- kvm->arch.ram_porder = LARGE_PAGE_ORDER;
kvm->arch.rma = NULL;
- kvm->arch.n_rma_pages = 0;
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -1242,30 +1365,25 @@ int kvmppc_core_init_vm(struct kvm *kvm)
kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
lpcr &= LPCR_PECE | LPCR_LPES;
lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
- LPCR_VPM0 | LPCR_VRMA_L;
+ LPCR_VPM0 | LPCR_VPM1;
+ kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
}
kvm->arch.lpcr = lpcr;
+ kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
+ spin_lock_init(&kvm->arch.slot_phys_lock);
return 0;
-
- out_free:
- kvmppc_free_hpt(kvm);
- return err;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
- struct kvmppc_pginfo *pginfo;
unsigned long i;
- if (kvm->arch.ram_pginfo) {
- pginfo = kvm->arch.ram_pginfo;
- kvm->arch.ram_pginfo = NULL;
- for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i)
- if (pginfo[i].pfn)
- put_page(pfn_to_page(pginfo[i].pfn));
- kfree(pginfo);
- }
+ if (!kvm->arch.using_mmu_notifiers)
+ for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
+ unpin_slot(kvm, i);
+
if (kvm->arch.rma) {
kvm_release_rma(kvm->arch.rma);
kvm->arch.rma = NULL;
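
For reference, the slot_phys[] entries written in kvmppc_hv_setup_rma() above pack per-page attributes (a page-order value, and elsewhere the I/W cache bits) into the low, otherwise-zero bits of a page-aligned physical address; kvmppc_h_enter() later recovers them with masks such as KVMPPC_PAGE_ORDER_MASK. A minimal userspace sketch of that pack/unpack idea follows; the mask value and the exact order convention are assumptions here, not taken from the patch.

/* Illustrative only: page-aligned address with attributes in the low bits. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define ORDER_MASK	0x1fUL		/* stand-in for KVMPPC_PAGE_ORDER_MASK */

static unsigned long pack_entry(unsigned long pfn, unsigned long order)
{
	return (pfn << PAGE_SHIFT) | order;	/* low bits are free: address is page aligned */
}

int main(void)
{
	unsigned long e = pack_entry(0x12345, 24 - PAGE_SHIFT);	/* a 16MB page */

	printf("pa   = 0x%lx\n", e & ~((1UL << PAGE_SHIFT) - 1));
	printf("size = 0x%lx\n", (1UL << PAGE_SHIFT) << (e & ORDER_MASK));
	return 0;
}
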
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index a795a13f4a70..bed1279aa6a8 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -18,6 +18,15 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
+#define KVM_LINEAR_RMA 0
+#define KVM_LINEAR_HPT 1
+
+static void __init kvm_linear_init_one(ulong size, int count, int type);
+static struct kvmppc_linear_info *kvm_alloc_linear(int type);
+static void kvm_release_linear(struct kvmppc_linear_info *ri);
+
+/*************** RMA *************/
+
/*
* This maintains a list of RMAs (real mode areas) for KVM guests to use.
* Each RMA has to be physically contiguous and of a size that the
@@ -29,32 +38,6 @@
static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
static unsigned long kvm_rma_count;
-static int __init early_parse_rma_size(char *p)
-{
- if (!p)
- return 1;
-
- kvm_rma_size = memparse(p, &p);
-
- return 0;
-}
-early_param("kvm_rma_size", early_parse_rma_size);
-
-static int __init early_parse_rma_count(char *p)
-{
- if (!p)
- return 1;
-
- kvm_rma_count = simple_strtoul(p, NULL, 0);
-
- return 0;
-}
-early_param("kvm_rma_count", early_parse_rma_count);
-
-static struct kvmppc_rma_info *rma_info;
-static LIST_HEAD(free_rmas);
-static DEFINE_SPINLOCK(rma_lock);
-
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
@@ -81,45 +64,106 @@ static inline int lpcr_rmls(unsigned long rma_size)
}
}
+static int __init early_parse_rma_size(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_rma_size = memparse(p, &p);
+
+ return 0;
+}
+early_param("kvm_rma_size", early_parse_rma_size);
+
+static int __init early_parse_rma_count(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_rma_count = simple_strtoul(p, NULL, 0);
+
+ return 0;
+}
+early_param("kvm_rma_count", early_parse_rma_count);
+
+struct kvmppc_linear_info *kvm_alloc_rma(void)
+{
+ return kvm_alloc_linear(KVM_LINEAR_RMA);
+}
+EXPORT_SYMBOL_GPL(kvm_alloc_rma);
+
+void kvm_release_rma(struct kvmppc_linear_info *ri)
+{
+ kvm_release_linear(ri);
+}
+EXPORT_SYMBOL_GPL(kvm_release_rma);
+
+/*************** HPT *************/
+
/*
- * Called at boot time while the bootmem allocator is active,
- * to allocate contiguous physical memory for the real memory
- * areas for guests.
+ * This maintains a list of big linear HPT tables that contain the GVA->HPA
+ * memory mappings. If we don't reserve those early on, we might not be able
+ * to get a big (usually 16MB) linear memory region from the kernel anymore.
*/
-void __init kvm_rma_init(void)
+
+static unsigned long kvm_hpt_count;
+
+static int __init early_parse_hpt_count(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_hpt_count = simple_strtoul(p, NULL, 0);
+
+ return 0;
+}
+early_param("kvm_hpt_count", early_parse_hpt_count);
+
+struct kvmppc_linear_info *kvm_alloc_hpt(void)
+{
+ return kvm_alloc_linear(KVM_LINEAR_HPT);
+}
+EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
+
+void kvm_release_hpt(struct kvmppc_linear_info *li)
+{
+ kvm_release_linear(li);
+}
+EXPORT_SYMBOL_GPL(kvm_release_hpt);
+
+/*************** generic *************/
+
+static LIST_HEAD(free_linears);
+static DEFINE_SPINLOCK(linear_lock);
+
+static void __init kvm_linear_init_one(ulong size, int count, int type)
{
unsigned long i;
unsigned long j, npages;
- void *rma;
+ void *linear;
struct page *pg;
+ const char *typestr;
+ struct kvmppc_linear_info *linear_info;
- /* Only do this on PPC970 in HV mode */
- if (!cpu_has_feature(CPU_FTR_HVMODE) ||
- !cpu_has_feature(CPU_FTR_ARCH_201))
- return;
-
- if (!kvm_rma_size || !kvm_rma_count)
+ if (!count)
return;
- /* Check that the requested size is one supported in hardware */
- if (lpcr_rmls(kvm_rma_size) < 0) {
- pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
- return;
- }
-
- npages = kvm_rma_size >> PAGE_SHIFT;
- rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info));
- for (i = 0; i < kvm_rma_count; ++i) {
- rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
- pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
- kvm_rma_size >> 20);
- rma_info[i].base_virt = rma;
- rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
- rma_info[i].npages = npages;
- list_add_tail(&rma_info[i].list, &free_rmas);
- atomic_set(&rma_info[i].use_count, 0);
-
- pg = pfn_to_page(rma_info[i].base_pfn);
+ typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";
+
+ npages = size >> PAGE_SHIFT;
+ linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
+ for (i = 0; i < count; ++i) {
+ linear = alloc_bootmem_align(size, size);
+ pr_info("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
+ size >> 20);
+ linear_info[i].base_virt = linear;
+ linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
+ linear_info[i].npages = npages;
+ linear_info[i].type = type;
+ list_add_tail(&linear_info[i].list, &free_linears);
+ atomic_set(&linear_info[i].use_count, 0);
+
+ pg = pfn_to_page(linear_info[i].base_pfn);
for (j = 0; j < npages; ++j) {
atomic_inc(&pg->_count);
++pg;
@@ -127,30 +171,59 @@ void __init kvm_rma_init(void)
}
}
-struct kvmppc_rma_info *kvm_alloc_rma(void)
+static struct kvmppc_linear_info *kvm_alloc_linear(int type)
{
- struct kvmppc_rma_info *ri;
+ struct kvmppc_linear_info *ri;
ri = NULL;
- spin_lock(&rma_lock);
- if (!list_empty(&free_rmas)) {
- ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list);
+ spin_lock(&linear_lock);
+ list_for_each_entry(ri, &free_linears, list) {
+ if (ri->type != type)
+ continue;
+
list_del(&ri->list);
atomic_inc(&ri->use_count);
+ break;
}
- spin_unlock(&rma_lock);
+ spin_unlock(&linear_lock);
+ memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
return ri;
}
-EXPORT_SYMBOL_GPL(kvm_alloc_rma);
-void kvm_release_rma(struct kvmppc_rma_info *ri)
+static void kvm_release_linear(struct kvmppc_linear_info *ri)
{
if (atomic_dec_and_test(&ri->use_count)) {
- spin_lock(&rma_lock);
- list_add_tail(&ri->list, &free_rmas);
- spin_unlock(&rma_lock);
+ spin_lock(&linear_lock);
+ list_add_tail(&ri->list, &free_linears);
+ spin_unlock(&linear_lock);
}
}
-EXPORT_SYMBOL_GPL(kvm_release_rma);
+/*
+ * Called at boot time while the bootmem allocator is active,
+ * to allocate contiguous physical memory for the hash page
+ * tables for guests.
+ */
+void __init kvm_linear_init(void)
+{
+ /* HPT */
+ kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT);
+
+ /* RMA */
+ /* Only do this on PPC970 in HV mode */
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_201))
+ return;
+
+ if (!kvm_rma_size || !kvm_rma_count)
+ return;
+
+ /* Check that the requested size is one supported in hardware */
+ if (lpcr_rmls(kvm_rma_size) < 0) {
+ pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+ return;
+ }
+
+ kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
+}
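
The RMA and HPT preallocations above share a single free list, with entries distinguished by a type tag at allocation time. A rough standalone sketch of that pattern is below; locking and use counting are omitted and all names are illustrative, not the patch's own.

#include <stddef.h>
#include <stdio.h>

enum lin_type { LIN_RMA, LIN_HPT };

struct lin {
	enum lin_type type;
	struct lin *next;
};

static struct lin *free_list;

static void lin_free(struct lin *l)
{
	l->next = free_list;
	free_list = l;
}

static struct lin *lin_alloc(enum lin_type type)
{
	struct lin **p;

	for (p = &free_list; *p; p = &(*p)->next) {
		if ((*p)->type != type)
			continue;
		struct lin *l = *p;
		*p = l->next;			/* unlink the first match */
		return l;
	}
	return NULL;				/* nothing of this type left */
}

int main(void)
{
	static struct lin a = { LIN_RMA }, b = { LIN_HPT };

	lin_free(&a);
	lin_free(&b);
	printf("got HPT: %p\n", (void *)lin_alloc(LIN_HPT));
	printf("got RMA: %p\n", (void *)lin_alloc(LIN_RMA));
	printf("again:   %p\n", (void *)lin_alloc(LIN_RMA));	/* NULL */
	return 0;
}
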
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index bacb0cfa3602..def880aea63a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -11,6 +11,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
+#include <linux/module.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
@@ -20,95 +21,307 @@
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
-/* For now use fixed-size 16MB page table */
-#define HPT_ORDER 24
-#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
-#define HPT_HASH_MASK (HPT_NPTEG - 1)
+/* Translate address of a vmalloc'd thing to a linear map address */
+static void *real_vmalloc_addr(void *x)
+{
+ unsigned long addr = (unsigned long) x;
+ pte_t *p;
-#define HPTE_V_HVLOCK 0x40UL
+ p = find_linux_pte(swapper_pg_dir, addr);
+ if (!p || !pte_present(*p))
+ return NULL;
+ /* assume we don't have huge pages in vmalloc space... */
+ addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
+ return __va(addr);
+}
-static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
+/*
+ * Add this HPTE into the chain for the real page.
+ * Must be called with the chain locked; it unlocks the chain.
+ */
+void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+ unsigned long *rmap, long pte_index, int realmode)
{
- unsigned long tmp, old;
+ struct revmap_entry *head, *tail;
+ unsigned long i;
- asm volatile(" ldarx %0,0,%2\n"
- " and. %1,%0,%3\n"
- " bne 2f\n"
- " ori %0,%0,%4\n"
- " stdcx. %0,0,%2\n"
- " beq+ 2f\n"
- " li %1,%3\n"
- "2: isync"
- : "=&r" (tmp), "=&r" (old)
- : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
- : "cc", "memory");
- return old == 0;
+ if (*rmap & KVMPPC_RMAP_PRESENT) {
+ i = *rmap & KVMPPC_RMAP_INDEX;
+ head = &kvm->arch.revmap[i];
+ if (realmode)
+ head = real_vmalloc_addr(head);
+ tail = &kvm->arch.revmap[head->back];
+ if (realmode)
+ tail = real_vmalloc_addr(tail);
+ rev->forw = i;
+ rev->back = head->back;
+ tail->forw = pte_index;
+ head->back = pte_index;
+ } else {
+ rev->forw = rev->back = pte_index;
+ i = pte_index;
+ }
+ smp_wmb();
+ *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
+}
+EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
+
+/* Remove this HPTE from the chain for a real page */
+static void remove_revmap_chain(struct kvm *kvm, long pte_index,
+ struct revmap_entry *rev,
+ unsigned long hpte_v, unsigned long hpte_r)
+{
+ struct revmap_entry *next, *prev;
+ unsigned long gfn, ptel, head;
+ struct kvm_memory_slot *memslot;
+ unsigned long *rmap;
+ unsigned long rcbits;
+
+ rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
+ ptel = rev->guest_rpte |= rcbits;
+ gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
+ memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+ return;
+
+ rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
+ lock_rmap(rmap);
+
+ head = *rmap & KVMPPC_RMAP_INDEX;
+ next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
+ prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
+ next->back = rev->back;
+ prev->forw = rev->forw;
+ if (head == pte_index) {
+ head = rev->forw;
+ if (head == pte_index)
+ *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
+ else
+ *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
+ }
+ *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
+ unlock_rmap(rmap);
+}
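
kvmppc_add_revmap_chain() and remove_revmap_chain() above keep, per guest real page, a circular doubly linked list threaded through the revmap array by HPTE index, with the head index stored in the rmap word. A toy userspace version of the same structure, with plain ints instead of the rmap flag bits and no locking, is sketched here for illustration only.

#include <stdio.h>

struct rev { int forw, back; };

static struct rev tbl[16];

/* Insert entry i behind the current head h; returns the (unchanged) head. */
static int chain_insert(int h, int i)
{
	if (h < 0) {				/* empty chain: i points at itself */
		tbl[i].forw = tbl[i].back = i;
		return i;
	}
	tbl[i].forw = h;
	tbl[i].back = tbl[h].back;
	tbl[tbl[h].back].forw = i;
	tbl[h].back = i;
	return h;
}

/* Remove entry i; returns the new head (or -1 if the chain is now empty). */
static int chain_remove(int h, int i)
{
	tbl[tbl[i].back].forw = tbl[i].forw;
	tbl[tbl[i].forw].back = tbl[i].back;
	if (h != i)
		return h;
	return tbl[i].forw == i ? -1 : tbl[i].forw;
}

int main(void)
{
	int head = chain_insert(-1, 3);
	head = chain_insert(head, 7);
	head = chain_insert(head, 9);
	head = chain_remove(head, 3);
	printf("head is now %d\n", head);	/* 7 */
	return 0;
}
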
+
+static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
+ int writing, unsigned long *pte_sizep)
+{
+ pte_t *ptep;
+ unsigned long ps = *pte_sizep;
+ unsigned int shift;
+
+ ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
+ if (!ptep)
+ return __pte(0);
+ if (shift)
+ *pte_sizep = 1ul << shift;
+ else
+ *pte_sizep = PAGE_SIZE;
+ if (ps > *pte_sizep)
+ return __pte(0);
+ if (!pte_present(*ptep))
+ return __pte(0);
+ return kvmppc_read_update_linux_pte(ptep, writing);
+}
+
+static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
+{
+ asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+ hpte[0] = hpte_v;
}
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel)
{
- unsigned long porder;
struct kvm *kvm = vcpu->kvm;
- unsigned long i, lpn, pa;
+ unsigned long i, pa, gpa, gfn, psize;
+ unsigned long slot_fn, hva;
unsigned long *hpte;
+ struct revmap_entry *rev;
+ unsigned long g_ptel = ptel;
+ struct kvm_memory_slot *memslot;
+ unsigned long *physp, pte_size;
+ unsigned long is_io;
+ unsigned long *rmap;
+ pte_t pte;
+ unsigned int writing;
+ unsigned long mmu_seq;
+ unsigned long rcbits;
+ bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
- /* only handle 4k, 64k and 16M pages for now */
- porder = 12;
- if (pteh & HPTE_V_LARGE) {
- if (cpu_has_feature(CPU_FTR_ARCH_206) &&
- (ptel & 0xf000) == 0x1000) {
- /* 64k page */
- porder = 16;
- } else if ((ptel & 0xff000) == 0) {
- /* 16M page */
- porder = 24;
- /* lowest AVA bit must be 0 for 16M pages */
- if (pteh & 0x80)
- return H_PARAMETER;
- } else
+ psize = hpte_page_size(pteh, ptel);
+ if (!psize)
+ return H_PARAMETER;
+ writing = hpte_is_writable(ptel);
+ pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+
+ /* used later to detect if we might have been invalidated */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ /* Find the memslot (if any) for this address */
+ gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
+ gfn = gpa >> PAGE_SHIFT;
+ memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+ pa = 0;
+ is_io = ~0ul;
+ rmap = NULL;
+ if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
+ /* PPC970 can't do emulated MMIO */
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
return H_PARAMETER;
+ /* Emulated MMIO - mark this with key=31 */
+ pteh |= HPTE_V_ABSENT;
+ ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
+ goto do_insert;
}
- lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
- if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
- return H_PARAMETER;
- pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
- if (!pa)
+
+ /* Check if the requested page fits entirely in the memslot. */
+ if (!slot_is_aligned(memslot, psize))
return H_PARAMETER;
- /* Check WIMG */
- if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
- (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
+ slot_fn = gfn - memslot->base_gfn;
+ rmap = &memslot->rmap[slot_fn];
+
+ if (!kvm->arch.using_mmu_notifiers) {
+ physp = kvm->arch.slot_phys[memslot->id];
+ if (!physp)
+ return H_PARAMETER;
+ physp += slot_fn;
+ if (realmode)
+ physp = real_vmalloc_addr(physp);
+ pa = *physp;
+ if (!pa)
+ return H_TOO_HARD;
+ is_io = pa & (HPTE_R_I | HPTE_R_W);
+ pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
+ pa &= PAGE_MASK;
+ } else {
+ /* Translate to host virtual address */
+ hva = gfn_to_hva_memslot(memslot, gfn);
+
+ /* Look up the Linux PTE for the backing page */
+ pte_size = psize;
+ pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
+ if (pte_present(pte)) {
+ if (writing && !pte_write(pte))
+ /* make the actual HPTE be read-only */
+ ptel = hpte_make_readonly(ptel);
+ is_io = hpte_cache_bits(pte_val(pte));
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+ }
+ }
+ if (pte_size < psize)
return H_PARAMETER;
- pteh &= ~0x60UL;
- ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
+ if (pa && pte_size > psize)
+ pa |= gpa & (pte_size - 1);
+
+ ptel &= ~(HPTE_R_PP0 - psize);
ptel |= pa;
- if (pte_index >= (HPT_NPTEG << 3))
+
+ if (pa)
+ pteh |= HPTE_V_VALID;
+ else
+ pteh |= HPTE_V_ABSENT;
+
+ /* Check WIMG */
+ if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
+ if (is_io)
+ return H_PARAMETER;
+ /*
+ * Allow guest to map emulated device memory as
+ * uncacheable, but actually make it cacheable.
+ */
+ ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
+ ptel |= HPTE_R_M;
+ }
+
+ /* Find and lock the HPTEG slot to use */
+ do_insert:
+ if (pte_index >= HPT_NPTE)
return H_PARAMETER;
if (likely((flags & H_EXACT) == 0)) {
pte_index &= ~7UL;
hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
- for (i = 0; ; ++i) {
- if (i == 8)
- return H_PTEG_FULL;
+ for (i = 0; i < 8; ++i) {
if ((*hpte & HPTE_V_VALID) == 0 &&
- lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
+ try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
+ HPTE_V_ABSENT))
break;
hpte += 2;
}
+ if (i == 8) {
+ /*
+ * Since try_lock_hpte doesn't retry (not even stdcx.
+ * failures), it could be that there is a free slot
+ * but we transiently failed to lock it. Try again,
+ * actually locking each slot and checking it.
+ */
+ hpte -= 16;
+ for (i = 0; i < 8; ++i) {
+ while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ break;
+ *hpte &= ~HPTE_V_HVLOCK;
+ hpte += 2;
+ }
+ if (i == 8)
+ return H_PTEG_FULL;
+ }
+ pte_index += i;
} else {
- i = 0;
hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
- if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
- return H_PTEG_FULL;
+ if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
+ HPTE_V_ABSENT)) {
+ /* Lock the slot and check again */
+ while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+ *hpte &= ~HPTE_V_HVLOCK;
+ return H_PTEG_FULL;
+ }
+ }
}
+
+ /* Save away the guest's idea of the second HPTE dword */
+ rev = &kvm->arch.revmap[pte_index];
+ if (realmode)
+ rev = real_vmalloc_addr(rev);
+ if (rev)
+ rev->guest_rpte = g_ptel;
+
+ /* Link HPTE into reverse-map chain */
+ if (pteh & HPTE_V_VALID) {
+ if (realmode)
+ rmap = real_vmalloc_addr(rmap);
+ lock_rmap(rmap);
+ /* Check for pending invalidations under the rmap chain lock */
+ if (kvm->arch.using_mmu_notifiers &&
+ mmu_notifier_retry(vcpu, mmu_seq)) {
+ /* inval in progress, write a non-present HPTE */
+ pteh |= HPTE_V_ABSENT;
+ pteh &= ~HPTE_V_VALID;
+ unlock_rmap(rmap);
+ } else {
+ kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
+ realmode);
+ /* Only set R/C in real HPTE if already set in *rmap */
+ rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
+ ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
+ }
+ }
+
hpte[1] = ptel;
+
+ /* Write the first HPTE dword, unlocking the HPTE and making it valid */
eieio();
hpte[0] = pteh;
asm volatile("ptesync" : : : "memory");
- atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
- vcpu->arch.gpr[4] = pte_index + i;
+
+ vcpu->arch.gpr[4] = pte_index;
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_enter);
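
The H_ENTER slot search above is two-pass: an opportunistic scan that skips slots that merely look busy, then a careful pass that actually locks each slot before concluding the PTEG is full. The compilable sketch below shows that shape with C11 atomics; it is a simplification, since the real code also distinguishes the HVLOCK bit from the VALID/ABSENT bits.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 8

static _Atomic int slot[NSLOTS];	/* 0 = free, 1 = claimed */

static bool try_claim(int i)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&slot[i], &expected, 1);
}

static int claim_any_slot(void)
{
	/* Pass 1: skip anything that even looks busy. */
	for (int i = 0; i < NSLOTS; i++)
		if (atomic_load(&slot[i]) == 0 && try_claim(i))
			return i;
	/* Pass 2: a free slot may only have been transiently contended. */
	for (int i = 0; i < NSLOTS; i++)
		if (try_claim(i))
			return i;
	return -1;			/* genuinely full */
}

int main(void)
{
	printf("claimed slot %d\n", claim_any_slot());
	return 0;
}
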
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -137,37 +350,46 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
struct kvm *kvm = vcpu->kvm;
unsigned long *hpte;
unsigned long v, r, rb;
+ struct revmap_entry *rev;
- if (pte_index >= (HPT_NPTEG << 3))
+ if (pte_index >= HPT_NPTE)
return H_PARAMETER;
hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
- while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+ while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- if ((hpte[0] & HPTE_V_VALID) == 0 ||
+ if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
hpte[0] &= ~HPTE_V_HVLOCK;
return H_NOT_FOUND;
}
- if (atomic_read(&kvm->online_vcpus) == 1)
- flags |= H_LOCAL;
- vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
- vcpu->arch.gpr[5] = r = hpte[1];
- rb = compute_tlbie_rb(v, r, pte_index);
- hpte[0] = 0;
- if (!(flags & H_LOCAL)) {
- while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- asm volatile("tlbiel %0" : : "r" (rb));
- asm volatile("ptesync" : : : "memory");
+
+ rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+ v = hpte[0] & ~HPTE_V_HVLOCK;
+ if (v & HPTE_V_VALID) {
+ hpte[0] &= ~HPTE_V_VALID;
+ rb = compute_tlbie_rb(v, hpte[1], pte_index);
+ if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ asm volatile("tlbiel %0" : : "r" (rb));
+ asm volatile("ptesync" : : : "memory");
+ }
+ /* Read PTE low word after tlbie to get final R/C values */
+ remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
}
+ r = rev->guest_rpte;
+ unlock_hpte(hpte, 0);
+
+ vcpu->arch.gpr[4] = v;
+ vcpu->arch.gpr[5] = r;
return H_SUCCESS;
}
@@ -175,78 +397,117 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
unsigned long *args = &vcpu->arch.gpr[4];
- unsigned long *hp, tlbrb[4];
- long int i, found;
- long int n_inval = 0;
- unsigned long flags, req, pte_index;
+ unsigned long *hp, *hptes[4], tlbrb[4];
+ long int i, j, k, n, found, indexes[4];
+ unsigned long flags, req, pte_index, rcbits;
long int local = 0;
long int ret = H_SUCCESS;
+ struct revmap_entry *rev, *revs[4];
if (atomic_read(&kvm->online_vcpus) == 1)
local = 1;
- for (i = 0; i < 4; ++i) {
- pte_index = args[i * 2];
- flags = pte_index >> 56;
- pte_index &= ((1ul << 56) - 1);
- req = flags >> 6;
- flags &= 3;
- if (req == 3)
- break;
- if (req != 1 || flags == 3 ||
- pte_index >= (HPT_NPTEG << 3)) {
- /* parameter error */
- args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
- ret = H_PARAMETER;
- break;
- }
- hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
- while (!lock_hpte(hp, HPTE_V_HVLOCK))
- cpu_relax();
- found = 0;
- if (hp[0] & HPTE_V_VALID) {
- switch (flags & 3) {
- case 0: /* absolute */
- found = 1;
+ for (i = 0; i < 4 && ret == H_SUCCESS; ) {
+ n = 0;
+ for (; i < 4; ++i) {
+ j = i * 2;
+ pte_index = args[j];
+ flags = pte_index >> 56;
+ pte_index &= ((1ul << 56) - 1);
+ req = flags >> 6;
+ flags &= 3;
+ if (req == 3) { /* no more requests */
+ i = 4;
break;
- case 1: /* andcond */
- if (!(hp[0] & args[i * 2 + 1]))
- found = 1;
+ }
+ if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
+ /* parameter error */
+ args[j] = ((0xa0 | flags) << 56) + pte_index;
+ ret = H_PARAMETER;
break;
- case 2: /* AVPN */
- if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
+ }
+ hp = (unsigned long *)
+ (kvm->arch.hpt_virt + (pte_index << 4));
+ /* to avoid deadlock, don't spin except for first */
+ if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
+ if (n)
+ break;
+ while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
+ cpu_relax();
+ }
+ found = 0;
+ if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
+ switch (flags & 3) {
+ case 0: /* absolute */
found = 1;
- break;
+ break;
+ case 1: /* andcond */
+ if (!(hp[0] & args[j + 1]))
+ found = 1;
+ break;
+ case 2: /* AVPN */
+ if ((hp[0] & ~0x7fUL) == args[j + 1])
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ hp[0] &= ~HPTE_V_HVLOCK;
+ args[j] = ((0x90 | flags) << 56) + pte_index;
+ continue;
}
+
+ args[j] = ((0x80 | flags) << 56) + pte_index;
+ rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+
+ if (!(hp[0] & HPTE_V_VALID)) {
+ /* insert R and C bits from PTE */
+ rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
+ args[j] |= rcbits << (56 - 5);
+ continue;
+ }
+
+ hp[0] &= ~HPTE_V_VALID; /* leave it locked */
+ tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+ indexes[n] = j;
+ hptes[n] = hp;
+ revs[n] = rev;
+ ++n;
+ }
+
+ if (!n)
+ break;
+
+ /* Now that we've collected a batch, do the tlbies */
+ if (!local) {
+ while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ for (k = 0; k < n; ++k)
+ asm volatile(PPC_TLBIE(%1,%0) : :
+ "r" (tlbrb[k]),
+ "r" (kvm->arch.lpid));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ for (k = 0; k < n; ++k)
+ asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
+ asm volatile("ptesync" : : : "memory");
}
- if (!found) {
- hp[0] &= ~HPTE_V_HVLOCK;
- args[i * 2] = ((0x90 | flags) << 56) + pte_index;
- continue;
+
+ /* Read PTE low words after tlbie to get final R/C values */
+ for (k = 0; k < n; ++k) {
+ j = indexes[k];
+ pte_index = args[j] & ((1ul << 56) - 1);
+ hp = hptes[k];
+ rev = revs[k];
+ remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
+ rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
+ args[j] |= rcbits << (56 - 5);
+ hp[0] = 0;
}
- /* insert R and C bits from PTE */
- flags |= (hp[1] >> 5) & 0x0c;
- args[i * 2] = ((0x80 | flags) << 56) + pte_index;
- tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
- hp[0] = 0;
- }
- if (n_inval == 0)
- return ret;
-
- if (!local) {
- while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- for (i = 0; i < n_inval; ++i)
- asm volatile(PPC_TLBIE(%1,%0)
- : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
- asm volatile("eieio; tlbsync; ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- for (i = 0; i < n_inval; ++i)
- asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
- asm volatile("ptesync" : : : "memory");
}
+
return ret;
}
@@ -256,40 +517,55 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
{
struct kvm *kvm = vcpu->kvm;
unsigned long *hpte;
- unsigned long v, r, rb;
+ struct revmap_entry *rev;
+ unsigned long v, r, rb, mask, bits;
- if (pte_index >= (HPT_NPTEG << 3))
+ if (pte_index >= HPT_NPTE)
return H_PARAMETER;
+
hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
- while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+ while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- if ((hpte[0] & HPTE_V_VALID) == 0 ||
+ if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
hpte[0] &= ~HPTE_V_HVLOCK;
return H_NOT_FOUND;
}
+
if (atomic_read(&kvm->online_vcpus) == 1)
flags |= H_LOCAL;
v = hpte[0];
- r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
- HPTE_R_KEY_HI | HPTE_R_KEY_LO);
- r |= (flags << 55) & HPTE_R_PP0;
- r |= (flags << 48) & HPTE_R_KEY_HI;
- r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
- rb = compute_tlbie_rb(v, r, pte_index);
- hpte[0] = v & ~HPTE_V_VALID;
- if (!(flags & H_LOCAL)) {
- while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- asm volatile("tlbiel %0" : : "r" (rb));
- asm volatile("ptesync" : : : "memory");
+ bits = (flags << 55) & HPTE_R_PP0;
+ bits |= (flags << 48) & HPTE_R_KEY_HI;
+ bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+
+ /* Update guest view of 2nd HPTE dword */
+ mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
+ HPTE_R_KEY_HI | HPTE_R_KEY_LO;
+ rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+ if (rev) {
+ r = (rev->guest_rpte & ~mask) | bits;
+ rev->guest_rpte = r;
+ }
+ r = (hpte[1] & ~mask) | bits;
+
+ /* Update HPTE */
+ if (v & HPTE_V_VALID) {
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = v & ~HPTE_V_VALID;
+ if (!(flags & H_LOCAL)) {
+ while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ asm volatile("tlbiel %0" : : "r" (rb));
+ asm volatile("ptesync" : : : "memory");
+ }
}
hpte[1] = r;
eieio();
@@ -298,40 +574,243 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
return H_SUCCESS;
}
-static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
-{
- long int i;
- unsigned long offset, rpn;
-
- offset = realaddr & (kvm->arch.ram_psize - 1);
- rpn = (realaddr - offset) >> PAGE_SHIFT;
- for (i = 0; i < kvm->arch.ram_npages; ++i)
- if (rpn == kvm->arch.ram_pginfo[i].pfn)
- return (i << PAGE_SHIFT) + offset;
- return HPTE_R_RPN; /* all 1s in the RPN field */
-}
-
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long *hpte, r;
+ unsigned long *hpte, v, r;
int i, n = 1;
+ struct revmap_entry *rev = NULL;
- if (pte_index >= (HPT_NPTEG << 3))
+ if (pte_index >= HPT_NPTE)
return H_PARAMETER;
if (flags & H_READ_4) {
pte_index &= ~3;
n = 4;
}
+ rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
for (i = 0; i < n; ++i, ++pte_index) {
hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ v = hpte[0] & ~HPTE_V_HVLOCK;
r = hpte[1];
- if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
- r = reverse_xlate(kvm, r & HPTE_R_RPN) |
- (r & ~HPTE_R_RPN);
- vcpu->arch.gpr[4 + i * 2] = hpte[0];
+ if (v & HPTE_V_ABSENT) {
+ v &= ~HPTE_V_ABSENT;
+ v |= HPTE_V_VALID;
+ }
+ if (v & HPTE_V_VALID)
+ r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
+ vcpu->arch.gpr[4 + i * 2] = v;
vcpu->arch.gpr[5 + i * 2] = r;
}
return H_SUCCESS;
}
+
+void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+ unsigned long pte_index)
+{
+ unsigned long rb;
+
+ hptep[0] &= ~HPTE_V_VALID;
+ rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
+
+void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+ unsigned long pte_index)
+{
+ unsigned long rb;
+ unsigned char rbyte;
+
+ rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
+ rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
+ /* modify only the second-last byte, which contains the ref bit */
+ *((char *)hptep + 14) = rbyte;
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
+
+static int slb_base_page_shift[4] = {
+ 24, /* 16M */
+ 16, /* 64k */
+ 34, /* 16G */
+ 20, /* 1M, unsupported */
+};
+
+long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
+ unsigned long valid)
+{
+ unsigned int i;
+ unsigned int pshift;
+ unsigned long somask;
+ unsigned long vsid, hash;
+ unsigned long avpn;
+ unsigned long *hpte;
+ unsigned long mask, val;
+ unsigned long v, r;
+
+ /* Get page shift, work out hash and AVPN etc. */
+ mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
+ val = 0;
+ pshift = 12;
+ if (slb_v & SLB_VSID_L) {
+ mask |= HPTE_V_LARGE;
+ val |= HPTE_V_LARGE;
+ pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
+ }
+ if (slb_v & SLB_VSID_B_1T) {
+ somask = (1UL << 40) - 1;
+ vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
+ vsid ^= vsid << 25;
+ } else {
+ somask = (1UL << 28) - 1;
+ vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
+ }
+ hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
+ avpn = slb_v & ~(somask >> 16); /* also includes B */
+ avpn |= (eaddr & somask) >> 16;
+
+ if (pshift >= 24)
+ avpn &= ~((1UL << (pshift - 16)) - 1);
+ else
+ avpn &= ~0x7fUL;
+ val |= avpn;
+
+ for (;;) {
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));
+
+ for (i = 0; i < 16; i += 2) {
+ /* Read the PTE racily */
+ v = hpte[i] & ~HPTE_V_HVLOCK;
+
+ /* Check valid/absent, hash, segment size and AVPN */
+ if (!(v & valid) || (v & mask) != val)
+ continue;
+
+ /* Lock the PTE and read it under the lock */
+ while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
+ cpu_relax();
+ v = hpte[i] & ~HPTE_V_HVLOCK;
+ r = hpte[i+1];
+
+ /*
+ * Check the HPTE again, including large page size
+ * Since we don't currently allow any MPSS (mixed
+ * page-size segment) page sizes, it is sufficient
+ * to check against the actual page size.
+ */
+ if ((v & valid) && (v & mask) == val &&
+ hpte_page_size(v, r) == (1ul << pshift))
+ /* Return with the HPTE still locked */
+ return (hash << 3) + (i >> 1);
+
+ /* Unlock and move on */
+ hpte[i] = v;
+ }
+
+ if (val & HPTE_V_SECONDARY)
+ break;
+ val |= HPTE_V_SECONDARY;
+ hash = hash ^ HPT_HASH_MASK;
+ }
+ return -1;
+}
+EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
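
In kvmppc_hv_find_lock_hpte() above, the secondary PTEG index is just the primary index complemented within HPT_HASH_MASK. A small numeric illustration follows; the mask and input values are stand-ins, not taken from the patch.

#include <stdio.h>
#include <stdint.h>

#define HASH_MASK UINT64_C(0xffff)	/* stand-in for HPT_HASH_MASK */

int main(void)
{
	uint64_t vsid = UINT64_C(0x123456789a);
	uint64_t ea_page = 0x7feed;		/* (eaddr & somask) >> pshift */
	uint64_t primary = (vsid ^ ea_page) & HASH_MASK;
	uint64_t secondary = primary ^ HASH_MASK;	/* secondary hash = complement */

	printf("primary PTEG   = 0x%llx\n", (unsigned long long)primary);
	printf("secondary PTEG = 0x%llx\n", (unsigned long long)secondary);
	return 0;
}
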
+
+/*
+ * Called in real mode to check whether an HPTE not found fault
+ * is due to accessing a paged-out page or an emulated MMIO page,
+ * or if a protection fault is due to accessing a page that the
+ * guest wanted read/write access to but which we made read-only.
+ * Returns a possibly modified status (DSISR) value if not
+ * (i.e. pass the interrupt to the guest),
+ * -1 to pass the fault up to host kernel mode code, -2 to do that
+ * and also load the instruction word (for MMIO emulation),
+ * or 0 if we should make the guest retry the access.
+ */
+long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
+ unsigned long slb_v, unsigned int status, bool data)
+{
+ struct kvm *kvm = vcpu->kvm;
+ long int index;
+ unsigned long v, r, gr;
+ unsigned long *hpte;
+ unsigned long valid;
+ struct revmap_entry *rev;
+ unsigned long pp, key;
+
+ /* For protection fault, expect to find a valid HPTE */
+ valid = HPTE_V_VALID;
+ if (status & DSISR_NOHPTE)
+ valid |= HPTE_V_ABSENT;
+
+ index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
+ if (index < 0) {
+ if (status & DSISR_NOHPTE)
+ return status; /* there really was no HPTE */
+ return 0; /* for prot fault, HPTE disappeared */
+ }
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+ v = hpte[0] & ~HPTE_V_HVLOCK;
+ r = hpte[1];
+ rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+ gr = rev->guest_rpte;
+
+ unlock_hpte(hpte, v);
+
+ /* For not found, if the HPTE is valid by now, retry the instruction */
+ if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
+ return 0;
+
+ /* Check access permissions to the page */
+ pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
+ key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+ status &= ~DSISR_NOHPTE; /* DSISR_NOHPTE == SRR1_ISI_NOPT */
+ if (!data) {
+ if (gr & (HPTE_R_N | HPTE_R_G))
+ return status | SRR1_ISI_N_OR_G;
+ if (!hpte_read_permission(pp, slb_v & key))
+ return status | SRR1_ISI_PROT;
+ } else if (status & DSISR_ISSTORE) {
+ /* check write permission */
+ if (!hpte_write_permission(pp, slb_v & key))
+ return status | DSISR_PROTFAULT;
+ } else {
+ if (!hpte_read_permission(pp, slb_v & key))
+ return status | DSISR_PROTFAULT;
+ }
+
+ /* Check storage key, if applicable */
+ if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
+ unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
+ if (status & DSISR_ISSTORE)
+ perm >>= 1;
+ if (perm & 1)
+ return status | DSISR_KEYFAULT;
+ }
+
+ /* Save HPTE info for virtual-mode handler */
+ vcpu->arch.pgfault_addr = addr;
+ vcpu->arch.pgfault_index = index;
+ vcpu->arch.pgfault_hpte[0] = v;
+ vcpu->arch.pgfault_hpte[1] = r;
+
+ /* Check the storage key to see if it is possibly emulated MMIO */
+ if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
+ (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
+ (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
+ return -2; /* MMIO emulation - load instr word */
+
+ return -1; /* send fault up to host kernel mode */
+}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5c8b26183f50..b70bf22a3ff3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -601,6 +601,30 @@ kvmppc_interrupt:
stw r12,VCPU_TRAP(r9)
+ /* Save HEIR (HV emulation assist reg) in last_inst
+ if this is an HEI (HV emulation interrupt, e40) */
+ li r3,KVM_INST_FETCH_FAILED
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
+ bne 11f
+ mfspr r3,SPRN_HEIR
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+11: stw r3,VCPU_LAST_INST(r9)
+
+ /* these are volatile across C function calls */
+ mfctr r3
+ mfxer r4
+ std r3, VCPU_CTR(r9)
+ stw r4, VCPU_XER(r9)
+
+BEGIN_FTR_SECTION
+ /* If this is a page table miss then see if it's theirs or ours */
+ cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
+ beq kvmppc_hdsi
+ cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
+ beq kvmppc_hisi
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
/* See if this is a leftover HDEC interrupt */
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
bne 2f
@@ -608,7 +632,7 @@ kvmppc_interrupt:
cmpwi r3,0
bge ignore_hdec
2:
- /* See if this is something we can handle in real mode */
+ /* See if this is an hcall we can handle in real mode */
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode
@@ -624,6 +648,7 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+nohpte_cont:
hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Save DEC */
mfspr r5,SPRN_DEC
@@ -632,36 +657,21 @@ hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
add r5,r5,r6
std r5,VCPU_DEC_EXPIRES(r9)
- /* Save HEIR (HV emulation assist reg) in last_inst
- if this is an HEI (HV emulation interrupt, e40) */
- li r3,-1
-BEGIN_FTR_SECTION
- cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
- bne 11f
- mfspr r3,SPRN_HEIR
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-11: stw r3,VCPU_LAST_INST(r9)
-
/* Save more register state */
- mfxer r5
mfdar r6
mfdsisr r7
- mfctr r8
-
- stw r5, VCPU_XER(r9)
std r6, VCPU_DAR(r9)
stw r7, VCPU_DSISR(r9)
- std r8, VCPU_CTR(r9)
- /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
BEGIN_FTR_SECTION
+ /* don't overwrite fault_dar/fault_dsisr if HDSI */
cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
beq 6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-7: std r6, VCPU_FAULT_DAR(r9)
+ std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9)
/* Save guest CTRL register, set runlatch to 1 */
- mfspr r6,SPRN_CTRLF
+6: mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)
andi. r0,r6,1
bne 4f
@@ -1094,9 +1104,131 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_HSRR1, r7
ba 0x500
-6: mfspr r6,SPRN_HDAR
- mfspr r7,SPRN_HDSISR
- b 7b
+/*
+ * Check whether an HDSI is an HPTE not found fault or something else.
+ * If it is an HPTE not found fault that is due to the guest accessing
+ * a page that they have mapped but which we have paged out, then
+ * we continue on with the guest exit path. In all other cases,
+ * reflect the HDSI to the guest as a DSI.
+ */
+kvmppc_hdsi:
+ mfspr r4, SPRN_HDAR
+ mfspr r6, SPRN_HDSISR
+ /* HPTE not found fault or protection fault? */
+ andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
+ beq 1f /* if not, send it to the guest */
+ andi. r0, r11, MSR_DR /* data relocation enabled? */
+ beq 3f
+ clrrdi r0, r4, 28
+ PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+ bne 1f /* if no SLB entry found */
+4: std r4, VCPU_FAULT_DAR(r9)
+ stw r6, VCPU_FAULT_DSISR(r9)
+
+ /* Search the hash table. */
+ mr r3, r9 /* vcpu pointer */
+ li r7, 1 /* data fault */
+ bl .kvmppc_hpte_hv_fault
+ ld r9, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_PC(r9)
+ ld r11, VCPU_MSR(r9)
+ li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
+ cmpdi r3, 0 /* retry the instruction */
+ beq 6f
+ cmpdi r3, -1 /* handle in kernel mode */
+ beq nohpte_cont
+ cmpdi r3, -2 /* MMIO emulation; need instr word */
+ beq 2f
+
+ /* Synthesize a DSI for the guest */
+ ld r4, VCPU_FAULT_DAR(r9)
+ mr r6, r3
+1: mtspr SPRN_DAR, r4
+ mtspr SPRN_DSISR, r6
+ mtspr SPRN_SRR0, r10
+ mtspr SPRN_SRR1, r11
+ li r10, BOOK3S_INTERRUPT_DATA_STORAGE
+ li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
+ rotldi r11, r11, 63
+6: ld r7, VCPU_CTR(r9)
+ lwz r8, VCPU_XER(r9)
+ mtctr r7
+ mtxer r8
+ mr r4, r9
+ b fast_guest_return
+
+3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
+ ld r5, KVM_VRMA_SLB_V(r5)
+ b 4b
+
+ /* If this is for emulated MMIO, load the instruction word */
+2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
+
+ /* Set guest mode to 'jump over instruction' so if lwz faults
+ * we'll just continue at the next IP. */
+ li r0, KVM_GUEST_MODE_SKIP
+ stb r0, HSTATE_IN_GUEST(r13)
+
+ /* Do the access with MSR:DR enabled */
+ mfmsr r3
+ ori r4, r3, MSR_DR /* Enable paging for data */
+ mtmsrd r4
+ lwz r8, 0(r10)
+ mtmsrd r3
+
+ /* Store the result */
+ stw r8, VCPU_LAST_INST(r9)
+
+ /* Unset guest mode. */
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+ b nohpte_cont
+
+/*
+ * Similarly for an HISI, reflect it to the guest as an ISI unless
+ * it is an HPTE not found fault for a page that we have paged out.
+ */
+kvmppc_hisi:
+ andis. r0, r11, SRR1_ISI_NOPT@h
+ beq 1f
+ andi. r0, r11, MSR_IR /* instruction relocation enabled? */
+ beq 3f
+ clrrdi r0, r10, 28
+ PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+ bne 1f /* if no SLB entry found */
+4:
+ /* Search the hash table. */
+ mr r3, r9 /* vcpu pointer */
+ mr r4, r10
+ mr r6, r11
+ li r7, 0 /* instruction fault */
+ bl .kvmppc_hpte_hv_fault
+ ld r9, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_PC(r9)
+ ld r11, VCPU_MSR(r9)
+ li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
+ cmpdi r3, 0 /* retry the instruction */
+ beq 6f
+ cmpdi r3, -1 /* handle in kernel mode */
+ beq nohpte_cont
+
+ /* Synthesize an ISI for the guest */
+ mr r11, r3
+1: mtspr SPRN_SRR0, r10
+ mtspr SPRN_SRR1, r11
+ li r10, BOOK3S_INTERRUPT_INST_STORAGE
+ li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
+ rotldi r11, r11, 63
+6: ld r7, VCPU_CTR(r9)
+ lwz r8, VCPU_XER(r9)
+ mtctr r7
+ mtxer r8
+ mr r4, r9
+ b fast_guest_return
+
+3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
+ ld r5, KVM_VRMA_SLB_V(r6)
+ b 4b
/*
* Try to handle an hcall in real mode.
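
The DSI/ISI synthesis in kvmppc_hdsi/kvmppc_hisi above builds MSR_SF | MSR_ME without loading a 64-bit constant: (MSR_ME << 1) | 1 fits the 16-bit li immediate, and rotldi by 63 moves the spare low bit up to bit 63. A quick check of the arithmetic, with the bit positions (ME = 12, SF = 63) assumed rather than taken from the patch:

#include <stdio.h>
#include <stdint.h>

#define MSR_ME (UINT64_C(1) << 12)
#define MSR_SF (UINT64_C(1) << 63)

static uint64_t rotl64(uint64_t x, unsigned int n)
{
	return (x << n) | (x >> (64 - n));
}

int main(void)
{
	uint64_t imm = (MSR_ME << 1) | 1;	/* 0x2001, fits a 16-bit li */
	uint64_t msr = rotl64(imm, 63);		/* rotldi r11,r11,63 */

	printf("msr = 0x%llx (SF|ME? %d)\n",
	       (unsigned long long)msr, msr == (MSR_SF | MSR_ME));
	return 0;
}
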
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 7b0ee96c1bed..e70ef2d86431 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -196,7 +196,8 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1);
+ emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
+ len, 1);
goto done_load;
}
@@ -286,11 +287,13 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if ((r == EMULATE_DO_MMIO) && w) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1);
+ emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
+ 4, 1);
vcpu->arch.qpr[rs] = tmp[1];
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1);
+ emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
+ 8, 1);
goto done_load;
}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e2cfb9e1e20e..7340e1090b77 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -51,15 +51,19 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
+#define __hard_irq_disable local_irq_disable
+#define __hard_irq_enable local_irq_enable
#endif
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
- memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
sizeof(get_paca()->shadow_vcpu));
- to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+ svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+ svcpu_put(svcpu);
#endif
#ifdef CONFIG_PPC_BOOK3S_32
@@ -70,10 +74,12 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
- memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
sizeof(get_paca()->shadow_vcpu));
- to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+ to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
+ svcpu_put(svcpu);
#endif
kvmppc_giveup_ext(vcpu, MSR_FP);
@@ -151,14 +157,16 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
#ifdef CONFIG_PPC_BOOK3S_64
if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
kvmppc_mmu_book3s_64_init(vcpu);
- to_book3s(vcpu)->hior = 0xfff00000;
+ if (!to_book3s(vcpu)->hior_explicit)
+ to_book3s(vcpu)->hior = 0xfff00000;
to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
} else
#endif
{
kvmppc_mmu_book3s_32_init(vcpu);
- to_book3s(vcpu)->hior = 0;
+ if (!to_book3s(vcpu)->hior_explicit)
+ to_book3s(vcpu)->hior = 0;
to_book3s(vcpu)->msr_mask = 0xffffffffULL;
vcpu->arch.cpu_type = KVM_CPU_3S_32;
}
@@ -227,14 +235,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
hpage_offset /= 4;
get_page(hpage);
- page = kmap_atomic(hpage, KM_USER0);
+ page = kmap_atomic(hpage);
/* patch dcbz into reserved instruction, so we trap */
for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
if ((page[i] & 0xff0007ff) == INS_DCBZ)
page[i] &= 0xfffffff7;
- kunmap_atomic(page, KM_USER0);
+ kunmap_atomic(page);
put_page(hpage);
}
@@ -308,19 +316,22 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (page_found == -ENOENT) {
/* Page not found in guest PTE entries */
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
vcpu->arch.shared->msr |=
- (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+ (svcpu->shadow_srr1 & 0x00000000f8000000ULL);
+ svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) {
/* Storage protection */
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr =
- to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+ vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->msr |=
- (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+ svcpu->shadow_srr1 & 0x00000000f8000000ULL;
+ svcpu_put(svcpu);
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EINVAL) {
/* Page not found in guest SLB */
@@ -517,24 +528,29 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1;
trace_kvm_book3s_exit(exit_nr, vcpu);
+ preempt_enable();
kvm_resched(vcpu);
switch (exit_nr) {
case BOOK3S_INTERRUPT_INST_STORAGE:
+ {
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong shadow_srr1 = svcpu->shadow_srr1;
vcpu->stat.pf_instruc++;
#ifdef CONFIG_PPC_BOOK3S_32
/* We set segments as unused segments when invalidating them. So
* treat the respective fault as segment fault. */
- if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
- == SR_INVALID) {
+ if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
r = RESUME_GUEST;
+ svcpu_put(svcpu);
break;
}
#endif
+ svcpu_put(svcpu);
/* only care about PTEG not found errors, but leave NX alone */
- if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+ if (shadow_srr1 & 0x40000000) {
r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -547,33 +563,37 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
r = RESUME_GUEST;
} else {
- vcpu->arch.shared->msr |=
- to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+ vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
r = RESUME_GUEST;
}
break;
+ }
case BOOK3S_INTERRUPT_DATA_STORAGE:
{
ulong dar = kvmppc_get_fault_dar(vcpu);
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ u32 fault_dsisr = svcpu->fault_dsisr;
vcpu->stat.pf_storage++;
#ifdef CONFIG_PPC_BOOK3S_32
/* We set segments as unused segments when invalidating them. So
* treat the respective fault as segment fault. */
- if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+ if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
kvmppc_mmu_map_segment(vcpu, dar);
r = RESUME_GUEST;
+ svcpu_put(svcpu);
break;
}
#endif
+ svcpu_put(svcpu);
/* The only case we need to handle is missing shadow PTEs */
- if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+ if (fault_dsisr & DSISR_NOHPTE) {
r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
} else {
vcpu->arch.shared->dar = dar;
- vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ vcpu->arch.shared->dsisr = fault_dsisr;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
r = RESUME_GUEST;
}
@@ -609,10 +629,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_PROGRAM:
{
enum emulation_result er;
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
ulong flags;
program_interrupt:
- flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
+ svcpu = svcpu_get(vcpu);
+ flags = svcpu->shadow_srr1 & 0x1f0000ull;
+ svcpu_put(svcpu);
if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
@@ -740,20 +763,33 @@ program_interrupt:
r = RESUME_GUEST;
break;
default:
+ {
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong shadow_srr1 = svcpu->shadow_srr1;
+ svcpu_put(svcpu);
/* Ugh - bork here! What did we get? */
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
- exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
+ exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
r = RESUME_HOST;
BUG();
break;
}
-
+ }
if (!(r & RESUME_HOST)) {
/* To avoid clobbering exit_reason, only check for signals if
* we aren't already exiting to userspace for some other
* reason. */
+
+ /*
+ * Interrupts could be timers for the guest which we have to
+ * inject again, so let's postpone them until we're in the guest
+ * and if we really did time things so badly, then we just exit
+ * again due to a host external interrupt.
+ */
+ __hard_irq_disable();
if (signal_pending(current)) {
+ __hard_irq_enable();
#ifdef EXIT_DEBUG
printk(KERN_EMERG "KVM: Going back to host\n");
#endif
@@ -761,10 +797,12 @@ program_interrupt:
run->exit_reason = KVM_EXIT_INTR;
r = -EINTR;
} else {
+ preempt_disable();
+
/* In case an interrupt came in that was triggered
* from userspace (like DEC), we need to check what
* to inject now! */
- kvmppc_core_deliver_interrupts(vcpu);
+ kvmppc_core_prepare_to_enter(vcpu);
}
}
@@ -836,6 +874,38 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return 0;
}
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_PPC_HIOR:
+ r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+ break;
+ default:
+ break;
+ }
+
+ return r;
+}
+
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_PPC_HIOR:
+ r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+ if (!r)
+ to_book3s(vcpu)->hior_explicit = true;
+ break;
+ default:
+ break;
+ }
+
+ return r;
+}
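
With the get/set one-reg handlers above, userspace can program HIOR explicitly before the first vcpu run. A rough caller might look like the following, assuming the generic ONE_REG ioctls and KVM_REG_PPC_HIOR are visible in the exported headers; the helper name and error handling are illustrative.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: vcpu_fd is an already-created KVM vcpu fd. */
static int set_hior(int vcpu_fd, uint64_t hior)
{
	uint64_t val = hior;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_HIOR,
		.addr = (uintptr_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
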
+
int kvmppc_core_check_processor_compat(void)
{
return 0;
@@ -923,16 +993,31 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#endif
ulong ext_msr;
+ preempt_disable();
+
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+ kvmppc_core_prepare_to_enter(vcpu);
+
+ /*
+ * Interrupts could be timers for the guest which we have to inject
+ * again, so let's postpone them until we're in the guest and if we
+ * really did time things so badly, then we just exit again due to
+ * a host external interrupt.
+ */
+ __hard_irq_disable();
+
/* No need to go into the guest when all we do is going out */
if (signal_pending(current)) {
+ __hard_irq_enable();
kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
+ ret = -EINTR;
+ goto out;
}
/* Save FPU state in stack */
@@ -974,8 +1059,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvm_guest_exit();
- local_irq_disable();
-
current->thread.regs->msr = ext_msr;
/* Make sure we save the guest FPU/Altivec/VSX state */
@@ -1002,9 +1085,50 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
current->thread.used_vsr = used_vsr;
#endif
+out:
+ preempt_enable();
return ret;
}
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ struct kvm_memory_slot *memslot;
+ struct kvm_vcpu *vcpu;
+ ulong ga, ga_end;
+ int is_dirty = 0;
+ int r;
+ unsigned long n;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ if (r)
+ goto out;
+
+ /* If nothing is dirty, don't bother messing with page tables. */
+ if (is_dirty) {
+ memslot = id_to_memslot(kvm->memslots, log->slot);
+
+ ga = memslot->base_gfn << PAGE_SHIFT;
+ ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+ kvm_for_each_vcpu(n, vcpu, kvm)
+ kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
+
+ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+
+ r = 0;
+out:
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
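
The new kvm_vm_ioctl_get_dirty_log() above is driven by the standard KVM_GET_DIRTY_LOG ioctl. A rough userspace caller is sketched below; slot and npages are assumed to describe a memory slot registered earlier with KVM_SET_USER_MEMORY_REGION, and the helper name is illustrative.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns a caller-owned bitmap with one bit per dirtied page, or NULL. */
static void *get_dirty_bitmap(int vm_fd, uint32_t slot, uint64_t npages)
{
	size_t bytes = ((npages + 63) / 64) * 8;	/* one bit per page, 64-bit aligned */
	void *bitmap = calloc(1, bytes);
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};

	if (bitmap && ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;
}
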
+
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
{
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index bb6c988f010a..ee9e1ee9c858 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,12 +124,6 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
vcpu->arch.shared->msr = new_msr;
kvmppc_mmu_msr_notify(vcpu, old_msr);
-
- if (vcpu->arch.shared->msr & MSR_WE) {
- kvm_vcpu_block(vcpu);
- kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
- };
-
kvmppc_vcpu_sync_spe(vcpu);
}
@@ -258,9 +252,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
allowed = vcpu->arch.shared->msr & MSR_ME;
msr_mask = 0;
break;
- case BOOKE_IRQPRIO_EXTERNAL:
case BOOKE_IRQPRIO_DECREMENTER:
case BOOKE_IRQPRIO_FIT:
+ keep_irq = true;
+ /* fall through */
+ case BOOKE_IRQPRIO_EXTERNAL:
allowed = vcpu->arch.shared->msr & MSR_EE;
allowed = allowed && !crit;
msr_mask = MSR_CE|MSR_ME|MSR_DE;
@@ -276,7 +272,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
if (update_esr == true)
- vcpu->arch.esr = vcpu->arch.queued_esr;
+ vcpu->arch.shared->esr = vcpu->arch.queued_esr;
if (update_dear == true)
vcpu->arch.shared->dar = vcpu->arch.queued_dear;
kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
@@ -288,13 +284,26 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
return allowed;
}
-/* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
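+/* Raise or clear the guest's decrementer interrupt to match TCR[DIE] and TSR[DIS] */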
+static void update_timer_ints(struct kvm_vcpu *vcpu)
+{
+ if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
+ kvmppc_core_queue_dec(vcpu);
+ else
+ kvmppc_core_dequeue_dec(vcpu);
+}
+
+static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
- unsigned long old_pending = vcpu->arch.pending_exceptions;
unsigned int priority;
+ if (vcpu->requests) {
+ if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
+ smp_mb();
+ update_timer_ints(vcpu);
+ }
+ }
+
priority = __ffs(*pending);
while (priority <= BOOKE_IRQPRIO_MAX) {
if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -306,10 +315,24 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
}
/* Tell the guest about our interrupt status */
- if (*pending)
- vcpu->arch.shared->int_pending = 1;
- else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
+ vcpu->arch.shared->int_pending = !!*pending;
+}
+
+/* Check pending exceptions and deliver one, if possible. */
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+{
+ WARN_ON_ONCE(!irqs_disabled());
+
+ kvmppc_core_check_exceptions(vcpu);
+
+ if (vcpu->arch.shared->msr & MSR_WE) {
+ local_irq_enable();
+ kvm_vcpu_block(vcpu);
+ local_irq_disable();
+
+ kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+ kvmppc_core_check_exceptions(vcpu);
+ }
}
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -322,11 +345,21 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
local_irq_disable();
+
+ kvmppc_core_prepare_to_enter(vcpu);
+
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ret = -EINTR;
+ goto out;
+ }
+
kvm_guest_enter();
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
kvm_guest_exit();
- local_irq_enable();
+out:
+ local_irq_enable();
return ret;
}
@@ -603,7 +636,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
local_irq_disable();
- kvmppc_core_deliver_interrupts(vcpu);
+ kvmppc_core_prepare_to_enter(vcpu);
if (!(r & RESUME_HOST)) {
/* To avoid clobbering exit_reason, only check for signals if
@@ -628,6 +661,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.pc = 0;
vcpu->arch.shared->msr = 0;
vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
+ vcpu->arch.shared->pir = vcpu->vcpu_id;
kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
vcpu->arch.shadow_pid = 1;
@@ -662,10 +696,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg1 = vcpu->arch.shared->sprg1;
regs->sprg2 = vcpu->arch.shared->sprg2;
regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg4 = vcpu->arch.sprg4;
- regs->sprg5 = vcpu->arch.sprg5;
- regs->sprg6 = vcpu->arch.sprg6;
- regs->sprg7 = vcpu->arch.sprg7;
+ regs->sprg4 = vcpu->arch.shared->sprg4;
+ regs->sprg5 = vcpu->arch.shared->sprg5;
+ regs->sprg6 = vcpu->arch.shared->sprg6;
+ regs->sprg7 = vcpu->arch.shared->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -690,10 +724,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.shared->sprg1 = regs->sprg1;
vcpu->arch.shared->sprg2 = regs->sprg2;
vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.sprg4 = regs->sprg4;
- vcpu->arch.sprg5 = regs->sprg5;
- vcpu->arch.sprg6 = regs->sprg6;
- vcpu->arch.sprg7 = regs->sprg7;
+ vcpu->arch.shared->sprg4 = regs->sprg4;
+ vcpu->arch.shared->sprg5 = regs->sprg5;
+ vcpu->arch.shared->sprg6 = regs->sprg6;
+ vcpu->arch.shared->sprg7 = regs->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -711,7 +745,7 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
sregs->u.e.csrr0 = vcpu->arch.csrr0;
sregs->u.e.csrr1 = vcpu->arch.csrr1;
sregs->u.e.mcsr = vcpu->arch.mcsr;
- sregs->u.e.esr = vcpu->arch.esr;
+ sregs->u.e.esr = vcpu->arch.shared->esr;
sregs->u.e.dear = vcpu->arch.shared->dar;
sregs->u.e.tsr = vcpu->arch.tsr;
sregs->u.e.tcr = vcpu->arch.tcr;
@@ -729,28 +763,19 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
vcpu->arch.csrr0 = sregs->u.e.csrr0;
vcpu->arch.csrr1 = sregs->u.e.csrr1;
vcpu->arch.mcsr = sregs->u.e.mcsr;
- vcpu->arch.esr = sregs->u.e.esr;
+ vcpu->arch.shared->esr = sregs->u.e.esr;
vcpu->arch.shared->dar = sregs->u.e.dear;
vcpu->arch.vrsave = sregs->u.e.vrsave;
- vcpu->arch.tcr = sregs->u.e.tcr;
+ kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
- if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC)
+ if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
vcpu->arch.dec = sregs->u.e.dec;
-
- kvmppc_emulate_dec(vcpu);
+ kvmppc_emulate_dec(vcpu);
+ }
if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
- /*
- * FIXME: existing KVM timer handling is incomplete.
- * TSR cannot be read by the guest, and its value in
- * vcpu->arch is always zero. For now, just handle
- * the case where the caller is trying to inject a
- * decrementer interrupt.
- */
-
- if ((sregs->u.e.tsr & TSR_DIS) &&
- (vcpu->arch.tcr & TCR_DIE))
- kvmppc_core_queue_dec(vcpu);
+ vcpu->arch.tsr = sregs->u.e.tsr;
+ update_timer_ints(vcpu);
}
return 0;
@@ -761,7 +786,7 @@ static void get_sregs_arch206(struct kvm_vcpu *vcpu,
{
sregs->u.e.features |= KVM_SREGS_E_ARCH206;
- sregs->u.e.pir = 0;
+ sregs->u.e.pir = vcpu->vcpu_id;
sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
sregs->u.e.decar = vcpu->arch.decar;
@@ -774,7 +799,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu,
if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
return 0;
- if (sregs->u.e.pir != 0)
+ if (sregs->u.e.pir != vcpu->vcpu_id)
return -EINVAL;
vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
@@ -862,6 +887,16 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return kvmppc_core_set_sregs(vcpu, sregs);
}
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ return -EINVAL;
+}
+
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ return -EINVAL;
+}
+
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOTSUPP;
@@ -906,6 +941,33 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}
+void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
+{
+ vcpu->arch.tcr = new_tcr;
+ update_timer_ints(vcpu);
+}
+
+void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
+{
+ set_bits(tsr_bits, &vcpu->arch.tsr);
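+ /* Make the TSR update visible before the vcpu is kicked to re-check its timers */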
+ smp_wmb();
+ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_vcpu_kick(vcpu);
+}
+
+void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
+{
+ clear_bits(tsr_bits, &vcpu->arch.tsr);
+ update_timer_ints(vcpu);
+}
+
+void kvmppc_decrementer_func(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+ kvmppc_set_tsr_bits(vcpu, TSR_DIS);
+}
+
int __init kvmppc_booke_init(void)
{
unsigned long ivor[16];
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 8e1fe33d64e5..2fe202705a3f 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -55,6 +55,10 @@ extern unsigned long kvmppc_booke_handlers;
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
+void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
+void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
+void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
+
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 1260f5f24c0c..3e652da36534 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
+ * Copyright 2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
@@ -107,7 +108,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_DEAR:
vcpu->arch.shared->dar = spr_val; break;
case SPRN_ESR:
- vcpu->arch.esr = spr_val; break;
+ vcpu->arch.shared->esr = spr_val; break;
case SPRN_DBCR0:
vcpu->arch.dbcr0 = spr_val; break;
case SPRN_DBCR1:
@@ -115,23 +116,23 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_DBSR:
vcpu->arch.dbsr &= ~spr_val; break;
case SPRN_TSR:
- vcpu->arch.tsr &= ~spr_val; break;
+ kvmppc_clr_tsr_bits(vcpu, spr_val);
+ break;
case SPRN_TCR:
- vcpu->arch.tcr = spr_val;
- kvmppc_emulate_dec(vcpu);
+ kvmppc_set_tcr(vcpu, spr_val);
break;
/* Note: SPRG4-7 are user-readable. These values are
* loaded into the real SPRGs when resuming the
* guest. */
case SPRN_SPRG4:
- vcpu->arch.sprg4 = spr_val; break;
+ vcpu->arch.shared->sprg4 = spr_val; break;
case SPRN_SPRG5:
- vcpu->arch.sprg5 = spr_val; break;
+ vcpu->arch.shared->sprg5 = spr_val; break;
case SPRN_SPRG6:
- vcpu->arch.sprg6 = spr_val; break;
+ vcpu->arch.shared->sprg6 = spr_val; break;
case SPRN_SPRG7:
- vcpu->arch.sprg7 = spr_val; break;
+ vcpu->arch.shared->sprg7 = spr_val; break;
case SPRN_IVPR:
vcpu->arch.ivpr = spr_val;
@@ -202,13 +203,17 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_DEAR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
case SPRN_ESR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
case SPRN_DBCR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
case SPRN_DBCR1:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
case SPRN_DBSR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
+ case SPRN_TSR:
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
+ case SPRN_TCR:
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
case SPRN_IVOR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 42f2fb1f66e9..10d8ef602e5c 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -402,19 +402,25 @@ lightweight_exit:
/* Save vcpu pointer for the exception handlers. */
mtspr SPRN_SPRG_WVCPU, r4
+ lwz r5, VCPU_SHARED(r4)
+
/* Can't switch the stack pointer until after IVPR is switched,
* because host interrupt handlers would get confused. */
lwz r1, VCPU_GPR(r1)(r4)
- /* Host interrupt handlers may have clobbered these guest-readable
- * SPRGs, so we need to reload them here with the guest's values. */
- lwz r3, VCPU_SPRG4(r4)
+ /*
+ * Host interrupt handlers may have clobbered these
+ * guest-readable SPRGs, or the guest kernel may have
+ * written directly to the shared area, so we
+ * need to reload them here with the guest's values.
+ */
+ lwz r3, VCPU_SHARED_SPRG4(r5)
mtspr SPRN_SPRG4W, r3
- lwz r3, VCPU_SPRG5(r4)
+ lwz r3, VCPU_SHARED_SPRG5(r5)
mtspr SPRN_SPRG5W, r3
- lwz r3, VCPU_SPRG6(r4)
+ lwz r3, VCPU_SHARED_SPRG6(r5)
mtspr SPRN_SPRG6W, r3
- lwz r3, VCPU_SPRG7(r4)
+ lwz r3, VCPU_SHARED_SPRG7(r5)
mtspr SPRN_SPRG7W, r3
#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 8c0d45a6faf7..ddcd896fa2ff 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -71,9 +71,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.pvr = mfspr(SPRN_PVR);
vcpu_e500->svr = mfspr(SPRN_SVR);
- /* Since booke kvm only support one core, update all vcpus' PIR to 0 */
- vcpu->vcpu_id = 0;
-
vcpu->arch.cpu_type = KVM_CPU_E500V2;
return 0;
@@ -118,12 +115,12 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
- sregs->u.e.mas0 = vcpu_e500->mas0;
- sregs->u.e.mas1 = vcpu_e500->mas1;
- sregs->u.e.mas2 = vcpu_e500->mas2;
- sregs->u.e.mas7_3 = ((u64)vcpu_e500->mas7 << 32) | vcpu_e500->mas3;
- sregs->u.e.mas4 = vcpu_e500->mas4;
- sregs->u.e.mas6 = vcpu_e500->mas6;
+ sregs->u.e.mas0 = vcpu->arch.shared->mas0;
+ sregs->u.e.mas1 = vcpu->arch.shared->mas1;
+ sregs->u.e.mas2 = vcpu->arch.shared->mas2;
+ sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
+ sregs->u.e.mas4 = vcpu->arch.shared->mas4;
+ sregs->u.e.mas6 = vcpu->arch.shared->mas6;
sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
@@ -151,13 +148,12 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
}
if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
- vcpu_e500->mas0 = sregs->u.e.mas0;
- vcpu_e500->mas1 = sregs->u.e.mas1;
- vcpu_e500->mas2 = sregs->u.e.mas2;
- vcpu_e500->mas7 = sregs->u.e.mas7_3 >> 32;
- vcpu_e500->mas3 = (u32)sregs->u.e.mas7_3;
- vcpu_e500->mas4 = sregs->u.e.mas4;
- vcpu_e500->mas6 = sregs->u.e.mas6;
+ vcpu->arch.shared->mas0 = sregs->u.e.mas0;
+ vcpu->arch.shared->mas1 = sregs->u.e.mas1;
+ vcpu->arch.shared->mas2 = sregs->u.e.mas2;
+ vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
+ vcpu->arch.shared->mas4 = sregs->u.e.mas4;
+ vcpu->arch.shared->mas6 = sregs->u.e.mas6;
}
if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
@@ -233,6 +229,10 @@ static int __init kvmppc_e500_init(void)
unsigned long ivor[3];
unsigned long max_ivor = 0;
+ r = kvmppc_core_check_processor_compat();
+ if (r)
+ return r;
+
r = kvmppc_booke_init();
if (r)
return r;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index d48ae396f41e..6d0b2bd54fb0 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -89,19 +89,23 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
return EMULATE_FAIL;
vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
- vcpu_e500->mas0 = spr_val; break;
+ vcpu->arch.shared->mas0 = spr_val; break;
case SPRN_MAS1:
- vcpu_e500->mas1 = spr_val; break;
+ vcpu->arch.shared->mas1 = spr_val; break;
case SPRN_MAS2:
- vcpu_e500->mas2 = spr_val; break;
+ vcpu->arch.shared->mas2 = spr_val; break;
case SPRN_MAS3:
- vcpu_e500->mas3 = spr_val; break;
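+ /* MAS3 is the low word of the 64-bit mas7_3; preserve MAS7 in the high word */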
+ vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
+ vcpu->arch.shared->mas7_3 |= spr_val;
+ break;
case SPRN_MAS4:
- vcpu_e500->mas4 = spr_val; break;
+ vcpu->arch.shared->mas4 = spr_val; break;
case SPRN_MAS6:
- vcpu_e500->mas6 = spr_val; break;
+ vcpu->arch.shared->mas6 = spr_val; break;
case SPRN_MAS7:
- vcpu_e500->mas7 = spr_val; break;
+ vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
+ vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
+ break;
case SPRN_L1CSR0:
vcpu_e500->l1csr0 = spr_val;
vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
@@ -143,6 +147,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
+ unsigned long val;
switch (sprn) {
case SPRN_PID:
@@ -152,20 +157,23 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_PID2:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
case SPRN_MAS0:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
case SPRN_MAS1:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
case SPRN_MAS2:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
case SPRN_MAS3:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
+ val = (u32)vcpu->arch.shared->mas7_3;
+ kvmppc_set_gpr(vcpu, rt, val);
+ break;
case SPRN_MAS4:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
case SPRN_MAS6:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
case SPRN_MAS7:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
-
+ val = vcpu->arch.shared->mas7_3 >> 32;
+ kvmppc_set_gpr(vcpu, rt, val);
+ break;
case SPRN_TLB0CFG:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
case SPRN_TLB1CFG:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 13c432ea2fa8..6e53e4164de1 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -12,12 +12,19 @@
* published by the Free Software Foundation.
*/
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
+#include <linux/log2.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>
@@ -26,7 +33,7 @@
#include "trace.h"
#include "timing.h"
-#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
+#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
struct id {
unsigned long val;
@@ -63,7 +70,14 @@ static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
* The valid range of shadow ID is [1..255] */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
-static unsigned int tlb1_entry_num;
+static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
+
+static struct kvm_book3e_206_tlb_entry *get_entry(
+ struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
+{
+ int offset = vcpu_e500->gtlb_offset[tlbsel];
+ return &vcpu_e500->gtlb_arch[offset + entry];
+}
/*
* Allocate a free shadow id and setup a valid sid mapping in given entry.
@@ -116,13 +130,11 @@ static inline int local_sid_lookup(struct id *entry)
return -1;
}
-/* Invalidate all id mappings on local core */
+/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
- preempt_disable();
__get_cpu_var(pcpu_last_used_sid) = 0;
memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
- preempt_enable();
}
static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -218,34 +230,13 @@ void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
preempt_enable();
}
-void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct tlbe *tlbe;
- int i, tlbsel;
-
- printk("| %8s | %8s | %8s | %8s | %8s |\n",
- "nr", "mas1", "mas2", "mas3", "mas7");
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++) {
- printk("Guest TLB%d:\n", tlbsel);
- for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
- tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
- if (tlbe->mas1 & MAS1_VALID)
- printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
- tlbsel, i, tlbe->mas1, tlbe->mas2,
- tlbe->mas3, tlbe->mas7);
- }
- }
-}
-
-static inline unsigned int tlb0_get_next_victim(
+static inline unsigned int gtlb0_get_next_victim(
struct kvmppc_vcpu_e500 *vcpu_e500)
{
unsigned int victim;
victim = vcpu_e500->gtlb_nv[0]++;
- if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
+ if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
vcpu_e500->gtlb_nv[0] = 0;
return victim;
@@ -254,12 +245,12 @@ static inline unsigned int tlb0_get_next_victim(
static inline unsigned int tlb1_max_shadow_size(void)
{
/* reserve one entry for magic page */
- return tlb1_entry_num - tlbcam_index - 1;
+ return host_tlb_params[1].entries - tlbcam_index - 1;
}
-static inline int tlbe_is_writable(struct tlbe *tlbe)
+static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
- return tlbe->mas3 & (MAS3_SW|MAS3_UW);
+ return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
@@ -290,40 +281,66 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
/*
* writing shadow tlb entry to host TLB
*/
-static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
+static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
+ uint32_t mas0)
{
unsigned long flags;
local_irq_save(flags);
mtspr(SPRN_MAS0, mas0);
mtspr(SPRN_MAS1, stlbe->mas1);
- mtspr(SPRN_MAS2, stlbe->mas2);
- mtspr(SPRN_MAS3, stlbe->mas3);
- mtspr(SPRN_MAS7, stlbe->mas7);
+ mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
+ mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
+ mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
asm volatile("isync; tlbwe" : : : "memory");
local_irq_restore(flags);
+
+ trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
+ stlbe->mas2, stlbe->mas7_3);
+}
+
+/*
+ * Acquire a mas0 with victim hint, as if we just took a TLB miss.
+ *
+ * We don't care about the address we're searching for, other than that it's
+ * in the right set and is not present in the TLB. Using a zero PID and a
+ * userspace address means we don't have to set and then restore MAS5, or
+ * calculate a proper MAS6 value.
+ */
+static u32 get_host_mas0(unsigned long eaddr)
+{
+ unsigned long flags;
+ u32 mas0;
+
+ local_irq_save(flags);
+ mtspr(SPRN_MAS6, 0);
+ asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
+ mas0 = mfspr(SPRN_MAS0);
+ local_irq_restore(flags);
+
+ return mas0;
}
+/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel, struct tlbe *stlbe)
+ int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
+ u32 mas0;
+
if (tlbsel == 0) {
- __write_host_tlbe(stlbe,
- MAS0_TLBSEL(0) |
- MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
+ mas0 = get_host_mas0(stlbe->mas2);
+ __write_host_tlbe(stlbe, mas0);
} else {
__write_host_tlbe(stlbe,
MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(esel)));
+ MAS0_ESEL(to_htlb1_esel(sesel)));
}
- trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
- stlbe->mas3, stlbe->mas7);
}
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct tlbe magic;
+ struct kvm_book3e_206_tlb_entry magic;
ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
unsigned int stid;
pfn_t pfn;
@@ -337,9 +354,9 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
MAS1_TSIZE(BOOK3E_PAGESZ_4K);
magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
- magic.mas3 = (pfn << PAGE_SHIFT) |
- MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
- magic.mas7 = pfn >> (32 - PAGE_SHIFT);
+ magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+ MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+ magic.mas8 = 0;
__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
preempt_enable();
@@ -357,10 +374,11 @@ void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
}
-static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int tlbsel, int esel)
{
- struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+ struct kvm_book3e_206_tlb_entry *gtlbe =
+ get_entry(vcpu_e500, tlbsel, esel);
struct vcpu_id_table *idt = vcpu_e500->idt;
unsigned int pr, tid, ts, pid;
u32 val, eaddr;
@@ -414,25 +432,57 @@ static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
preempt_enable();
}
+static int tlb0_set_base(gva_t addr, int sets, int ways)
+{
+ int set_base;
+
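+ /* The low EPN bits pick the set; each set is 'ways' entries wide */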
+ set_base = (addr >> PAGE_SHIFT) & (sets - 1);
+ set_base *= ways;
+
+ return set_base;
+}
+
+static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
+{
+ return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
+ vcpu_e500->gtlb_params[0].ways);
+}
+
+static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int esel = get_tlb_esel_bit(vcpu);
+
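+ /* For TLB0, MAS0[ESEL] selects the way; the set index comes from the EPN in MAS2 */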
+ if (tlbsel == 0) {
+ esel &= vcpu_e500->gtlb_params[0].ways - 1;
+ esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
+ } else {
+ esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
+ }
+
+ return esel;
+}
+
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
- int size = vcpu_e500->gtlb_size[tlbsel];
- int set_base;
+ int size = vcpu_e500->gtlb_params[tlbsel].entries;
+ unsigned int set_base, offset;
int i;
if (tlbsel == 0) {
- int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
- set_base = (eaddr >> PAGE_SHIFT) & mask;
- set_base *= KVM_E500_TLB0_WAY_NUM;
- size = KVM_E500_TLB0_WAY_NUM;
+ set_base = gtlb0_set_base(vcpu_e500, eaddr);
+ size = vcpu_e500->gtlb_params[0].ways;
} else {
set_base = 0;
}
+ offset = vcpu_e500->gtlb_offset[tlbsel];
+
for (i = 0; i < size; i++) {
- struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
+ struct kvm_book3e_206_tlb_entry *tlbe =
+ &vcpu_e500->gtlb_arch[offset + set_base + i];
unsigned int tid;
if (eaddr < get_tlb_eaddr(tlbe))
@@ -457,27 +507,55 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
return -1;
}
-static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
- struct tlbe *gtlbe,
- pfn_t pfn)
+static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+ struct kvm_book3e_206_tlb_entry *gtlbe,
+ pfn_t pfn)
{
- priv->pfn = pfn;
- priv->flags = E500_TLB_VALID;
+ ref->pfn = pfn;
+ ref->flags = E500_TLB_VALID;
if (tlbe_is_writable(gtlbe))
- priv->flags |= E500_TLB_DIRTY;
+ ref->flags |= E500_TLB_DIRTY;
}
-static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
+static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
- if (priv->flags & E500_TLB_VALID) {
- if (priv->flags & E500_TLB_DIRTY)
- kvm_release_pfn_dirty(priv->pfn);
+ if (ref->flags & E500_TLB_VALID) {
+ if (ref->flags & E500_TLB_DIRTY)
+ kvm_release_pfn_dirty(ref->pfn);
else
- kvm_release_pfn_clean(priv->pfn);
+ kvm_release_pfn_clean(ref->pfn);
+
+ ref->flags = 0;
+ }
+}
+
+static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ int tlbsel = 0;
+ int i;
+
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+ kvmppc_e500_ref_release(ref);
+ }
+}
+
+static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ int stlbsel = 1;
+ int i;
+
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
- priv->flags = 0;
+ for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->tlb_refs[stlbsel][i];
+ kvmppc_e500_ref_release(ref);
}
+
+ clear_tlb_privs(vcpu_e500);
}
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
@@ -488,59 +566,54 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
int tlbsel;
/* since we only have two TLBs, only lower bit is used. */
- tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
- victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
- pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
- tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
+ tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
+ victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
+ pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
+ tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
- vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+ vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
- vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
+ vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
| MAS1_TID(vcpu_e500->pid[pidsel])
| MAS1_TSIZE(tsized);
- vcpu_e500->mas2 = (eaddr & MAS2_EPN)
- | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
- vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
- vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
+ vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
+ | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
+ vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+ vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
| (get_cur_pid(vcpu) << 16)
| (as ? MAS6_SAS : 0);
- vcpu_e500->mas7 = 0;
}
-static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- struct tlbe *gtlbe, int tsize,
- struct tlbe_priv *priv,
- u64 gvaddr, struct tlbe *stlbe)
+/* TID must be supplied by the caller */
+static inline void kvmppc_e500_setup_stlbe(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe,
+ int tsize, struct tlbe_ref *ref, u64 gvaddr,
+ struct kvm_book3e_206_tlb_entry *stlbe)
{
- pfn_t pfn = priv->pfn;
- unsigned int stid;
+ pfn_t pfn = ref->pfn;
- stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
- get_tlb_tid(gtlbe),
- get_cur_pr(&vcpu_e500->vcpu), 0);
+ BUG_ON(!(ref->flags & E500_TLB_VALID));
/* Force TS=1 IPROT=0 for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(tsize)
- | MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
+ stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN)
| e500_shadow_mas2_attrib(gtlbe->mas2,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
- | e500_shadow_mas3_attrib(gtlbe->mas3,
+ stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT)
+ | e500_shadow_mas3_attrib(gtlbe->mas7_3,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
}
-
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
- struct tlbe *stlbe)
+ u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+ int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
+ struct tlbe_ref *ref)
{
struct kvm_memory_slot *slot;
unsigned long pfn, hva;
int pfnmap = 0;
int tsize = BOOK3E_PAGESZ_4K;
- struct tlbe_priv *priv;
/*
* Translate guest physical to true physical, acquiring
@@ -621,12 +694,31 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
pfn &= ~(tsize_pages - 1);
break;
}
+ } else if (vma && hva >= vma->vm_start &&
+ (vma->vm_flags & VM_HUGETLB)) {
+ unsigned long psize = vma_kernel_pagesize(vma);
+
+ tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+ MAS1_TSIZE_SHIFT;
+
+ /*
+ * Take the largest page size that satisfies both host
+ * and guest mapping
+ */
+ tsize = min(__ilog2(psize) - 10, tsize);
+
+ /*
+ * e500 doesn't implement the lowest tsize bit,
+ * or 1K pages.
+ */
+ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
}
up_read(&current->mm->mmap_sem);
}
if (likely(!pfnmap)) {
+ unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
if (is_error_pfn(pfn)) {
printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
@@ -634,45 +726,52 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
kvm_release_pfn_clean(pfn);
return;
}
+
+ /* Align guest and physical address to page map boundaries */
+ pfn &= ~(tsize_pages - 1);
+ gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
- /* Drop old priv and setup new one. */
- priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- kvmppc_e500_priv_release(priv);
- kvmppc_e500_priv_setup(priv, gtlbe, pfn);
+ /* Drop old ref and setup new one. */
+ kvmppc_e500_ref_release(ref);
+ kvmppc_e500_ref_setup(ref, gtlbe, pfn);
- kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
+ kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
}
/* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- int esel, struct tlbe *stlbe)
+static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int esel,
+ struct kvm_book3e_206_tlb_entry *stlbe)
{
- struct tlbe *gtlbe;
+ struct kvm_book3e_206_tlb_entry *gtlbe;
+ struct tlbe_ref *ref;
- gtlbe = &vcpu_e500->gtlb_arch[0][esel];
+ gtlbe = get_entry(vcpu_e500, 0, esel);
+ ref = &vcpu_e500->gtlb_priv[0][esel].ref;
kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
- gtlbe, 0, esel, stlbe);
-
- return esel;
+ gtlbe, 0, stlbe, ref);
}
/* Caller must ensure that the specified guest TLB entry is safe to insert into
* the shadow TLB. */
/* XXX for both one-one and one-to-many , for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
+ u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+ struct kvm_book3e_206_tlb_entry *stlbe)
{
+ struct tlbe_ref *ref;
unsigned int victim;
- victim = vcpu_e500->gtlb_nv[1]++;
+ victim = vcpu_e500->host_tlb1_nv++;
- if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
- vcpu_e500->gtlb_nv[1] = 0;
+ if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
+ vcpu_e500->host_tlb1_nv = 0;
- kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);
+ ref = &vcpu_e500->tlb_refs[1][victim];
+ kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
return victim;
}
@@ -689,7 +788,8 @@ static inline int kvmppc_e500_gtlbe_invalidate(
struct kvmppc_vcpu_e500 *vcpu_e500,
int tlbsel, int esel)
{
- struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+ struct kvm_book3e_206_tlb_entry *gtlbe =
+ get_entry(vcpu_e500, tlbsel, esel);
if (unlikely(get_tlb_iprot(gtlbe)))
return -1;
@@ -704,10 +804,10 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
int esel;
if (value & MMUCSR0_TLB0FI)
- for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
if (value & MMUCSR0_TLB1FI)
- for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
/* Invalidate all vcpu id mappings */
@@ -732,7 +832,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
if (ia) {
/* invalidate all entries */
- for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
+ esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
} else {
ea &= 0xfffff000;
@@ -752,18 +853,17 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int tlbsel, esel;
- struct tlbe *gtlbe;
+ struct kvm_book3e_206_tlb_entry *gtlbe;
- tlbsel = get_tlb_tlbsel(vcpu_e500);
- esel = get_tlb_esel(vcpu_e500, tlbsel);
+ tlbsel = get_tlb_tlbsel(vcpu);
+ esel = get_tlb_esel(vcpu, tlbsel);
- gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
- vcpu_e500->mas0 &= ~MAS0_NV(~0);
- vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
- vcpu_e500->mas1 = gtlbe->mas1;
- vcpu_e500->mas2 = gtlbe->mas2;
- vcpu_e500->mas3 = gtlbe->mas3;
- vcpu_e500->mas7 = gtlbe->mas7;
+ gtlbe = get_entry(vcpu_e500, tlbsel, esel);
+ vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
+ vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
+ vcpu->arch.shared->mas1 = gtlbe->mas1;
+ vcpu->arch.shared->mas2 = gtlbe->mas2;
+ vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
return EMULATE_DONE;
}
@@ -771,10 +871,10 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int as = !!get_cur_sas(vcpu_e500);
- unsigned int pid = get_cur_spid(vcpu_e500);
+ int as = !!get_cur_sas(vcpu);
+ unsigned int pid = get_cur_spid(vcpu);
int esel, tlbsel;
- struct tlbe *gtlbe = NULL;
+ struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
gva_t ea;
ea = kvmppc_get_gpr(vcpu, rb);
@@ -782,70 +882,90 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
if (esel >= 0) {
- gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+ gtlbe = get_entry(vcpu_e500, tlbsel, esel);
break;
}
}
if (gtlbe) {
- vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
+ esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;
+
+ vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
- vcpu_e500->mas1 = gtlbe->mas1;
- vcpu_e500->mas2 = gtlbe->mas2;
- vcpu_e500->mas3 = gtlbe->mas3;
- vcpu_e500->mas7 = gtlbe->mas7;
+ vcpu->arch.shared->mas1 = gtlbe->mas1;
+ vcpu->arch.shared->mas2 = gtlbe->mas2;
+ vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
} else {
int victim;
/* since we only have two TLBs, only lower bit is used. */
- tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
- victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
+ tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
+ victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
- vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+ vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
+ | MAS0_ESEL(victim)
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
- vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
- | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
- | (vcpu_e500->mas4 & MAS4_TSIZED(~0));
- vcpu_e500->mas2 &= MAS2_EPN;
- vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
- vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
- vcpu_e500->mas7 = 0;
+ vcpu->arch.shared->mas1 =
+ (vcpu->arch.shared->mas6 & MAS6_SPID0)
+ | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
+ | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
+ vcpu->arch.shared->mas2 &= MAS2_EPN;
+ vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
+ MAS2_ATTRIB_MASK;
+ vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
+ MAS3_U2 | MAS3_U3;
}
kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
return EMULATE_DONE;
}
+/* sesel is for tlb1 only */
+static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe,
+ struct kvm_book3e_206_tlb_entry *stlbe,
+ int stlbsel, int sesel)
+{
+ int stid;
+
+ preempt_disable();
+ stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
+ get_tlb_tid(gtlbe),
+ get_cur_pr(&vcpu_e500->vcpu), 0);
+
+ stlbe->mas1 |= MAS1_TID(stid);
+ write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
+ preempt_enable();
+}
+
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct tlbe *gtlbe;
+ struct kvm_book3e_206_tlb_entry *gtlbe;
int tlbsel, esel;
- tlbsel = get_tlb_tlbsel(vcpu_e500);
- esel = get_tlb_esel(vcpu_e500, tlbsel);
+ tlbsel = get_tlb_tlbsel(vcpu);
+ esel = get_tlb_esel(vcpu, tlbsel);
- gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+ gtlbe = get_entry(vcpu_e500, tlbsel, esel);
if (get_tlb_v(gtlbe))
- kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
+ inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
- gtlbe->mas1 = vcpu_e500->mas1;
- gtlbe->mas2 = vcpu_e500->mas2;
- gtlbe->mas3 = vcpu_e500->mas3;
- gtlbe->mas7 = vcpu_e500->mas7;
+ gtlbe->mas1 = vcpu->arch.shared->mas1;
+ gtlbe->mas2 = vcpu->arch.shared->mas2;
+ gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
- trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
- gtlbe->mas3, gtlbe->mas7);
+ trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
+ gtlbe->mas2, gtlbe->mas7_3);
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
- struct tlbe stlbe;
+ struct kvm_book3e_206_tlb_entry stlbe;
int stlbsel, sesel;
u64 eaddr;
u64 raddr;
- preempt_disable();
switch (tlbsel) {
case 0:
/* TLB0 */
@@ -853,7 +973,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
stlbsel = 0;
- sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+ kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+ sesel = 0; /* unused */
break;
@@ -874,8 +995,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
default:
BUG();
}
- write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
- preempt_enable();
+
+ write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -914,9 +1035,11 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
gva_t eaddr)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct tlbe *gtlbe =
- &vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
- u64 pgmask = get_tlb_bytes(gtlbe) - 1;
+ struct kvm_book3e_206_tlb_entry *gtlbe;
+ u64 pgmask;
+
+ gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
+ pgmask = get_tlb_bytes(gtlbe) - 1;
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
@@ -930,22 +1053,21 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
struct tlbe_priv *priv;
- struct tlbe *gtlbe, stlbe;
+ struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
int tlbsel = tlbsel_of(index);
int esel = esel_of(index);
int stlbsel, sesel;
- gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+ gtlbe = get_entry(vcpu_e500, tlbsel, esel);
- preempt_disable();
switch (tlbsel) {
case 0:
stlbsel = 0;
- sesel = esel;
- priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];
+ sesel = 0; /* unused */
+ priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
- priv, eaddr, &stlbe);
+ &priv->ref, eaddr, &stlbe);
break;
case 1: {
@@ -962,8 +1084,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
break;
}
- write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
- preempt_enable();
+ write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
@@ -993,85 +1114,279 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- struct tlbe *tlbe;
+ struct kvm_book3e_206_tlb_entry *tlbe;
/* Insert large initial mapping for guest. */
- tlbe = &vcpu_e500->gtlb_arch[1][0];
+ tlbe = get_entry(vcpu_e500, 1, 0);
tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
tlbe->mas2 = 0;
- tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
- tlbe->mas7 = 0;
+ tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
/* 4K map for serial output. Used by kernel wrapper. */
- tlbe = &vcpu_e500->gtlb_arch[1][1];
+ tlbe = get_entry(vcpu_e500, 1, 1);
tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
- tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
- tlbe->mas7 = 0;
+ tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
+}
+
+static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ int i;
+
+ clear_tlb_refs(vcpu_e500);
+ kfree(vcpu_e500->gtlb_priv[0]);
+ kfree(vcpu_e500->gtlb_priv[1]);
+
+ if (vcpu_e500->shared_tlb_pages) {
+ vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
+ PAGE_SIZE)));
+
+ for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
+ set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
+ put_page(vcpu_e500->shared_tlb_pages[i]);
+ }
+
+ vcpu_e500->num_shared_tlb_pages = 0;
+ vcpu_e500->shared_tlb_pages = NULL;
+ } else {
+ kfree(vcpu_e500->gtlb_arch);
+ }
+
+ vcpu_e500->gtlb_arch = NULL;
+}
+
+int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_config_tlb *cfg)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct kvm_book3e_206_tlb_params params;
+ char *virt;
+ struct page **pages;
+ struct tlbe_priv *privs[2] = {};
+ size_t array_len;
+ u32 sets;
+ int num_pages, ret, i;
+
+ if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
+ return -EINVAL;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
+ sizeof(params)))
+ return -EFAULT;
+
+ if (params.tlb_sizes[1] > 64)
+ return -EINVAL;
+ if (params.tlb_ways[1] != params.tlb_sizes[1])
+ return -EINVAL;
+ if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
+ return -EINVAL;
+ if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
+ return -EINVAL;
+
+ if (!is_power_of_2(params.tlb_ways[0]))
+ return -EINVAL;
+
+ sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
+ if (!is_power_of_2(sets))
+ return -EINVAL;
+
+ array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
+ array_len *= sizeof(struct kvm_book3e_206_tlb_entry);
+
+ if (cfg->array_len < array_len)
+ return -EINVAL;
+
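+ /* Pin the userspace pages backing the shared TLB array so they can be vmapped */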
+ num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
+ cfg->array / PAGE_SIZE;
+ pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
+ if (ret < 0)
+ goto err_pages;
+
+ if (ret != num_pages) {
+ num_pages = ret;
+ ret = -EFAULT;
+ goto err_put_page;
+ }
+
+ virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
+ if (!virt) {
+ ret = -ENOMEM;
+ goto err_put_page;
+ }
+
+ privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
+ GFP_KERNEL);
+ privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
+ GFP_KERNEL);
+
+ if (!privs[0] || !privs[1]) {
+ ret = -ENOMEM;
+ goto err_put_page;
+ }
+
+ free_gtlb(vcpu_e500);
+
+ vcpu_e500->gtlb_priv[0] = privs[0];
+ vcpu_e500->gtlb_priv[1] = privs[1];
+
+ vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
+ (virt + (cfg->array & (PAGE_SIZE - 1)));
+
+ vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
+ vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];
+
+ vcpu_e500->gtlb_offset[0] = 0;
+ vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
+
+ vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ if (params.tlb_sizes[0] <= 2048)
+ vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
+ vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
+
+ vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
+ vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
+
+ vcpu_e500->shared_tlb_pages = pages;
+ vcpu_e500->num_shared_tlb_pages = num_pages;
+
+ vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
+ vcpu_e500->gtlb_params[0].sets = sets;
+
+ vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
+ vcpu_e500->gtlb_params[1].sets = 1;
+
+ return 0;
+
+err_put_page:
+ kfree(privs[0]);
+ kfree(privs[1]);
+
+ for (i = 0; i < num_pages; i++)
+ put_page(pages[i]);
+
+err_pages:
+ kfree(pages);
+ return ret;
+}
+
+int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_dirty_tlb *dirty)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
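+ /* Userspace may have written the shared TLB array directly, so drop any stale host mappings */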
+ clear_tlb_refs(vcpu_e500);
+ return 0;
}
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;
-
- vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
- vcpu_e500->gtlb_arch[0] =
- kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->gtlb_arch[0] == NULL)
- goto err_out;
-
- vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
- vcpu_e500->gtlb_arch[1] =
- kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
- if (vcpu_e500->gtlb_arch[1] == NULL)
- goto err_out_guest0;
-
- vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
- kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->gtlb_priv[0] == NULL)
- goto err_out_guest1;
- vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
- kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
-
- if (vcpu_e500->gtlb_priv[1] == NULL)
- goto err_out_priv0;
+ int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
+ int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
+
+ host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
+ host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+
+ /*
+ * This should never happen on real e500 hardware, but is
+ * architecturally possible -- e.g. in some weird nested
+ * virtualization case.
+ */
+ if (host_tlb_params[0].entries == 0 ||
+ host_tlb_params[1].entries == 0) {
+ pr_err("%s: need to know host tlb size\n", __func__);
+ return -ENODEV;
+ }
+
+ host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
+ TLBnCFG_ASSOC_SHIFT;
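+ /* TLB1 is fully associative: a single set whose way count equals its entry count */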
+ host_tlb_params[1].ways = host_tlb_params[1].entries;
+
+ if (!is_power_of_2(host_tlb_params[0].entries) ||
+ !is_power_of_2(host_tlb_params[0].ways) ||
+ host_tlb_params[0].entries < host_tlb_params[0].ways ||
+ host_tlb_params[0].ways == 0) {
+ pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
+ __func__, host_tlb_params[0].entries,
+ host_tlb_params[0].ways);
+ return -ENODEV;
+ }
+
+ host_tlb_params[0].sets =
+ host_tlb_params[0].entries / host_tlb_params[0].ways;
+ host_tlb_params[1].sets = 1;
+
+ vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
+ vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
+
+ vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
+ vcpu_e500->gtlb_params[0].sets =
+ KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;
+
+ vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
+ vcpu_e500->gtlb_params[1].sets = 1;
+
+ vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
+ if (!vcpu_e500->gtlb_arch)
+ return -ENOMEM;
+
+ vcpu_e500->gtlb_offset[0] = 0;
+ vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
+
+ vcpu_e500->tlb_refs[0] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[0])
+ goto err;
+
+ vcpu_e500->tlb_refs[1] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[1])
+ goto err;
+
+ vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
+ vcpu_e500->gtlb_params[0].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->gtlb_priv[0])
+ goto err;
+
+ vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
+ vcpu_e500->gtlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->gtlb_priv[1])
+ goto err;
if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
- goto err_out_priv1;
+ goto err;
/* Init TLB configuration register */
- vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
- vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
- vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
- vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];
+ vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
+ ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
+ vcpu_e500->tlb0cfg |=
+ vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
+
+ vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
+ ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_params[1].entries;
+ vcpu_e500->tlb1cfg |=
+ vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
return 0;
-err_out_priv1:
- kfree(vcpu_e500->gtlb_priv[1]);
-err_out_priv0:
- kfree(vcpu_e500->gtlb_priv[0]);
-err_out_guest1:
- kfree(vcpu_e500->gtlb_arch[1]);
-err_out_guest0:
- kfree(vcpu_e500->gtlb_arch[0]);
-err_out:
+err:
+ free_gtlb(vcpu_e500);
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- int stlbsel, i;
-
- /* release all privs */
- for (stlbsel = 0; stlbsel < 2; stlbsel++)
- for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
- struct tlbe_priv *priv =
- &vcpu_e500->gtlb_priv[stlbsel][i];
- kvmppc_e500_priv_release(priv);
- }
-
+ free_gtlb(vcpu_e500);
kvmppc_e500_id_table_free(vcpu_e500);
- kfree(vcpu_e500->gtlb_arch[1]);
- kfree(vcpu_e500->gtlb_arch[0]);
+
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 59b88e99a235..5c6d2d7bf058 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -20,13 +20,9 @@
#include <asm/tlb.h>
#include <asm/kvm_e500.h>
-#define KVM_E500_TLB0_WAY_SIZE_BIT 7 /* Fixed */
-#define KVM_E500_TLB0_WAY_SIZE (1UL << KVM_E500_TLB0_WAY_SIZE_BIT)
-#define KVM_E500_TLB0_WAY_SIZE_MASK (KVM_E500_TLB0_WAY_SIZE - 1)
-
-#define KVM_E500_TLB0_WAY_NUM_BIT 1 /* No greater than 7 */
-#define KVM_E500_TLB0_WAY_NUM (1UL << KVM_E500_TLB0_WAY_NUM_BIT)
-#define KVM_E500_TLB0_WAY_NUM_MASK (KVM_E500_TLB0_WAY_NUM - 1)
+/* This geometry is the legacy default -- can be overridden by userspace */
+#define KVM_E500_TLB0_WAY_SIZE 128
+#define KVM_E500_TLB0_WAY_NUM 2
#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE 16
@@ -58,50 +54,54 @@ extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
/* TLB helper functions */
-static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
{
return (tlbe->mas1 >> 7) & 0x1f;
}
-static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
+static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
return tlbe->mas2 & 0xfffff000;
}
-static inline u64 get_tlb_bytes(const struct tlbe *tlbe)
+static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
{
unsigned int pgsize = get_tlb_size(tlbe);
return 1ULL << 10 << pgsize;
}
-static inline gva_t get_tlb_end(const struct tlbe *tlbe)
+static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
{
u64 bytes = get_tlb_bytes(tlbe);
return get_tlb_eaddr(tlbe) + bytes - 1;
}
-static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
+static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
- u64 rpn = tlbe->mas7;
- return (rpn << 32) | (tlbe->mas3 & 0xfffff000);
+ return tlbe->mas7_3 & ~0xfffULL;
}
-static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
{
return (tlbe->mas1 >> 16) & 0xff;
}
-static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
{
return (tlbe->mas1 >> 12) & 0x1;
}
-static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
{
return (tlbe->mas1 >> 31) & 0x1;
}
-static inline unsigned int get_tlb_iprot(const struct tlbe *tlbe)
+static inline unsigned int
+get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
{
return (tlbe->mas1 >> 30) & 0x1;
}
@@ -121,59 +121,37 @@ static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
return !!(vcpu->arch.shared->msr & MSR_PR);
}
-static inline unsigned int get_cur_spid(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
{
- return (vcpu_e500->mas6 >> 16) & 0xff;
+ return (vcpu->arch.shared->mas6 >> 16) & 0xff;
}
-static inline unsigned int get_cur_sas(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
- return vcpu_e500->mas6 & 0x1;
+ return vcpu->arch.shared->mas6 & 0x1;
}
-static inline unsigned int get_tlb_tlbsel(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
{
/*
* Manual says that tlbsel has 2 bits wide.
* Since we only have two TLBs, only lower bit is used.
*/
- return (vcpu_e500->mas0 >> 28) & 0x1;
-}
-
-static inline unsigned int get_tlb_nv_bit(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- return vcpu_e500->mas0 & 0xfff;
+ return (vcpu->arch.shared->mas0 >> 28) & 0x1;
}
-static inline unsigned int get_tlb_esel_bit(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
- return (vcpu_e500->mas0 >> 16) & 0xfff;
+ return vcpu->arch.shared->mas0 & 0xfff;
}
-static inline unsigned int get_tlb_esel(
- const struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel)
+static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
{
- unsigned int esel = get_tlb_esel_bit(vcpu_e500);
-
- if (tlbsel == 0) {
- esel &= KVM_E500_TLB0_WAY_NUM_MASK;
- esel |= ((vcpu_e500->mas2 >> 12) & KVM_E500_TLB0_WAY_SIZE_MASK)
- << KVM_E500_TLB0_WAY_NUM_BIT;
- } else {
- esel &= KVM_E500_TLB1_SIZE - 1;
- }
-
- return esel;
+ return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
}
static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
- const struct tlbe *tlbe)
+ const struct kvm_book3e_206_tlb_entry *tlbe)
{
gpa_t gpa;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 141dce3c6810..968f40101883 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
@@ -69,54 +70,55 @@
#define OP_STH 44
#define OP_STHU 45
-#ifdef CONFIG_PPC_BOOK3S
-static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
-{
- return 1;
-}
-#else
-static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.tcr & TCR_DIE;
-}
-#endif
-
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
unsigned long dec_nsec;
+ unsigned long long dec_time;
pr_debug("mtDEC: %x\n", vcpu->arch.dec);
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+
#ifdef CONFIG_PPC_BOOK3S
/* mtdec lowers the interrupt line when positive. */
kvmppc_core_dequeue_dec(vcpu);
/* POWER4+ triggers a dec interrupt if the value is < 0 */
if (vcpu->arch.dec & 0x80000000) {
- hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
kvmppc_core_queue_dec(vcpu);
return;
}
#endif
- if (kvmppc_dec_enabled(vcpu)) {
- /* The decrementer ticks at the same rate as the timebase, so
- * that's how we convert the guest DEC value to the number of
- * host ticks. */
-
- hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
- dec_nsec = vcpu->arch.dec;
- dec_nsec *= 1000;
- dec_nsec /= tb_ticks_per_usec;
- hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
- HRTIMER_MODE_REL);
- vcpu->arch.dec_jiffies = get_tb();
- } else {
- hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
- }
+
+#ifdef CONFIG_BOOKE
+ /* On BOOKE, DEC = 0 is as good as decrementer not enabled */
+ if (vcpu->arch.dec == 0)
+ return;
+#endif
+
+ /*
+ * The decrementer ticks at the same rate as the timebase, so
+ * that's how we convert the guest DEC value to the number of
+ * host ticks.
+ */
+
+ dec_time = vcpu->arch.dec;
+ dec_time *= 1000;
+ do_div(dec_time, tb_ticks_per_usec);
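+ /* dec_time is now in ns; split it into whole seconds plus a ns remainder for ktime_set() */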
+ dec_nsec = do_div(dec_time, NSEC_PER_SEC);
+ hrtimer_start(&vcpu->arch.dec_timer,
+ ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
+ vcpu->arch.dec_jiffies = get_tb();
}
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
u64 jd = tb - vcpu->arch.dec_jiffies;
+
+#ifdef CONFIG_BOOKE
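+ /* The BookE decrementer stops at zero instead of going negative */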
+ if (vcpu->arch.dec < jd)
+ return 0;
+#endif
+
return vcpu->arch.dec - jd;
}
@@ -159,7 +161,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case OP_TRAP_64:
kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
- kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
+ kvmppc_core_queue_program(vcpu,
+ vcpu->arch.shared->esr | ESR_PTR);
#endif
advance = 0;
break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 607fbdf24b84..00d7e345b3fe 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -39,7 +39,8 @@
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
return !(v->arch.shared->msr & MSR_WE) ||
- !!(v->arch.pending_exceptions);
+ !!(v->arch.pending_exceptions) ||
+ v->requests;
}
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -66,7 +67,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
vcpu->arch.magic_page_pa = param1;
vcpu->arch.magic_page_ea = param2;
- r2 = KVM_MAGIC_FEAT_SR;
+ r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
r = HC_EV_SUCCESS;
break;
@@ -171,8 +172,11 @@ void kvm_arch_check_processor_compat(void *rtn)
*(int *)rtn = kvmppc_core_check_processor_compat();
}
-int kvm_arch_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
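+ /* No VM types are defined for PPC yet, so only type 0 is accepted */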
+ if (type)
+ return -EINVAL;
+
return kvmppc_core_init_vm(kvm);
}
@@ -208,17 +212,22 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PPC_BOOKE_SREGS:
#else
case KVM_CAP_PPC_SEGSTATE:
+ case KVM_CAP_PPC_HIOR:
case KVM_CAP_PPC_PAPR:
#endif
case KVM_CAP_PPC_UNSET_IRQ:
case KVM_CAP_PPC_IRQ_LEVEL:
case KVM_CAP_ENABLE_CAP:
+ case KVM_CAP_ONE_REG:
r = 1;
break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
+#ifdef CONFIG_KVM_E500
+ case KVM_CAP_SW_TLB:
+#endif
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -238,7 +247,26 @@ int kvm_dev_ioctl_check_extension(long ext)
if (cpu_has_feature(CPU_FTR_ARCH_201))
r = 2;
break;
+ case KVM_CAP_SYNC_MMU:
+ r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+ break;
#endif
+ case KVM_CAP_NR_VCPUS:
+ /*
+ * Recommending a number of CPUs is somewhat arbitrary; we
+ * return the number of present CPUs for -HV (since a host
+ * will have secondary threads "offline"), and for other KVM
+ * implementations just count online CPUs.
+ */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ r = num_present_cpus();
+#else
+ r = num_online_cpus();
+#endif
+ break;
+ case KVM_CAP_MAX_VCPUS:
+ r = KVM_MAX_VCPUS;
+ break;
default:
r = 0;
break;
@@ -253,6 +281,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+ return 0;
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
@@ -279,9 +317,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvm_vcpu *vcpu;
vcpu = kvmppc_core_vcpu_create(kvm, id);
- vcpu->arch.wqp = &vcpu->wq;
- if (!IS_ERR(vcpu))
+ if (!IS_ERR(vcpu)) {
+ vcpu->arch.wqp = &vcpu->wq;
kvmppc_create_vcpu_debugfs(vcpu, id);
+ }
return vcpu;
}
@@ -305,18 +344,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvmppc_core_pending_dec(vcpu);
}
-static void kvmppc_decrementer_func(unsigned long data)
-{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
- kvmppc_core_queue_dec(vcpu);
-
- if (waitqueue_active(vcpu->arch.wqp)) {
- wake_up_interruptible(vcpu->arch.wqp);
- vcpu->stat.halt_wakeup++;
- }
-}
-
/*
* low level hrtimer wake routine. Because this runs in hardirq context
* we schedule a tasklet to do the real work.
@@ -431,20 +458,20 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
- switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
- case KVM_REG_GPR:
+ switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
+ case KVM_MMIO_REG_GPR:
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
break;
- case KVM_REG_FPR:
- vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ case KVM_MMIO_REG_FPR:
+ vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#ifdef CONFIG_PPC_BOOK3S
- case KVM_REG_QPR:
- vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ case KVM_MMIO_REG_QPR:
+ vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
- case KVM_REG_FQPR:
- vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
- vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
+ case KVM_MMIO_REG_FQPR:
+ vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+ vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
default:
@@ -553,8 +580,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu->arch.hcall_needed = 0;
}
- kvmppc_core_deliver_interrupts(vcpu);
-
r = kvmppc_vcpu_run(run, vcpu);
if (vcpu->sigset_active)
@@ -563,6 +588,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
return r;
}
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ int me;
+ int cpu = vcpu->cpu;
+
+ me = get_cpu();
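+ /* Wake the vcpu if it is sleeping; otherwise IPI its CPU (unless that is us) to force an exit */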
+ if (waitqueue_active(vcpu->arch.wqp)) {
+ wake_up_interruptible(vcpu->arch.wqp);
+ vcpu->stat.halt_wakeup++;
+ } else if (cpu != me && cpu != -1) {
+ smp_send_reschedule(vcpu->cpu);
+ }
+ put_cpu();
+}
+
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -571,13 +611,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
}
kvmppc_core_queue_external(vcpu, irq);
-
- if (waitqueue_active(vcpu->arch.wqp)) {
- wake_up_interruptible(vcpu->arch.wqp);
- vcpu->stat.halt_wakeup++;
- } else if (vcpu->cpu != -1) {
- smp_send_reschedule(vcpu->cpu);
- }
+ kvm_vcpu_kick(vcpu);
return 0;
}
@@ -599,6 +633,19 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = 0;
vcpu->arch.papr_enabled = true;
break;
+#ifdef CONFIG_KVM_E500
+ case KVM_CAP_SW_TLB: {
+ struct kvm_config_tlb cfg;
+ void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
+
+ r = -EFAULT;
+ if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
+ break;
+
+ r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
+ break;
+ }
+#endif
default:
r = -EINVAL;
break;
@@ -648,6 +695,32 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
+
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG:
+ {
+ struct kvm_one_reg reg;
+ r = -EFAULT;
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ goto out;
+ if (ioctl == KVM_SET_ONE_REG)
+ r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
+ else
+ r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
+ break;
+ }
+
+#ifdef CONFIG_KVM_E500
+ case KVM_DIRTY_TLB: {
+ struct kvm_dirty_tlb dirty;
+ r = -EFAULT;
+ if (copy_from_user(&dirty, argp, sizeof(dirty)))
+ goto out;
+ r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
+ break;
+ }
+#endif
+
default:
r = -EINVAL;
}
@@ -656,6 +729,11 @@ out:
return r;
}
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+ return VM_FAULT_SIGBUS;
+}
+
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
u32 inst_lis = 0x3c000000;
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index b135d3d397db..877186b7b1c3 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -118,11 +118,14 @@ TRACE_EVENT(kvm_book3s_exit,
),
TP_fast_assign(
+ struct kvmppc_book3s_shadow_vcpu *svcpu;
__entry->exit_nr = exit_nr;
__entry->pc = kvmppc_get_pc(vcpu);
__entry->dar = kvmppc_get_fault_dar(vcpu);
__entry->msr = vcpu->arch.shared->msr;
- __entry->srr1 = to_svcpu(vcpu)->shadow_srr1;
+ svcpu = svcpu_get(vcpu);
+ __entry->srr1 = svcpu->shadow_srr1;
+ svcpu_put(svcpu);
),
TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx",
@@ -337,6 +340,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
#endif /* CONFIG_PPC_BOOK3S */
+
+/*************************************************************************
+ * Book3E trace points *
+ *************************************************************************/
+
+#ifdef CONFIG_BOOKE
+
+TRACE_EVENT(kvm_booke206_stlb_write,
+ TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+ TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+ TP_STRUCT__entry(
+ __field( __u32, mas0 )
+ __field( __u32, mas8 )
+ __field( __u32, mas1 )
+ __field( __u64, mas2 )
+ __field( __u64, mas7_3 )
+ ),
+
+ TP_fast_assign(
+ __entry->mas0 = mas0;
+ __entry->mas8 = mas8;
+ __entry->mas1 = mas1;
+ __entry->mas2 = mas2;
+ __entry->mas7_3 = mas7_3;
+ ),
+
+ TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+ __entry->mas0, __entry->mas8, __entry->mas1,
+ __entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+ TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+ TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+ TP_STRUCT__entry(
+ __field( __u32, mas0 )
+ __field( __u32, mas1 )
+ __field( __u64, mas2 )
+ __field( __u64, mas7_3 )
+ ),
+
+ TP_fast_assign(
+ __entry->mas0 = mas0;
+ __entry->mas1 = mas1;
+ __entry->mas2 = mas2;
+ __entry->mas7_3 = mas7_3;
+ ),
+
+ TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+ __entry->mas0, __entry->mas1,
+ __entry->mas2, __entry->mas7_3)
+);
+
+#endif
+
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index a6ebba56fdd4..bb7cfecf2788 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -19,11 +19,9 @@
#include <linux/smp.h>
/* waiting for a spinlock... */
-#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
-#include <asm/iseries/hv_call.h>
#include <asm/smp.h>
-#include <asm/firmware.h>
void __spin_yield(arch_spinlock_t *lock)
{
@@ -40,14 +38,8 @@ void __spin_yield(arch_spinlock_t *lock)
rmb();
if (lock->slock != lock_value)
return; /* something has changed */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
- ((u64)holder_cpu << 32) | yield_count);
-#ifdef CONFIG_PPC_SPLPAR
- else
- plpar_hcall_norets(H_CONFER,
- get_hard_smp_processor_id(holder_cpu), yield_count);
-#endif
+ plpar_hcall_norets(H_CONFER,
+ get_hard_smp_processor_id(holder_cpu), yield_count);
}
/*
@@ -71,14 +63,8 @@ void __rw_yield(arch_rwlock_t *rw)
rmb();
if (rw->lock != lock_value)
return; /* something has changed */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
- ((u64)holder_cpu << 32) | yield_count);
-#ifdef CONFIG_PPC_SPLPAR
- else
- plpar_hcall_norets(H_CONFER,
- get_hard_smp_processor_id(holder_cpu), yield_count);
-#endif
+ plpar_hcall_norets(H_CONFER,
+ get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 329be36c0a8d..6747eece84af 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -365,12 +365,11 @@ static inline void __dma_sync_page_highmem(struct page *page,
local_irq_save(flags);
do {
- start = (unsigned long)kmap_atomic(page + seg_nr,
- KM_PPC_SYNC_PAGE) + seg_offset;
+ start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
/* Sync this buffer segment */
__dma_sync((void *)start, seg_size, direction);
- kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
+ kunmap_atomic((void *)start);
seg_nr++;
/* Calculate next buffer segment size */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 2f0d1b032a89..19f2f9498b27 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -105,6 +105,82 @@ static int store_updates_sp(struct pt_regs *regs)
}
return 0;
}
+/*
+ * do_page_fault error handling helpers
+ */
+
+#define MM_FAULT_RETURN 0
+#define MM_FAULT_CONTINUE -1
+#define MM_FAULT_ERR(sig) (sig)
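+/* Convention: negative means keep handling the fault, 0 means return to the caller, positive is a signal number */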
+
+static int out_of_memory(struct pt_regs *regs)
+{
+ /*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+ up_read(&current->mm->mmap_sem);
+ if (!user_mode(regs))
+ return MM_FAULT_ERR(SIGKILL);
+ pagefault_out_of_memory();
+ return MM_FAULT_RETURN;
+}
+
+static int do_sigbus(struct pt_regs *regs, unsigned long address)
+{
+ siginfo_t info;
+
+ up_read(&current->mm->mmap_sem);
+
+ if (user_mode(regs)) {
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, current);
+ return MM_FAULT_RETURN;
+ }
+ return MM_FAULT_ERR(SIGBUS);
+}
+
+static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
+{
+ /*
+ * Pagefault was interrupted by SIGKILL. We have no reason to
+ * continue the pagefault.
+ */
+ if (fatal_signal_pending(current)) {
+ /*
+ * If we have retry set, the mmap semaphore will have
+ * already been released in __lock_page_or_retry(). Else
+ * we release it now.
+ */
+ if (!(fault & VM_FAULT_RETRY))
+ up_read(&current->mm->mmap_sem);
+ /* Coming from kernel, we need to deal with uaccess fixups */
+ if (user_mode(regs))
+ return MM_FAULT_RETURN;
+ return MM_FAULT_ERR(SIGKILL);
+ }
+
+ /* No fault: be happy */
+ if (!(fault & VM_FAULT_ERROR))
+ return MM_FAULT_CONTINUE;
+
+ /* Out of memory */
+ if (fault & VM_FAULT_OOM)
+ return out_of_memory(regs);
+
+ /* Bus error. x86 handles HWPOISON here; we'll add this if/when
+ * we support the feature in HW.
+ */
+ if (fault & VM_FAULT_SIGBUS)
+ return do_sigbus(regs, addr);
+
+ /* We don't understand the fault code, this is fatal */
+ BUG();
+ return MM_FAULT_CONTINUE;
+}
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
@@ -124,11 +200,12 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
- siginfo_t info;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
int code = SEGV_MAPERR;
- int is_write = 0, ret;
+ int is_write = 0;
int trap = TRAP(regs);
int is_exec = trap == 0x400;
+ int fault;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
@@ -145,6 +222,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
+ if (is_write)
+ flags |= FAULT_FLAG_WRITE;
+
#ifdef CONFIG_PPC_ICSWX
/*
* we need to do this early because this "data storage
@@ -152,13 +232,11 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
* look at it
*/
if (error_code & ICSWX_DSI_UCT) {
- int ret;
-
- ret = acop_handle_fault(regs, address, error_code);
- if (ret)
- return ret;
+ int rc = acop_handle_fault(regs, address, error_code);
+ if (rc)
+ return rc;
}
-#endif
+#endif /* CONFIG_PPC_ICSWX */
if (notify_page_fault(regs))
return 0;
@@ -179,6 +257,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
}
#endif
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
return SIGSEGV;
@@ -212,7 +294,15 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (!user_mode(regs) && !search_exception_tables(regs->nip))
goto bad_area_nosemaphore;
+retry:
down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded, in
+ * which case we'll have missed the might_sleep() from
+ * down_read():
+ */
+ might_sleep();
}
vma = find_vma(mm, address);
@@ -327,30 +417,43 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
- if (unlikely(ret & VM_FAULT_ERROR)) {
- if (ret & VM_FAULT_OOM)
- goto out_of_memory;
- else if (ret & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
+ fault = handle_mm_fault(mm, vma, address, flags);
+ if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+ int rc = mm_fault_error(regs, address, fault);
+ if (rc >= MM_FAULT_RETURN)
+ return rc;
}
- if (ret & VM_FAULT_MAJOR) {
- current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
+
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ current->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+ regs, address);
#ifdef CONFIG_PPC_SMLPAR
- if (firmware_has_feature(FW_FEATURE_CMO)) {
- preempt_disable();
- get_lppaca()->page_ins += (1 << PAGE_FACTOR);
- preempt_enable();
+ if (firmware_has_feature(FW_FEATURE_CMO)) {
+ preempt_disable();
+ get_lppaca()->page_ins += (1 << PAGE_FACTOR);
+ preempt_enable();
+ }
+#endif /* CONFIG_PPC_SMLPAR */
+ } else {
+ current->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+ regs, address);
+ }
+ if (fault & VM_FAULT_RETRY) {
+ /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation. */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ goto retry;
}
-#endif
- } else {
- current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
}
+
up_read(&mm->mmap_sem);
return 0;
@@ -371,28 +474,6 @@ bad_area_nosemaphore:
return SIGSEGV;
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (!user_mode(regs))
- return SIGKILL;
- pagefault_out_of_memory();
- return 0;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
- if (user_mode(regs)) {
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, current);
- return 0;
- }
- return SIGBUS;
}
/*
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 66a6fd38e9cd..07ba45b0f07c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -149,12 +149,19 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
phys_addr_t phys)
{
- unsigned int camsize = __ilog2(ram) & ~1U;
- unsigned int align = __ffs(virt | phys) & ~1U;
- unsigned long max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xf;
-
- /* Convert (4^max) kB to (2^max) bytes */
- max_cam = max_cam * 2 + 10;
+ unsigned int camsize = __ilog2(ram);
+ unsigned int align = __ffs(virt | phys);
+ unsigned long max_cam;
+
+ if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
+ /* Convert (4^max) kB to (2^max) bytes */
+ max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
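+ /* MAV 1.0 TLB1 entries only support power-of-4 sizes, so force even powers of two */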
+ camsize &= ~1U;
+ align &= ~1U;
+ } else {
+ /* Convert (2^max) kB to (2^max) bytes */
+ max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
+ }
if (camsize > align)
camsize = align;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2d282186cb45..3e8c37a4e395 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -55,6 +55,8 @@
#include <asm/spu.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
+#include <asm/fadump.h>
+#include <asm/firmware.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -625,6 +627,16 @@ static void __init htab_initialize(void)
/* Using a hypervisor which owns the htab */
htab_address = NULL;
_SDR1 = 0;
+#ifdef CONFIG_FA_DUMP
+ /*
+ * If firmware-assisted dump is active, firmware preserves
+ * the contents of the htab along with the entire partition memory.
+ * Clear the htab if firmware-assisted dump is active so
+ * that we don't end up using old mappings.
+ */
+ if (is_fadump_active() && ppc_md.hpte_clear_all)
+ ppc_md.hpte_clear_all();
+#endif
} else {
/* Find storage for the HPT. Must be contiguous in
* the absolute address space. On cell we want it to be
@@ -745,12 +757,9 @@ void __init early_init_mmu(void)
*/
htab_initialize();
- /* Initialize stab / SLB management except on iSeries
- */
+ /* Initialize stab / SLB management */
if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
- else if (!firmware_has_feature(FW_FEATURE_ISERIES))
- stab_initialize(get_paca()->stab_real);
}
#ifdef CONFIG_SMP
@@ -761,8 +770,7 @@ void __cpuinit early_init_mmu_secondary(void)
mtspr(SPRN_SDR1, _SDR1);
/* Initialize STAB/SLB. We use a virtual address as it works
- * in real mode on pSeries and we want a virtual address on
- * iSeries anyway
+ * in real mode on pSeries.
*/
if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a8b3cc7d90fe..fb05b123218f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
+#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
@@ -103,6 +104,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
*shift = hugepd_shift(*hpdp);
return hugepte_offset(hpdp, ea, pdshift);
}
+EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
@@ -310,7 +312,8 @@ void __init reserve_hugetlb_gpages(void)
int i;
strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
- parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);
+ parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
+ &do_gpage_early_setup);
/*
* Walk gpage list in reverse, allocating larger page sizes first.
@@ -910,9 +913,9 @@ void flush_dcache_icache_hugepage(struct page *page)
if (!PageHighMem(page)) {
__flush_dcache_icache(page_address(page+i));
} else {
- start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
+ start = kmap_atomic(page+i);
__flush_dcache_icache(start);
- kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+ kunmap_atomic(start);
}
}
}
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
index 5d9a59eaad93..8cdbd8634a58 100644
--- a/arch/powerpc/mm/icswx.c
+++ b/arch/powerpc/mm/icswx.c
@@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(drop_cop);
static int acop_use_cop(int ct)
{
- /* todo */
+ /* There is no alternate policy, yet */
return -1;
}
@@ -227,11 +227,30 @@ int acop_handle_fault(struct pt_regs *regs, unsigned long address,
ct = (ccw >> 16) & 0x3f;
}
+ /*
+ * We could be here because another thread has enabled acop
+ * but the ACOP register has yet to be updated.
+ *
+ * This should have been taken care of by the IPI to sync all
+ * the threads (see smp_call_function(sync_cop, mm, 1)), but
+ * that could take forever if there is a significant number
+ * of threads.
+ *
+ * Given the number of threads on some of these systems,
+ * perhaps this is the best way to sync ACOP rather than whack
+ * every thread with an IPI.
+ */
+ if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) {
+ sync_cop(current->active_mm);
+ return 0;
+ }
+
+ /* check for alternate policy */
if (!acop_use_cop(ct))
return 0;
/* at this point the CT is unknown to the system */
- pr_warn("%s[%d]: Coprocessor %d is unavailable",
+ pr_warn("%s[%d]: Coprocessor %d is unavailable\n",
current->comm, current->pid, ct);
/* get inst if we don't already have it */
diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h
index 42176bd0884c..6dedc08e62c8 100644
--- a/arch/powerpc/mm/icswx.h
+++ b/arch/powerpc/mm/icswx.h
@@ -59,4 +59,10 @@ extern void free_cop_pid(int free_pid);
extern int acop_handle_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
+
+static inline u64 acop_copro_type_bit(unsigned int type)
+{
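+ /* The ACOP register numbers coprocessor types MSB-first, so CT n maps to bit 63 - n */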
+ return 1ULL << (63 - type);
+}
+
#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d974b79a3068..baaafde7d135 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -458,9 +458,9 @@ void flush_dcache_icache_page(struct page *page)
#endif
#ifdef CONFIG_BOOKE
{
- void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+ void *start = kmap_atomic(page);
__flush_dcache_icache(start);
- kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+ kunmap_atomic(start);
}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
/* On 8xx there is no need to kmap since highmem is not supported */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 51f87956f8f8..0907f92ce309 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -207,7 +207,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
*/
if (mem_init_done && (p < virt_to_phys(high_memory)) &&
!(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
- printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
+ printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
(unsigned long long)p, __builtin_return_address(0));
return NULL;
}
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index e22276cb67a4..a538c80db2df 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -21,7 +21,6 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
-#include <asm/firmware.h>
#include <linux/compiler.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
@@ -307,11 +306,6 @@ void slb_initialize(void)
get_paca()->stab_rr = SLB_NUM_BOLTED;
- /* On iSeries the bolted entries have already been set up by
- * the hypervisor from the lparMap data in head.S */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return;
-
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | vmalloc_llp;
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index ef653dc95b65..b9ee79ce2200 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -217,21 +217,6 @@ slb_finish_load:
* free slot first but that took too long. Unfortunately we
* dont have any LRU information to help us choose a slot.
*/
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- /*
- * On iSeries, the "bolted" stack segment can be cast out on
- * shared processor switch so we need to check for a miss on
- * it and restore it to the right slot.
- */
- ld r9,PACAKSAVE(r13)
- clrrdi r9,r9,28
- clrrdi r3,r3,28
- li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
- cmpld r9,r3
- beq 3f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
7: ld r10,PACASTABRR(r13)
addi r10,r10,1
@@ -282,7 +267,6 @@ _GLOBAL(slb_compare_rr_to_size)
/*
* Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
- * We assume legacy iSeries will never have 1T segments.
*
* r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
*/
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 41e31642a86a..9106ebb118f5 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -21,8 +21,6 @@
#include <asm/cputable.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_call.h>
struct stab_entry {
unsigned long esid_data;
@@ -285,12 +283,5 @@ void stab_initialize(unsigned long stab)
/* Set ASR */
stabreal = get_paca()->stab_real | 0x1ul;
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES)) {
- HvCall1(HvCallBaseSetASR, stabreal);
- return;
- }
-#endif /* CONFIG_PPC_ISERIES */
-
mtspr(SPRN_ASR, stabreal);
}
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index d65e68f3cb25..6f01624f317f 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -195,9 +195,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
if (!cur_cpu_spec->oprofile_cpu_type)
return -ENODEV;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return -ENODEV;
-
switch (cur_cpu_spec->oprofile_type) {
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_OPROFILE_CELL
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
new file mode 100644
index 000000000000..af3fac23768c
--- /dev/null
+++ b/arch/powerpc/perf/Makefile
@@ -0,0 +1,14 @@
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+obj-$(CONFIG_PERF_EVENTS) += callchain.o
+
+obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
+obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
+ power5+-pmu.o power6-pmu.o power7-pmu.o
+obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
+
+obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
+obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o
+
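+# Fold in the objects that are only built for the matching word size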
+obj-$(CONFIG_PPC64) += $(obj64-y)
+obj-$(CONFIG_PPC32) += $(obj32-y)
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/perf/callchain.c
index 564c1d8bdb5c..e8a18d1cc7c9 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -20,7 +20,7 @@
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
-#include "ppc32.h"
+#include "../kernel/ppc32.h"
#endif
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/perf/core-book3s.c
index 10a140f82cb8..02aee03e713c 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -116,14 +116,45 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
*addrp = mfspr(SPRN_SDAR);
}
+static inline u32 perf_flags_from_msr(struct pt_regs *regs)
+{
+ if (regs->msr & MSR_PR)
+ return PERF_RECORD_MISC_USER;
+ if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
+ return PERF_RECORD_MISC_HYPERVISOR;
+ return PERF_RECORD_MISC_KERNEL;
+}
+
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
unsigned long sihv = MMCRA_SIHV;
unsigned long sipr = MMCRA_SIPR;
+ /* Not a PMU interrupt: Make up flags from regs->msr */
if (TRAP(regs) != 0xf00)
- return 0; /* not a PMU interrupt */
+ return perf_flags_from_msr(regs);
+
+ /*
+ * If we don't support continuous sampling and this
+ * is not a marked event, same deal
+ */
+ if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) &&
+ !(mmcra & MMCRA_SAMPLE_ENABLE))
+ return perf_flags_from_msr(regs);
+
+ /*
+ * If we don't have flags in MMCRA, rather than using
+ * the MSR, we intuit the flags from the address in
+ * SIAR, which should give slightly more reliable
+ * results.
+ */
+ if (ppmu->flags & PPMU_NO_SIPR) {
+ unsigned long siar = mfspr(SPRN_SIAR);
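+ /* Kernel addresses live at or above PAGE_OFFSET */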
+ if (siar >= PAGE_OFFSET)
+ return PERF_RECORD_MISC_KERNEL;
+ return PERF_RECORD_MISC_USER;
+ }
if (ppmu->flags & PPMU_ALT_SIPR) {
sihv = POWER6_MMCRA_SIHV;
@@ -865,6 +896,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
+ unsigned long val;
if (!event->hw.idx || !event->hw.sample_period)
return;
@@ -880,7 +912,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
- write_pmc(event->hw.idx, left);
+
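+ /* PMCs count up and interrupt when bit 31 is set, so seed with 0x80000000 - left */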
+ val = 0;
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+
+ write_pmc(event->hw.idx, val);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
@@ -1078,6 +1115,10 @@ static int power_pmu_event_init(struct perf_event *event)
if (!ppmu)
return -ENOENT;
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
@@ -1187,6 +1228,11 @@ static int power_pmu_event_init(struct perf_event *event)
return err;
}
+static int power_pmu_event_idx(struct perf_event *event)
+{
+ return event->hw.idx;
+}
+
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
@@ -1199,6 +1245,7 @@ struct pmu power_pmu = {
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
+ .event_idx = power_pmu_event_idx,
};
/*
@@ -1283,13 +1330,18 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- unsigned long ip;
+ unsigned long mmcra = regs->dsisr;
+ /* Not a PMU interrupt */
if (TRAP(regs) != 0xf00)
- return regs->nip; /* not a PMU interrupt */
+ return regs->nip;
+
+ /* Processor doesn't support sampling non-marked events */
+ if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) &&
+ !(mmcra & MMCRA_SAMPLE_ENABLE))
+ return regs->nip;
- ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
- return ip;
+ return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
}
static bool pmc_overflow(unsigned long val)
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 0a6d2a9d569c..0a6d2a9d569c 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/perf/e500-pmu.c
index cb2e2949c8d1..cb2e2949c8d1 100644
--- a/arch/powerpc/kernel/e500-pmu.c
+++ b/arch/powerpc/perf/e500-pmu.c
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
index fe21b515ca44..fe21b515ca44 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/perf/mpc7450-pmu.c
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/perf/power4-pmu.c
index b4f1dda4d089..9103a1de864d 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/perf/power4-pmu.c
@@ -607,6 +607,7 @@ static struct power_pmu power4_pmu = {
.n_generic = ARRAY_SIZE(p4_generic_events),
.generic_events = p4_generic_events,
.cache_events = &power4_cache_events,
+ .flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
};
static int __init init_power4_pmu(void)
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index a8757baa28f3..a8757baa28f3 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index e7f06eb7a861..e7f06eb7a861 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 0bbc901e7efc..31128e086fed 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -131,7 +131,7 @@ static u32 marked_bus_events[16] = {
0x00000022, /* BFP set 2: byte 0 bits 1, 5 */
0, 0
};
-
+
/*
* Returns 1 if event counts things relating to marked instructions
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 1251e4d7e262..1251e4d7e262 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 8c2190206964..20139ceeacf6 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -252,7 +252,7 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
alt[1] = event ^ 0x1000;
return 2;
}
-
+
return 1;
}
@@ -487,6 +487,7 @@ static struct power_pmu ppc970_pmu = {
.n_generic = ARRAY_SIZE(ppc970_generic_events),
.generic_events = ppc970_generic_events,
.cache_events = &ppc970_cache_events,
+ .flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
};
static int __init init_ppc970_pmu(void)
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index fcf6bf2ceee9..2e4e64abfab4 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -23,6 +23,7 @@ config BLUESTONE
default n
select PPC44x_SIMPLE
select APM821xx
+ select PPC4xx_PCI_EXPRESS
select IBM_EMAC_RGMII
help
This option enables support for the APM APM821xx Evaluation board.
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
index 3f6229b5dee0..583e67fee37e 100644
--- a/arch/powerpc/platforms/44x/currituck.c
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -83,7 +83,7 @@ static void __init ppc47x_init_irq(void)
* device-tree, just pass 0 to all arguments
*/
struct mpic *mpic =
- mpic_alloc(np, 0, 0, 0, 0, " MPIC ");
+ mpic_alloc(np, 0, MPIC_NO_RESET, 0, 0, " MPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
ppc_md.get_irq = mpic_get_irq;
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index 5b8cdbb82f80..a28a8629727e 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -71,8 +71,7 @@ static void __init iss4xx_init_irq(void)
/* The MPIC driver will get everything it needs from the
* device-tree, just pass 0 to all arguments
*/
- struct mpic *mpic = mpic_alloc(np, 0, 0, 0, 0,
- " MPIC ");
+ struct mpic *mpic = mpic_alloc(np, 0, MPIC_NO_RESET, 0, 0, " MPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
ppc_md.get_irq = mpic_get_irq;
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 8d2202763415..3ffb915446e3 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -52,7 +52,7 @@ machine_device_initcall(ppc44x_simple, ppc44x_device_probe);
static char *board[] __initdata = {
"amcc,arches",
"amcc,bamboo",
- "amcc,bluestone",
+ "apm,bluestone",
"amcc,glacier",
"ibm,ebony",
"amcc,eiger",
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index 9f09319352c0..ca3a062ed1b9 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -21,7 +21,7 @@
#include <asm/prom.h>
static struct device_node *cpld_pic_node;
-static struct irq_host *cpld_pic_host;
+static struct irq_domain *cpld_pic_host;
/*
* Bits to ignore in the misc_status register
@@ -123,13 +123,13 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
}
static int
-cpld_pic_host_match(struct irq_host *h, struct device_node *node)
+cpld_pic_host_match(struct irq_domain *h, struct device_node *node)
{
return cpld_pic_node == node;
}
static int
-cpld_pic_host_map(struct irq_host *h, unsigned int virq,
+cpld_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_status_flags(virq, IRQ_LEVEL);
@@ -137,8 +137,7 @@ cpld_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct
-irq_host_ops cpld_pic_host_ops = {
+static const struct irq_domain_ops cpld_pic_host_ops = {
.match = cpld_pic_host_match,
.map = cpld_pic_host_map,
};
@@ -191,8 +190,7 @@ mpc5121_ads_cpld_pic_init(void)
cpld_pic_node = of_node_get(np);
- cpld_pic_host =
- irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 16, &cpld_pic_host_ops, 16);
+ cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL);
if (!cpld_pic_host) {
printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n");
goto end;
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 96f85e5e0cd3..17d91b7da315 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -45,7 +45,7 @@ static struct of_device_id mpc5200_gpio_ids[] __initdata = {
struct media5200_irq {
void __iomem *regs;
spinlock_t lock;
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
};
struct media5200_irq media5200_irq;
@@ -112,7 +112,7 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
raw_spin_unlock(&desc->lock);
}
-static int media5200_irq_map(struct irq_host *h, unsigned int virq,
+static int media5200_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
@@ -122,7 +122,7 @@ static int media5200_irq_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct,
+static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -136,7 +136,7 @@ static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops media5200_irq_ops = {
+static const struct irq_domain_ops media5200_irq_ops = {
.map = media5200_irq_map,
.xlate = media5200_irq_xlate,
};
@@ -173,15 +173,12 @@ static void __init media5200_init_irq(void)
spin_lock_init(&media5200_irq.lock);
- media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_HOST_MAP_LINEAR,
- MEDIA5200_NUM_IRQS,
- &media5200_irq_ops, -1);
+ media5200_irq.irqhost = irq_domain_add_linear(fpga_np,
+ MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq);
if (!media5200_irq.irqhost)
goto out;
pr_debug("%s: allocated irqhost\n", __func__);
- media5200_irq.irqhost->host_data = &media5200_irq;
-
irq_set_handler_data(cascade_virq, &media5200_irq);
irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index 846b789fb195..c0aa04068d69 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -50,6 +50,7 @@ static void __init mpc5200_simple_setup_arch(void)
/* list of the supported boards */
static const char *board[] __initdata = {
+ "anonymous,a4m072",
"anon,charon",
"intercontrol,digsy-mtc",
"manroland,mucmc52",
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 369fd5457a3f..d7e94f49532a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -98,13 +98,11 @@ struct mpc52xx_gpio_wkup __iomem *wkup_gpio;
* of the localplus bus to the of_platform
* bus.
*/
-void __init
-mpc52xx_declare_of_platform_devices(void)
+void __init mpc52xx_declare_of_platform_devices(void)
{
- /* Find every child of the SOC node and add it to of_platform */
- if (of_platform_bus_probe(NULL, mpc52xx_bus_ids, NULL))
- printk(KERN_ERR __FILE__ ": "
- "Error while probing of_platform bus\n");
+ /* Find all the 'platform' devices and register them. */
+ if (of_platform_populate(NULL, mpc52xx_bus_ids, NULL, NULL))
+ pr_err(__FILE__ ": Error while populating devices from DT\n");
}
/*
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index f94f06e52762..028470b95886 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -81,7 +81,7 @@ MODULE_LICENSE("GPL");
* @regs: virtual address of GPT registers
* @lock: spinlock to coordinate between different functions.
* @gc: gpio_chip instance structure; used when GPIO is enabled
- * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported
+ * @irqhost: Pointer to irq_domain instance; used when IRQ mode is supported
* @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
* if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
* if the timer is actively used as wdt which blocks gpt functions
@@ -91,7 +91,7 @@ struct mpc52xx_gpt_priv {
struct device *dev;
struct mpc52xx_gpt __iomem *regs;
spinlock_t lock;
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
u32 ipb_freq;
u8 wdt_mode;
@@ -204,7 +204,7 @@ void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
}
}
-static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
+static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct mpc52xx_gpt_priv *gpt = h->host_data;
@@ -216,7 +216,7 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
+static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -236,7 +236,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops mpc52xx_gpt_irq_ops = {
+static const struct irq_domain_ops mpc52xx_gpt_irq_ops = {
.map = mpc52xx_gpt_irq_map,
.xlate = mpc52xx_gpt_irq_xlate,
};
@@ -252,14 +252,12 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
if (!cascade_virq)
return;
- gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1,
- &mpc52xx_gpt_irq_ops, -1);
+ gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt);
if (!gpt->irqhost) {
- dev_err(gpt->dev, "irq_alloc_host() failed\n");
+ dev_err(gpt->dev, "irq_domain_add_linear() failed\n");
return;
}
- gpt->irqhost->host_data = gpt;
irq_set_handler_data(cascade_virq, gpt);
irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 1a9a49570579..8520b58a5e9a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -132,7 +132,7 @@ static struct of_device_id mpc52xx_sdma_ids[] __initdata = {
static struct mpc52xx_intr __iomem *intr;
static struct mpc52xx_sdma __iomem *sdma;
-static struct irq_host *mpc52xx_irqhost = NULL;
+static struct irq_domain *mpc52xx_irqhost = NULL;
static unsigned char mpc52xx_map_senses[4] = {
IRQ_TYPE_LEVEL_HIGH,
@@ -301,7 +301,7 @@ static int mpc52xx_is_extirq(int l1, int l2)
/**
* mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
*/
-static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
+static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -335,7 +335,7 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
/**
* mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
*/
-static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
+static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t irq)
{
int l1irq;
@@ -384,7 +384,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops mpc52xx_irqhost_ops = {
+static const struct irq_domain_ops mpc52xx_irqhost_ops = {
.xlate = mpc52xx_irqhost_xlate,
.map = mpc52xx_irqhost_map,
};
@@ -444,9 +444,9 @@ void __init mpc52xx_init_irq(void)
* As last step, add an irq host to translate the real
* hw irq information provided by the ofw to linux virq
*/
- mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR,
+ mpc52xx_irqhost = irq_domain_add_linear(picnode,
MPC52xx_IRQ_HIGHTESTHWIRQ,
- &mpc52xx_irqhost_ops, -1);
+ &mpc52xx_irqhost_ops, NULL);
if (!mpc52xx_irqhost)
panic(__FILE__ ": Cannot allocate the IRQ host\n");
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 8ccf9ed62fe2..328d221fd1c0 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -29,7 +29,7 @@ static DEFINE_RAW_SPINLOCK(pci_pic_lock);
struct pq2ads_pci_pic {
struct device_node *node;
- struct irq_host *host;
+ struct irq_domain *host;
struct {
u32 stat;
@@ -103,7 +103,7 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
}
}
-static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
+static int pci_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_status_flags(virq, IRQ_LEVEL);
@@ -112,14 +112,14 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops pci_pic_host_ops = {
+static const struct irq_domain_ops pci_pic_host_ops = {
.map = pci_pic_host_map,
};
int __init pq2ads_pci_init_irq(void)
{
struct pq2ads_pci_pic *priv;
- struct irq_host *host;
+ struct irq_domain *host;
struct device_node *np;
int ret = -ENODEV;
int irq;
@@ -156,17 +156,13 @@ int __init pq2ads_pci_init_irq(void)
out_be32(&priv->regs->mask, ~0);
mb();
- host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS,
- &pci_pic_host_ops, NUM_IRQS);
+ host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv);
if (!host) {
ret = -ENOMEM;
goto out_unmap_regs;
}
- host->host_data = priv;
-
priv->host = host;
- host->host_data = priv;
irq_set_handler_data(irq, priv);
irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index d7946be298b6..f000d81c4e31 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -6,6 +6,7 @@ menuconfig FSL_SOC_BOOKE
select MPIC
select PPC_PCI_CHOICE
select FSL_PCI if PCI
+ select SERIAL_8250_EXTENDED if SERIAL_8250
select SERIAL_8250_SHARE_IRQ if SERIAL_8250
default y
@@ -13,6 +14,15 @@ if FSL_SOC_BOOKE
if PPC32
+config FSL_85XX_CACHE_SRAM
+ bool
+ select PPC_LIB_RHEAP
+ help
+ When selected, this option enables cache-sram support
+ for memory allocation on P1/P2 QorIQ platforms.
+ The cache-sram-size and cache-sram-offset kernel boot
+ parameters should be passed when this option is enabled.
+
config MPC8540_ADS
bool "Freescale MPC8540 ADS"
select DEFAULT_UIMAGE
@@ -30,6 +40,7 @@ config MPC85xx_CDS
bool "Freescale MPC85xx CDS"
select DEFAULT_UIMAGE
select PPC_I8259
+ select HAS_RAPIDIO
help
This option enables support for the MPC85xx CDS board
@@ -80,7 +91,6 @@ config P1010_RDB
config P1022_DS
bool "Freescale P1022 DS"
select DEFAULT_UIMAGE
- select PHYS_64BIT # The DTS has 36-bit addresses
select SWIOTLB
help
This option enables support for the Freescale P1022DS reference board.
@@ -171,6 +181,21 @@ config SBC8560
help
This option enables support for the Wind River SBC8560 board
+config GE_IMP3A
+ bool "GE Intelligent Platforms IMP3A"
+ select DEFAULT_UIMAGE
+ select SWIOTLB
+ select MMIO_NVRAM
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select GE_FPGA
+ help
+ This option enables support for the GE Intelligent Platforms IMP3A
+ board.
+
+ This board is a 3U CompactPCI Single Board Computer with a Freescale
+ P2020 processor.
+
config P2041_RDB
bool "Freescale P2041 RDB"
select DEFAULT_UIMAGE
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 9cb2d4320dcc..2125d4ca068a 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_SBC8548) += sbc8548.o
obj-$(CONFIG_SOCRATES) += socrates.o socrates_fpga_pic.o
obj-$(CONFIG_KSI8560) += ksi8560.o
obj-$(CONFIG_XES_MPC85xx) += xes_mpc85xx.o
+obj-$(CONFIG_GE_IMP3A) += ge_imp3a.o
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 07e3e6c47371..df69e99e511c 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -36,8 +36,8 @@
void __init corenet_ds_pic_init(void)
{
struct mpic *mpic;
- unsigned int flags = MPIC_BIG_ENDIAN |
- MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU;
+ unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
+ MPIC_NO_RESET;
if (ppc_md.get_irq == mpic_get_coreint_irq)
flags |= MPIC_ENABLE_COREINT;
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
new file mode 100644
index 000000000000..d50056f424f6
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -0,0 +1,246 @@
+/*
+ * GE IMP3A Board Setup
+ *
+ * Author Martyn Welch <martyn.welch@ge.com>
+ *
+ * Copyright 2010 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Based on: mpc85xx_ds.c (MPC85xx DS Board Setup)
+ * Copyright 2007 Freescale Semiconductor Inc.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/memblock.h>
+
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <asm/swiotlb.h>
+#include <asm/nvram.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include "smp.h"
+
+#include "mpc85xx.h"
+#include <sysdev/ge/ge_pic.h>
+
+void __iomem *imp3a_regs;
+
+void __init ge_imp3a_pic_init(void)
+{
+ struct mpic *mpic;
+ struct device_node *np;
+ struct device_node *cascade_node = NULL;
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
+ mpic = mpic_alloc(NULL, 0,
+ MPIC_NO_RESET |
+ MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+ } else {
+ mpic = mpic_alloc(NULL, 0,
+ MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+ }
+
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+ /*
+ * There is a simple interrupt handler in the main FPGA, this needs
+ * to be cascaded into the MPIC
+ */
+ for_each_node_by_type(np, "interrupt-controller")
+ if (of_device_is_compatible(np, "gef,fpga-pic-1.00")) {
+ cascade_node = np;
+ break;
+ }
+
+ if (cascade_node == NULL) {
+ printk(KERN_WARNING "IMP3A: No FPGA PIC\n");
+ return;
+ }
+
+ gef_pic_init(cascade_node);
+ of_node_put(cascade_node);
+}
+
+#ifdef CONFIG_PCI
+static int primary_phb_addr;
+#endif /* CONFIG_PCI */
+
+/*
+ * Setup the architecture
+ */
+static void __init ge_imp3a_setup_arch(void)
+{
+ struct device_node *regs;
+#ifdef CONFIG_PCI
+ struct device_node *np;
+ struct pci_controller *hose;
+#endif
+ dma_addr_t max = 0xffffffff;
+
+ if (ppc_md.progress)
+ ppc_md.progress("ge_imp3a_setup_arch()", 0);
+
+#ifdef CONFIG_PCI
+ for_each_node_by_type(np, "pci") {
+ if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
+ of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
+ of_device_is_compatible(np, "fsl,p2020-pcie")) {
+ struct resource rsrc;
+ of_address_to_resource(np, 0, &rsrc);
+ if ((rsrc.start & 0xfffff) == primary_phb_addr)
+ fsl_add_bridge(np, 1);
+ else
+ fsl_add_bridge(np, 0);
+
+ hose = pci_find_hose_for_OF_device(np);
+ max = min(max, hose->dma_window_base_cur +
+ hose->dma_window_size);
+ }
+ }
+#endif
+
+ mpc85xx_smp_init();
+
+#ifdef CONFIG_SWIOTLB
+ if (memblock_end_of_DRAM() > max) {
+ ppc_swiotlb_enable = 1;
+ set_pci_dma_ops(&swiotlb_dma_ops);
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
+ }
+#endif
+
+ /* Remap basic board registers */
+ regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs");
+ if (regs) {
+ imp3a_regs = of_iomap(regs, 0);
+ if (imp3a_regs == NULL)
+ printk(KERN_WARNING "Unable to map board registers\n");
+ of_node_put(regs);
+ }
+
+#if defined(CONFIG_MMIO_NVRAM)
+ mmio_nvram_init();
+#endif
+
+ printk(KERN_INFO "GE Intelligent Platforms IMP3A 3U cPCI SBC\n");
+}
+
+/* Return the PCB revision */
+static unsigned int ge_imp3a_get_pcb_rev(void)
+{
+ unsigned int reg;
+
+ reg = ioread16(imp3a_regs);
+ return (reg >> 8) & 0xff;
+}
+
+/* Return the board (software) revision */
+static unsigned int ge_imp3a_get_board_rev(void)
+{
+ unsigned int reg;
+
+ reg = ioread16(imp3a_regs + 0x2);
+ return reg & 0xff;
+}
+
+/* Return the FPGA revision */
+static unsigned int ge_imp3a_get_fpga_rev(void)
+{
+ unsigned int reg;
+
+ reg = ioread16(imp3a_regs + 0x2);
+ return (reg >> 8) & 0xff;
+}
+
+/* Return compactPCI Geographical Address */
+static unsigned int ge_imp3a_get_cpci_geo_addr(void)
+{
+ unsigned int reg;
+
+ reg = ioread16(imp3a_regs + 0x6);
+ return (reg & 0x0f00) >> 8;
+}
+
+/* Return compactPCI System Controller Status */
+static unsigned int ge_imp3a_get_cpci_is_syscon(void)
+{
+ unsigned int reg;
+
+ reg = ioread16(imp3a_regs + 0x6);
+ return reg & (1 << 12);
+}
+
+static void ge_imp3a_show_cpuinfo(struct seq_file *m)
+{
+ seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
+
+ seq_printf(m, "Revision\t: %u%c\n", ge_imp3a_get_pcb_rev(),
+ ('A' + ge_imp3a_get_board_rev() - 1));
+
+ seq_printf(m, "FPGA Revision\t: %u\n", ge_imp3a_get_fpga_rev());
+
+ seq_printf(m, "cPCI geo. addr\t: %u\n", ge_imp3a_get_cpci_geo_addr());
+
+ seq_printf(m, "cPCI syscon\t: %s\n",
+ ge_imp3a_get_cpci_is_syscon() ? "yes" : "no");
+}
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init ge_imp3a_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "ge,IMP3A")) {
+#ifdef CONFIG_PCI
+ primary_phb_addr = 0x9000;
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+machine_device_initcall(ge_imp3a, mpc85xx_common_publish_devices);
+
+machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier);
+
+define_machine(ge_imp3a) {
+ .name = "GE_IMP3A",
+ .probe = ge_imp3a_probe,
+ .setup_arch = ge_imp3a_setup_arch,
+ .init_IRQ = ge_imp3a_pic_init,
+ .show_cpuinfo = ge_imp3a_show_cpuinfo,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 20f75d7819c6..60120e55da41 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -57,8 +57,7 @@ static void machine_restart(char *cmd)
static void __init ksi8560_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index cf266826682e..f58872688d8f 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -36,9 +36,7 @@
void __init mpc8536_ds_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 3bebb5173bfc..d19f675cb369 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -50,8 +50,7 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
static void __init mpc85xx_ads_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 40f03da616a9..ab5f0bf19454 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -3,7 +3,7 @@
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
- * Copyright 2005 Freescale Semiconductor Inc.
+ * Copyright 2005, 2011-2012 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -48,17 +48,24 @@
#include "mpc85xx.h"
-/* CADMUS info */
-/* xxx - galak, move into device tree */
-#define CADMUS_BASE (0xf8004000)
-#define CADMUS_SIZE (256)
-#define CM_VER (0)
-#define CM_CSR (1)
-#define CM_RST (2)
-
+/*
+ * The CDS board contains an FPGA/CPLD called "Cadmus", which collects
+ * various logic and performs system control functions.
+ * Here is the FPGA/CPLD register map.
+ */
+struct cadmus_reg {
+ u8 cm_ver; /* Board version */
+ u8 cm_csr; /* General control/status */
+ u8 cm_rst; /* Reset control */
+ u8 cm_hsclk; /* High speed clock */
+ u8 cm_hsxclk; /* High speed clock extended */
+ u8 cm_led; /* LED data */
+ u8 cm_pci; /* PCI control/status */
+ u8 cm_dma; /* DMA control */
+ u8 res[248]; /* Total 256 bytes */
+};
-static int cds_pci_slot = 2;
-static volatile u8 *cadmus;
+static struct cadmus_reg *cadmus;
#ifdef CONFIG_PCI
@@ -158,6 +165,33 @@ DECLARE_PCI_FIXUP_EARLY(0x1957, 0x3fff, skip_fake_bridge);
DECLARE_PCI_FIXUP_EARLY(0x3fff, 0x1957, skip_fake_bridge);
DECLARE_PCI_FIXUP_EARLY(0xff3f, 0x5719, skip_fake_bridge);
+#define PCI_DEVICE_ID_IDT_TSI310 0x01a7
+
+/*
+ * Fix Tsi310 PCI-X bridge resource.
+ * Force the bridge to open a window from 0x0000-0x1fff in PCI I/O space.
+ * This allows legacy I/O (i8259, etc.) on the VIA southbridge to be accessed.
+ */
+void mpc85xx_cds_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev = bus->self;
+ struct resource *res = bus->resource[0];
+
+ if (dev != NULL &&
+ dev->vendor == PCI_VENDOR_ID_IBM &&
+ dev->device == PCI_DEVICE_ID_IDT_TSI310) {
+ if (res) {
+ res->start = 0;
+ res->end = 0x1fff;
+ res->flags = IORESOURCE_IO;
+ pr_info("mpc85xx_cds: PCI bridge resource fixup applied\n");
+ pr_info("mpc85xx_cds: %pR\n", res);
+ }
+ }
+
+ fsl_pcibios_fixup_bus(bus);
+}
+
#ifdef CONFIG_PPC_I8259
static void mpc85xx_8259_cascade_handler(unsigned int irq,
struct irq_desc *desc)
@@ -188,8 +222,7 @@ static struct irqaction mpc85xxcds_8259_irqaction = {
static void __init mpc85xx_cds_pic_init(void)
{
struct mpic *mpic;
- mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
@@ -249,20 +282,30 @@ machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
*/
static void __init mpc85xx_cds_setup_arch(void)
{
-#ifdef CONFIG_PCI
struct device_node *np;
-#endif
+ int cds_pci_slot;
if (ppc_md.progress)
ppc_md.progress("mpc85xx_cds_setup_arch()", 0);
- cadmus = ioremap(CADMUS_BASE, CADMUS_SIZE);
- cds_pci_slot = ((cadmus[CM_CSR] >> 6) & 0x3) + 1;
+ np = of_find_compatible_node(NULL, NULL, "fsl,mpc8548cds-fpga");
+ if (!np) {
+ pr_err("Could not find FPGA node.\n");
+ return;
+ }
+
+ cadmus = of_iomap(np, 0);
+ of_node_put(np);
+ if (!cadmus) {
+ pr_err("Fail to map FPGA area.\n");
+ return;
+ }
if (ppc_md.progress) {
char buf[40];
+ cds_pci_slot = ((in_8(&cadmus->cm_csr) >> 6) & 0x3) + 1;
snprintf(buf, 40, "CDS Version = 0x%x in slot %d\n",
- cadmus[CM_VER], cds_pci_slot);
+ in_8(&cadmus->cm_ver), cds_pci_slot);
ppc_md.progress(buf, 0);
}
@@ -292,7 +335,8 @@ static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
- seq_printf(m, "Machine\t\t: MPC85xx CDS (0x%x)\n", cadmus[CM_VER]);
+ seq_printf(m, "Machine\t\t: MPC85xx CDS (0x%x)\n",
+ in_8(&cadmus->cm_ver));
seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
@@ -323,7 +367,7 @@ define_machine(mpc85xx_cds) {
.get_irq = mpic_get_irq,
#ifdef CONFIG_PCI
.restart = mpc85xx_cds_restart,
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_bus = mpc85xx_cds_fixup_bus,
#else
.restart = fsl_rstcr_restart,
#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index eefbb91e1d61..6e23e3e34bd9 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -72,13 +72,13 @@ void __init mpc85xx_ds_pic_init(void)
if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
mpic = mpic_alloc(NULL, 0,
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
+ MPIC_NO_RESET |
+ MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
} else {
mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
+ MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 1d15a0cd2c82..f33662b46b8d 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -1,5 +1,6 @@
/*
- * Copyright (C) Freescale Semicondutor, Inc. 2006-2010. All rights reserved.
+ * Copyright (C) 2006-2010, 2012 Freescale Semiconductor, Inc.
+ * All rights reserved.
*
* Author: Andy Fleming <afleming@freescale.com>
*
@@ -51,6 +52,7 @@
#include <asm/qe_ic.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
+#include <asm/fsl_guts.h>
#include "smp.h"
#include "mpc85xx.h"
@@ -268,34 +270,27 @@ static void __init mpc85xx_mds_qe_init(void)
mpc85xx_mds_reset_ucc_phys();
if (machine_is(p1021_mds)) {
-#define MPC85xx_PMUXCR_OFFSET 0x60
-#define MPC85xx_PMUXCR_QE0 0x00008000
-#define MPC85xx_PMUXCR_QE3 0x00001000
-#define MPC85xx_PMUXCR_QE9 0x00000040
-#define MPC85xx_PMUXCR_QE12 0x00000008
- static __be32 __iomem *pmuxcr;
- np = of_find_node_by_name(NULL, "global-utilities");
+ struct ccsr_guts_85xx __iomem *guts;
+ np = of_find_node_by_name(NULL, "global-utilities");
if (np) {
- pmuxcr = of_iomap(np, 0) + MPC85xx_PMUXCR_OFFSET;
-
- if (!pmuxcr)
- printk(KERN_EMERG "Error: Alternate function"
- " signal multiplex control register not"
- " mapped!\n");
- else
+ guts = of_iomap(np, 0);
+ if (!guts)
+ pr_err("mpc85xx-rdb: could not map global utilities register\n");
+ else{
/* P1021 has pins muxed for QE and other functions. To
* enable QE UEC mode, we need to set bit QE0 for UCC1
* in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
* and QE12 for QE MII management signals in PMUXCR
* register.
*/
- setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 |
- MPC85xx_PMUXCR_QE3 |
- MPC85xx_PMUXCR_QE9 |
- MPC85xx_PMUXCR_QE12);
-
+ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
+ MPC85xx_PMUXCR_QE(3) |
+ MPC85xx_PMUXCR_QE(9) |
+ MPC85xx_PMUXCR_QE(12));
+ iounmap(guts);
+ }
of_node_put(np);
}
@@ -434,9 +429,8 @@ machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier);
static void __init mpc85xx_mds_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
- MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index ccf520e890be..db214cd4c822 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -1,7 +1,7 @@
/*
* MPC85xx RDB Board Setup
*
- * Copyright 2009 Freescale Semiconductor Inc.
+ * Copyright 2009,2012 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,6 +26,9 @@
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
+#include <asm/qe.h>
+#include <asm/qe_ic.h>
+#include <asm/fsl_guts.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -47,21 +50,36 @@ void __init mpc85xx_rdb_pic_init(void)
struct mpic *mpic;
unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_QUICC_ENGINE
+ struct device_node *np;
+#endif
+
if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) {
- mpic = mpic_alloc(NULL, 0,
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
+ mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET |
+ MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
} else {
mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
+ MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
}
BUG_ON(mpic == NULL);
mpic_init(mpic);
+
+#ifdef CONFIG_QUICC_ENGINE
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+ if (np) {
+ qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
+ qe_ic_cascade_high_mpic);
+ of_node_put(np);
+
+ } else
+ pr_err("%s: Could not find qe-ic node\n", __func__);
+#endif
+
}
/*
@@ -69,7 +87,7 @@ void __init mpc85xx_rdb_pic_init(void)
*/
static void __init mpc85xx_rdb_setup_arch(void)
{
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) || defined(CONFIG_QUICC_ENGINE)
struct device_node *np;
#endif
@@ -85,11 +103,73 @@ static void __init mpc85xx_rdb_setup_arch(void)
#endif
mpc85xx_smp_init();
+
+#ifdef CONFIG_QUICC_ENGINE
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!np) {
+ pr_err("%s: Could not find Quicc Engine node\n", __func__);
+ goto qe_fail;
+ }
+
+ qe_reset();
+ of_node_put(np);
+
+ np = of_find_node_by_name(NULL, "par_io");
+ if (np) {
+ struct device_node *ucc;
+
+ par_io_init(np);
+ of_node_put(np);
+
+ for_each_node_by_name(ucc, "ucc")
+ par_io_of_config(ucc);
+
+ }
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
+ if (machine_is(p1025_rdb)) {
+
+ struct ccsr_guts_85xx __iomem *guts;
+
+ np = of_find_node_by_name(NULL, "global-utilities");
+ if (np) {
+ guts = of_iomap(np, 0);
+ if (!guts) {
+
+ pr_err("mpc85xx-rdb: could not map global utilities register\n");
+
+ } else {
+ /* P1025 has pins muxed for QE and other functions. To
+ * enable QE UEC mode, we need to set bit QE0 for UCC1
+ * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
+				 * and QE12 for QE MII management signals in PMUXCR
+ * register.
+ */
+ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
+ MPC85xx_PMUXCR_QE(3) |
+ MPC85xx_PMUXCR_QE(9) |
+ MPC85xx_PMUXCR_QE(12));
+ iounmap(guts);
+ }
+ of_node_put(np);
+ }
+
+ }
+#endif
+
+qe_fail:
+#endif /* CONFIG_QUICC_ENGINE */
+
printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
}
machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices);
+machine_device_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
+machine_device_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices);
+machine_device_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
+machine_device_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
+machine_device_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
+machine_device_initcall(p1025_rdb, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
@@ -112,6 +192,52 @@ static int __init p1020_rdb_probe(void)
return 0;
}
+static int __init p1020_rdb_pc_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PC");
+}
+
+static int __init p1021_rdb_pc_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "fsl,P1021RDB-PC"))
+ return 1;
+ return 0;
+}
+
+static int __init p2020_rdb_pc_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "fsl,P2020RDB-PC"))
+ return 1;
+ return 0;
+}
+
+static int __init p1025_rdb_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,P1025RDB");
+}
+
+static int __init p1020_mbg_pc_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,P1020MBG-PC");
+}
+
+static int __init p1020_utm_pc_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,P1020UTM-PC");
+}
+
define_machine(p2020_rdb) {
.name = "P2020 RDB",
.probe = p2020_rdb_probe,
@@ -139,3 +265,87 @@ define_machine(p1020_rdb) {
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
+
+define_machine(p1021_rdb_pc) {
+ .name = "P1021 RDB-PC",
+ .probe = p1021_rdb_pc_probe,
+ .setup_arch = mpc85xx_rdb_setup_arch,
+ .init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
+
+define_machine(p2020_rdb_pc) {
+ .name = "P2020RDB-PC",
+ .probe = p2020_rdb_pc_probe,
+ .setup_arch = mpc85xx_rdb_setup_arch,
+ .init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
+
+define_machine(p1025_rdb) {
+ .name = "P1025 RDB",
+ .probe = p1025_rdb_probe,
+ .setup_arch = mpc85xx_rdb_setup_arch,
+ .init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
+
+define_machine(p1020_mbg_pc) {
+ .name = "P1020 MBG-PC",
+ .probe = p1020_mbg_pc_probe,
+ .setup_arch = mpc85xx_rdb_setup_arch,
+ .init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
+
+define_machine(p1020_utm_pc) {
+ .name = "P1020 UTM-PC",
+ .probe = p1020_utm_pc_probe,
+ .setup_arch = mpc85xx_rdb_setup_arch,
+ .init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
+
+define_machine(p1020_rdb_pc) {
+ .name = "P1020RDB-PC",
+ .probe = p1020_rdb_pc_probe,
+ .setup_arch = mpc85xx_rdb_setup_arch,
+ .init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 538bc3f57e9d..d8bd6563d9ca 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -32,9 +32,8 @@
void __init p1010_rdb_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
- MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index bb3d84f4046f..0fe88e39945e 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -25,6 +25,7 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include <asm/udbg.h>
#include <asm/fsl_guts.h>
#include "smp.h"
@@ -32,6 +33,10 @@
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+#define PMUXCR_ELBCDIU_MASK 0xc0000000
+#define PMUXCR_ELBCDIU_NOR16 0x80000000
+#define PMUXCR_ELBCDIU_DIU 0x40000000
+
/*
* Board-specific initialization of the DIU. This code should probably be
* executed when the DIU is opened, rather than in arch code, but the DIU
@@ -49,11 +54,22 @@
#define CLKDVDR_PXCLK_MASK 0x00FF0000
/* Some ngPIXIS register definitions */
+#define PX_CTL 3
+#define PX_BRDCFG0 8
+#define PX_BRDCFG1 9
+
+#define PX_BRDCFG0_ELBC_SPI_MASK 0xc0
+#define PX_BRDCFG0_ELBC_SPI_ELBC 0x00
+#define PX_BRDCFG0_ELBC_SPI_NULL 0xc0
+#define PX_BRDCFG0_ELBC_DIU 0x02
+
#define PX_BRDCFG1_DVIEN 0x80
#define PX_BRDCFG1_DFPEN 0x40
#define PX_BRDCFG1_BACKLIGHT 0x20
#define PX_BRDCFG1_DDCEN 0x10
+#define PX_CTL_ALTACC 0x80
+
/*
* DIU Area Descriptor
*
@@ -132,44 +148,117 @@ static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
*/
static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
{
- struct device_node *np;
- void __iomem *pixis;
- u8 __iomem *brdcfg1;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga");
- if (!np)
- /* older device trees used "fsl,p1022ds-pixis" */
- np = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis");
- if (!np) {
- pr_err("p1022ds: missing ngPIXIS node\n");
+ struct device_node *guts_node;
+ struct device_node *indirect_node = NULL;
+ struct ccsr_guts_85xx __iomem *guts;
+ u8 __iomem *lbc_lcs0_ba = NULL;
+ u8 __iomem *lbc_lcs1_ba = NULL;
+ u8 b;
+
+ /* Map the global utilities registers. */
+ guts_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
+ if (!guts_node) {
+ pr_err("p1022ds: missing global utilties device node\n");
return;
}
- pixis = of_iomap(np, 0);
- if (!pixis) {
- pr_err("p1022ds: could not map ngPIXIS registers\n");
- return;
+ guts = of_iomap(guts_node, 0);
+ if (!guts) {
+ pr_err("p1022ds: could not map global utilties device\n");
+ goto exit;
}
- brdcfg1 = pixis + 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */
+
+ indirect_node = of_find_compatible_node(NULL, NULL,
+ "fsl,p1022ds-indirect-pixis");
+ if (!indirect_node) {
+ pr_err("p1022ds: missing pixis indirect mode node\n");
+ goto exit;
+ }
+
+ lbc_lcs0_ba = of_iomap(indirect_node, 0);
+ if (!lbc_lcs0_ba) {
+ pr_err("p1022ds: could not map localbus chip select 0\n");
+ goto exit;
+ }
+
+ lbc_lcs1_ba = of_iomap(indirect_node, 1);
+ if (!lbc_lcs1_ba) {
+ pr_err("p1022ds: could not map localbus chip select 1\n");
+ goto exit;
+ }
+
+ /* Make sure we're in indirect mode first. */
+ if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
+ PMUXCR_ELBCDIU_DIU) {
+ struct device_node *pixis_node;
+ void __iomem *pixis;
+
+ pixis_node =
+ of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga");
+ if (!pixis_node) {
+ pr_err("p1022ds: missing pixis node\n");
+ goto exit;
+ }
+
+ pixis = of_iomap(pixis_node, 0);
+ of_node_put(pixis_node);
+ if (!pixis) {
+ pr_err("p1022ds: could not map pixis registers\n");
+ goto exit;
+ }
+
+ /* Enable indirect PIXIS mode. */
+ setbits8(pixis + PX_CTL, PX_CTL_ALTACC);
+ iounmap(pixis);
+
+ /* Switch the board mux to the DIU */
+ out_8(lbc_lcs0_ba, PX_BRDCFG0); /* BRDCFG0 */
+ b = in_8(lbc_lcs1_ba);
+ b |= PX_BRDCFG0_ELBC_DIU;
+ out_8(lbc_lcs1_ba, b);
+
+ /* Set the chip mux to DIU mode. */
+ clrsetbits_be32(&guts->pmuxcr, PMUXCR_ELBCDIU_MASK,
+ PMUXCR_ELBCDIU_DIU);
+ in_be32(&guts->pmuxcr);
+ }
+
switch (port) {
case FSL_DIU_PORT_DVI:
- printk(KERN_INFO "%s:%u\n", __func__, __LINE__);
/* Enable the DVI port, disable the DFP and the backlight */
- clrsetbits_8(brdcfg1, PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT,
- PX_BRDCFG1_DVIEN);
+ out_8(lbc_lcs0_ba, PX_BRDCFG1);
+ b = in_8(lbc_lcs1_ba);
+ b &= ~(PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT);
+ b |= PX_BRDCFG1_DVIEN;
+ out_8(lbc_lcs1_ba, b);
break;
case FSL_DIU_PORT_LVDS:
- printk(KERN_INFO "%s:%u\n", __func__, __LINE__);
+ /*
+ * LVDS also needs backlight enabled, otherwise the display
+ * will be blank.
+ */
/* Enable the DFP port, disable the DVI and the backlight */
- clrsetbits_8(brdcfg1, PX_BRDCFG1_DVIEN | PX_BRDCFG1_BACKLIGHT,
- PX_BRDCFG1_DFPEN);
+ out_8(lbc_lcs0_ba, PX_BRDCFG1);
+ b = in_8(lbc_lcs1_ba);
+ b &= ~PX_BRDCFG1_DVIEN;
+ b |= PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT;
+ out_8(lbc_lcs1_ba, b);
break;
default:
pr_err("p1022ds: unsupported monitor port %i\n", port);
}
- iounmap(pixis);
+exit:
+ if (lbc_lcs1_ba)
+ iounmap(lbc_lcs1_ba);
+ if (lbc_lcs0_ba)
+ iounmap(lbc_lcs0_ba);
+ if (guts)
+ iounmap(guts);
+
+ of_node_put(indirect_node);
+ of_node_put(guts_node);
}
/**
@@ -241,15 +330,56 @@ p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
void __init p1022_ds_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
}
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+
+/*
+ * Disables a node in the device tree.
+ *
+ * This function is called before kmalloc() is available, so the 'new' object
+ * should be allocated in the global area. The easiest way to do that is
+ * to allocate one static local variable for each call to this function.
+ */
+static void __init disable_one_node(struct device_node *np, struct property *new)
+{
+ struct property *old;
+
+ old = of_find_property(np, new->name, NULL);
+ if (old)
+ prom_update_property(np, new, old);
+ else
+ prom_add_property(np, new);
+}
+
+/* TRUE if there is a "video=fslfb" command-line parameter. */
+static bool fslfb;
+
+/*
+ * Search for a "video=fslfb" command-line parameter, and set 'fslfb' to
+ * true if we find it.
+ *
+ * We need to use early_param() instead of __setup() because the normal
+ * __setup() gets called too late. However, early_param() gets called very
+ * early, before the device tree is unflattened, so all we can do now is set a
+ * global variable. Later on, p1022_ds_setup_arch() will use that variable
+ * to determine if we need to update the device tree.
+ */
+static int __init early_video_setup(char *options)
+{
+ fslfb = (strncmp(options, "fslfb:", 6) == 0);
+
+ return 0;
+}
+early_param("video", early_video_setup);
+
+#endif
+
/*
* Setup the architecture
*/
@@ -287,6 +417,34 @@ static void __init p1022_ds_setup_arch(void)
diu_ops.set_monitor_port = p1022ds_set_monitor_port;
diu_ops.set_pixel_clock = p1022ds_set_pixel_clock;
diu_ops.valid_monitor_port = p1022ds_valid_monitor_port;
+
+ /*
+	 * Disable the NOR flash node if there is a video=fslfb... command-line
+ * parameter. When the DIU is active, NOR flash is unavailable, so we
+ * have to disable the node before the MTD driver loads.
+ */
+ if (fslfb) {
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
+
+ if (np) {
+ np = of_find_compatible_node(np, NULL, "cfi-flash");
+ if (np) {
+ static struct property nor_status = {
+ .name = "status",
+ .value = "disabled",
+ .length = sizeof("disabled"),
+ };
+
+ pr_info("p1022ds: disabling %s node",
+ np->full_name);
+ disable_one_node(np, &nor_status);
+ of_node_put(np);
+ }
+ }
+
+ }
+
#endif
mpc85xx_smp_init();
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index d951e7027bb6..6b07398e4369 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -93,9 +93,8 @@ machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices);
static void __init mpc85xx_rds_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
- MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index 184a50784617..1677b8a22677 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -54,8 +54,7 @@ static int sbc_rev;
static void __init sbc8548_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index 940752e93051..3c3bbcc27566 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -41,8 +41,7 @@
static void __init sbc8560_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index 18f635906b27..b71919217756 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -48,8 +48,7 @@ static void __init socrates_pic_init(void)
{
struct device_node *np;
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 12cb9bb2cc68..3bbbf7489487 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -51,7 +51,7 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
static void __iomem *socrates_fpga_pic_iobase;
-static struct irq_host *socrates_fpga_pic_irq_host;
+static struct irq_domain *socrates_fpga_pic_irq_host;
static unsigned int socrates_fpga_irqs[3];
static inline uint32_t socrates_fpga_pic_read(int reg)
@@ -227,7 +227,7 @@ static struct irq_chip socrates_fpga_pic_chip = {
.irq_set_type = socrates_fpga_pic_set_type,
};
-static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
+static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
@@ -238,7 +238,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int socrates_fpga_pic_host_xlate(struct irq_host *h,
+static int socrates_fpga_pic_host_xlate(struct irq_domain *h,
struct device_node *ct, const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -269,7 +269,7 @@ static int socrates_fpga_pic_host_xlate(struct irq_host *h,
return 0;
}
-static struct irq_host_ops socrates_fpga_pic_host_ops = {
+static const struct irq_domain_ops socrates_fpga_pic_host_ops = {
.map = socrates_fpga_pic_host_map,
.xlate = socrates_fpga_pic_host_xlate,
};
@@ -279,10 +279,9 @@ void socrates_fpga_pic_init(struct device_node *pic)
unsigned long flags;
int i;
- /* Setup an irq_host structure */
- socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_HOST_MAP_LINEAR,
- SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops,
- SOCRATES_FPGA_NUM_IRQS);
+ /* Setup an irq_domain structure */
+ socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
+ SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
if (socrates_fpga_pic_irq_host == NULL) {
pr_err("FPGA PIC: Unable to allocate host\n");
return;
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index e9e5234b4e76..27ca3a7b04ab 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -48,8 +48,7 @@
static void __init stx_gp3_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index bf7c89fb75bb..d7504cefe016 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -47,7 +47,7 @@
static void __init tqm85xx_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 3a69f8b77de6..503c21596c63 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -43,9 +43,7 @@
void __init xes_mpc85xx_pic_init(void)
{
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET |
- MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 8d6599d54ea6..7a6279e38213 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -39,6 +39,7 @@ config GEF_PPC9A
select MMIO_NVRAM
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
+ select GE_FPGA
help
This option enables support for the GE PPC9A.
@@ -48,6 +49,7 @@ config GEF_SBC310
select MMIO_NVRAM
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
+ select GE_FPGA
help
This option enables support for the GE SBC310.
@@ -57,6 +59,7 @@ config GEF_SBC610
select MMIO_NVRAM
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
+ select GE_FPGA
select HAS_RAPIDIO
help
This option enables support for the GE SBC610.
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 4b0d7b1aa005..ede815d6489d 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_SMP) += mpc86xx_smp.o
obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
obj-$(CONFIG_SBC8641D) += sbc8641d.o
obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o
-gef-gpio-$(CONFIG_GPIOLIB) += gef_gpio.o
-obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o gef_pic.o $(gef-gpio-y)
-obj-$(CONFIG_GEF_SBC310) += gef_sbc310.o gef_pic.o $(gef-gpio-y)
-obj-$(CONFIG_GEF_PPC9A) += gef_ppc9a.o gef_pic.o $(gef-gpio-y)
+obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o
+obj-$(CONFIG_GEF_SBC310) += gef_sbc310.o
+obj-$(CONFIG_GEF_PPC9A) += gef_ppc9a.o
diff --git a/arch/powerpc/platforms/86xx/gef_gpio.c b/arch/powerpc/platforms/86xx/gef_gpio.c
deleted file mode 100644
index 2a703365e664..000000000000
--- a/arch/powerpc/platforms/86xx/gef_gpio.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Driver for GE FPGA based GPIO
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- *
- * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-/* TODO
- *
- * Configuration of output modes (totem-pole/open-drain)
- * Interrupt configuration - interrupts are always generated the FPGA relies on
- * the I/O interrupt controllers mask to stop them propergating
- */
-
-#include <linux/kernel.h>
-#include <linux/compiler.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#define GEF_GPIO_DIRECT 0x00
-#define GEF_GPIO_IN 0x04
-#define GEF_GPIO_OUT 0x08
-#define GEF_GPIO_TRIG 0x0C
-#define GEF_GPIO_POLAR_A 0x10
-#define GEF_GPIO_POLAR_B 0x14
-#define GEF_GPIO_INT_STAT 0x18
-#define GEF_GPIO_OVERRUN 0x1C
-#define GEF_GPIO_MODE 0x20
-
-static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value)
-{
- unsigned int data;
-
- data = ioread32be(reg);
- /* value: 0=low; 1=high */
- if (value & 0x1)
- data = data | (0x1 << offset);
- else
- data = data & ~(0x1 << offset);
-
- iowrite32be(data, reg);
-}
-
-
-static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
-{
- unsigned int data;
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
-
- data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
- data = data | (0x1 << offset);
- iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
-
- return 0;
-}
-
-static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
-{
- unsigned int data;
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
-
- /* Set direction before switching to input */
- _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
-
- data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
- data = data & ~(0x1 << offset);
- iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
-
- return 0;
-}
-
-static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- unsigned int data;
- int state = 0;
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
-
- data = ioread32be(mmchip->regs + GEF_GPIO_IN);
- state = (int)((data >> offset) & 0x1);
-
- return state;
-}
-
-static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
-
- _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
-}
-
-static int __init gef_gpio_init(void)
-{
- struct device_node *np;
- int retval;
- struct of_mm_gpio_chip *gef_gpio_chip;
-
- for_each_compatible_node(np, NULL, "gef,sbc610-gpio") {
-
- pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
-
- /* Allocate chip structure */
- gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
- if (!gef_gpio_chip) {
- pr_err("%s: Unable to allocate structure\n",
- np->full_name);
- continue;
- }
-
- /* Setup pointers to chip functions */
- gef_gpio_chip->gc.of_gpio_n_cells = 2;
- gef_gpio_chip->gc.ngpio = 19;
- gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
- gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
- gef_gpio_chip->gc.get = gef_gpio_get;
- gef_gpio_chip->gc.set = gef_gpio_set;
-
- /* This function adds a memory mapped GPIO chip */
- retval = of_mm_gpiochip_add(np, gef_gpio_chip);
- if (retval) {
- kfree(gef_gpio_chip);
- pr_err("%s: Unable to add GPIO\n", np->full_name);
- }
- }
-
- for_each_compatible_node(np, NULL, "gef,sbc310-gpio") {
-
- pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
-
- /* Allocate chip structure */
- gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
- if (!gef_gpio_chip) {
- pr_err("%s: Unable to allocate structure\n",
- np->full_name);
- continue;
- }
-
- /* Setup pointers to chip functions */
- gef_gpio_chip->gc.of_gpio_n_cells = 2;
- gef_gpio_chip->gc.ngpio = 6;
- gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
- gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
- gef_gpio_chip->gc.get = gef_gpio_get;
- gef_gpio_chip->gc.set = gef_gpio_set;
-
- /* This function adds a memory mapped GPIO chip */
- retval = of_mm_gpiochip_add(np, gef_gpio_chip);
- if (retval) {
- kfree(gef_gpio_chip);
- pr_err("%s: Unable to add GPIO\n", np->full_name);
- }
- }
-
- return 0;
-};
-arch_initcall(gef_gpio_init);
-
-MODULE_DESCRIPTION("GE I/O FPGA GPIO driver");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
-MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 60ce07e39100..ed58b6cfd60c 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -37,9 +37,9 @@
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/ge/ge_pic.h>
#include "mpc86xx.h"
-#include "gef_pic.h"
#undef DEBUG
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 3ecee25bf3ed..710db69bd523 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -37,9 +37,9 @@
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/ge/ge_pic.h>
#include "mpc86xx.h"
-#include "gef_pic.h"
#undef DEBUG
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 5090d608d9ee..4a13d2f4ac20 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -37,9 +37,9 @@
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/ge/ge_pic.h>
#include "mpc86xx.h"
-#include "gef_pic.h"
#undef DEBUG
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 52bbfa031531..22cc3571ae19 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -37,9 +37,8 @@ void __init mpc86xx_init_irq(void)
int cascade_irq;
#endif
- struct mpic *mpic = mpic_alloc(NULL, 0,
- MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
- MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
0, 256, " MPIC ");
BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index ee56a9ea6a79..1fb0b3cddeb3 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -26,6 +26,7 @@ config MPC86XADS
config MPC885ADS
bool "MPC885ADS"
select CPM1
+ select OF_DYNAMIC
help
Freescale Semiconductor MPC885 Application Development System (ADS).
Also known as DUET.
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 0cfb46d54b8c..a35ca44ade66 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -2,7 +2,6 @@ menu "Platform support"
source "arch/powerpc/platforms/powernv/Kconfig"
source "arch/powerpc/platforms/pseries/Kconfig"
-source "arch/powerpc/platforms/iseries/Kconfig"
source "arch/powerpc/platforms/chrp/Kconfig"
source "arch/powerpc/platforms/512x/Kconfig"
source "arch/powerpc/platforms/52xx/Kconfig"
@@ -87,6 +86,14 @@ config MPIC_WEIRD
bool
default n
+config MPIC_MSGR
+ bool "MPIC message register support"
+ depends on MPIC
+ default n
+ help
+ Enables support for the MPIC message registers. These
+ registers are used for inter-processor communication.
+
config PPC_I8259
bool
default n
@@ -138,7 +145,7 @@ config MPIC_BROKEN_REGREAD
of the register contents in software.
config IBMVIO
- depends on PPC_PSERIES || PPC_ISERIES
+ depends on PPC_PSERIES
bool
default y
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 2635a22bade2..879b4a448498 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/
obj-$(CONFIG_PPC_86xx) += 86xx/
obj-$(CONFIG_PPC_POWERNV) += powernv/
obj-$(CONFIG_PPC_PSERIES) += pseries/
-obj-$(CONFIG_PPC_ISERIES) += iseries/
obj-$(CONFIG_PPC_MAPLE) += maple/
obj-$(CONFIG_PPC_PASEMI) += pasemi/
obj-$(CONFIG_PPC_CELL) += cell/
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 40a6e34793b4..db360fc4cf0e 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -67,7 +67,7 @@
struct axon_msic {
- struct irq_host *irq_host;
+ struct irq_domain *irq_domain;
__le32 *fifo_virt;
dma_addr_t fifo_phys;
dcr_host_t dcr_host;
@@ -152,7 +152,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
- struct irq_host *irq_host;
+ struct irq_domain *irq_domain;
struct device_node *dn, *tmp;
const phandle *ph;
struct axon_msic *msic = NULL;
@@ -184,14 +184,14 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
goto out_error;
}
- irq_host = irq_find_host(dn);
- if (!irq_host) {
- dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n",
+ irq_domain = irq_find_host(dn);
+ if (!irq_domain) {
+ dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
dn->full_name);
goto out_error;
}
- msic = irq_host->host_data;
+ msic = irq_domain->host_data;
out_error:
of_node_put(dn);
@@ -280,7 +280,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
BUILD_BUG_ON(NR_IRQS > 65536);
list_for_each_entry(entry, &dev->msi_list, list) {
- virq = irq_create_direct_mapping(msic->irq_host);
+ virq = irq_create_direct_mapping(msic->irq_domain);
if (virq == NO_IRQ) {
dev_warn(&dev->dev,
"axon_msi: virq allocation failed!\n");
@@ -318,7 +318,7 @@ static struct irq_chip msic_irq_chip = {
.name = "AXON-MSI",
};
-static int msic_host_map(struct irq_host *h, unsigned int virq,
+static int msic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_data(virq, h->host_data);
@@ -327,7 +327,7 @@ static int msic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops msic_host_ops = {
+static const struct irq_domain_ops msic_host_ops = {
.map = msic_host_map,
};
@@ -337,7 +337,7 @@ static void axon_msi_shutdown(struct platform_device *device)
u32 tmp;
pr_devel("axon_msi: disabling %s\n",
- msic->irq_host->of_node->full_name);
+ msic->irq_domain->of_node->full_name);
tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
@@ -392,16 +392,13 @@ static int axon_msi_probe(struct platform_device *device)
}
memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
- msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
- NR_IRQS, &msic_host_ops, 0);
- if (!msic->irq_host) {
- printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
+ msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
+ if (!msic->irq_domain) {
+ printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
dn->full_name);
goto out_free_fifo;
}
- msic->irq_host->host_data = msic;
-
irq_set_handler_data(virq, msic);
irq_set_chained_handler(virq, axon_msi_cascade);
pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 2516c1cf8467..943c9d39aa16 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -95,7 +95,6 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
unsigned long lpar_rc;
u64 hpte_v, hpte_r, slot;
- /* same as iseries */
if (vflags & HPTE_V_SECONDARY)
return -1;
@@ -319,7 +318,6 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
unsigned long lpar_rc;
u64 hpte_v, hpte_r, slot;
- /* same as iseries */
if (vflags & HPTE_V_SECONDARY)
return -1;
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 55015e1f6939..e5c3a2c6090d 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -34,7 +34,7 @@ static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
-static struct irq_host *beatic_host;
+static struct irq_domain *beatic_host;
/*
* In this implementation, "virq" == "IRQ plug number",
@@ -122,7 +122,7 @@ static struct irq_chip beatic_pic = {
*
* Note that the number (virq) is already assigned at upper layer.
*/
-static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
+static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
{
beat_destruct_irq_plug(virq);
}
@@ -133,7 +133,7 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
*
* Note that the number (virq) is already assigned at upper layer.
*/
-static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
+static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
int64_t err;
@@ -154,7 +154,7 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
* Called from irq_create_of_mapping() only.
* Note: We have only 1 entry to translate.
*/
-static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -166,13 +166,13 @@ static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static int beatic_pic_host_match(struct irq_host *h, struct device_node *np)
+static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
{
/* Match all */
return 1;
}
-static struct irq_host_ops beatic_pic_host_ops = {
+static const struct irq_domain_ops beatic_pic_host_ops = {
.map = beatic_pic_host_map,
.unmap = beatic_pic_host_unmap,
.xlate = beatic_pic_host_xlate,
@@ -239,9 +239,7 @@ void __init beatic_init_IRQ(void)
ppc_md.get_irq = beatic_get_irq;
/* Allocate an irq host */
- beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
- &beatic_pic_host_ops,
- 0);
+ beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
BUG_ON(beatic_host == NULL);
irq_set_default_host(beatic_host);
}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 96a433dd2d64..2d42f3bb66d6 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -56,7 +56,7 @@ struct iic {
static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT 2
-static struct irq_host *iic_host;
+static struct irq_domain *iic_host;
/* Convert between "pending" bits and hw irq number */
static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
@@ -186,7 +186,7 @@ void iic_message_pass(int cpu, int msg)
out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
}
-struct irq_host *iic_get_irq_host(int node)
+struct irq_domain *iic_get_irq_host(int node)
{
return iic_host;
}
@@ -222,13 +222,13 @@ void iic_request_IPIs(void)
#endif /* CONFIG_SMP */
-static int iic_host_match(struct irq_host *h, struct device_node *node)
+static int iic_host_match(struct irq_domain *h, struct device_node *node)
{
return of_device_is_compatible(node,
"IBM,CBEA-Internal-Interrupt-Controller");
}
-static int iic_host_map(struct irq_host *h, unsigned int virq,
+static int iic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
switch (hw & IIC_IRQ_TYPE_MASK) {
@@ -245,7 +245,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -285,7 +285,7 @@ static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops iic_host_ops = {
+static const struct irq_domain_ops iic_host_ops = {
.match = iic_host_match,
.map = iic_host_map,
.xlate = iic_host_xlate,
@@ -378,8 +378,8 @@ static int __init setup_iic(void)
void __init iic_init_IRQ(void)
{
/* Setup an irq host data structure */
- iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
- &iic_host_ops, IIC_IRQ_INVALID);
+ iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
+ NULL);
BUG_ON(iic_host == NULL);
irq_set_default_host(iic_host);
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 62002a7edfed..fa3e294fd343 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -197,7 +197,8 @@ static void __init mpic_init_IRQ(void)
/* The MPIC driver will get everything it needs from the
* device-tree, just pass 0 to all arguments
*/
- mpic = mpic_alloc(dn, 0, MPIC_SECONDARY, 0, 0, " MPIC ");
+ mpic = mpic_alloc(dn, 0, MPIC_SECONDARY | MPIC_NO_RESET,
+ 0, 0, " MPIC ");
if (mpic == NULL)
continue;
mpic_init(mpic);
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 442c28c00f88..d8b7cc8a66ca 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -62,7 +62,7 @@ enum {
#define SPIDER_IRQ_INVALID 63
struct spider_pic {
- struct irq_host *host;
+ struct irq_domain *host;
void __iomem *regs;
unsigned int node_id;
};
@@ -168,7 +168,7 @@ static struct irq_chip spider_pic = {
.irq_set_type = spider_set_irq_type,
};
-static int spider_host_map(struct irq_host *h, unsigned int virq,
+static int spider_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_data(virq, h->host_data);
@@ -180,7 +180,7 @@ static int spider_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
+static int spider_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -194,7 +194,7 @@ static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops spider_host_ops = {
+static const struct irq_domain_ops spider_host_ops = {
.map = spider_host_map,
.xlate = spider_host_xlate,
};
@@ -299,12 +299,10 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
panic("spider_pic: can't map registers !");
/* Allocate a host */
- pic->host = irq_alloc_host(of_node, IRQ_HOST_MAP_LINEAR,
- SPIDER_SRC_COUNT, &spider_host_ops,
- SPIDER_IRQ_INVALID);
+ pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT,
+ &spider_host_ops, pic);
if (pic->host == NULL)
panic("spider_pic: can't allocate irq host !");
- pic->host->host_data = pic;
/* Go through all sources and disable them */
for (i = 0; i < SPIDER_SRC_COUNT; i++) {
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index d4a094ca96f3..1d75c92ea8fb 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -646,6 +646,7 @@ long spufs_create(struct path *path, struct dentry *dentry,
out:
mutex_unlock(&path->dentry->d_inode->i_mutex);
+ dput(dentry);
return ret;
}
@@ -757,9 +758,9 @@ spufs_create_root(struct super_block *sb, void *data)
goto out_iput;
ret = -ENOMEM;
- sb->s_root = d_alloc_root(inode);
+ sb->s_root = d_make_root(inode);
if (!sb->s_root)
- goto out_iput;
+ goto out;
return 0;
out_iput:
@@ -828,19 +829,19 @@ static int __init spufs_init(void)
ret = spu_sched_init();
if (ret)
goto out_cache;
- ret = register_filesystem(&spufs_type);
+ ret = register_spu_syscalls(&spufs_calls);
if (ret)
goto out_sched;
- ret = register_spu_syscalls(&spufs_calls);
+ ret = register_filesystem(&spufs_type);
if (ret)
- goto out_fs;
+ goto out_syscalls;
spufs_init_isolated_loader();
return 0;
-out_fs:
- unregister_filesystem(&spufs_type);
+out_syscalls:
+ unregister_spu_syscalls(&spufs_calls);
out_sched:
spu_sched_exit();
out_cache:
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 8591bb62d7fc..5665dcc382c7 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -70,8 +70,6 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
ret = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
ret = spufs_create(&path, dentry, flags, mode, neighbor);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- dput(dentry);
path_put(&path);
}
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index f1f17bb2c33c..c665d7de6c99 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -435,7 +435,8 @@ static void __init chrp_find_openpic(void)
if (len > 1)
isu_size = iranges[3];
- chrp_mpic = mpic_alloc(np, opaddr, 0, isu_size, 0, " MPIC ");
+ chrp_mpic = mpic_alloc(np, opaddr, MPIC_NO_RESET,
+ isu_size, 0, " MPIC ");
if (chrp_mpic == NULL) {
printk(KERN_ERR "Failed to allocate MPIC structure\n");
goto bail;
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index f61a2dd96b99..53d6eee01963 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -96,9 +96,9 @@ static struct irq_chip flipper_pic = {
*
*/
-static struct irq_host *flipper_irq_host;
+static struct irq_domain *flipper_irq_host;
-static int flipper_pic_map(struct irq_host *h, unsigned int virq,
+static int flipper_pic_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(virq, h->host_data);
@@ -107,13 +107,13 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int flipper_pic_match(struct irq_host *h, struct device_node *np)
+static int flipper_pic_match(struct irq_domain *h, struct device_node *np)
{
return 1;
}
-static struct irq_host_ops flipper_irq_host_ops = {
+static const struct irq_domain_ops flipper_irq_domain_ops = {
.map = flipper_pic_map,
.match = flipper_pic_match,
};
@@ -130,10 +130,10 @@ static void __flipper_quiesce(void __iomem *io_base)
out_be32(io_base + FLIPPER_ICR, 0xffffffff);
}
-struct irq_host * __init flipper_pic_init(struct device_node *np)
+struct irq_domain * __init flipper_pic_init(struct device_node *np)
{
struct device_node *pi;
- struct irq_host *irq_host = NULL;
+ struct irq_domain *irq_domain = NULL;
struct resource res;
void __iomem *io_base;
int retval;
@@ -159,17 +159,15 @@ struct irq_host * __init flipper_pic_init(struct device_node *np)
__flipper_quiesce(io_base);
- irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS,
- &flipper_irq_host_ops, -1);
- if (!irq_host) {
- pr_err("failed to allocate irq_host\n");
+ irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS,
+ &flipper_irq_domain_ops, io_base);
+ if (!irq_domain) {
+ pr_err("failed to allocate irq_domain\n");
return NULL;
}
- irq_host->host_data = io_base;
-
out:
- return irq_host;
+ return irq_domain;
}
unsigned int flipper_pic_get_irq(void)
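The flipper-pic change above is the template for the irq_host -> irq_domain migration repeated in the following hunks: struct irq_host_ops becomes a const struct irq_domain_ops, and irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, n, ops, -1) followed by a manual host_data assignment collapses into a single irq_domain_add_linear(np, n, ops, host_data) call. A minimal sketch of the new-style registration, using a hypothetical controller name and IRQ count (only the irqdomain calls themselves come from the diff):

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_chip example_pic_chip;	/* mask/unmask ops set up elsewhere */

static int example_pic_map(struct irq_domain *d, unsigned int virq,
			   irq_hw_number_t hwirq)
{
	/* d->host_data is whatever was passed to irq_domain_add_linear(),
	 * here the controller's MMIO base, as in the hunk above. */
	irq_set_chip_data(virq, d->host_data);
	irq_set_chip_and_handler(virq, &example_pic_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops example_domain_ops = {
	.map = example_pic_map,
};

static struct irq_domain * __init example_pic_init(struct device_node *np,
						   void __iomem *io_base)
{
	/* host_data is handed over at creation time, so there is no
	 * separate irq_host->host_data assignment afterwards. */
	return irq_domain_add_linear(np, 32 /* illustrative IRQ count */,
				     &example_domain_ops, io_base);
}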
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index e4919170c6bc..3006b5117ec6 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -89,9 +89,9 @@ static struct irq_chip hlwd_pic = {
*
*/
-static struct irq_host *hlwd_irq_host;
+static struct irq_domain *hlwd_irq_host;
-static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
+static int hlwd_pic_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(virq, h->host_data);
@@ -100,11 +100,11 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops hlwd_irq_host_ops = {
+static const struct irq_domain_ops hlwd_irq_domain_ops = {
.map = hlwd_pic_map,
};
-static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
+static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
{
void __iomem *io_base = h->host_data;
int irq;
@@ -123,14 +123,14 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irq_host *irq_host = irq_get_handler_data(cascade_virq);
+ struct irq_domain *irq_domain = irq_get_handler_data(cascade_virq);
unsigned int virq;
raw_spin_lock(&desc->lock);
chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
raw_spin_unlock(&desc->lock);
- virq = __hlwd_pic_get_irq(irq_host);
+ virq = __hlwd_pic_get_irq(irq_domain);
if (virq != NO_IRQ)
generic_handle_irq(virq);
else
@@ -155,9 +155,9 @@ static void __hlwd_quiesce(void __iomem *io_base)
out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
}
-struct irq_host *hlwd_pic_init(struct device_node *np)
+struct irq_domain *hlwd_pic_init(struct device_node *np)
{
- struct irq_host *irq_host;
+ struct irq_domain *irq_domain;
struct resource res;
void __iomem *io_base;
int retval;
@@ -177,15 +177,14 @@ struct irq_host *hlwd_pic_init(struct device_node *np)
__hlwd_quiesce(io_base);
- irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, HLWD_NR_IRQS,
- &hlwd_irq_host_ops, -1);
- if (!irq_host) {
- pr_err("failed to allocate irq_host\n");
+ irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS,
+ &hlwd_irq_domain_ops, io_base);
+ if (!irq_domain) {
+ pr_err("failed to allocate irq_domain\n");
return NULL;
}
- irq_host->host_data = io_base;
- return irq_host;
+ return irq_domain;
}
unsigned int hlwd_pic_get_irq(void)
@@ -200,7 +199,7 @@ unsigned int hlwd_pic_get_irq(void)
void hlwd_pic_probe(void)
{
- struct irq_host *host;
+ struct irq_domain *host;
struct device_node *np;
const u32 *interrupts;
int cascade_virq;
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 9cfcf20c0560..ab51b21b4bd7 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -154,11 +154,9 @@ static void __init holly_init_IRQ(void)
struct device_node *cascade_node = NULL;
#endif
- mpic = mpic_alloc(NULL, 0,
- MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
+ mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
- 24,
- NR_IRQS-4, /* num_sources used */
+ 24, 0,
"Tsi108_PIC");
BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index bcfad92c9cec..455e7c087422 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -82,8 +82,7 @@ static void __init linkstation_init_IRQ(void)
{
struct mpic *mpic;
- mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET,
- 4, 32, " EPIC ");
+ mpic = mpic_alloc(NULL, 0, 0, 4, 0, " EPIC ");
BUG_ON(mpic == NULL);
/* PCI IRQs */
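The mpic_alloc() updates here and in the holly, mpc7448_hpc2 and storcenter hunks track the same MPIC rework seen in the chrp change above: a controller reset is now the default, so MPIC_WANTS_RESET goes away and boards that must not be reset pass MPIC_NO_RESET instead, while an irq_count of 0 appears to let the MPIC code work out the number of sources itself rather than relying on hard-coded values such as NR_IRQS-4. A before/after sketch in the shape of the linkstation call (the flag and argument semantics are inferred from the surrounding hunks, not stated explicitly in the diff):

#include <linux/init.h>
#include <asm/mpic.h>

static void __init example_init_IRQ(void)
{
	struct mpic *mpic;

	/* Old shape: explicit MPIC_WANTS_RESET plus a hard-coded source
	 * count (32):
	 *	mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET, 4, 32, " EPIC ");
	 * New shape: reset happens by default (pass MPIC_NO_RESET to opt
	 * out, as chrp_find_openpic() now does) and an irq_count of 0
	 * lets the MPIC code determine the source count: */
	mpic = mpic_alloc(NULL, 0, 0, 4, 0, " EPIC ");
	BUG_ON(mpic == NULL);
}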
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index f3350d786f5b..74ccce36baed 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -108,11 +108,9 @@ static void __init mpc7448_hpc2_init_IRQ(void)
struct device_node *cascade_node = NULL;
#endif
- mpic = mpic_alloc(NULL, 0,
- MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
+ mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
- 24,
- NR_IRQS-4, /* num_sources used */
+ 24, 0,
"Tsi108_PIC");
BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index afa638834965..e0ed3c71d69b 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -84,8 +84,7 @@ static void __init storcenter_init_IRQ(void)
{
struct mpic *mpic;
- mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET,
- 16, 32, " OpenPIC ");
+ mpic = mpic_alloc(NULL, 0, 0, 16, 0, " OpenPIC ");
BUG_ON(mpic == NULL);
/*
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
deleted file mode 100644
index b57cda3a0817..000000000000
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ /dev/null
@@ -1,38 +0,0 @@
-config PPC_ISERIES
- bool "IBM Legacy iSeries"
- depends on PPC64 && PPC_BOOK3S
- select PPC_SMP_MUXED_IPI
- select PPC_INDIRECT_PIO
- select PPC_INDIRECT_MMIO
- select PPC_PCI_CHOICE if EXPERT
-
-menu "iSeries device drivers"
- depends on PPC_ISERIES
-
-config VIODASD
- tristate "iSeries Virtual I/O disk support"
- depends on BLOCK
- select VIOPATH
- help
- If you are running on an iSeries system and you want to use
- virtual disks created and managed by OS/400, say Y.
-
-config VIOCD
- tristate "iSeries Virtual I/O CD support"
- depends on BLOCK
- select VIOPATH
- help
- If you are running Linux on an IBM iSeries system and you want to
- read a CD drive owned by OS/400, say Y here.
-
-config VIOTAPE
- tristate "iSeries Virtual Tape Support"
- select VIOPATH
- help
- If you are running Linux on an iSeries system and you want Linux
- to read and/or write a tape drive owned by OS/400, say Y here.
-
-endmenu
-
-config VIOPATH
- bool
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
deleted file mode 100644
index a7602b11ed9d..000000000000
--- a/arch/powerpc/platforms/iseries/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-ccflags-y := -mno-minimal-toc
-
-obj-y += exception.o
-obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \
- hvcall.o proc.o htab.o iommu.o misc.o irq.o
-obj-$(CONFIG_PCI) += pci.o
-obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_VIOPATH) += viopath.o vio.o
-obj-$(CONFIG_MODULES) += ksyms.o
diff --git a/arch/powerpc/platforms/iseries/call_hpt.h b/arch/powerpc/platforms/iseries/call_hpt.h
deleted file mode 100644
index 8d95fe4b554e..000000000000
--- a/arch/powerpc/platforms/iseries/call_hpt.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
-#define _PLATFORMS_ISERIES_CALL_HPT_H
-
-/*
- * This file contains the "hypervisor call" interface which is used to
- * drive the hypervisor from the OS.
- */
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/mmu.h>
-
-#define HvCallHptGetHptAddress HvCallHpt + 0
-#define HvCallHptGetHptPages HvCallHpt + 1
-#define HvCallHptSetPp HvCallHpt + 5
-#define HvCallHptSetSwBits HvCallHpt + 6
-#define HvCallHptUpdate HvCallHpt + 7
-#define HvCallHptInvalidateNoSyncICache HvCallHpt + 8
-#define HvCallHptGet HvCallHpt + 11
-#define HvCallHptFindNextValid HvCallHpt + 12
-#define HvCallHptFindValid HvCallHpt + 13
-#define HvCallHptAddValidate HvCallHpt + 16
-#define HvCallHptInvalidateSetSwBitsGet HvCallHpt + 18
-
-
-static inline u64 HvCallHpt_getHptAddress(void)
-{
- return HvCall0(HvCallHptGetHptAddress);
-}
-
-static inline u64 HvCallHpt_getHptPages(void)
-{
- return HvCall0(HvCallHptGetHptPages);
-}
-
-static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value)
-{
- HvCall2(HvCallHptSetPp, hpteIndex, value);
-}
-
-static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff)
-{
- HvCall3(HvCallHptSetSwBits, hpteIndex, bitson, bitsoff);
-}
-
-static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex)
-{
- HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
-}
-
-static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson,
- u8 bitsoff)
-{
- u64 compressedStatus;
-
- compressedStatus = HvCall4(HvCallHptInvalidateSetSwBitsGet,
- hpteIndex, bitson, bitsoff, 1);
- HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
- return compressedStatus;
-}
-
-static inline u64 HvCallHpt_findValid(struct hash_pte *hpte, u64 vpn)
-{
- return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
-}
-
-static inline u64 HvCallHpt_findNextValid(struct hash_pte *hpte, u32 hpteIndex,
- u8 bitson, u8 bitsoff)
-{
- return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
- bitson, bitsoff);
-}
-
-static inline void HvCallHpt_get(struct hash_pte *hpte, u32 hpteIndex)
-{
- HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
-}
-
-static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit,
- struct hash_pte *hpte)
-{
- HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
-}
-
-#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
diff --git a/arch/powerpc/platforms/iseries/call_pci.h b/arch/powerpc/platforms/iseries/call_pci.h
deleted file mode 100644
index dbdf69850ed9..000000000000
--- a/arch/powerpc/platforms/iseries/call_pci.h
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Provides the Hypervisor PCI calls for iSeries Linux Partition.
- * Copyright (C) 2001 <Wayne G Holm> <IBM Corporation>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the:
- * Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
- *
- * Change Activity:
- * Created, Jan 9, 2001
- */
-
-#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
-#define _PLATFORMS_ISERIES_CALL_PCI_H
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-
-/*
- * DSA == Direct Select Address
- * this struct must be 64 bits in total
- */
-struct HvCallPci_DsaAddr {
- u16 busNumber; /* PHB index? */
- u8 subBusNumber; /* PCI bus number? */
- u8 deviceId; /* device and function? */
- u8 barNumber;
- u8 reserved[3];
-};
-
-union HvDsaMap {
- u64 DsaAddr;
- struct HvCallPci_DsaAddr Dsa;
-};
-
-struct HvCallPci_LoadReturn {
- u64 rc;
- u64 value;
-};
-
-enum HvCallPci_DeviceType {
- HvCallPci_NodeDevice = 1,
- HvCallPci_SpDevice = 2,
- HvCallPci_IopDevice = 3,
- HvCallPci_BridgeDevice = 4,
- HvCallPci_MultiFunctionDevice = 5,
- HvCallPci_IoaDevice = 6
-};
-
-
-struct HvCallPci_DeviceInfo {
- u32 deviceType; /* See DeviceType enum for values */
-};
-
-struct HvCallPci_BusUnitInfo {
- u32 sizeReturned; /* length of data returned */
- u32 deviceType; /* see DeviceType enum for values */
-};
-
-struct HvCallPci_BridgeInfo {
- struct HvCallPci_BusUnitInfo busUnitInfo; /* Generic bus unit info */
- u8 subBusNumber; /* Bus number of secondary bus */
- u8 maxAgents; /* Max idsels on secondary bus */
- u8 maxSubBusNumber; /* Max Sub Bus */
- u8 logicalSlotNumber; /* Logical Slot Number for IOA */
-};
-
-
-/*
- * Maximum BusUnitInfo buffer size. Provided for clients so
- * they can allocate a buffer big enough for any type of bus
- * unit. Increase as needed.
- */
-enum {HvCallPci_MaxBusUnitInfoSize = 128};
-
-struct HvCallPci_BarParms {
- u64 vaddr;
- u64 raddr;
- u64 size;
- u64 protectStart;
- u64 protectEnd;
- u64 relocationOffset;
- u64 pciAddress;
- u64 reserved[3];
-};
-
-enum HvCallPci_VpdType {
- HvCallPci_BusVpd = 1,
- HvCallPci_BusAdapterVpd = 2
-};
-
-#define HvCallPciConfigLoad8 HvCallPci + 0
-#define HvCallPciConfigLoad16 HvCallPci + 1
-#define HvCallPciConfigLoad32 HvCallPci + 2
-#define HvCallPciConfigStore8 HvCallPci + 3
-#define HvCallPciConfigStore16 HvCallPci + 4
-#define HvCallPciConfigStore32 HvCallPci + 5
-#define HvCallPciEoi HvCallPci + 16
-#define HvCallPciGetBarParms HvCallPci + 18
-#define HvCallPciMaskFisr HvCallPci + 20
-#define HvCallPciUnmaskFisr HvCallPci + 21
-#define HvCallPciSetSlotReset HvCallPci + 25
-#define HvCallPciGetDeviceInfo HvCallPci + 27
-#define HvCallPciGetCardVpd HvCallPci + 28
-#define HvCallPciBarLoad8 HvCallPci + 40
-#define HvCallPciBarLoad16 HvCallPci + 41
-#define HvCallPciBarLoad32 HvCallPci + 42
-#define HvCallPciBarLoad64 HvCallPci + 43
-#define HvCallPciBarStore8 HvCallPci + 44
-#define HvCallPciBarStore16 HvCallPci + 45
-#define HvCallPciBarStore32 HvCallPci + 46
-#define HvCallPciBarStore64 HvCallPci + 47
-#define HvCallPciMaskInterrupts HvCallPci + 48
-#define HvCallPciUnmaskInterrupts HvCallPci + 49
-#define HvCallPciGetBusUnitInfo HvCallPci + 50
-
-static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
- u8 deviceId, u32 offset, u16 *value)
-{
- struct HvCallPci_DsaAddr dsa;
- struct HvCallPci_LoadReturn retVal;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumber;
- dsa.subBusNumber = subBusNumber;
- dsa.deviceId = deviceId;
-
- HvCall3Ret16(HvCallPciConfigLoad16, &retVal, *(u64 *)&dsa, offset, 0);
-
- *value = retVal.value;
-
- return retVal.rc;
-}
-
-static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
- u8 deviceId, u32 offset, u32 *value)
-{
- struct HvCallPci_DsaAddr dsa;
- struct HvCallPci_LoadReturn retVal;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumber;
- dsa.subBusNumber = subBusNumber;
- dsa.deviceId = deviceId;
-
- HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
-
- *value = retVal.value;
-
- return retVal.rc;
-}
-
-static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
- u8 deviceId, u32 offset, u8 value)
-{
- struct HvCallPci_DsaAddr dsa;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumber;
- dsa.subBusNumber = subBusNumber;
- dsa.deviceId = deviceId;
-
- return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
-}
-
-static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
- u8 deviceIdParm)
-{
- struct HvCallPci_DsaAddr dsa;
- struct HvCallPci_LoadReturn retVal;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumberParm;
- dsa.subBusNumber = subBusParm;
- dsa.deviceId = deviceIdParm;
-
- HvCall1Ret16(HvCallPciEoi, &retVal, *(u64*)&dsa);
-
- return retVal.rc;
-}
-
-static inline u64 HvCallPci_getBarParms(u16 busNumberParm, u8 subBusParm,
- u8 deviceIdParm, u8 barNumberParm, u64 parms, u32 sizeofParms)
-{
- struct HvCallPci_DsaAddr dsa;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumberParm;
- dsa.subBusNumber = subBusParm;
- dsa.deviceId = deviceIdParm;
- dsa.barNumber = barNumberParm;
-
- return HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms);
-}
-
-static inline u64 HvCallPci_maskFisr(u16 busNumberParm, u8 subBusParm,
- u8 deviceIdParm, u64 fisrMask)
-{
- struct HvCallPci_DsaAddr dsa;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumberParm;
- dsa.subBusNumber = subBusParm;
- dsa.deviceId = deviceIdParm;
-
- return HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask);
-}
-
-static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
- u8 deviceIdParm, u64 fisrMask)
-{
- struct HvCallPci_DsaAddr dsa;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumberParm;
- dsa.subBusNumber = subBusParm;
- dsa.deviceId = deviceIdParm;
-
- return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
-}
-
-static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
- u8 deviceNumberParm, u64 parms, u32 sizeofParms)
-{
- struct HvCallPci_DsaAddr dsa;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumberParm;
- dsa.subBusNumber = subBusParm;
- dsa.deviceId = deviceNumberParm << 4;
-
- return HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms);
-}
-
-static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm, u8 subBusParm,
- u8 deviceIdParm, u64 interruptMask)
-{
- struct HvCallPci_DsaAddr dsa;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumberParm;
- dsa.subBusNumber = subBusParm;
- dsa.deviceId = deviceIdParm;
-
- return HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask);
-}
-
-static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm, u8 subBusParm,
- u8 deviceIdParm, u64 interruptMask)
-{
- struct HvCallPci_DsaAddr dsa;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumberParm;
- dsa.subBusNumber = subBusParm;
- dsa.deviceId = deviceIdParm;
-
- return HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask);
-}
-
-static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm, u8 subBusParm,
- u8 deviceIdParm, u64 parms, u32 sizeofParms)
-{
- struct HvCallPci_DsaAddr dsa;
-
- *((u64*)&dsa) = 0;
-
- dsa.busNumber = busNumberParm;
- dsa.subBusNumber = subBusParm;
- dsa.deviceId = deviceIdParm;
-
- return HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms,
- sizeofParms);
-}
-
-static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
- u16 sizeParm)
-{
- u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
- sizeParm, HvCallPci_BusVpd);
- if (xRc == -1)
- return -1;
- else
- return xRc & 0xFFFF;
-}
-
-#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
diff --git a/arch/powerpc/platforms/iseries/call_sm.h b/arch/powerpc/platforms/iseries/call_sm.h
deleted file mode 100644
index c7e251619f48..000000000000
--- a/arch/powerpc/platforms/iseries/call_sm.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ISERIES_CALL_SM_H
-#define _ISERIES_CALL_SM_H
-
-/*
- * This file contains the "hypervisor call" interface which is used to
- * drive the hypervisor from the OS.
- */
-
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-
-#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
-
-static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
- u64 indexIntoBitMap)
-{
- return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
-}
-
-#endif /* _ISERIES_CALL_SM_H */
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
deleted file mode 100644
index f0491cc28900..000000000000
--- a/arch/powerpc/platforms/iseries/dt.c
+++ /dev/null
@@ -1,643 +0,0 @@
-/*
- * Copyright (C) 2005-2006 Michael Ellerman, IBM Corporation
- * Copyright (C) 2000-2004, IBM Corporation
- *
- * Description:
- * This file contains all the routines to build a flattened device
- * tree for a legacy iSeries machine.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#undef DEBUG
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci_regs.h>
-#include <linux/pci_ids.h>
-#include <linux/threads.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/if_ether.h> /* ETH_ALEN */
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/lppaca.h>
-#include <asm/cputable.h>
-#include <asm/abs_addr.h>
-#include <asm/system.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/hv_call_xm.h>
-#include <asm/udbg.h>
-
-#include "processor_vpd.h"
-#include "call_hpt.h"
-#include "call_pci.h"
-#include "pci.h"
-#include "it_exp_vpd_panel.h"
-#include "naca.h"
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-/*
- * These are created by the linker script at the start and end
- * of the section containing all the strings marked with the DS macro.
- */
-extern char __dt_strings_start[];
-extern char __dt_strings_end[];
-
-#define DS(s) ({ \
- static const char __s[] __attribute__((section(".dt_strings"))) = s; \
- __s; \
-})
-
-struct iseries_flat_dt {
- struct boot_param_header header;
- u64 reserve_map[2];
-};
-
-static void * __initdata dt_data;
-
-/*
- * Putting these strings here keeps them out of the .dt_strings section
- * that we capture for the strings blob of the flattened device tree.
- */
-static char __initdata device_type_cpu[] = "cpu";
-static char __initdata device_type_memory[] = "memory";
-static char __initdata device_type_serial[] = "serial";
-static char __initdata device_type_network[] = "network";
-static char __initdata device_type_pci[] = "pci";
-static char __initdata device_type_vdevice[] = "vdevice";
-static char __initdata device_type_vscsi[] = "vscsi";
-
-
-/* EBCDIC to ASCII conversion routines */
-
-static unsigned char __init e2a(unsigned char x)
-{
- switch (x) {
- case 0x81 ... 0x89:
- return x - 0x81 + 'a';
- case 0x91 ... 0x99:
- return x - 0x91 + 'j';
- case 0xA2 ... 0xA9:
- return x - 0xA2 + 's';
- case 0xC1 ... 0xC9:
- return x - 0xC1 + 'A';
- case 0xD1 ... 0xD9:
- return x - 0xD1 + 'J';
- case 0xE2 ... 0xE9:
- return x - 0xE2 + 'S';
- case 0xF0 ... 0xF9:
- return x - 0xF0 + '0';
- }
- return ' ';
-}
-
-static unsigned char * __init strne2a(unsigned char *dest,
- const unsigned char *src, size_t n)
-{
- int i;
-
- n = strnlen(src, n);
-
- for (i = 0; i < n; i++)
- dest[i] = e2a(src[i]);
-
- return dest;
-}
-
-static struct iseries_flat_dt * __init dt_init(void)
-{
- struct iseries_flat_dt *dt;
- unsigned long str_len;
-
- str_len = __dt_strings_end - __dt_strings_start;
- dt = (struct iseries_flat_dt *)ALIGN(klimit, 8);
- dt->header.off_mem_rsvmap =
- offsetof(struct iseries_flat_dt, reserve_map);
- dt->header.off_dt_strings = ALIGN(sizeof(*dt), 8);
- dt->header.off_dt_struct = dt->header.off_dt_strings
- + ALIGN(str_len, 8);
- dt_data = (void *)((unsigned long)dt + dt->header.off_dt_struct);
- dt->header.dt_strings_size = str_len;
-
- /* There is no notion of hardware cpu id on iSeries */
- dt->header.boot_cpuid_phys = smp_processor_id();
-
- memcpy((char *)dt + dt->header.off_dt_strings, __dt_strings_start,
- str_len);
-
- dt->header.magic = OF_DT_HEADER;
- dt->header.version = 0x10;
- dt->header.last_comp_version = 0x10;
-
- dt->reserve_map[0] = 0;
- dt->reserve_map[1] = 0;
-
- return dt;
-}
-
-static void __init dt_push_u32(struct iseries_flat_dt *dt, u32 value)
-{
- *((u32 *)dt_data) = value;
- dt_data += sizeof(u32);
-}
-
-#ifdef notyet
-static void __init dt_push_u64(struct iseries_flat_dt *dt, u64 value)
-{
- *((u64 *)dt_data) = value;
- dt_data += sizeof(u64);
-}
-#endif
-
-static void __init dt_push_bytes(struct iseries_flat_dt *dt, const char *data,
- int len)
-{
- memcpy(dt_data, data, len);
- dt_data += ALIGN(len, 4);
-}
-
-static void __init dt_start_node(struct iseries_flat_dt *dt, const char *name)
-{
- dt_push_u32(dt, OF_DT_BEGIN_NODE);
- dt_push_bytes(dt, name, strlen(name) + 1);
-}
-
-#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
-
-static void __init __dt_prop(struct iseries_flat_dt *dt, const char *name,
- const void *data, int len)
-{
- unsigned long offset;
-
- dt_push_u32(dt, OF_DT_PROP);
-
- /* Length of the data */
- dt_push_u32(dt, len);
-
- offset = name - __dt_strings_start;
-
- /* The offset of the properties name in the string blob. */
- dt_push_u32(dt, (u32)offset);
-
- /* The actual data. */
- dt_push_bytes(dt, data, len);
-}
-#define dt_prop(dt, name, data, len) __dt_prop((dt), DS(name), (data), (len))
-
-#define dt_prop_str(dt, name, data) \
- dt_prop((dt), name, (data), strlen((data)) + 1); /* + 1 for NULL */
-
-static void __init __dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
- u32 data)
-{
- __dt_prop(dt, name, &data, sizeof(u32));
-}
-#define dt_prop_u32(dt, name, data) __dt_prop_u32((dt), DS(name), (data))
-
-static void __init __maybe_unused __dt_prop_u64(struct iseries_flat_dt *dt,
- const char *name, u64 data)
-{
- __dt_prop(dt, name, &data, sizeof(u64));
-}
-#define dt_prop_u64(dt, name, data) __dt_prop_u64((dt), DS(name), (data))
-
-#define dt_prop_u64_list(dt, name, data, n) \
- dt_prop((dt), name, (data), sizeof(u64) * (n))
-
-#define dt_prop_u32_list(dt, name, data, n) \
- dt_prop((dt), name, (data), sizeof(u32) * (n))
-
-#define dt_prop_empty(dt, name) dt_prop((dt), name, NULL, 0)
-
-static void __init dt_cpus(struct iseries_flat_dt *dt)
-{
- unsigned char buf[32];
- unsigned char *p;
- unsigned int i, index;
- struct IoHriProcessorVpd *d;
- u32 pft_size[2];
-
- /* yuck */
- snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
- p = strchr(buf, ' ');
- if (!p) p = buf + strlen(buf);
-
- dt_start_node(dt, "cpus");
- dt_prop_u32(dt, "#address-cells", 1);
- dt_prop_u32(dt, "#size-cells", 0);
-
- pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
- pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
-
- for (i = 0; i < NR_LPPACAS; i++) {
- if (lppaca[i].dyn_proc_status >= 2)
- continue;
-
- snprintf(p, 32 - (p - buf), "@%d", i);
- dt_start_node(dt, buf);
-
- dt_prop_str(dt, "device_type", device_type_cpu);
-
- index = lppaca[i].dyn_hv_phys_proc_index;
- d = &xIoHriProcessorVpd[index];
-
- dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
- dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
-
- dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
- dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
-
- /* magic conversions to Hz copied from old code */
- dt_prop_u32(dt, "clock-frequency",
- ((1UL << 34) * 1000000) / d->xProcFreq);
- dt_prop_u32(dt, "timebase-frequency",
- ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
-
- dt_prop_u32(dt, "reg", i);
-
- dt_prop_u32_list(dt, "ibm,pft-size", pft_size, 2);
-
- dt_end_node(dt);
- }
-
- dt_end_node(dt);
-}
-
-static void __init dt_model(struct iseries_flat_dt *dt)
-{
- char buf[16] = "IBM,";
-
- /* N.B. lparcfg.c knows about the "IBM," prefixes ... */
- /* "IBM," + mfgId[2:3] + systemSerial[1:5] */
- strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2);
- strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5);
- buf[11] = '\0';
- dt_prop_str(dt, "system-id", buf);
-
- /* "IBM," + machineType[0:4] */
- strne2a(buf + 4, xItExtVpdPanel.machineType, 4);
- buf[8] = '\0';
- dt_prop_str(dt, "model", buf);
-
- dt_prop_str(dt, "compatible", "IBM,iSeries");
- dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
-}
-
-static void __init dt_initrd(struct iseries_flat_dt *dt)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (naca.xRamDisk) {
- dt_prop_u64(dt, "linux,initrd-start", (u64)naca.xRamDisk);
- dt_prop_u64(dt, "linux,initrd-end",
- (u64)naca.xRamDisk + naca.xRamDiskSize * HW_PAGE_SIZE);
- }
-#endif
-}
-
-static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
- const char *name, u32 reg, int unit,
- const char *type, const char *compat, int end)
-{
- char buf[32];
-
- snprintf(buf, 32, "%s@%08x", name, reg + ((unit >= 0) ? unit : 0));
- dt_start_node(dt, buf);
- dt_prop_str(dt, "device_type", type);
- if (compat)
- dt_prop_str(dt, "compatible", compat);
- dt_prop_u32(dt, "reg", reg + ((unit >= 0) ? unit : 0));
- if (unit >= 0)
- dt_prop_u32(dt, "linux,unit_address", unit);
- if (end)
- dt_end_node(dt);
-}
-
-static void __init dt_vdevices(struct iseries_flat_dt *dt)
-{
- u32 reg = 0;
- HvLpIndexMap vlan_map;
- int i;
-
- dt_start_node(dt, "vdevice");
- dt_prop_str(dt, "device_type", device_type_vdevice);
- dt_prop_str(dt, "compatible", "IBM,iSeries-vdevice");
- dt_prop_u32(dt, "#address-cells", 1);
- dt_prop_u32(dt, "#size-cells", 0);
-
- dt_do_vdevice(dt, "vty", reg, -1, device_type_serial,
- "IBM,iSeries-vty", 1);
- reg++;
-
- dt_do_vdevice(dt, "v-scsi", reg, -1, device_type_vscsi,
- "IBM,v-scsi", 1);
- reg++;
-
- vlan_map = HvLpConfig_getVirtualLanIndexMap();
- for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
- unsigned char mac_addr[ETH_ALEN];
-
- if ((vlan_map & (0x8000 >> i)) == 0)
- continue;
- dt_do_vdevice(dt, "l-lan", reg, i, device_type_network,
- "IBM,iSeries-l-lan", 0);
- mac_addr[0] = 0x02;
- mac_addr[1] = 0x01;
- mac_addr[2] = 0xff;
- mac_addr[3] = i;
- mac_addr[4] = 0xff;
- mac_addr[5] = HvLpConfig_getLpIndex_outline();
- dt_prop(dt, "local-mac-address", (char *)mac_addr, ETH_ALEN);
- dt_prop(dt, "mac-address", (char *)mac_addr, ETH_ALEN);
- dt_prop_u32(dt, "max-frame-size", 9000);
- dt_prop_u32(dt, "address-bits", 48);
-
- dt_end_node(dt);
- }
-
- dt_end_node(dt);
-}
-
-struct pci_class_name {
- u16 code;
- const char *name;
- const char *type;
-};
-
-static struct pci_class_name __initdata pci_class_name[] = {
- { PCI_CLASS_NETWORK_ETHERNET, "ethernet", device_type_network },
-};
-
-static struct pci_class_name * __init dt_find_pci_class_name(u16 class_code)
-{
- struct pci_class_name *cp;
-
- for (cp = pci_class_name;
- cp < &pci_class_name[ARRAY_SIZE(pci_class_name)]; cp++)
- if (cp->code == class_code)
- return cp;
- return NULL;
-}
-
-/*
- * This assumes that the node slot is always on the primary bus!
- */
-static void __init scan_bridge_slot(struct iseries_flat_dt *dt,
- HvBusNumber bus, struct HvCallPci_BridgeInfo *bridge_info)
-{
- HvSubBusNumber sub_bus = bridge_info->subBusNumber;
- u16 vendor_id;
- u16 device_id;
- u32 class_id;
- int err;
- char buf[32];
- u32 reg[5];
- int id_sel = ISERIES_GET_DEVICE_FROM_SUBBUS(sub_bus);
- int function = ISERIES_GET_FUNCTION_FROM_SUBBUS(sub_bus);
- HvAgentId eads_id_sel = ISERIES_PCI_AGENTID(id_sel, function);
- u8 devfn;
- struct pci_class_name *cp;
-
- /*
- * Connect all functions of any device found.
- */
- for (id_sel = 1; id_sel <= bridge_info->maxAgents; id_sel++) {
- for (function = 0; function < 8; function++) {
- HvAgentId agent_id = ISERIES_PCI_AGENTID(id_sel,
- function);
- err = HvCallXm_connectBusUnit(bus, sub_bus,
- agent_id, 0);
- if (err) {
- if (err != 0x302)
- DBG("connectBusUnit(%x, %x, %x) %x\n",
- bus, sub_bus, agent_id, err);
- continue;
- }
-
- err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
- PCI_VENDOR_ID, &vendor_id);
- if (err) {
- DBG("ReadVendor(%x, %x, %x) %x\n",
- bus, sub_bus, agent_id, err);
- continue;
- }
- err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
- PCI_DEVICE_ID, &device_id);
- if (err) {
- DBG("ReadDevice(%x, %x, %x) %x\n",
- bus, sub_bus, agent_id, err);
- continue;
- }
- err = HvCallPci_configLoad32(bus, sub_bus, agent_id,
- PCI_CLASS_REVISION , &class_id);
- if (err) {
- DBG("ReadClass(%x, %x, %x) %x\n",
- bus, sub_bus, agent_id, err);
- continue;
- }
-
- devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(eads_id_sel),
- function);
- cp = dt_find_pci_class_name(class_id >> 16);
- if (cp && cp->name)
- strncpy(buf, cp->name, sizeof(buf) - 1);
- else
- snprintf(buf, sizeof(buf), "pci%x,%x",
- vendor_id, device_id);
- buf[sizeof(buf) - 1] = '\0';
- snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
- "@%x", PCI_SLOT(devfn));
- buf[sizeof(buf) - 1] = '\0';
- if (function != 0)
- snprintf(buf + strlen(buf),
- sizeof(buf) - strlen(buf),
- ",%x", function);
- dt_start_node(dt, buf);
- reg[0] = (bus << 16) | (devfn << 8);
- reg[1] = 0;
- reg[2] = 0;
- reg[3] = 0;
- reg[4] = 0;
- dt_prop_u32_list(dt, "reg", reg, 5);
- if (cp && (cp->type || cp->name))
- dt_prop_str(dt, "device_type",
- cp->type ? cp->type : cp->name);
- dt_prop_u32(dt, "vendor-id", vendor_id);
- dt_prop_u32(dt, "device-id", device_id);
- dt_prop_u32(dt, "class-code", class_id >> 8);
- dt_prop_u32(dt, "revision-id", class_id & 0xff);
- dt_prop_u32(dt, "linux,subbus", sub_bus);
- dt_prop_u32(dt, "linux,agent-id", agent_id);
- dt_prop_u32(dt, "linux,logical-slot-number",
- bridge_info->logicalSlotNumber);
- dt_end_node(dt);
-
- }
- }
-}
-
-static void __init scan_bridge(struct iseries_flat_dt *dt, HvBusNumber bus,
- HvSubBusNumber sub_bus, int id_sel)
-{
- struct HvCallPci_BridgeInfo bridge_info;
- HvAgentId agent_id;
- int function;
- int ret;
-
- /* Note: hvSubBus and irq are always 0 at this level! */
- for (function = 0; function < 8; ++function) {
- agent_id = ISERIES_PCI_AGENTID(id_sel, function);
- ret = HvCallXm_connectBusUnit(bus, sub_bus, agent_id, 0);
- if (ret != 0) {
- if (ret != 0xb)
- DBG("connectBusUnit(%x, %x, %x) %x\n",
- bus, sub_bus, agent_id, ret);
- continue;
- }
- DBG("found device at bus %d idsel %d func %d (AgentId %x)\n",
- bus, id_sel, function, agent_id);
- ret = HvCallPci_getBusUnitInfo(bus, sub_bus, agent_id,
- iseries_hv_addr(&bridge_info),
- sizeof(struct HvCallPci_BridgeInfo));
- if (ret != 0)
- continue;
- DBG("bridge info: type %x subbus %x "
- "maxAgents %x maxsubbus %x logslot %x\n",
- bridge_info.busUnitInfo.deviceType,
- bridge_info.subBusNumber,
- bridge_info.maxAgents,
- bridge_info.maxSubBusNumber,
- bridge_info.logicalSlotNumber);
- if (bridge_info.busUnitInfo.deviceType ==
- HvCallPci_BridgeDevice)
- scan_bridge_slot(dt, bus, &bridge_info);
- else
- DBG("PCI: Invalid Bridge Configuration(0x%02X)",
- bridge_info.busUnitInfo.deviceType);
- }
-}
-
-static void __init scan_phb(struct iseries_flat_dt *dt, HvBusNumber bus)
-{
- struct HvCallPci_DeviceInfo dev_info;
- const HvSubBusNumber sub_bus = 0; /* EADs is always 0. */
- int err;
- int id_sel;
- const int max_agents = 8;
-
- /*
- * Probe for EADs Bridges
- */
- for (id_sel = 1; id_sel < max_agents; ++id_sel) {
- err = HvCallPci_getDeviceInfo(bus, sub_bus, id_sel,
- iseries_hv_addr(&dev_info),
- sizeof(struct HvCallPci_DeviceInfo));
- if (err) {
- if (err != 0x302)
- DBG("getDeviceInfo(%x, %x, %x) %x\n",
- bus, sub_bus, id_sel, err);
- continue;
- }
- if (dev_info.deviceType != HvCallPci_NodeDevice) {
- DBG("PCI: Invalid System Configuration"
- "(0x%02X) for bus 0x%02x id 0x%02x.\n",
- dev_info.deviceType, bus, id_sel);
- continue;
- }
- scan_bridge(dt, bus, sub_bus, id_sel);
- }
-}
-
-static void __init dt_pci_devices(struct iseries_flat_dt *dt)
-{
- HvBusNumber bus;
- char buf[32];
- u32 buses[2];
- int phb_num = 0;
-
- /* Check all possible buses. */
- for (bus = 0; bus < 256; bus++) {
- int err = HvCallXm_testBus(bus);
-
- if (err) {
- /*
- * Check for Unexpected Return code, a clue that
- * something has gone wrong.
- */
- if (err != 0x0301)
- DBG("Unexpected Return on Probe(0x%02X) "
- "0x%04X\n", bus, err);
- continue;
- }
- DBG("bus %d appears to exist\n", bus);
- snprintf(buf, 32, "pci@%d", phb_num);
- dt_start_node(dt, buf);
- dt_prop_str(dt, "device_type", device_type_pci);
- dt_prop_str(dt, "compatible", "IBM,iSeries-Logical-PHB");
- dt_prop_u32(dt, "#address-cells", 3);
- dt_prop_u32(dt, "#size-cells", 2);
- buses[0] = buses[1] = bus;
- dt_prop_u32_list(dt, "bus-range", buses, 2);
- scan_phb(dt, bus);
- dt_end_node(dt);
- phb_num++;
- }
-}
-
-static void dt_finish(struct iseries_flat_dt *dt)
-{
- dt_push_u32(dt, OF_DT_END);
- dt->header.totalsize = (unsigned long)dt_data - (unsigned long)dt;
- klimit = ALIGN((unsigned long)dt_data, 8);
-}
-
-void * __init build_flat_dt(unsigned long phys_mem_size)
-{
- struct iseries_flat_dt *iseries_dt;
- u64 tmp[2];
-
- iseries_dt = dt_init();
-
- dt_start_node(iseries_dt, "");
-
- dt_prop_u32(iseries_dt, "#address-cells", 2);
- dt_prop_u32(iseries_dt, "#size-cells", 2);
- dt_model(iseries_dt);
-
- /* /memory */
- dt_start_node(iseries_dt, "memory@0");
- dt_prop_str(iseries_dt, "device_type", device_type_memory);
- tmp[0] = 0;
- tmp[1] = phys_mem_size;
- dt_prop_u64_list(iseries_dt, "reg", tmp, 2);
- dt_end_node(iseries_dt);
-
- /* /chosen */
- dt_start_node(iseries_dt, "chosen");
- dt_prop_str(iseries_dt, "bootargs", cmd_line);
- dt_initrd(iseries_dt);
- dt_end_node(iseries_dt);
-
- dt_cpus(iseries_dt);
-
- dt_vdevices(iseries_dt);
- dt_pci_devices(iseries_dt);
-
- dt_end_node(iseries_dt);
-
- dt_finish(iseries_dt);
-
- return iseries_dt;
-}
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
deleted file mode 100644
index f519ee17ff7d..000000000000
--- a/arch/powerpc/platforms/iseries/exception.S
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Low level routines for legacy iSeries support.
- *
- * Extracted from head_64.S
- *
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
- * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
- * Adapted for Power Macintosh by Paul Mackerras.
- * Low-level exception handlers and MMU support
- * rewritten by Paul Mackerras.
- * Copyright (C) 1996 Paul Mackerras.
- *
- * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
- * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
- *
- * This file contains the low-level support and setup for the
- * PowerPC-64 platform, including trap and interrupt dispatch.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/reg.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/ptrace.h>
-#include <asm/cputable.h>
-#include <asm/mmu.h>
-
-#include "exception.h"
-
- .text
-
- .globl system_reset_iSeries
-system_reset_iSeries:
- bl .relative_toc
- mfspr r13,SPRN_SPRG3 /* Get alpaca address */
- LOAD_REG_ADDR(r23, alpaca)
- li r0,ALPACA_SIZE
- sub r23,r13,r23
- divdu r24,r23,r0 /* r24 has cpu number */
- cmpwi 0,r24,0 /* Are we processor 0? */
- bne 1f
- LOAD_REG_ADDR(r13, boot_paca)
- mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
- mfmsr r23
- ori r23,r23,MSR_RI
- mtmsrd r23 /* RI on */
- b .__start_initialization_iSeries /* Start up the first processor */
-1: mfspr r4,SPRN_CTRLF
- li r5,CTRL_RUNLATCH /* Turn off the run light */
- andc r4,r4,r5
- mtspr SPRN_CTRLT,r4
-
-/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
-/* In the UP case we'll yield() later, and we will not access the paca anyway */
-#ifdef CONFIG_SMP
-iSeries_secondary_wait_paca:
- HMT_LOW
- LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
- ld r23,0(r23)
-
- cmpdi 0,r23,0
- bne 2f /* go on when the master is ready */
-
- /* Keep poking the Hypervisor until we're released */
- /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
- lis r3,0x8002
- rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
- li r0,-1 /* r0=-1 indicates a Hypervisor call */
- sc /* Invoke the hypervisor via a system call */
- b iSeries_secondary_wait_paca
-
-2:
- HMT_MEDIUM
- sync
-
- LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */
- lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */
- cmpld 0,r24,r3 /* is our cpu number allocated? */
- bge iSeries_secondary_yield /* no, yield forever */
-
- /* Load our paca now that it's been allocated */
- LOAD_REG_ADDR(r13, paca)
- ld r13,0(r13)
- mulli r0,r24,PACA_SIZE
- add r13,r13,r0
- mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
- mfmsr r23
- ori r23,r23,MSR_RI
- mtmsrd r23 /* RI on */
-
-iSeries_secondary_smp_loop:
- lbz r23,PACAPROCSTART(r13) /* Test if this processor
- * should start */
- cmpwi 0,r23,0
- bne 3f /* go on when we are told */
-
- HMT_LOW
- /* Let the Hypervisor know we are alive */
- /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
- lis r3,0x8002
- rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
- li r0,-1 /* r0=-1 indicates a Hypervisor call */
- sc /* Invoke the hypervisor via a system call */
- mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
- b iSeries_secondary_smp_loop /* wait for signal to start */
-
-3:
- HMT_MEDIUM
- sync
- LOAD_REG_ADDR(r3,current_set)
- sldi r28,r24,3 /* get current_set[cpu#] */
- ldx r3,r3,r28
- addi r1,r3,THREAD_SIZE
- subi r1,r1,STACK_FRAME_OVERHEAD
-
- b __secondary_start /* Loop until told to go */
-#endif /* CONFIG_SMP */
-
-iSeries_secondary_yield:
- /* Yield the processor. This is required for non-SMP kernels
- which are running on multi-threaded machines. */
- HMT_LOW
- lis r3,0x8000
- rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
- addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
- li r4,0 /* "yield timed" */
- li r5,-1 /* "yield forever" */
- li r0,-1 /* r0=-1 indicates a Hypervisor call */
- sc /* Invoke the hypervisor via a system call */
- mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
- b iSeries_secondary_yield /* If SMP not configured, secondaries
- * loop forever */
-
-/*** ISeries-LPAR interrupt handlers ***/
-
- STD_EXCEPTION_ISERIES(machine_check, PACA_EXMC)
-
- .globl data_access_iSeries
-data_access_iSeries:
- mtspr SPRN_SPRG_SCRATCH0,r13
-BEGIN_FTR_SECTION
- mfspr r13,SPRN_SPRG_PACA
- std r9,PACA_EXSLB+EX_R9(r13)
- std r10,PACA_EXSLB+EX_R10(r13)
- mfspr r10,SPRN_DAR
- mfspr r9,SPRN_DSISR
- srdi r10,r10,60
- rlwimi r10,r9,16,0x20
- mfcr r9
- cmpwi r10,0x2c
- beq .do_stab_bolted_iSeries
- ld r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R11(r13)
- ld r11,PACA_EXSLB+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R12(r13)
- mfspr r12,SPRN_SPRG_SCRATCH0
- std r10,PACA_EXGEN+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R13(r13)
- EXCEPTION_PROLOG_ISERIES_1
-FTR_SECTION_ELSE
- EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
- EXCEPTION_PROLOG_ISERIES_1
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
- b data_access_common
-
-.do_stab_bolted_iSeries:
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
- std r10,PACA_EXSLB+EX_R13(r13)
- EXCEPTION_PROLOG_ISERIES_1
- b .do_stab_bolted
-
- .globl data_access_slb_iSeries
-data_access_slb_iSeries:
- mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
- mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_DAR
- std r9,PACA_EXSLB+EX_R9(r13)
- mfcr r9
-#ifdef __DISABLED__
- cmpdi r3,0
- bge slb_miss_user_iseries
-#endif
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
- std r10,PACA_EXSLB+EX_R13(r13)
- ld r12,PACALPPACAPTR(r13)
- ld r12,LPPACASRR1(r12)
- b .slb_miss_realmode
-
- STD_EXCEPTION_ISERIES(instruction_access, PACA_EXGEN)
-
- .globl instruction_access_slb_iSeries
-instruction_access_slb_iSeries:
- mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
- mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
- std r3,PACA_EXSLB+EX_R3(r13)
- ld r3,PACALPPACAPTR(r13)
- ld r3,LPPACASRR0(r3) /* get SRR0 value */
- std r9,PACA_EXSLB+EX_R9(r13)
- mfcr r9
-#ifdef __DISABLED__
- cmpdi r3,0
- bge slb_miss_user_iseries
-#endif
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG_SCRATCH0
- std r10,PACA_EXSLB+EX_R13(r13)
- ld r12,PACALPPACAPTR(r13)
- ld r12,LPPACASRR1(r12)
- b .slb_miss_realmode
-
-#ifdef __DISABLED__
-slb_miss_user_iseries:
- std r10,PACA_EXGEN+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R11(r13)
- std r12,PACA_EXGEN+EX_R12(r13)
- mfspr r10,SPRG_SCRATCH0
- ld r11,PACA_EXSLB+EX_R9(r13)
- ld r12,PACA_EXSLB+EX_R3(r13)
- std r10,PACA_EXGEN+EX_R13(r13)
- std r11,PACA_EXGEN+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R3(r13)
- EXCEPTION_PROLOG_ISERIES_1
- b slb_miss_user_common
-#endif
-
- MASKABLE_EXCEPTION_ISERIES(hardware_interrupt)
- STD_EXCEPTION_ISERIES(alignment, PACA_EXGEN)
- STD_EXCEPTION_ISERIES(program_check, PACA_EXGEN)
- STD_EXCEPTION_ISERIES(fp_unavailable, PACA_EXGEN)
- MASKABLE_EXCEPTION_ISERIES(decrementer)
- STD_EXCEPTION_ISERIES(trap_0a, PACA_EXGEN)
- STD_EXCEPTION_ISERIES(trap_0b, PACA_EXGEN)
-
- .globl system_call_iSeries
-system_call_iSeries:
- mr r9,r13
- mfspr r13,SPRN_SPRG_PACA
- EXCEPTION_PROLOG_ISERIES_1
- b system_call_common
-
- STD_EXCEPTION_ISERIES(single_step, PACA_EXGEN)
- STD_EXCEPTION_ISERIES(trap_0e, PACA_EXGEN)
- STD_EXCEPTION_ISERIES(performance_monitor, PACA_EXGEN)
-
-decrementer_iSeries_masked:
- /* We may not have a valid TOC pointer in here. */
- li r11,1
- ld r12,PACALPPACAPTR(r13)
- stb r11,LPPACADECRINT(r12)
- li r12,-1
- clrldi r12,r12,33 /* set DEC to 0x7fffffff */
- mtspr SPRN_DEC,r12
- /* fall through */
-
-hardware_interrupt_iSeries_masked:
- mtcrf 0x80,r9 /* Restore regs */
- ld r12,PACALPPACAPTR(r13)
- ld r11,LPPACASRR0(r12)
- ld r12,LPPACASRR1(r12)
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
- ld r9,PACA_EXGEN+EX_R9(r13)
- ld r10,PACA_EXGEN+EX_R10(r13)
- ld r11,PACA_EXGEN+EX_R11(r13)
- ld r12,PACA_EXGEN+EX_R12(r13)
- ld r13,PACA_EXGEN+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-_INIT_STATIC(__start_initialization_iSeries)
- /* Clear out the BSS */
- LOAD_REG_ADDR(r11,__bss_stop)
- LOAD_REG_ADDR(r8,__bss_start)
- sub r11,r11,r8 /* bss size */
- addi r11,r11,7 /* round up to an even double word */
- rldicl. r11,r11,61,3 /* shift right by 3 */
- beq 4f
- addi r8,r8,-8
- li r0,0
- mtctr r11 /* zero this many doublewords */
-3: stdu r0,8(r8)
- bdnz 3b
-4:
- LOAD_REG_ADDR(r1,init_thread_union)
- addi r1,r1,THREAD_SIZE
- li r0,0
- stdu r0,-STACK_FRAME_OVERHEAD(r1)
-
- bl .iSeries_early_setup
- bl .early_setup
-
- /* relocation is on at this point */
-
- b .start_here_common
diff --git a/arch/powerpc/platforms/iseries/exception.h b/arch/powerpc/platforms/iseries/exception.h
deleted file mode 100644
index 50271b550a99..000000000000
--- a/arch/powerpc/platforms/iseries/exception.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _ASM_POWERPC_ISERIES_EXCEPTION_H
-#define _ASM_POWERPC_ISERIES_EXCEPTION_H
-/*
- * Extracted from head_64.S
- *
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
- * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
- * Adapted for Power Macintosh by Paul Mackerras.
- * Low-level exception handlers and MMU support
- * rewritten by Paul Mackerras.
- * Copyright (C) 1996 Paul Mackerras.
- *
- * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
- * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
- *
- * This file contains the low-level support and setup for the
- * PowerPC-64 platform, including trap and interrupt dispatch.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <asm/exception-64s.h>
-
-#define EXCEPTION_PROLOG_ISERIES_1 \
- mfmsr r10; \
- ld r12,PACALPPACAPTR(r13); \
- ld r11,LPPACASRR0(r12); \
- ld r12,LPPACASRR1(r12); \
- ori r10,r10,MSR_RI; \
- mtmsrd r10,1
-
-#define STD_EXCEPTION_ISERIES(label, area) \
- .globl label##_iSeries; \
-label##_iSeries: \
- HMT_MEDIUM; \
- mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_1(area, NOTEST, 0); \
- EXCEPTION_PROLOG_ISERIES_1; \
- b label##_common
-
-#define MASKABLE_EXCEPTION_ISERIES(label) \
- .globl label##_iSeries; \
-label##_iSeries: \
- HMT_MEDIUM; \
- mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
- lbz r10,PACASOFTIRQEN(r13); \
- cmpwi 0,r10,0; \
- beq- label##_iSeries_masked; \
- EXCEPTION_PROLOG_ISERIES_1; \
- b label##_common; \
-
-#endif /* _ASM_POWERPC_ISERIES_EXCEPTION_H */
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
deleted file mode 100644
index 3ae66ab9d5e7..000000000000
--- a/arch/powerpc/platforms/iseries/htab.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * iSeries hashtable management.
- * Derived from pSeries_htab.c
- *
- * SMP scalability work:
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <asm/machdep.h>
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/abs_addr.h>
-#include <linux/spinlock.h>
-
-#include "call_hpt.h"
-
-static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp;
-
-/*
- * Very primitive algorithm for picking up a lock
- */
-static inline void iSeries_hlock(unsigned long slot)
-{
- if (slot & 0x8)
- slot = ~slot;
- spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
-}
-
-static inline void iSeries_hunlock(unsigned long slot)
-{
- if (slot & 0x8)
- slot = ~slot;
- spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
-}
-
-static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
- unsigned long pa, unsigned long rflags,
- unsigned long vflags, int psize, int ssize)
-{
- long slot;
- struct hash_pte lhpte;
- int secondary = 0;
-
- BUG_ON(psize != MMU_PAGE_4K);
-
- /*
- * The hypervisor tries both primary and secondary.
- * If we are being called to insert in the secondary,
- * it means we have already tried both primary and secondary,
- * so we return failure immediately.
- */
- if (vflags & HPTE_V_SECONDARY)
- return -1;
-
- iSeries_hlock(hpte_group);
-
- slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
- if (unlikely(lhpte.v & HPTE_V_VALID)) {
- if (vflags & HPTE_V_BOLTED) {
- HvCallHpt_setSwBits(slot, 0x10, 0);
- HvCallHpt_setPp(slot, PP_RWXX);
- iSeries_hunlock(hpte_group);
- if (slot < 0)
- return 0x8 | (slot & 7);
- else
- return slot & 7;
- }
- BUG();
- }
-
- if (slot == -1) { /* No available entry found in either group */
- iSeries_hunlock(hpte_group);
- return -1;
- }
-
- if (slot < 0) { /* MSB set means secondary group */
- vflags |= HPTE_V_SECONDARY;
- secondary = 1;
- slot &= 0x7fffffffffffffff;
- }
-
-
- lhpte.v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M) |
- vflags | HPTE_V_VALID;
- lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
-
- /* Now fill in the actual HPTE */
- HvCallHpt_addValidate(slot, secondary, &lhpte);
-
- iSeries_hunlock(hpte_group);
-
- return (secondary << 3) | (slot & 7);
-}
-
-static unsigned long iSeries_hpte_getword0(unsigned long slot)
-{
- struct hash_pte hpte;
-
- HvCallHpt_get(&hpte, slot);
- return hpte.v;
-}
-
-static long iSeries_hpte_remove(unsigned long hpte_group)
-{
- unsigned long slot_offset;
- int i;
- unsigned long hpte_v;
-
- /* Pick a random slot to start at */
- slot_offset = mftb() & 0x7;
-
- iSeries_hlock(hpte_group);
-
- for (i = 0; i < HPTES_PER_GROUP; i++) {
- hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
-
- if (! (hpte_v & HPTE_V_BOLTED)) {
- HvCallHpt_invalidateSetSwBitsGet(hpte_group +
- slot_offset, 0, 0);
- iSeries_hunlock(hpte_group);
- return i;
- }
-
- slot_offset++;
- slot_offset &= 0x7;
- }
-
- iSeries_hunlock(hpte_group);
-
- return -1;
-}
-
-/*
- * The HyperVisor expects the "flags" argument in this form:
- * bits 0..59 : reserved
- * bit 60 : N
- * bits 61..63 : PP2,PP1,PP0
- */
-static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
- unsigned long va, int psize, int ssize, int local)
-{
- struct hash_pte hpte;
- unsigned long want_v;
-
- iSeries_hlock(slot);
-
- HvCallHpt_get(&hpte, slot);
- want_v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M);
-
- if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
- /*
- * Hypervisor expects bits as NPPP, which is
- * different from how they are mapped in our PP.
- */
- HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
- iSeries_hunlock(slot);
- return 0;
- }
- iSeries_hunlock(slot);
-
- return -1;
-}
-
-/*
- * Functions used to find the PTE for a particular virtual address.
- * Only used during boot when bolting pages.
- *
- * Input : vpn : virtual page number
- * Output: PTE index within the page table of the entry
- * -1 on failure
- */
-static long iSeries_hpte_find(unsigned long vpn)
-{
- struct hash_pte hpte;
- long slot;
-
- /*
- * The HvCallHpt_findValid interface is as follows:
- * 0xffffffffffffffff : No entry found.
- * 0x00000000xxxxxxxx : Entry found in primary group, slot x
- * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
- */
- slot = HvCallHpt_findValid(&hpte, vpn);
- if (hpte.v & HPTE_V_VALID) {
- if (slot < 0) {
- slot &= 0x7fffffffffffffff;
- slot = -slot;
- }
- } else
- slot = -1;
- return slot;
-}
-
-/*
- * Update the page protection bits. Intended to be used to create
- * guard pages for kernel data structures on pages which are bolted
- * in the HPT. Assumes pages being operated on will not be stolen.
- * Does not work on large pages.
- *
- * No need to lock here because we should be the only user.
- */
-static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
- int psize, int ssize)
-{
- unsigned long vsid,va,vpn;
- long slot;
-
- BUG_ON(psize != MMU_PAGE_4K);
-
- vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
- va = (vsid << 28) | (ea & 0x0fffffff);
- vpn = va >> HW_PAGE_SHIFT;
- slot = iSeries_hpte_find(vpn);
- if (slot == -1)
- panic("updateboltedpp: Could not find page to bolt\n");
- HvCallHpt_setPp(slot, newpp);
-}
-
-static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
- int psize, int ssize, int local)
-{
- unsigned long hpte_v;
- unsigned long avpn = va >> 23;
- unsigned long flags;
-
- local_irq_save(flags);
-
- iSeries_hlock(slot);
-
- hpte_v = iSeries_hpte_getword0(slot);
-
- if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
- HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
-
- iSeries_hunlock(slot);
-
- local_irq_restore(flags);
-}
-
-void __init hpte_init_iSeries(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(iSeries_hlocks); i++)
- spin_lock_init(&iSeries_hlocks[i]);
-
- ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
- ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
- ppc_md.hpte_insert = iSeries_hpte_insert;
- ppc_md.hpte_remove = iSeries_hpte_remove;
-}
diff --git a/arch/powerpc/platforms/iseries/hvcall.S b/arch/powerpc/platforms/iseries/hvcall.S
deleted file mode 100644
index 07ae6ad5f49f..000000000000
--- a/arch/powerpc/platforms/iseries/hvcall.S
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * This file contains the code to perform calls to the
- * iSeries LPAR hypervisor
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h> /* XXX for STACK_FRAME_OVERHEAD */
-
- .text
-
-/*
- * Hypervisor call
- *
- * Invoke the iSeries hypervisor via the System Call instruction
- * Parameters are passed to this routine in registers r3 - r10
- *
- * r3 contains the HV function to be called
- * r4-r10 contain the operands to the hypervisor function
- *
- */
-
-_GLOBAL(HvCall)
-_GLOBAL(HvCall0)
-_GLOBAL(HvCall1)
-_GLOBAL(HvCall2)
-_GLOBAL(HvCall3)
-_GLOBAL(HvCall4)
-_GLOBAL(HvCall5)
-_GLOBAL(HvCall6)
-_GLOBAL(HvCall7)
-
-
- mfcr r0
- std r0,-8(r1)
- stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
-
- /* r0 = 0xffffffffffffffff indicates a hypervisor call */
-
- li r0,-1
-
- /* Invoke the hypervisor */
-
- sc
-
- ld r1,0(r1)
- ld r0,-8(r1)
- mtcrf 0xff,r0
-
- /* return to caller, return value in r3 */
-
- blr
-
-_GLOBAL(HvCall0Ret16)
-_GLOBAL(HvCall1Ret16)
-_GLOBAL(HvCall2Ret16)
-_GLOBAL(HvCall3Ret16)
-_GLOBAL(HvCall4Ret16)
-_GLOBAL(HvCall5Ret16)
-_GLOBAL(HvCall6Ret16)
-_GLOBAL(HvCall7Ret16)
-
- mfcr r0
- std r0,-8(r1)
- std r31,-16(r1)
- stdu r1,-(STACK_FRAME_OVERHEAD+32)(r1)
-
- mr r31,r4
- li r0,-1
- mr r4,r5
- mr r5,r6
- mr r6,r7
- mr r7,r8
- mr r8,r9
- mr r9,r10
-
- sc
-
- std r3,0(r31)
- std r4,8(r31)
-
- mr r3,r5
-
- ld r1,0(r1)
- ld r0,-8(r1)
- mtcrf 0xff,r0
- ld r31,-16(r1)
-
- blr
diff --git a/arch/powerpc/platforms/iseries/hvlog.c b/arch/powerpc/platforms/iseries/hvlog.c
deleted file mode 100644
index f476d71194fa..000000000000
--- a/arch/powerpc/platforms/iseries/hvlog.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <asm/page.h>
-#include <asm/abs_addr.h>
-#include <asm/iseries/hv_call.h>
-#include <asm/iseries/hv_call_sc.h>
-#include <asm/iseries/hv_types.h>
-
-
-void HvCall_writeLogBuffer(const void *buffer, u64 len)
-{
- struct HvLpBufferList hv_buf;
- u64 left_this_page;
- u64 cur = virt_to_abs(buffer);
-
- while (len) {
- hv_buf.addr = cur;
- left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
- if (left_this_page > len)
- left_this_page = len;
- hv_buf.len = left_this_page;
- len -= left_this_page;
- HvCall2(HvCallBaseWriteLogBuffer,
- virt_to_abs(&hv_buf),
- left_this_page);
- cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
- }
-}
diff --git a/arch/powerpc/platforms/iseries/hvlpconfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c
deleted file mode 100644
index f62a0c5fa670..000000000000
--- a/arch/powerpc/platforms/iseries/hvlpconfig.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/export.h>
-#include <asm/iseries/hv_lp_config.h>
-#include "it_lp_naca.h"
-
-HvLpIndex HvLpConfig_getLpIndex_outline(void)
-{
- return HvLpConfig_getLpIndex();
-}
-EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline);
-
-HvLpIndex HvLpConfig_getLpIndex(void)
-{
- return itLpNaca.xLpIndex;
-}
-EXPORT_SYMBOL(HvLpConfig_getLpIndex);
-
-HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
-{
- return itLpNaca.xPrimaryLpIndex;
-}
-EXPORT_SYMBOL_GPL(HvLpConfig_getPrimaryLpIndex);
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
deleted file mode 100644
index 2f3d9110248c..000000000000
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- *
- * Rewrite, cleanup:
- *
- * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
- * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
- *
- * Dynamic DMA mapping support, iSeries-specific parts.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-
-#include <asm/iommu.h>
-#include <asm/vio.h>
-#include <asm/tce.h>
-#include <asm/machdep.h>
-#include <asm/abs_addr.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/iseries/hv_call_xm.h>
-#include <asm/iseries/hv_call_event.h>
-#include <asm/iseries/iommu.h>
-
-static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- u64 rc;
- u64 tce, rpn;
-
- while (npages--) {
- rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
- tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
-
- if (tbl->it_type == TCE_VB) {
- /* Virtual Bus */
- tce |= TCE_VALID|TCE_ALLIO;
- if (direction != DMA_TO_DEVICE)
- tce |= TCE_VB_WRITE;
- } else {
- /* PCI Bus */
- tce |= TCE_PCI_READ; /* Read allowed */
- if (direction != DMA_TO_DEVICE)
- tce |= TCE_PCI_WRITE;
- }
-
- rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
- if (rc)
- panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
- rc);
- index++;
- uaddr += TCE_PAGE_SIZE;
- }
- return 0;
-}
-
-static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
-{
- u64 rc;
-
- while (npages--) {
- rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
- if (rc)
- panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
- rc);
- index++;
- }
-}
-
-/*
- * Structure passed to HvCallXm_getTceTableParms
- */
-struct iommu_table_cb {
- unsigned long itc_busno; /* Bus number for this tce table */
- unsigned long itc_start; /* Will be NULL for secondary */
- unsigned long itc_totalsize; /* Size (in pages) of whole table */
- unsigned long itc_offset; /* Index into real tce table of the
- start of our section */
- unsigned long itc_size; /* Size (in pages) of our section */
- unsigned long itc_index; /* Index of this tce table */
- unsigned short itc_maxtables; /* Max num of tables for partition */
- unsigned char itc_virtbus; /* Flag to indicate virtual bus */
- unsigned char itc_slotno; /* IOA Tce Slot Index */
- unsigned char itc_rsvd[4];
-};
-
-/*
- * Call Hv with the architected data structure to get TCE table
- * info. Put the returned data into the Linux representation of the
- * TCE table data.
- * The Hardware Tce table comes in three flavors.
- * 1. TCE table shared between Buses.
- * 2. TCE table per Bus.
- * 3. TCE Table per IOA.
- */
-void iommu_table_getparms_iSeries(unsigned long busno,
- unsigned char slotno,
- unsigned char virtbus,
- struct iommu_table* tbl)
-{
- struct iommu_table_cb *parms;
-
- parms = kzalloc(sizeof(*parms), GFP_KERNEL);
- if (parms == NULL)
- panic("PCI_DMA: TCE Table Allocation failed.");
-
- parms->itc_busno = busno;
- parms->itc_slotno = slotno;
- parms->itc_virtbus = virtbus;
-
- HvCallXm_getTceTableParms(iseries_hv_addr(parms));
-
- if (parms->itc_size == 0)
- panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
-
- /* itc_size is in pages worth of table, it_size is in # of entries */
- tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
- tbl->it_busno = parms->itc_busno;
- tbl->it_offset = parms->itc_offset;
- tbl->it_index = parms->itc_index;
- tbl->it_blocksize = 1;
- tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
-
- kfree(parms);
-}
-
-
-#ifdef CONFIG_PCI
-/*
- * This function compares the known tables to find an iommu_table
- * that has already been built for hardware TCEs.
- */
-static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
-{
- struct device_node *node;
-
- for (node = NULL; (node = of_find_all_nodes(node)); ) {
- struct pci_dn *pdn = PCI_DN(node);
- struct iommu_table *it;
-
- if (pdn == NULL)
- continue;
- it = pdn->iommu_table;
- if ((it != NULL) &&
- (it->it_type == TCE_PCI) &&
- (it->it_offset == tbl->it_offset) &&
- (it->it_index == tbl->it_index) &&
- (it->it_size == tbl->it_size)) {
- of_node_put(node);
- return it;
- }
- }
- return NULL;
-}
-
-
-static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
-{
- struct iommu_table *tbl;
- struct device_node *dn = pci_device_to_OF_node(pdev);
- struct pci_dn *pdn = PCI_DN(dn);
- const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);
-
- BUG_ON(lsn == NULL);
-
- tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
-
- iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
-
- /* Look for existing tce table */
- pdn->iommu_table = iommu_table_find(tbl);
- if (pdn->iommu_table == NULL)
- pdn->iommu_table = iommu_init_table(tbl, -1);
- else
- kfree(tbl);
- set_iommu_table_base(&pdev->dev, pdn->iommu_table);
-}
-#else
-#define pci_dma_dev_setup_iseries NULL
-#endif
-
-static struct iommu_table veth_iommu_table;
-static struct iommu_table vio_iommu_table;
-
-void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
-{
- return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
- DMA_BIT_MASK(32), flag, -1);
-}
-EXPORT_SYMBOL_GPL(iseries_hv_alloc);
-
-void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
-{
- iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(iseries_hv_free);
-
-dma_addr_t iseries_hv_map(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
- (unsigned long)vaddr % PAGE_SIZE, size,
- DMA_BIT_MASK(32), direction, NULL);
-}
-
-void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
-}
-
-void __init iommu_vio_init(void)
-{
- iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
- veth_iommu_table.it_size /= 2;
- vio_iommu_table = veth_iommu_table;
- vio_iommu_table.it_offset += veth_iommu_table.it_size;
-
- if (!iommu_init_table(&veth_iommu_table, -1))
- printk("Virtual Bus VETH TCE table failed.\n");
- if (!iommu_init_table(&vio_iommu_table, -1))
- printk("Virtual Bus VIO TCE table failed.\n");
-}
-
-struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
-{
- if (strcmp(dev->type, "network") == 0)
- return &veth_iommu_table;
- return &vio_iommu_table;
-}
-
-void iommu_init_early_iSeries(void)
-{
- ppc_md.tce_build = tce_build_iSeries;
- ppc_md.tce_free = tce_free_iSeries;
-
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
- set_pci_dma_ops(&dma_iommu_ops);
-}
diff --git a/arch/powerpc/platforms/iseries/ipl_parms.h b/arch/powerpc/platforms/iseries/ipl_parms.h
deleted file mode 100644
index 83e4ca42fc57..000000000000
--- a/arch/powerpc/platforms/iseries/ipl_parms.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ISERIES_IPL_PARMS_H
-#define _ISERIES_IPL_PARMS_H
-
-/*
- * This struct maps the IPL Parameters DMA'd from the SP.
- *
- * Warning:
- * This data must map in exactly 64 bytes and match the architecture for
- * the IPL parms
- */
-
-#include <asm/types.h>
-
-struct ItIplParmsReal {
- u8 xFormat; // Defines format of IplParms x00-x00
- u8 xRsvd01:6; // Reserved x01-x01
- u8 xAlternateSearch:1; // Alternate search indicator ...
- u8 xUaSupplied:1; // UA Supplied on programmed IPL...
- u8 xLsUaFormat; // Format byte for UA x02-x02
- u8 xRsvd02; // Reserved x03-x03
- u32 xLsUa; // LS UA x04-x07
- u32 xUnusedLsLid; // First OS LID to load x08-x0B
- u16 xLsBusNumber; // LS Bus Number x0C-x0D
- u8 xLsCardAdr; // LS Card Address x0E-x0E
- u8 xLsBoardAdr; // LS Board Address x0F-x0F
- u32 xRsvd03; // Reserved x10-x13
- u8 xSpcnPresent:1; // SPCN present x14-x14
- u8 xCpmPresent:1; // CPM present ...
- u8 xRsvd04:6; // Reserved ...
- u8 xRsvd05:4; // Reserved x15-x15
- u8 xKeyLock:4; // Keylock setting ...
- u8 xRsvd06:6; // Reserved x16-x16
- u8 xIplMode:2; // Ipl mode (A|B|C|D) ...
- u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17
- u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiated x18-x19
- u16 xPowerOnResetIpl:1; // Indicate POR condition ...
- u16 xMainStorePreserved:1; // Main Storage is preserved ...
- u16 xRsvd07:13; // Reserved ...
- u16 xIplSource:16; // Ipl source x1A-x1B
- u8 xIplReason:8; // Reason for this IPL x1C-x1C
- u8 xRsvd08; // Reserved x1D-x1D
- u16 xRsvd09; // Reserved x1E-x1F
- u16 xSysBoxType; // System Box Type x20-x21
- u16 xSysProcType; // System Processor Type x22-x23
- u32 xRsvd10; // Reserved x24-x27
- u64 xRsvd11; // Reserved x28-x2F
- u64 xRsvd12; // Reserved x30-x37
- u64 xRsvd13; // Reserved x38-x3F
-};
-
-#endif /* _ISERIES_IPL_PARMS_H */
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
deleted file mode 100644
index b2103453eb01..000000000000
--- a/arch/powerpc/platforms/iseries/irq.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * This module supports the iSeries PCI bus interrupt handling
- * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp>
- * Copyright (C) 2004-2005 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the:
- * Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
- *
- * Change Activity:
- * Created, December 13, 2000 by Wayne Holm
- * End Change Activity
- */
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-#include <linux/spinlock.h>
-
-#include <asm/paca.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_call_xm.h>
-#include <asm/iseries/it_lp_queue.h>
-
-#include "irq.h"
-#include "pci.h"
-#include "call_pci.h"
-
-#ifdef CONFIG_PCI
-
-enum pci_event_type {
- pe_bus_created = 0, /* PHB has been created */
- pe_bus_error = 1, /* PHB has failed */
- pe_bus_failed = 2, /* Msg to Secondary, Primary failed bus */
- pe_node_failed = 4, /* Multi-adapter bridge has failed */
- pe_node_recovered = 5, /* Multi-adapter bridge has recovered */
- pe_bus_recovered = 12, /* PHB has been recovered */
- pe_unquiese_bus = 18, /* Secondary bus unquiescing */
- pe_bridge_error = 21, /* Bridge Error */
- pe_slot_interrupt = 22 /* Slot interrupt */
-};
-
-struct pci_event {
- struct HvLpEvent event;
- union {
- u64 __align; /* Align on an 8-byte boundary */
- struct {
- u32 fisr;
- HvBusNumber bus_number;
- HvSubBusNumber sub_bus_number;
- HvAgentId dev_id;
- } slot;
- struct {
- HvBusNumber bus_number;
- HvSubBusNumber sub_bus_number;
- } bus;
- struct {
- HvBusNumber bus_number;
- HvSubBusNumber sub_bus_number;
- HvAgentId dev_id;
- } node;
- } data;
-};
-
-static DEFINE_SPINLOCK(pending_irqs_lock);
-static int num_pending_irqs;
-static int pending_irqs[NR_IRQS];
-
-static void int_received(struct pci_event *event)
-{
- int irq;
-
- switch (event->event.xSubtype) {
- case pe_slot_interrupt:
- irq = event->event.xCorrelationToken;
- if (irq < NR_IRQS) {
- spin_lock(&pending_irqs_lock);
- pending_irqs[irq]++;
- num_pending_irqs++;
- spin_unlock(&pending_irqs_lock);
- } else {
- printk(KERN_WARNING "int_received: bad irq number %d\n",
- irq);
- HvCallPci_eoi(event->data.slot.bus_number,
- event->data.slot.sub_bus_number,
- event->data.slot.dev_id);
- }
- break;
- /* Ignore error recovery events for now */
- case pe_bus_created:
- printk(KERN_INFO "int_received: system bus %d created\n",
- event->data.bus.bus_number);
- break;
- case pe_bus_error:
- case pe_bus_failed:
- printk(KERN_INFO "int_received: system bus %d failed\n",
- event->data.bus.bus_number);
- break;
- case pe_bus_recovered:
- case pe_unquiese_bus:
- printk(KERN_INFO "int_received: system bus %d recovered\n",
- event->data.bus.bus_number);
- break;
- case pe_node_failed:
- case pe_bridge_error:
- printk(KERN_INFO
- "int_received: multi-adapter bridge %d/%d/%d failed\n",
- event->data.node.bus_number,
- event->data.node.sub_bus_number,
- event->data.node.dev_id);
- break;
- case pe_node_recovered:
- printk(KERN_INFO
- "int_received: multi-adapter bridge %d/%d/%d recovered\n",
- event->data.node.bus_number,
- event->data.node.sub_bus_number,
- event->data.node.dev_id);
- break;
- default:
- printk(KERN_ERR
- "int_received: unrecognized event subtype 0x%x\n",
- event->event.xSubtype);
- break;
- }
-}
-
-static void pci_event_handler(struct HvLpEvent *event)
-{
- if (event && (event->xType == HvLpEvent_Type_PciIo)) {
- if (hvlpevent_is_int(event))
- int_received((struct pci_event *)event);
- else
- printk(KERN_ERR
- "pci_event_handler: unexpected ack received\n");
- } else if (event)
- printk(KERN_ERR
- "pci_event_handler: Unrecognized PCI event type 0x%x\n",
- (int)event->xType);
- else
- printk(KERN_ERR "pci_event_handler: NULL event received\n");
-}
-
-#define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff)
-#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
-#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
-#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7)
-
-/*
- * This will be called by device drivers (via enable_IRQ)
- * to enable INTA in the bridge interrupt status register.
- */
-static void iseries_enable_IRQ(struct irq_data *d)
-{
- u32 bus, dev_id, function, mask;
- const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
-
- /* The IRQ has already been locked by the caller */
- bus = REAL_IRQ_TO_BUS(rirq);
- function = REAL_IRQ_TO_FUNC(rirq);
- dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
-
- /* Unmask secondary INTA */
- mask = 0x80000000;
- HvCallPci_unmaskInterrupts(bus, sub_bus, dev_id, mask);
-}
-
-/* This is called by iseries_activate_IRQs */
-static unsigned int iseries_startup_IRQ(struct irq_data *d)
-{
- u32 bus, dev_id, function, mask;
- const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
-
- bus = REAL_IRQ_TO_BUS(rirq);
- function = REAL_IRQ_TO_FUNC(rirq);
- dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
-
- /* Link the IRQ number to the bridge */
- HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq);
-
- /* Unmask bridge interrupts in the FISR */
- mask = 0x01010000 << function;
- HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask);
- iseries_enable_IRQ(d);
- return 0;
-}
-
-/*
- * This is called out of iSeries_fixup to activate interrupt
- * generation for usable slots
- */
-void __init iSeries_activate_IRQs()
-{
- int irq;
- unsigned long flags;
-
- for_each_irq (irq) {
- struct irq_desc *desc = irq_to_desc(irq);
- struct irq_chip *chip;
-
- if (!desc)
- continue;
-
- chip = irq_desc_get_chip(desc);
- if (chip && chip->irq_startup) {
- raw_spin_lock_irqsave(&desc->lock, flags);
- chip->irq_startup(&desc->irq_data);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
- }
-}
-
-/* this is not called anywhere currently */
-static void iseries_shutdown_IRQ(struct irq_data *d)
-{
- u32 bus, dev_id, function, mask;
- const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
-
- /* irq should be locked by the caller */
- bus = REAL_IRQ_TO_BUS(rirq);
- function = REAL_IRQ_TO_FUNC(rirq);
- dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
-
- /* Invalidate the IRQ number in the bridge */
- HvCallXm_connectBusUnit(bus, sub_bus, dev_id, 0);
-
- /* Mask bridge interrupts in the FISR */
- mask = 0x01010000 << function;
- HvCallPci_maskFisr(bus, sub_bus, dev_id, mask);
-}
-
-/*
- * This will be called by device drivers (via disable_IRQ)
- * to disable INTA in the bridge interrupt status register.
- */
-static void iseries_disable_IRQ(struct irq_data *d)
-{
- u32 bus, dev_id, function, mask;
- const u32 sub_bus = 0;
- unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
-
- /* The IRQ has already been locked by the caller */
- bus = REAL_IRQ_TO_BUS(rirq);
- function = REAL_IRQ_TO_FUNC(rirq);
- dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
-
- /* Mask secondary INTA */
- mask = 0x80000000;
- HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask);
-}
-
-static void iseries_end_IRQ(struct irq_data *d)
-{
- unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
-
- HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
- (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
-}
-
-static struct irq_chip iseries_pic = {
- .name = "iSeries",
- .irq_startup = iseries_startup_IRQ,
- .irq_shutdown = iseries_shutdown_IRQ,
- .irq_unmask = iseries_enable_IRQ,
- .irq_mask = iseries_disable_IRQ,
- .irq_eoi = iseries_end_IRQ
-};
-
-/*
- * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot.
- * It calculates the irq value for the slot.
- * Note that sub_bus is always 0 (at the moment at least).
- */
-int __init iSeries_allocate_IRQ(HvBusNumber bus,
- HvSubBusNumber sub_bus, u32 bsubbus)
-{
- unsigned int realirq;
- u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus);
- u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus);
-
- realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
- + function;
-
- return irq_create_mapping(NULL, realirq);
-}
-
-#endif /* CONFIG_PCI */
-
-/*
- * Get the next pending IRQ.
- */
-unsigned int iSeries_get_irq(void)
-{
- int irq = NO_IRQ_IGNORE;
-
-#ifdef CONFIG_SMP
- if (get_lppaca()->int_dword.fields.ipi_cnt) {
- get_lppaca()->int_dword.fields.ipi_cnt = 0;
- smp_ipi_demux();
- }
-#endif /* CONFIG_SMP */
- if (hvlpevent_is_pending())
- process_hvlpevents();
-
-#ifdef CONFIG_PCI
- if (num_pending_irqs) {
- spin_lock(&pending_irqs_lock);
- for (irq = 0; irq < NR_IRQS; irq++) {
- if (pending_irqs[irq]) {
- pending_irqs[irq]--;
- num_pending_irqs--;
- break;
- }
- }
- spin_unlock(&pending_irqs_lock);
- if (irq >= NR_IRQS)
- irq = NO_IRQ_IGNORE;
- }
-#endif
-
- return irq;
-}
-
-#ifdef CONFIG_PCI
-
-static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
-
- return 0;
-}
-
-static int iseries_irq_host_match(struct irq_host *h, struct device_node *np)
-{
- /* Match all */
- return 1;
-}
-
-static struct irq_host_ops iseries_irq_host_ops = {
- .map = iseries_irq_host_map,
- .match = iseries_irq_host_match,
-};
-
-/*
- * This is called by init_IRQ, set in ppc_md.init_IRQ by iSeries_setup.c.
- * It must be called before the bus walk.
- */
-void __init iSeries_init_IRQ(void)
-{
- /* Register PCI event handler and open an event path */
- struct irq_host *host;
- int ret;
-
- /*
- * The Hypervisor only allows us up to 256 interrupt
- * sources (the irq number is passed in a u8).
- */
- irq_set_virq_count(256);
-
- /* Create irq host. No need for a revmap since HV will give us
- * back our virtual irq number
- */
- host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
- &iseries_irq_host_ops, 0);
- BUG_ON(host == NULL);
- irq_set_default_host(host);
-
- ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
- &pci_event_handler);
- if (ret == 0) {
- ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
- if (ret != 0)
- printk(KERN_ERR "iseries_init_IRQ: open event path "
- "failed with rc 0x%x\n", ret);
- } else
- printk(KERN_ERR "iseries_init_IRQ: register handler "
- "failed with rc 0x%x\n", ret);
-}
-
-#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
deleted file mode 100644
index a1c236074034..000000000000
--- a/arch/powerpc/platforms/iseries/irq.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef _ISERIES_IRQ_H
-#define _ISERIES_IRQ_H
-
-#ifdef CONFIG_PCI
-extern void iSeries_init_IRQ(void);
-extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
-extern void iSeries_activate_IRQs(void);
-#else
-#define iSeries_init_IRQ NULL
-#endif
-extern unsigned int iSeries_get_irq(void);
-
-#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h b/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h
deleted file mode 100644
index 6de9097b7f57..000000000000
--- a/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2002 Dave Boutcher IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
-#define _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
-
-/*
- * This struct maps the panel information
- *
- * Warning:
- * This data must match the architecture for the panel information
- */
-
-#include <asm/types.h>
-
-struct ItExtVpdPanel {
- /* Definition of the Extended Vpd On Panel Data Area */
- char systemSerial[8];
- char mfgID[4];
- char reserved1[24];
- char machineType[4];
- char systemID[6];
- char somUniqueCnt[4];
- char serialNumberCount;
- char reserved2[7];
- u16 bbu3;
- u16 bbu2;
- u16 bbu1;
- char xLocationLabel[8];
- u8 xRsvd1[6];
- u16 xFrameId;
- u8 xRsvd2[48];
-};
-
-extern struct ItExtVpdPanel xItExtVpdPanel;
-
-#endif /* _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H */
diff --git a/arch/powerpc/platforms/iseries/it_lp_naca.h b/arch/powerpc/platforms/iseries/it_lp_naca.h
deleted file mode 100644
index cf6dcf6ef07b..000000000000
--- a/arch/powerpc/platforms/iseries/it_lp_naca.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _PLATFORMS_ISERIES_IT_LP_NACA_H
-#define _PLATFORMS_ISERIES_IT_LP_NACA_H
-
-#include <linux/types.h>
-
-/*
- * This control block contains the data that is shared between the
- * hypervisor (PLIC) and the OS.
- */
-
-struct ItLpNaca {
-// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
- u32 xDesc; // Eye catcher x00-x03
- u16 xSize; // Size of this class x04-x05
- u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
- u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
- u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
- u8 xServiceLpIndex; // LP Ind of Service Focal Point x0A-x0A
- u8 xLpIndex; // LP Index x0B-x0B
- u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
- u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
- u8 xPirEnvironMode; // Piranha or hardware x10-x10
- u8 xPirConsoleMode; // Piranha console indicator x11-x11
- u8 xPirDasdMode; // Piranha dasd indicator x12-x12
- u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
- u8 flags; // flags, see below x18-x1F
- u8 xSpVpdFormat; // VPD areas are in CSP format ...
- u8 xIntProcRatio; // Ratio of int procs to procs ...
- u8 xRsvd1_2[5]; // Reserved ...
- u16 xRsvd1_3; // Reserved x20-x21
- u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
- u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
- u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
- u64 xLoadAreaAddr; // ER address of load area x28-x2F
- u32 xLoadAreaChunks; // Chunks for the load area x30-x33
- u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
- // doing an ASR switch on PASE
- // system call.
- u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
- u8 xRsvd1_4[64]; // x40-x7F
-
-// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
- u8 xRsvd2_0[128]; // Reserved x00-x7F
-
-// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
-// NB: Padding required to keep xInterruptHdlr at x300 which is required
-// for v4r4 PLIC.
- u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F
- u8 xRsvd3_0[384]; // Reserved 180-2FF
-
-// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt
-// handlers
- u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF
-};
-
-extern struct ItLpNaca itLpNaca;
-
-#define ITLPNACA_LPAR 0x80 /* Is LPAR installed on the system */
-#define ITLPNACA_PARTITIONED 0x40 /* Is the system partitioned */
-#define ITLPNACA_HWSYNCEDTBS 0x20 /* Hardware synced TBs */
-#define ITLPNACA_HMTINT 0x10 /* Utilize MHT for interrupts */
-
-#endif /* _PLATFORMS_ISERIES_IT_LP_NACA_H */
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
deleted file mode 100644
index 997e234fb8b7..000000000000
--- a/arch/powerpc/platforms/iseries/ksyms.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * (C) 2001-2005 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/export.h>
-
-#include <asm/hw_irq.h>
-#include <asm/iseries/hv_call_sc.h>
-
-EXPORT_SYMBOL(HvCall0);
-EXPORT_SYMBOL(HvCall1);
-EXPORT_SYMBOL(HvCall2);
-EXPORT_SYMBOL(HvCall3);
-EXPORT_SYMBOL(HvCall4);
-EXPORT_SYMBOL(HvCall5);
-EXPORT_SYMBOL(HvCall6);
-EXPORT_SYMBOL(HvCall7);
diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c
deleted file mode 100644
index 00e0ec813a1c..000000000000
--- a/arch/powerpc/platforms/iseries/lpardata.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Copyright 2001 Mike Corrigan, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/bitops.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/abs_addr.h>
-#include <asm/lppaca.h>
-#include <asm/paca.h>
-#include <asm/iseries/lpar_map.h>
-#include <asm/iseries/it_lp_queue.h>
-#include <asm/iseries/alpaca.h>
-
-#include "naca.h"
-#include "vpd_areas.h"
-#include "spcomm_area.h"
-#include "ipl_parms.h"
-#include "processor_vpd.h"
-#include "release_data.h"
-#include "it_exp_vpd_panel.h"
-#include "it_lp_naca.h"
-
-/* The HvReleaseData is the root of the information shared between
- * the hypervisor and Linux.
- */
-const struct HvReleaseData hvReleaseData = {
- .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
- .xSize = sizeof(struct HvReleaseData),
- .xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
- .xSlicNacaAddr = &naca, /* 64-bit Naca address */
- .xMsNucDataOffset = LPARMAP_PHYS,
- .xFlags = HVREL_TAGSINACTIVE /* tags inactive */
- /* 64 bit */
- /* shared processors */
- /* HMT allowed */
- | 6, /* TEMP: This allows non-GA driver */
- .xVrmIndex = 4, /* We are v5r2m0 */
- .xMinSupportedPlicVrmIndex = 3, /* v5r1m0 */
- .xMinCompatablePlicVrmIndex = 3, /* v5r1m0 */
- .xVrmName = { 0xd3, 0x89, 0x95, 0xa4, /* "Linux 2.4.64" ebcdic */
- 0xa7, 0x40, 0xf2, 0x4b,
- 0xf4, 0x4b, 0xf6, 0xf4 },
-};
-
-/*
- * The NACA. The first dword of the naca is required by the iSeries
- * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
- * through the pointer in hvReleaseData.
- */
-struct naca_struct naca = {
- .xItVpdAreas = &itVpdAreas,
- .xRamDisk = 0,
- .xRamDiskSize = 0,
-};
-
-struct ItLpRegSave {
- u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
- u16 xSize; // Size of this class 004-005
- u8 xInUse; // Area is live 006-007
- u8 xRsvd1[9]; // Reserved 007-00F
-
- u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F
- u32 xCTRL; // Control Register 170-173
- u32 xDEC; // Decrementer 174-177
- u32 xFPSCR; // FP Status and Control Reg 178-17B
- u32 xPVR; // Processor Version Number 17C-17F
-
- u64 xMMCR0; // Monitor Mode Control Reg 0 180-187
- u32 xPMC1; // Perf Monitor Counter 1 188-18B
- u32 xPMC2; // Perf Monitor Counter 2 18C-18F
- u32 xPMC3; // Perf Monitor Counter 3 190-193
- u32 xPMC4; // Perf Monitor Counter 4 194-197
- u32 xPIR; // Processor ID Reg 198-19B
-
- u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F
- u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3
- u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7
- u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB
- u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF
- u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3
- u32 xTSC; // Thread Switch Control 1B4-1B7
- u32 xTST; // Thread Switch Timeout 1B8-1BB
- u32 xRsvd; // Reserved 1BC-1BF
-
- u64 xACCR; // Address Compare Control Reg 1C0-1C7
- u64 xIMR; // Instruction Match Register 1C8-1CF
- u64 xSDR1; // Storage Description Reg 1 1D0-1D7
- u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF
- u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7
- u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF
- u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7
- u64 xTB; // Time Base Register 1F8-1FF
-
- u64 xFPR[32]; // Floating Point Registers 200-2FF
-
- u64 xMSR; // Machine State Register 300-307
- u64 xNIA; // Next Instruction Address 308-30F
-
- u64 xDABR; // Data Address Breakpoint Reg 310-317
- u64 xIABR; // Inst Address Breakpoint Reg 318-31F
-
- u64 xHID0; // HW Implementation Dependent0 320-327
-
- u64 xHID4; // HW Implementation Dependent4 328-32F
- u64 xSCOMd; // SCON Data Reg (SPRG4) 330-337
- u64 xSCOMc; // SCON Command Reg (SPRG5) 338-33F
- u64 xSDAR; // Sample Data Address Register 340-347
- u64 xSIAR; // Sample Inst Address Register 348-34F
-
- u8 xRsvd3[176]; // Reserved 350-3FF
-};
-
-extern void system_reset_iSeries(void);
-extern void machine_check_iSeries(void);
-extern void data_access_iSeries(void);
-extern void instruction_access_iSeries(void);
-extern void hardware_interrupt_iSeries(void);
-extern void alignment_iSeries(void);
-extern void program_check_iSeries(void);
-extern void fp_unavailable_iSeries(void);
-extern void decrementer_iSeries(void);
-extern void trap_0a_iSeries(void);
-extern void trap_0b_iSeries(void);
-extern void system_call_iSeries(void);
-extern void single_step_iSeries(void);
-extern void trap_0e_iSeries(void);
-extern void performance_monitor_iSeries(void);
-extern void data_access_slb_iSeries(void);
-extern void instruction_access_slb_iSeries(void);
-
-struct ItLpNaca itLpNaca = {
- .xDesc = 0xd397d581, /* "LpNa" ebcdic */
- .xSize = 0x0400, /* size of ItLpNaca */
- .xIntHdlrOffset = 0x0300, /* offset to int array */
- .xMaxIntHdlrEntries = 19, /* # ents */
- .xPrimaryLpIndex = 0, /* Part # of primary */
- .xServiceLpIndex = 0, /* Part # of serv */
- .xLpIndex = 0, /* Part # of me */
- .xMaxLpQueues = 0, /* # of LP queues */
- .xLpQueueOffset = 0x100, /* offset of start of LP queues */
- .xPirEnvironMode = 0, /* Piranha stuff */
- .xPirConsoleMode = 0,
- .xPirDasdMode = 0,
- .flags = 0,
- .xSpVpdFormat = 0,
- .xIntProcRatio = 0,
- .xPlicVrmIndex = 0, /* VRM index of PLIC */
- .xMinSupportedSlicVrmInd = 0, /* min supported SLIC */
- .xMinCompatableSlicVrmInd = 0, /* min compat SLIC */
- .xLoadAreaAddr = 0, /* 64-bit addr of load area */
- .xLoadAreaChunks = 0, /* chunks for load area */
- .xPaseSysCallCRMask = 0, /* PASE mask */
- .xSlicSegmentTablePtr = 0, /* seg table */
- .xOldLpQueue = { 0 }, /* Old LP Queue */
- .xInterruptHdlr = {
- (u64)system_reset_iSeries, /* 0x100 System Reset */
- (u64)machine_check_iSeries, /* 0x200 Machine Check */
- (u64)data_access_iSeries, /* 0x300 Data Access */
- (u64)instruction_access_iSeries, /* 0x400 Instruction Access */
- (u64)hardware_interrupt_iSeries, /* 0x500 External */
- (u64)alignment_iSeries, /* 0x600 Alignment */
- (u64)program_check_iSeries, /* 0x700 Program Check */
- (u64)fp_unavailable_iSeries, /* 0x800 FP Unavailable */
- (u64)decrementer_iSeries, /* 0x900 Decrementer */
- (u64)trap_0a_iSeries, /* 0xa00 Trap 0A */
- (u64)trap_0b_iSeries, /* 0xb00 Trap 0B */
- (u64)system_call_iSeries, /* 0xc00 System Call */
- (u64)single_step_iSeries, /* 0xd00 Single Step */
- (u64)trap_0e_iSeries, /* 0xe00 Trap 0E */
- (u64)performance_monitor_iSeries,/* 0xf00 Performance Monitor */
- 0, /* int 0x1000 */
- 0, /* int 0x1010 */
- 0, /* int 0x1020 CPU ctls */
- (u64)hardware_interrupt_iSeries, /* SC Ret Hdlr */
- (u64)data_access_slb_iSeries, /* 0x380 D-SLB */
- (u64)instruction_access_slb_iSeries /* 0x480 I-SLB */
- }
-};
-
-/* May be filled in by the hypervisor so cannot end up in the BSS */
-static struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
-
-/* May be filled in by the hypervisor so cannot end up in the BSS */
-struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
-
-#define maxPhysicalProcessors 32
-
-struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
- {
- .xInstCacheOperandSize = 32,
- .xDataCacheOperandSize = 32,
- .xProcFreq = 50000000,
- .xTimeBaseFreq = 50000000,
- .xPVR = 0x3600
- }
-};
-
-/* Space for Main Store Vpd 27,200 bytes */
-/* May be filled in by the hypervisor so cannot end up in the BSS */
-u64 xMsVpd[3400] __attribute__((__section__(".data")));
-
-/* Space for Recovery Log Buffer */
-/* May be filled in by the hypervisor so cannot end up in the BSS */
-static u64 xRecoveryLogBuffer[32] __attribute__((__section__(".data")));
-
-static const struct SpCommArea xSpCommArea = {
- .xDesc = 0xE2D7C3C2,
- .xFormat = 1,
-};
-
-static const struct ItLpRegSave iseries_reg_save[] = {
- [0 ... (NR_CPUS-1)] = {
- .xDesc = 0xd397d9e2, /* "LpRS" */
- .xSize = sizeof(struct ItLpRegSave),
- },
-};
-
-#define ALPACA_INIT(number) \
-{ \
- .lppaca_ptr = &lppaca[number], \
- .reg_save_ptr = &iseries_reg_save[number], \
-}
-
-const struct alpaca alpaca[] = {
- ALPACA_INIT( 0),
-#if NR_CPUS > 1
- ALPACA_INIT( 1), ALPACA_INIT( 2), ALPACA_INIT( 3),
-#if NR_CPUS > 4
- ALPACA_INIT( 4), ALPACA_INIT( 5), ALPACA_INIT( 6), ALPACA_INIT( 7),
-#if NR_CPUS > 8
- ALPACA_INIT( 8), ALPACA_INIT( 9), ALPACA_INIT(10), ALPACA_INIT(11),
- ALPACA_INIT(12), ALPACA_INIT(13), ALPACA_INIT(14), ALPACA_INIT(15),
- ALPACA_INIT(16), ALPACA_INIT(17), ALPACA_INIT(18), ALPACA_INIT(19),
- ALPACA_INIT(20), ALPACA_INIT(21), ALPACA_INIT(22), ALPACA_INIT(23),
- ALPACA_INIT(24), ALPACA_INIT(25), ALPACA_INIT(26), ALPACA_INIT(27),
- ALPACA_INIT(28), ALPACA_INIT(29), ALPACA_INIT(30), ALPACA_INIT(31),
-#if NR_CPUS > 32
- ALPACA_INIT(32), ALPACA_INIT(33), ALPACA_INIT(34), ALPACA_INIT(35),
- ALPACA_INIT(36), ALPACA_INIT(37), ALPACA_INIT(38), ALPACA_INIT(39),
- ALPACA_INIT(40), ALPACA_INIT(41), ALPACA_INIT(42), ALPACA_INIT(43),
- ALPACA_INIT(44), ALPACA_INIT(45), ALPACA_INIT(46), ALPACA_INIT(47),
- ALPACA_INIT(48), ALPACA_INIT(49), ALPACA_INIT(50), ALPACA_INIT(51),
- ALPACA_INIT(52), ALPACA_INIT(53), ALPACA_INIT(54), ALPACA_INIT(55),
- ALPACA_INIT(56), ALPACA_INIT(57), ALPACA_INIT(58), ALPACA_INIT(59),
- ALPACA_INIT(60), ALPACA_INIT(61), ALPACA_INIT(62), ALPACA_INIT(63),
-#endif
-#endif
-#endif
-#endif
-};
-
-/* The LparMap data is now located at offset 0x6000 in head.S.
- * It was put there so that the HvReleaseData could address it
- * with a 32-bit offset as required by the iSeries hypervisor.
- *
- * The Naca has a pointer to the ItVpdAreas. The hypervisor finds
- * the Naca via the HvReleaseData area. The HvReleaseData has the
- * offset into the Naca of the pointer to the ItVpdAreas.
- */
-const struct ItVpdAreas itVpdAreas = {
- .xSlicDesc = 0xc9a3e5c1, /* "ItVA" */
- .xSlicSize = sizeof(struct ItVpdAreas),
- .xSlicVpdEntries = ItVpdMaxEntries, /* # VPD array entries */
- .xSlicDmaEntries = ItDmaMaxEntries, /* # DMA array entries */
- .xSlicMaxLogicalProcs = NR_CPUS * 2, /* Max logical procs */
- .xSlicMaxPhysicalProcs = maxPhysicalProcessors, /* Max physical procs */
- .xSlicDmaToksOffset = offsetof(struct ItVpdAreas, xPlicDmaToks),
- .xSlicVpdAdrsOffset = offsetof(struct ItVpdAreas, xSlicVpdAdrs),
- .xSlicDmaLensOffset = offsetof(struct ItVpdAreas, xPlicDmaLens),
- .xSlicVpdLensOffset = offsetof(struct ItVpdAreas, xSlicVpdLens),
- .xSlicMaxSlotLabels = 0, /* max slot labels */
- .xSlicMaxLpQueues = 1, /* max LP queues */
- .xPlicDmaLens = { 0 }, /* DMA lengths */
- .xPlicDmaToks = { 0 }, /* DMA tokens */
- .xSlicVpdLens = { /* VPD lengths */
- 0,0,0, /* 0 - 2 */
- sizeof(xItExtVpdPanel), /* 3 Extended VPD */
- sizeof(struct alpaca), /* 4 length of (fake) Paca */
- 0, /* 5 */
- sizeof(struct ItIplParmsReal),/* 6 length of IPL parms */
- 26992, /* 7 length of MS VPD */
- 0, /* 8 */
- sizeof(struct ItLpNaca),/* 9 length of LP Naca */
- 0, /* 10 */
- 256, /* 11 length of Recovery Log Buf */
- sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
- 0,0,0, /* 13 - 15 */
- sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
- 0,0,0,0,0,0, /* 17 - 22 */
- sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
- 0,0 /* 24 - 25 */
- },
- .xSlicVpdAdrs = { /* VPD addresses */
- 0,0,0, /* 0 - 2 */
- &xItExtVpdPanel, /* 3 Extended VPD */
- &alpaca[0], /* 4 first (fake) Paca */
- 0, /* 5 */
- &xItIplParmsReal, /* 6 IPL parms */
- &xMsVpd, /* 7 MS Vpd */
- 0, /* 8 */
- &itLpNaca, /* 9 LpNaca */
- 0, /* 10 */
- &xRecoveryLogBuffer, /* 11 Recovery Log Buffer */
- &xSpCommArea, /* 12 SP Comm Area */
- 0,0,0, /* 13 - 15 */
- &xIoHriProcessorVpd, /* 16 Proc Vpd */
- 0,0,0,0,0,0, /* 17 - 22 */
- &hvlpevent_queue, /* 23 Lp Queue */
- 0,0
- }
-};
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
deleted file mode 100644
index 202e22798d30..000000000000
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
-#include <linux/export.h>
-
-#include <asm/system.h>
-#include <asm/paca.h>
-#include <asm/firmware.h>
-#include <asm/iseries/it_lp_queue.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_call_event.h>
-#include "it_lp_naca.h"
-
-/*
- * The LpQueue is used to pass event data from the hypervisor to
- * the partition. This is where I/O interrupt events are communicated.
- *
- * It is written to by the hypervisor so cannot end up in the BSS.
- */
-struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
-
-DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
-
-static char *event_types[HvLpEvent_Type_NumTypes] = {
- "Hypervisor",
- "Machine Facilities",
- "Session Manager",
- "SPD I/O",
- "Virtual Bus",
- "PCI I/O",
- "RIO I/O",
- "Virtual Lan",
- "Virtual I/O"
-};
-
-/* Array of LpEvent handler functions */
-static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
-static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
-
-static struct HvLpEvent * get_next_hvlpevent(void)
-{
- struct HvLpEvent * event;
- event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
-
- if (hvlpevent_is_valid(event)) {
- /* rmb() needed only for weakly consistent machines (regatta) */
- rmb();
- /* Set pointer to next potential event */
- hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
- IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
- IT_LP_EVENT_ALIGN;
-
- /* Wrap to beginning if no room at end */
- if (hvlpevent_queue.hq_current_event >
- hvlpevent_queue.hq_last_event) {
- hvlpevent_queue.hq_current_event =
- hvlpevent_queue.hq_event_stack;
- }
- } else {
- event = NULL;
- }
-
- return event;
-}
-
-static unsigned long spread_lpevents = NR_CPUS;
-
-int hvlpevent_is_pending(void)
-{
- struct HvLpEvent *next_event;
-
- if (smp_processor_id() >= spread_lpevents)
- return 0;
-
- next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
-
- return hvlpevent_is_valid(next_event) ||
- hvlpevent_queue.hq_overflow_pending;
-}
-
-static void hvlpevent_clear_valid(struct HvLpEvent * event)
-{
- /* Tell the Hypervisor that we're done with this event.
- * Also clear bits within this event that might look like valid bits.
- * ie. on 64-byte boundaries.
- */
- struct HvLpEvent *tmp;
- unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
- IT_LP_EVENT_ALIGN) - 1;
-
- switch (extra) {
- case 3:
- tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
- hvlpevent_invalidate(tmp);
- case 2:
- tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
- hvlpevent_invalidate(tmp);
- case 1:
- tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
- hvlpevent_invalidate(tmp);
- }
-
- mb();
-
- hvlpevent_invalidate(event);
-}
-
-void process_hvlpevents(void)
-{
- struct HvLpEvent * event;
-
- restart:
- /* If we have recursed, just return */
- if (!spin_trylock(&hvlpevent_queue.hq_lock))
- return;
-
- for (;;) {
- event = get_next_hvlpevent();
- if (event) {
- /* Call appropriate handler here, passing
- * a pointer to the LpEvent. The handler
- * must make a copy of the LpEvent if it
- * needs it in a bottom half. (perhaps for
- * an ACK)
- *
- * Handlers are responsible for ACK processing
- *
- * The Hypervisor guarantees that LpEvents will
- * only be delivered with types that we have
- * registered for, so no type check is necessary
- * here!
- */
- if (event->xType < HvLpEvent_Type_NumTypes)
- __get_cpu_var(hvlpevent_counts)[event->xType]++;
- if (event->xType < HvLpEvent_Type_NumTypes &&
- lpEventHandler[event->xType])
- lpEventHandler[event->xType](event);
- else {
- u8 type = event->xType;
-
- /*
- * Don't printk in the spinlock as printk
- * may require ack events from the HV to send
- * any characters there.
- */
- hvlpevent_clear_valid(event);
- spin_unlock(&hvlpevent_queue.hq_lock);
- printk(KERN_INFO
- "Unexpected Lp Event type=%d\n", type);
- goto restart;
- }
-
- hvlpevent_clear_valid(event);
- } else if (hvlpevent_queue.hq_overflow_pending)
- /*
- * No more valid events. If overflow events are
- * pending, process them
- */
- HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
- else
- break;
- }
-
- spin_unlock(&hvlpevent_queue.hq_lock);
-}
-
-static int set_spread_lpevents(char *str)
-{
- unsigned long val = simple_strtoul(str, NULL, 0);
-
- /*
- * The parameter is the number of processors to share in processing
- * lp events.
- */
- if (( val > 0) && (val <= NR_CPUS)) {
- spread_lpevents = val;
- printk("lpevent processing spread over %ld processors\n", val);
- } else {
- printk("invalid spread_lpevents %ld\n", val);
- }
-
- return 1;
-}
-__setup("spread_lpevents=", set_spread_lpevents);
-
-void __init setup_hvlpevent_queue(void)
-{
- void *eventStack;
-
- spin_lock_init(&hvlpevent_queue.hq_lock);
-
- /* Allocate a page for the Event Stack. */
- eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
- memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
-
- /* Invoke the hypervisor to initialize the event stack */
- HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
-
- hvlpevent_queue.hq_event_stack = eventStack;
- hvlpevent_queue.hq_current_event = eventStack;
- hvlpevent_queue.hq_last_event = (char *)eventStack +
- (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
- hvlpevent_queue.hq_index = 0;
-}
-
-/* Register a handler for an LpEvent type */
-int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
-{
- if (eventType < HvLpEvent_Type_NumTypes) {
- lpEventHandler[eventType] = handler;
- return 0;
- }
- return 1;
-}
-EXPORT_SYMBOL(HvLpEvent_registerHandler);
-
-int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
-{
- might_sleep();
-
- if (eventType < HvLpEvent_Type_NumTypes) {
- if (!lpEventHandlerPaths[eventType]) {
- lpEventHandler[eventType] = NULL;
- /*
- * We now sleep until all other CPUs have scheduled.
- * This ensures that the deletion is seen by all
- * other CPUs, and that the deleted handler isn't
- * still running on another CPU when we return.
- */
- synchronize_sched();
- return 0;
- }
- }
- return 1;
-}
-EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
-
-/*
- * lpIndex is the partition index of the target partition. It is
- * needed only for VirtualIo, VirtualLan and SessionMgr; zero
- * indicates that our own partition index is used, as for the other types.
- */
-int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
-{
- if ((eventType < HvLpEvent_Type_NumTypes) &&
- lpEventHandler[eventType]) {
- if (lpIndex == 0)
- lpIndex = itLpNaca.xLpIndex;
- HvCallEvent_openLpEventPath(lpIndex, eventType);
- ++lpEventHandlerPaths[eventType];
- return 0;
- }
- return 1;
-}
-
-int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
-{
- if ((eventType < HvLpEvent_Type_NumTypes) &&
- lpEventHandler[eventType] &&
- lpEventHandlerPaths[eventType]) {
- if (lpIndex == 0)
- lpIndex = itLpNaca.xLpIndex;
- HvCallEvent_closeLpEventPath(lpIndex, eventType);
- --lpEventHandlerPaths[eventType];
- return 0;
- }
- return 1;
-}
-
-static int proc_lpevents_show(struct seq_file *m, void *v)
-{
- int cpu, i;
- unsigned long sum;
- static unsigned long cpu_totals[NR_CPUS];
-
- /* FIXME: do we care that there's no locking here? */
- sum = 0;
- for_each_online_cpu(cpu) {
- cpu_totals[cpu] = 0;
- for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
- cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
- }
- sum += cpu_totals[cpu];
- }
-
- seq_printf(m, "LpEventQueue 0\n");
- seq_printf(m, " events processed:\t%lu\n", sum);
-
- for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
- sum = 0;
- for_each_online_cpu(cpu) {
- sum += per_cpu(hvlpevent_counts, cpu)[i];
- }
-
- seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
- }
-
- seq_printf(m, "\n events processed by processor:\n");
-
- for_each_online_cpu(cpu) {
- seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
- }
-
- return 0;
-}
-
-static int proc_lpevents_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_lpevents_show, NULL);
-}
-
-static const struct file_operations proc_lpevents_operations = {
- .open = proc_lpevents_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init proc_lpevents_init(void)
-{
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
-
- proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
- &proc_lpevents_operations);
- return 0;
-}
-__initcall(proc_lpevents_init);
-
diff --git a/arch/powerpc/platforms/iseries/main_store.h b/arch/powerpc/platforms/iseries/main_store.h
deleted file mode 100644
index 1a7a3f50e40b..000000000000
--- a/arch/powerpc/platforms/iseries/main_store.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ISERIES_MAIN_STORE_H
-#define _ISERIES_MAIN_STORE_H
-
-/* Main Store Vpd for Condor,iStar,sStar */
-struct IoHriMainStoreSegment4 {
- u8 msArea0Exists:1;
- u8 msArea1Exists:1;
- u8 msArea2Exists:1;
- u8 msArea3Exists:1;
- u8 reserved1:4;
- u8 reserved2;
-
- u8 msArea0Functional:1;
- u8 msArea1Functional:1;
- u8 msArea2Functional:1;
- u8 msArea3Functional:1;
- u8 reserved3:4;
- u8 reserved4;
-
- u32 totalMainStore;
-
- u64 msArea0Ptr;
- u64 msArea1Ptr;
- u64 msArea2Ptr;
- u64 msArea3Ptr;
-
- u32 cardProductionLevel;
-
- u32 msAdrHole;
-
- u8 msArea0HasRiserVpd:1;
- u8 msArea1HasRiserVpd:1;
- u8 msArea2HasRiserVpd:1;
- u8 msArea3HasRiserVpd:1;
- u8 reserved5:4;
- u8 reserved6;
- u16 reserved7;
-
- u8 reserved8[28];
-
- u64 nonInterleavedBlocksStartAdr;
- u64 nonInterleavedBlocksEndAdr;
-};
-
-/* Main Store VPD for Power4 */
-struct __attribute((packed)) IoHriMainStoreChipInfo1 {
- u32 chipMfgID;
- char chipECLevel[4];
-};
-
-struct IoHriMainStoreVpdIdData {
- char typeNumber[4];
- char modelNumber[4];
- char partNumber[12];
- char serialNumber[12];
-};
-
-struct __attribute((packed)) IoHriMainStoreVpdFruData {
- char fruLabel[8];
- u8 numberOfSlots;
- u8 pluggingType;
- u16 slotMapIndex;
-};
-
-struct __attribute((packed)) IoHriMainStoreAdrRangeBlock {
- void *blockStart;
- void *blockEnd;
- u32 blockProcChipId;
-};
-
-#define MaxAreaAdrRangeBlocks 4
-
-struct __attribute((packed)) IoHriMainStoreArea4 {
- u32 msVpdFormat;
- u8 containedVpdType;
- u8 reserved1;
- u16 reserved2;
-
- u64 msExists;
- u64 msFunctional;
-
- u32 memorySize;
- u32 procNodeId;
-
- u32 numAdrRangeBlocks;
- struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks];
-
- struct IoHriMainStoreChipInfo1 chipInfo0;
- struct IoHriMainStoreChipInfo1 chipInfo1;
- struct IoHriMainStoreChipInfo1 chipInfo2;
- struct IoHriMainStoreChipInfo1 chipInfo3;
- struct IoHriMainStoreChipInfo1 chipInfo4;
- struct IoHriMainStoreChipInfo1 chipInfo5;
- struct IoHriMainStoreChipInfo1 chipInfo6;
- struct IoHriMainStoreChipInfo1 chipInfo7;
-
- void *msRamAreaArray;
- u32 msRamAreaArrayNumEntries;
- u32 msRamAreaArrayEntrySize;
-
- u32 numaDimmExists;
- u32 numaDimmFunctional;
- void *numaDimmArray;
- u32 numaDimmArrayNumEntries;
- u32 numaDimmArrayEntrySize;
-
- struct IoHriMainStoreVpdIdData idData;
-
- u64 powerData;
- u64 cardAssemblyPartNum;
- u64 chipSerialNum;
-
- u64 reserved3;
- char reserved4[16];
-
- struct IoHriMainStoreVpdFruData fruData;
-
- u8 vpdPortNum;
- u8 reserved5;
- u8 frameId;
- u8 rackUnit;
- char asciiKeywordVpd[256];
- u32 reserved6;
-};
-
-
-struct IoHriMainStoreSegment5 {
- u16 reserved1;
- u8 reserved2;
- u8 msVpdFormat;
-
- u32 totalMainStore;
- u64 maxConfiguredMsAdr;
-
- struct IoHriMainStoreArea4 *msAreaArray;
- u32 msAreaArrayNumEntries;
- u32 msAreaArrayEntrySize;
-
- u32 msAreaExists;
- u32 msAreaFunctional;
-
- u64 reserved3;
-};
-
-extern u64 xMsVpd[];
-
-#endif /* _ISERIES_MAIN_STORE_H */
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
deleted file mode 100644
index 254c1fc3d8dd..000000000000
--- a/arch/powerpc/platforms/iseries/mf.c
+++ /dev/null
@@ -1,1275 +0,0 @@
-/*
- * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
- * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
- *
- * This module exists as an interface between a Linux secondary partition
- * running on an iSeries and the primary partition's Virtual Service
- * Processor (VSP) object. The VSP has final authority over powering on/off
- * all partitions in the iSeries. It also provides miscellaneous low-level
- * machine facility type operations.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/proc_fs.h>
-#include <linux/dma-mapping.h>
-#include <linux/bcd.h>
-#include <linux/rtc.h>
-#include <linux/slab.h>
-
-#include <asm/time.h>
-#include <asm/uaccess.h>
-#include <asm/paca.h>
-#include <asm/abs_addr.h>
-#include <asm/firmware.h>
-#include <asm/iseries/mf.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/it_lp_queue.h>
-
-#include "setup.h"
-
-static int mf_initialized;
-
-/*
- * This is the structure layout for the Machine Facilities LPAR event
- * flows.
- */
-struct vsp_cmd_data {
- u64 token;
- u16 cmd;
- HvLpIndex lp_index;
- u8 result_code;
- u32 reserved;
- union {
- u64 state; /* GetStateOut */
- u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */
- u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */
- u64 page[4]; /* GetSrcHistoryIn */
- u64 flag; /* GetAutoIplWhenPrimaryIplsOut,
- SetAutoIplWhenPrimaryIplsIn,
- WhiteButtonPowerOffIn,
- Function08FastPowerOffIn,
- IsSpcnRackPowerIncompleteOut */
- struct {
- u64 token;
- u64 address_type;
- u64 side;
- u32 length;
- u32 offset;
- } kern; /* SetKernelImageIn, GetKernelImageIn,
- SetKernelCmdLineIn, GetKernelCmdLineIn */
- u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */
- u8 reserved[80];
- } sub_data;
-};
-
-struct vsp_rsp_data {
- struct completion com;
- struct vsp_cmd_data *response;
-};
-
-struct alloc_data {
- u16 size;
- u16 type;
- u32 count;
- u16 reserved1;
- u8 reserved2;
- HvLpIndex target_lp;
-};
-
-struct ce_msg_data;
-
-typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp);
-
-struct ce_msg_comp_data {
- ce_msg_comp_hdlr handler;
- void *token;
-};
-
-struct ce_msg_data {
- u8 ce_msg[12];
- char reserved[4];
- struct ce_msg_comp_data *completion;
-};
-
-struct io_mf_lp_event {
- struct HvLpEvent hp_lp_event;
- u16 subtype_result_code;
- u16 reserved1;
- u32 reserved2;
- union {
- struct alloc_data alloc;
- struct ce_msg_data ce_msg;
- struct vsp_cmd_data vsp_cmd;
- } data;
-};
-
-#define subtype_data(a, b, c, d) \
- (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
-
-/*
- * All outgoing event traffic is kept on a FIFO queue. The first
- * pointer points to the one that is outstanding, and all new
- * requests get stuck on the end. Also, we keep a certain number of
- * preallocated pending events so that we can operate very early in
- * the boot up sequence (before kmalloc is ready).
- */
-struct pending_event {
- struct pending_event *next;
- struct io_mf_lp_event event;
- MFCompleteHandler hdlr;
- char dma_data[72];
- unsigned dma_data_length;
- unsigned remote_address;
-};
-static spinlock_t pending_event_spinlock;
-static struct pending_event *pending_event_head;
-static struct pending_event *pending_event_tail;
-static struct pending_event *pending_event_avail;
-#define PENDING_EVENT_PREALLOC_LEN 16
-static struct pending_event pending_event_prealloc[PENDING_EVENT_PREALLOC_LEN];
-
-/*
- * Put a pending event onto the available queue, so it can get reused.
- * Attention! You must have the pending_event_spinlock before calling!
- */
-static void free_pending_event(struct pending_event *ev)
-{
- if (ev != NULL) {
- ev->next = pending_event_avail;
- pending_event_avail = ev;
- }
-}
-
-/*
- * Enqueue the outbound event onto the queue. If the queue was
- * empty to begin with, we must also issue it via the Hypervisor
- * interface. There is a section of code below that touches the
- * head pointer without holding the pending_event_spinlock.
- * This is OK, because we know that nobody else will be modifying
- * the head pointer when we do this.
- */
-static int signal_event(struct pending_event *ev)
-{
- int rc = 0;
- unsigned long flags;
- int go = 1;
- struct pending_event *ev1;
- HvLpEvent_Rc hv_rc;
-
- /* enqueue the event */
- if (ev != NULL) {
- ev->next = NULL;
- spin_lock_irqsave(&pending_event_spinlock, flags);
- if (pending_event_head == NULL)
- pending_event_head = ev;
- else {
- go = 0;
- pending_event_tail->next = ev;
- }
- pending_event_tail = ev;
- spin_unlock_irqrestore(&pending_event_spinlock, flags);
- }
-
- /* send the event */
- while (go) {
- go = 0;
-
- /* any DMA data to send beforehand? */
- if (pending_event_head->dma_data_length > 0)
- HvCallEvent_dmaToSp(pending_event_head->dma_data,
- pending_event_head->remote_address,
- pending_event_head->dma_data_length,
- HvLpDma_Direction_LocalToRemote);
-
- hv_rc = HvCallEvent_signalLpEvent(
- &pending_event_head->event.hp_lp_event);
- if (hv_rc != HvLpEvent_Rc_Good) {
- printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() "
- "failed with %d\n", (int)hv_rc);
-
- spin_lock_irqsave(&pending_event_spinlock, flags);
- ev1 = pending_event_head;
- pending_event_head = pending_event_head->next;
- if (pending_event_head != NULL)
- go = 1;
- spin_unlock_irqrestore(&pending_event_spinlock, flags);
-
- if (ev1 == ev)
- rc = -EIO;
- else if (ev1->hdlr != NULL)
- (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO);
-
- spin_lock_irqsave(&pending_event_spinlock, flags);
- free_pending_event(ev1);
- spin_unlock_irqrestore(&pending_event_spinlock, flags);
- }
- }
-
- return rc;
-}
-
-/*
- * Allocate a new pending_event structure, and initialize it.
- */
-static struct pending_event *new_pending_event(void)
-{
- struct pending_event *ev = NULL;
- HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex();
- unsigned long flags;
- struct HvLpEvent *hev;
-
- spin_lock_irqsave(&pending_event_spinlock, flags);
- if (pending_event_avail != NULL) {
- ev = pending_event_avail;
- pending_event_avail = pending_event_avail->next;
- }
- spin_unlock_irqrestore(&pending_event_spinlock, flags);
- if (ev == NULL) {
- ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC);
- if (ev == NULL) {
- printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
- sizeof(struct pending_event));
- return NULL;
- }
- }
- memset(ev, 0, sizeof(struct pending_event));
- hev = &ev->event.hp_lp_event;
- hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT;
- hev->xType = HvLpEvent_Type_MachineFac;
- hev->xSourceLp = HvLpConfig_getLpIndex();
- hev->xTargetLp = primary_lp;
- hev->xSizeMinus1 = sizeof(ev->event) - 1;
- hev->xRc = HvLpEvent_Rc_Good;
- hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp,
- HvLpEvent_Type_MachineFac);
- hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp,
- HvLpEvent_Type_MachineFac);
-
- return ev;
-}
-
-static int __maybe_unused
-signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
-{
- struct pending_event *ev = new_pending_event();
- int rc;
- struct vsp_rsp_data response;
-
- if (ev == NULL)
- return -ENOMEM;
-
- init_completion(&response.com);
- response.response = vsp_cmd;
- ev->event.hp_lp_event.xSubtype = 6;
- ev->event.hp_lp_event.x.xSubtypeData =
- subtype_data('M', 'F', 'V', 'I');
- ev->event.data.vsp_cmd.token = (u64)&response;
- ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd;
- ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
- ev->event.data.vsp_cmd.result_code = 0xFF;
- ev->event.data.vsp_cmd.reserved = 0;
- memcpy(&(ev->event.data.vsp_cmd.sub_data),
- &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data));
- mb();
-
- rc = signal_event(ev);
- if (rc == 0)
- wait_for_completion(&response.com);
- return rc;
-}
-
-
-/*
- * Send a 12-byte CE message to the primary partition VSP object
- */
-static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion)
-{
- struct pending_event *ev = new_pending_event();
-
- if (ev == NULL)
- return -ENOMEM;
-
- ev->event.hp_lp_event.xSubtype = 0;
- ev->event.hp_lp_event.x.xSubtypeData =
- subtype_data('M', 'F', 'C', 'E');
- memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
- ev->event.data.ce_msg.completion = completion;
- return signal_event(ev);
-}
-
-/*
- * Send a 12-byte CE message (with no data) to the primary partition VSP object
- */
-static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion)
-{
- u8 ce_msg[12];
-
- memset(ce_msg, 0, sizeof(ce_msg));
- ce_msg[3] = ce_op;
- return signal_ce_msg(ce_msg, completion);
-}
-
-/*
- * Send a 12-byte CE message and DMA data to the primary partition VSP object
- */
-static int dma_and_signal_ce_msg(char *ce_msg,
- struct ce_msg_comp_data *completion, void *dma_data,
- unsigned dma_data_length, unsigned remote_address)
-{
- struct pending_event *ev = new_pending_event();
-
- if (ev == NULL)
- return -ENOMEM;
-
- ev->event.hp_lp_event.xSubtype = 0;
- ev->event.hp_lp_event.x.xSubtypeData =
- subtype_data('M', 'F', 'C', 'E');
- memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
- ev->event.data.ce_msg.completion = completion;
- memcpy(ev->dma_data, dma_data, dma_data_length);
- ev->dma_data_length = dma_data_length;
- ev->remote_address = remote_address;
- return signal_event(ev);
-}
-
-/*
- * Initiate a nice (hopefully) shutdown of Linux. We simply try
- * to send the init process a SIGINT signal. If this fails
- * (it shouldn't), we force the partition off in a not-so-nice
- * manner.
- */
-static int shutdown(void)
-{
- int rc = kill_cad_pid(SIGINT, 1);
-
- if (rc) {
- printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
- "hard shutdown commencing\n", rc);
- mf_power_off();
- } else
- printk(KERN_INFO "mf.c: init has been successfully notified "
- "to proceed with shutdown\n");
- return rc;
-}
-
-/*
- * The primary partition VSP object is sending us a new
- * event flow. Handle it...
- */
-static void handle_int(struct io_mf_lp_event *event)
-{
- struct ce_msg_data *ce_msg_data;
- struct ce_msg_data *pce_msg_data;
- unsigned long flags;
- struct pending_event *pev;
-
- /* ack the interrupt */
- event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
- HvCallEvent_ackLpEvent(&event->hp_lp_event);
-
- /* process interrupt */
- switch (event->hp_lp_event.xSubtype) {
- case 0: /* CE message */
- ce_msg_data = &event->data.ce_msg;
- switch (ce_msg_data->ce_msg[3]) {
- case 0x5B: /* power control notification */
- if ((ce_msg_data->ce_msg[5] & 0x20) != 0) {
- printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
- if (shutdown() == 0)
- signal_ce_msg_simple(0xDB, NULL);
- }
- break;
- case 0xC0: /* get time */
- spin_lock_irqsave(&pending_event_spinlock, flags);
- pev = pending_event_head;
- if (pev != NULL)
- pending_event_head = pending_event_head->next;
- spin_unlock_irqrestore(&pending_event_spinlock, flags);
- if (pev == NULL)
- break;
- pce_msg_data = &pev->event.data.ce_msg;
- if (pce_msg_data->ce_msg[3] != 0x40)
- break;
- if (pce_msg_data->completion != NULL) {
- ce_msg_comp_hdlr handler =
- pce_msg_data->completion->handler;
- void *token = pce_msg_data->completion->token;
-
- if (handler != NULL)
- (*handler)(token, ce_msg_data);
- }
- spin_lock_irqsave(&pending_event_spinlock, flags);
- free_pending_event(pev);
- spin_unlock_irqrestore(&pending_event_spinlock, flags);
- /* send next waiting event */
- if (pending_event_head != NULL)
- signal_event(NULL);
- break;
- }
- break;
- case 1: /* IT sys shutdown */
- printk(KERN_INFO "mf.c: Commencing system shutdown\n");
- shutdown();
- break;
- }
-}
-
-/*
- * The primary partition VSP object is acknowledging the receipt
- * of a flow we sent to them. If there are other flows queued
- * up, we must send another one now...
- */
-static void handle_ack(struct io_mf_lp_event *event)
-{
- unsigned long flags;
- struct pending_event *two = NULL;
- unsigned long free_it = 0;
- struct ce_msg_data *ce_msg_data;
- struct ce_msg_data *pce_msg_data;
- struct vsp_rsp_data *rsp;
-
- /* handle current event */
- if (pending_event_head == NULL) {
- printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
- return;
- }
-
- switch (event->hp_lp_event.xSubtype) {
- case 0: /* CE msg */
- ce_msg_data = &event->data.ce_msg;
- if (ce_msg_data->ce_msg[3] != 0x40) {
- free_it = 1;
- break;
- }
- if (ce_msg_data->ce_msg[2] == 0)
- break;
- free_it = 1;
- pce_msg_data = &pending_event_head->event.data.ce_msg;
- if (pce_msg_data->completion != NULL) {
- ce_msg_comp_hdlr handler =
- pce_msg_data->completion->handler;
- void *token = pce_msg_data->completion->token;
-
- if (handler != NULL)
- (*handler)(token, ce_msg_data);
- }
- break;
- case 4: /* allocate */
- case 5: /* deallocate */
- if (pending_event_head->hdlr != NULL)
- (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count);
- free_it = 1;
- break;
- case 6:
- free_it = 1;
- rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token;
- if (rsp == NULL) {
- printk(KERN_ERR "mf.c: no rsp\n");
- break;
- }
- if (rsp->response != NULL)
- memcpy(rsp->response, &event->data.vsp_cmd,
- sizeof(event->data.vsp_cmd));
- complete(&rsp->com);
- break;
- }
-
- /* remove from queue */
- spin_lock_irqsave(&pending_event_spinlock, flags);
- if ((pending_event_head != NULL) && (free_it == 1)) {
- struct pending_event *oldHead = pending_event_head;
-
- pending_event_head = pending_event_head->next;
- two = pending_event_head;
- free_pending_event(oldHead);
- }
- spin_unlock_irqrestore(&pending_event_spinlock, flags);
-
- /* send next waiting event */
- if (two != NULL)
- signal_event(NULL);
-}
-
-/*
- * This is the generic event handler we are registering with
- * the Hypervisor. Ensure the flows are for us, and then
- * parse it enough to know if it is an interrupt or an
- * acknowledge.
- */
-static void hv_handler(struct HvLpEvent *event)
-{
- if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
- if (hvlpevent_is_ack(event))
- handle_ack((struct io_mf_lp_event *)event);
- else
- handle_int((struct io_mf_lp_event *)event);
- } else
- printk(KERN_ERR "mf.c: alien event received\n");
-}
-
-/*
- * Global kernel interface to allocate and seed events into the
- * Hypervisor.
- */
-void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
- unsigned size, unsigned count, MFCompleteHandler hdlr,
- void *user_token)
-{
- struct pending_event *ev = new_pending_event();
- int rc;
-
- if (ev == NULL) {
- rc = -ENOMEM;
- } else {
- ev->event.hp_lp_event.xSubtype = 4;
- ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
- ev->event.hp_lp_event.x.xSubtypeData =
- subtype_data('M', 'F', 'M', 'A');
- ev->event.data.alloc.target_lp = target_lp;
- ev->event.data.alloc.type = type;
- ev->event.data.alloc.size = size;
- ev->event.data.alloc.count = count;
- ev->hdlr = hdlr;
- rc = signal_event(ev);
- }
- if ((rc != 0) && (hdlr != NULL))
- (*hdlr)(user_token, rc);
-}
-EXPORT_SYMBOL(mf_allocate_lp_events);
-
-/*
- * Global kernel interface to unseed and deallocate events already in
- * Hypervisor.
- */
-void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
- unsigned count, MFCompleteHandler hdlr, void *user_token)
-{
- struct pending_event *ev = new_pending_event();
- int rc;
-
- if (ev == NULL)
- rc = -ENOMEM;
- else {
- ev->event.hp_lp_event.xSubtype = 5;
- ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
- ev->event.hp_lp_event.x.xSubtypeData =
- subtype_data('M', 'F', 'M', 'D');
- ev->event.data.alloc.target_lp = target_lp;
- ev->event.data.alloc.type = type;
- ev->event.data.alloc.count = count;
- ev->hdlr = hdlr;
- rc = signal_event(ev);
- }
- if ((rc != 0) && (hdlr != NULL))
- (*hdlr)(user_token, rc);
-}
-EXPORT_SYMBOL(mf_deallocate_lp_events);
-
-/*
- * Global kernel interface to tell the VSP object in the primary
- * partition to power this partition off.
- */
-void mf_power_off(void)
-{
- printk(KERN_INFO "mf.c: Down it goes...\n");
- signal_ce_msg_simple(0x4d, NULL);
- for (;;)
- ;
-}
-
-/*
- * Global kernel interface to tell the VSP object in the primary
- * partition to reboot this partition.
- */
-void mf_reboot(char *cmd)
-{
- printk(KERN_INFO "mf.c: Preparing to bounce...\n");
- signal_ce_msg_simple(0x4e, NULL);
- for (;;)
- ;
-}
-
-/*
- * Display a single word SRC onto the VSP control panel.
- */
-void mf_display_src(u32 word)
-{
- u8 ce[12];
-
- memset(ce, 0, sizeof(ce));
- ce[3] = 0x4a;
- ce[7] = 0x01;
- ce[8] = word >> 24;
- ce[9] = word >> 16;
- ce[10] = word >> 8;
- ce[11] = word;
- signal_ce_msg(ce, NULL);
-}
-
-/*
- * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
- */
-static __init void mf_display_progress_src(u16 value)
-{
- u8 ce[12];
- u8 src[72];
-
- memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
- memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00PROGxxxx ",
- 72);
- src[6] = value >> 8;
- src[7] = value & 255;
- src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
- src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
- src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
- src[47] = "0123456789ABCDEF"[value & 15];
- dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
-}
-
-/*
- * Clear the VSP control panel. Used to "erase" an SRC that was
- * previously displayed.
- */
-static void mf_clear_src(void)
-{
- signal_ce_msg_simple(0x4b, NULL);
-}
-
-void __init mf_display_progress(u16 value)
-{
- if (!mf_initialized)
- return;
-
- if (0xFFFF == value)
- mf_clear_src();
- else
- mf_display_progress_src(value);
-}
-
-/*
- * Initialization code here.
- */
-void __init mf_init(void)
-{
- int i;
-
- spin_lock_init(&pending_event_spinlock);
-
- for (i = 0; i < PENDING_EVENT_PREALLOC_LEN; i++)
- free_pending_event(&pending_event_prealloc[i]);
-
- HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler);
-
- /* virtual continue ack */
- signal_ce_msg_simple(0x57, NULL);
-
- mf_initialized = 1;
- mb();
-
- printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities "
- "initialized\n");
-}
-
-struct rtc_time_data {
- struct completion com;
- struct ce_msg_data ce_msg;
- int rc;
-};
-
-static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
-{
- struct rtc_time_data *rtc = token;
-
- memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
- rtc->rc = 0;
- complete(&rtc->com);
-}
-
-static int mf_set_rtc(struct rtc_time *tm)
-{
- char ce_time[12];
- u8 day, mon, hour, min, sec, y1, y2;
- unsigned year;
-
- year = 1900 + tm->tm_year;
- y1 = year / 100;
- y2 = year % 100;
-
- sec = tm->tm_sec;
- min = tm->tm_min;
- hour = tm->tm_hour;
- day = tm->tm_mday;
- mon = tm->tm_mon + 1;
-
- sec = bin2bcd(sec);
- min = bin2bcd(min);
- hour = bin2bcd(hour);
- mon = bin2bcd(mon);
- day = bin2bcd(day);
- y1 = bin2bcd(y1);
- y2 = bin2bcd(y2);
-
- memset(ce_time, 0, sizeof(ce_time));
- ce_time[3] = 0x41;
- ce_time[4] = y1;
- ce_time[5] = y2;
- ce_time[6] = sec;
- ce_time[7] = min;
- ce_time[8] = hour;
- ce_time[10] = day;
- ce_time[11] = mon;
-
- return signal_ce_msg(ce_time, NULL);
-}
-
-static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
-{
- tm->tm_wday = 0;
- tm->tm_yday = 0;
- tm->tm_isdst = 0;
- if (rc) {
- tm->tm_sec = 0;
- tm->tm_min = 0;
- tm->tm_hour = 0;
- tm->tm_mday = 15;
- tm->tm_mon = 5;
- tm->tm_year = 52;
- return rc;
- }
-
- if ((ce_msg[2] == 0xa9) ||
- (ce_msg[2] == 0xaf)) {
- /* TOD clock is not set */
- tm->tm_sec = 1;
- tm->tm_min = 1;
- tm->tm_hour = 1;
- tm->tm_mday = 10;
- tm->tm_mon = 8;
- tm->tm_year = 71;
- mf_set_rtc(tm);
- }
- {
- u8 year = ce_msg[5];
- u8 sec = ce_msg[6];
- u8 min = ce_msg[7];
- u8 hour = ce_msg[8];
- u8 day = ce_msg[10];
- u8 mon = ce_msg[11];
-
- sec = bcd2bin(sec);
- min = bcd2bin(min);
- hour = bcd2bin(hour);
- day = bcd2bin(day);
- mon = bcd2bin(mon);
- year = bcd2bin(year);
-
- if (year <= 69)
- year += 100;
-
- tm->tm_sec = sec;
- tm->tm_min = min;
- tm->tm_hour = hour;
- tm->tm_mday = day;
- tm->tm_mon = mon;
- tm->tm_year = year;
- }
-
- return 0;
-}
-
-static int mf_get_rtc(struct rtc_time *tm)
-{
- struct ce_msg_comp_data ce_complete;
- struct rtc_time_data rtc_data;
- int rc;
-
- memset(&ce_complete, 0, sizeof(ce_complete));
- memset(&rtc_data, 0, sizeof(rtc_data));
- init_completion(&rtc_data.com);
- ce_complete.handler = &get_rtc_time_complete;
- ce_complete.token = &rtc_data;
- rc = signal_ce_msg_simple(0x40, &ce_complete);
- if (rc)
- return rc;
- wait_for_completion(&rtc_data.com);
- return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
-}
-
-struct boot_rtc_time_data {
- int busy;
- struct ce_msg_data ce_msg;
- int rc;
-};
-
-static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
-{
- struct boot_rtc_time_data *rtc = token;
-
- memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
- rtc->rc = 0;
- rtc->busy = 0;
-}
-
-static int mf_get_boot_rtc(struct rtc_time *tm)
-{
- struct ce_msg_comp_data ce_complete;
- struct boot_rtc_time_data rtc_data;
- int rc;
-
- memset(&ce_complete, 0, sizeof(ce_complete));
- memset(&rtc_data, 0, sizeof(rtc_data));
- rtc_data.busy = 1;
- ce_complete.handler = &get_boot_rtc_time_complete;
- ce_complete.token = &rtc_data;
- rc = signal_ce_msg_simple(0x40, &ce_complete);
- if (rc)
- return rc;
- /* We need to poll here as we are not yet taking interrupts */
- while (rtc_data.busy) {
- if (hvlpevent_is_pending())
- process_hvlpevents();
- }
- return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
-}
-
-#ifdef CONFIG_PROC_FS
-static int mf_cmdline_proc_show(struct seq_file *m, void *v)
-{
- char *page, *p;
- struct vsp_cmd_data vsp_cmd;
- int rc;
- dma_addr_t dma_addr;
-
- /* The HV appears to return no more than 256 bytes of command line */
- page = kmalloc(256, GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- dma_addr = iseries_hv_map(page, 256, DMA_FROM_DEVICE);
- if (dma_addr == DMA_ERROR_CODE) {
- kfree(page);
- return -ENOMEM;
- }
- memset(page, 0, 256);
- memset(&vsp_cmd, 0, sizeof(vsp_cmd));
- vsp_cmd.cmd = 33;
- vsp_cmd.sub_data.kern.token = dma_addr;
- vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
- vsp_cmd.sub_data.kern.side = (u64)m->private;
- vsp_cmd.sub_data.kern.length = 256;
- mb();
- rc = signal_vsp_instruction(&vsp_cmd);
- iseries_hv_unmap(dma_addr, 256, DMA_FROM_DEVICE);
- if (rc) {
- kfree(page);
- return rc;
- }
- if (vsp_cmd.result_code != 0) {
- kfree(page);
- return -ENOMEM;
- }
- p = page;
- while (p - page < 256) {
- if (*p == '\0' || *p == '\n') {
- *p = '\n';
- break;
- }
- p++;
-
- }
- seq_write(m, page, p - page);
- kfree(page);
- return 0;
-}
-
-static int mf_cmdline_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mf_cmdline_proc_show, PDE(inode)->data);
-}
-
-#if 0
-static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
-{
- struct vsp_cmd_data vsp_cmd;
- int rc;
- int len = *size;
- dma_addr_t dma_addr;
-
- dma_addr = iseries_hv_map(buffer, len, DMA_FROM_DEVICE);
- memset(buffer, 0, len);
- memset(&vsp_cmd, 0, sizeof(vsp_cmd));
- vsp_cmd.cmd = 32;
- vsp_cmd.sub_data.kern.token = dma_addr;
- vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
- vsp_cmd.sub_data.kern.side = side;
- vsp_cmd.sub_data.kern.offset = offset;
- vsp_cmd.sub_data.kern.length = len;
- mb();
- rc = signal_vsp_instruction(&vsp_cmd);
- if (rc == 0) {
- if (vsp_cmd.result_code == 0)
- *size = vsp_cmd.sub_data.length_out;
- else
- rc = -ENOMEM;
- }
-
- iseries_hv_unmap(dma_addr, len, DMA_FROM_DEVICE);
-
- return rc;
-}
-
-static int proc_mf_dump_vmlinux(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int sizeToGet = count;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) {
- if (sizeToGet != 0) {
- *start = page + off;
- return sizeToGet;
- }
- *eof = 1;
- return 0;
- }
- *eof = 1;
- return 0;
-}
-#endif
-
-static int mf_side_proc_show(struct seq_file *m, void *v)
-{
- char mf_current_side = ' ';
- struct vsp_cmd_data vsp_cmd;
-
- memset(&vsp_cmd, 0, sizeof(vsp_cmd));
- vsp_cmd.cmd = 2;
- vsp_cmd.sub_data.ipl_type = 0;
- mb();
-
- if (signal_vsp_instruction(&vsp_cmd) == 0) {
- if (vsp_cmd.result_code == 0) {
- switch (vsp_cmd.sub_data.ipl_type) {
- case 0: mf_current_side = 'A';
- break;
- case 1: mf_current_side = 'B';
- break;
- case 2: mf_current_side = 'C';
- break;
- default: mf_current_side = 'D';
- break;
- }
- }
- }
-
- seq_printf(m, "%c\n", mf_current_side);
- return 0;
-}
-
-static int mf_side_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mf_side_proc_show, NULL);
-}
-
-static ssize_t mf_side_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- char side;
- u64 newSide;
- struct vsp_cmd_data vsp_cmd;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (count == 0)
- return 0;
-
- if (get_user(side, buffer))
- return -EFAULT;
-
- switch (side) {
- case 'A': newSide = 0;
- break;
- case 'B': newSide = 1;
- break;
- case 'C': newSide = 2;
- break;
- case 'D': newSide = 3;
- break;
- default:
- printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n");
- return -EINVAL;
- }
-
- memset(&vsp_cmd, 0, sizeof(vsp_cmd));
- vsp_cmd.sub_data.ipl_type = newSide;
- vsp_cmd.cmd = 10;
-
- (void)signal_vsp_instruction(&vsp_cmd);
-
- return count;
-}
-
-static const struct file_operations mf_side_proc_fops = {
- .owner = THIS_MODULE,
- .open = mf_side_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = mf_side_proc_write,
-};
-
-static int mf_src_proc_show(struct seq_file *m, void *v)
-{
- return 0;
-}
-
-static int mf_src_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mf_src_proc_show, NULL);
-}
-
-static ssize_t mf_src_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- char stkbuf[10];
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if ((count < 4) && (count != 1)) {
- printk(KERN_ERR "mf_proc: invalid src\n");
- return -EINVAL;
- }
-
- if (count > (sizeof(stkbuf) - 1))
- count = sizeof(stkbuf) - 1;
- if (copy_from_user(stkbuf, buffer, count))
- return -EFAULT;
-
- if ((count == 1) && (*stkbuf == '\0'))
- mf_clear_src();
- else
- mf_display_src(*(u32 *)stkbuf);
-
- return count;
-}
-
-static const struct file_operations mf_src_proc_fops = {
- .owner = THIS_MODULE,
- .open = mf_src_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = mf_src_proc_write,
-};
-
-static ssize_t mf_cmdline_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- void *data = PDE(file->f_path.dentry->d_inode)->data;
- struct vsp_cmd_data vsp_cmd;
- dma_addr_t dma_addr;
- char *page;
- int ret = -EACCES;
-
- if (!capable(CAP_SYS_ADMIN))
- goto out;
-
- dma_addr = 0;
- page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
- ret = -ENOMEM;
- if (page == NULL)
- goto out;
-
- ret = -EFAULT;
- if (copy_from_user(page, buffer, count))
- goto out_free;
-
- memset(&vsp_cmd, 0, sizeof(vsp_cmd));
- vsp_cmd.cmd = 31;
- vsp_cmd.sub_data.kern.token = dma_addr;
- vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
- vsp_cmd.sub_data.kern.side = (u64)data;
- vsp_cmd.sub_data.kern.length = count;
- mb();
- (void)signal_vsp_instruction(&vsp_cmd);
- ret = count;
-
-out_free:
- iseries_hv_free(count, page, dma_addr);
-out:
- return ret;
-}
-
-static const struct file_operations mf_cmdline_proc_fops = {
- .owner = THIS_MODULE,
- .open = mf_cmdline_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = mf_cmdline_proc_write,
-};
-
-static ssize_t proc_mf_change_vmlinux(struct file *file,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
- ssize_t rc;
- dma_addr_t dma_addr;
- char *page;
- struct vsp_cmd_data vsp_cmd;
-
- rc = -EACCES;
- if (!capable(CAP_SYS_ADMIN))
- goto out;
-
- dma_addr = 0;
- page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
- rc = -ENOMEM;
- if (page == NULL) {
- printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
- goto out;
- }
- rc = -EFAULT;
- if (copy_from_user(page, buf, count))
- goto out_free;
-
- memset(&vsp_cmd, 0, sizeof(vsp_cmd));
- vsp_cmd.cmd = 30;
- vsp_cmd.sub_data.kern.token = dma_addr;
- vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
- vsp_cmd.sub_data.kern.side = (u64)dp->data;
- vsp_cmd.sub_data.kern.offset = *ppos;
- vsp_cmd.sub_data.kern.length = count;
- mb();
- rc = signal_vsp_instruction(&vsp_cmd);
- if (rc)
- goto out_free;
- rc = -ENOMEM;
- if (vsp_cmd.result_code != 0)
- goto out_free;
-
- *ppos += count;
- rc = count;
-out_free:
- iseries_hv_free(count, page, dma_addr);
-out:
- return rc;
-}
-
-static const struct file_operations proc_vmlinux_operations = {
- .write = proc_mf_change_vmlinux,
- .llseek = default_llseek,
-};
-
-static int __init mf_proc_init(void)
-{
- struct proc_dir_entry *mf_proc_root;
- struct proc_dir_entry *ent;
- struct proc_dir_entry *mf;
- char name[2];
- int i;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
-
- mf_proc_root = proc_mkdir("iSeries/mf", NULL);
- if (!mf_proc_root)
- return 1;
-
- name[1] = '\0';
- for (i = 0; i < 4; i++) {
- name[0] = 'A' + i;
- mf = proc_mkdir(name, mf_proc_root);
- if (!mf)
- return 1;
-
- ent = proc_create_data("cmdline", S_IRUSR|S_IWUSR, mf,
- &mf_cmdline_proc_fops, (void *)(long)i);
- if (!ent)
- return 1;
-
- if (i == 3) /* no vmlinux entry for 'D' */
- continue;
-
- ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf,
- &proc_vmlinux_operations,
- (void *)(long)i);
- if (!ent)
- return 1;
- }
-
- ent = proc_create("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
- &mf_side_proc_fops);
- if (!ent)
- return 1;
-
- ent = proc_create("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
- &mf_src_proc_fops);
- if (!ent)
- return 1;
-
- return 0;
-}
-
-__initcall(mf_proc_init);
-
-#endif /* CONFIG_PROC_FS */
-
-/*
- * Get the RTC from the virtual service processor
- * This requires flowing LpEvents to the primary partition
- */
-void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
-{
- mf_get_rtc(rtc_tm);
- rtc_tm->tm_mon--;
-}
-
-/*
- * Set the RTC in the virtual service processor
- * This requires flowing LpEvents to the primary partition
- */
-int iSeries_set_rtc_time(struct rtc_time *tm)
-{
- mf_set_rtc(tm);
- return 0;
-}
-
-unsigned long iSeries_get_boot_time(void)
-{
- struct rtc_time tm;
-
- mf_get_boot_rtc(&tm);
- return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
-}
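Two small details of mf.c above are easy to misread when following the event flow: the subtype_data() macro packs four ASCII bytes into the LP event's subtype word, and the RTC paths round-trip every field through BCD, treating years <= 69 as 20xx. A stand-alone sketch of both follows; the bin2bcd()/bcd2bin() bodies here are assumed equivalents of the kernel helpers, and the sample values are arbitrary.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Same packing as the subtype_data() macro above: 'M','F','C','E' -> 0x4D464345 */
static uint32_t subtype_data(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
	return ((uint32_t)a << 24) + ((uint32_t)b << 16) + ((uint32_t)c << 8) + d;
}

/* Assumed equivalents of bin2bcd()/bcd2bin() used for the CE time message. */
static uint8_t bin2bcd(uint8_t v) { return (uint8_t)(((v / 10) << 4) | (v % 10)); }
static uint8_t bcd2bin(uint8_t v) { return (uint8_t)((v >> 4) * 10 + (v & 0x0f)); }

int main(void)
{
	uint8_t year_bcd = bin2bcd(12);		/* e.g. 2012 -> low two digits */
	uint8_t year = bcd2bin(year_bcd);
	/* Years <= 69 are taken as 20xx, matching rtc_set_tm() above. */
	unsigned tm_year = (year <= 69) ? year + 100 : year;

	printf("MFCE subtype word: 0x%08" PRIX32 "\n",
	       subtype_data('M', 'F', 'C', 'E'));
	printf("BCD 0x%02X -> %u -> tm_year %u (years since 1900)\n",
	       year_bcd, year, tm_year);
	return 0;
}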
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
deleted file mode 100644
index 2c6ff0fdac98..000000000000
--- a/arch/powerpc/platforms/iseries/misc.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * This file contains miscellaneous low-level functions.
- * Copyright (C) 1995-2005 IBM Corp
- *
- * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
- * and Paul Mackerras.
- * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
- * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/processor.h>
-#include <asm/asm-offsets.h>
-#include <asm/ppc_asm.h>
-
- .text
-
-/* Handle pending interrupts in interrupt context */
-_GLOBAL(iseries_handle_interrupts)
- li r0,0x5555
- sc
- blr
diff --git a/arch/powerpc/platforms/iseries/naca.h b/arch/powerpc/platforms/iseries/naca.h
deleted file mode 100644
index f01708e12862..000000000000
--- a/arch/powerpc/platforms/iseries/naca.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _PLATFORMS_ISERIES_NACA_H
-#define _PLATFORMS_ISERIES_NACA_H
-
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/types.h>
-
-struct naca_struct {
- /* Kernel only data - undefined for user space */
- const void *xItVpdAreas; /* VPD Data 0x00 */
- void *xRamDisk; /* iSeries ramdisk 0x08 */
- u64 xRamDiskSize; /* In pages 0x10 */
-};
-
-extern struct naca_struct naca;
-
-#endif /* _PLATFORMS_ISERIES_NACA_H */
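The offsets noted in the comments above (0x00, 0x08, 0x10) are part of the contract with the hypervisor, so they must not drift. A hedged sketch of how they could be checked at compile time on an LP64 build; the _Static_assert lines are illustrative and were not part of the header.

#include <stddef.h>
#include <stdint.h>

struct naca_struct {
	const void *xItVpdAreas;	/* VPD Data            0x00 */
	void *xRamDisk;			/* iSeries ramdisk     0x08 */
	uint64_t xRamDiskSize;		/* In pages            0x10 */
};

/* Compile-time checks of the documented offsets (assumes 64-bit pointers). */
_Static_assert(offsetof(struct naca_struct, xItVpdAreas) == 0x00, "xItVpdAreas offset");
_Static_assert(offsetof(struct naca_struct, xRamDisk) == 0x08, "xRamDisk offset");
_Static_assert(offsetof(struct naca_struct, xRamDiskSize) == 0x10, "xRamDiskSize offset");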
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
deleted file mode 100644
index c75412884625..000000000000
--- a/arch/powerpc/platforms/iseries/pci.c
+++ /dev/null
@@ -1,919 +0,0 @@
-/*
- * Copyright (C) 2001 Allan Trautman, IBM Corporation
- * Copyright (C) 2005,2007 Stephen Rothwell, IBM Corp
- *
- * iSeries specific routines for PCI.
- *
- * Based on code from pci.c and iSeries_pci.c 32bit
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#undef DEBUG
-
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/of.h>
-#include <linux/ratelimit.h>
-
-#include <asm/types.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <asm/iommu.h>
-#include <asm/abs_addr.h>
-#include <asm/firmware.h>
-
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_call_xm.h>
-#include <asm/iseries/mf.h>
-#include <asm/iseries/iommu.h>
-
-#include <asm/ppc-pci.h>
-
-#include "irq.h"
-#include "pci.h"
-#include "call_pci.h"
-
-#define PCI_RETRY_MAX 3
-static int limit_pci_retries = 1; /* Set Retry Error on. */
-
-/*
- * Table defines
- * Each entry covers 4 MB; 1024 entries = 4 GB of I/O address space.
- */
-#define IOMM_TABLE_MAX_ENTRIES 1024
-#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL
-#define BASE_IO_MEMORY 0xE000000000000000UL
-#define END_IO_MEMORY 0xEFFFFFFFFFFFFFFFUL
-
-static unsigned long max_io_memory = BASE_IO_MEMORY;
-static long current_iomm_table_entry;
-
-/*
- * Lookup Tables.
- */
-static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
-static u64 ds_addr_table[IOMM_TABLE_MAX_ENTRIES];
-
-static DEFINE_SPINLOCK(iomm_table_lock);
-
-/*
- * Generate a Direct Select Address for the Hypervisor
- */
-static inline u64 iseries_ds_addr(struct device_node *node)
-{
- struct pci_dn *pdn = PCI_DN(node);
- const u32 *sbp = of_get_property(node, "linux,subbus", NULL);
-
- return ((u64)pdn->busno << 48) + ((u64)(sbp ? *sbp : 0) << 40)
- + ((u64)0x10 << 32);
-}
-
-/*
- * Size of Bus VPD data
- */
-#define BUS_VPDSIZE 1024
-
-/*
- * Bus Vpd Tags
- */
-#define VPD_END_OF_AREA 0x79
-#define VPD_ID_STRING 0x82
-#define VPD_VENDOR_AREA 0x84
-
-/*
- * Mfg Area Tags
- */
-#define VPD_FRU_FRAME_ID 0x4649 /* "FI" */
-#define VPD_SLOT_MAP_FORMAT 0x4D46 /* "MF" */
-#define VPD_SLOT_MAP 0x534D /* "SM" */
-
-/*
- * Structures of the areas
- */
-struct mfg_vpd_area {
- u16 tag;
- u8 length;
- u8 data1;
- u8 data2;
-};
-#define MFG_ENTRY_SIZE 3
-
-struct slot_map {
- u8 agent;
- u8 secondary_agent;
- u8 phb;
- char card_location[3];
- char parms[8];
- char reserved[2];
-};
-#define SLOT_ENTRY_SIZE 16
-
-/*
- * Parse the Slot Area
- */
-static void __init iseries_parse_slot_area(struct slot_map *map, int len,
- HvAgentId agent, u8 *phb, char card[4])
-{
- /*
- * Parse Slot label until we find the one requested
- */
- while (len > 0) {
- if (map->agent == agent) {
- /*
-			 * If the PHB wasn't found yet, grab it from the first entry found.
- */
- if (*phb == 0xff)
- *phb = map->phb;
- /* Found it, extract the data. */
- if (map->phb == *phb) {
- memcpy(card, &map->card_location, 3);
- card[3] = 0;
- break;
- }
- }
- /* Point to the next Slot */
- map = (struct slot_map *)((char *)map + SLOT_ENTRY_SIZE);
- len -= SLOT_ENTRY_SIZE;
- }
-}
-
-/*
- * Parse the Mfg Area
- */
-static void __init iseries_parse_mfg_area(struct mfg_vpd_area *area, int len,
- HvAgentId agent, u8 *phb, u8 *frame, char card[4])
-{
- u16 slot_map_fmt = 0;
-
- /* Parse Mfg Data */
- while (len > 0) {
- int mfg_tag_len = area->length;
- /* Frame ID (FI 4649020310 ) */
- if (area->tag == VPD_FRU_FRAME_ID)
- *frame = area->data1;
- /* Slot Map Format (MF 4D46020004 ) */
- else if (area->tag == VPD_SLOT_MAP_FORMAT)
- slot_map_fmt = (area->data1 * 256)
- + area->data2;
-		/* Slot Map (SM 534D90) */
- else if (area->tag == VPD_SLOT_MAP) {
- struct slot_map *slot_map;
-
- if (slot_map_fmt == 0x1004)
- slot_map = (struct slot_map *)((char *)area
- + MFG_ENTRY_SIZE + 1);
- else
- slot_map = (struct slot_map *)((char *)area
- + MFG_ENTRY_SIZE);
- iseries_parse_slot_area(slot_map, mfg_tag_len,
- agent, phb, card);
- }
- /*
- * Point to the next Mfg Area
-		 * Use the defined size; sizeof gives the wrong answer
- */
- area = (struct mfg_vpd_area *)((char *)area + mfg_tag_len
- + MFG_ENTRY_SIZE);
- len -= (mfg_tag_len + MFG_ENTRY_SIZE);
- }
-}
-
-/*
- * Look for "BUS".  The data is not NUL terminated.
- * PHBID of 0xFF indicates PHB was not found in VPD Data.
- */
-static u8 __init iseries_parse_phbid(u8 *area, int len)
-{
- while (len > 0) {
- if ((*area == 'B') && (*(area + 1) == 'U')
- && (*(area + 2) == 'S')) {
- area += 3;
- while (*area == ' ')
- area++;
- return *area & 0x0F;
- }
- area++;
- len--;
- }
- return 0xff;
-}
-
-/*
- * Parse out the VPD Areas
- */
-static void __init iseries_parse_vpd(u8 *data, int data_len,
- HvAgentId agent, u8 *frame, char card[4])
-{
- u8 phb = 0xff;
-
- while (data_len > 0) {
- int len;
- u8 tag = *data;
-
- if (tag == VPD_END_OF_AREA)
- break;
- len = *(data + 1) + (*(data + 2) * 256);
- data += 3;
- data_len -= 3;
- if (tag == VPD_ID_STRING)
- phb = iseries_parse_phbid(data, len);
- else if (tag == VPD_VENDOR_AREA)
- iseries_parse_mfg_area((struct mfg_vpd_area *)data, len,
- agent, &phb, frame, card);
- /* Point to next Area. */
- data += len;
- data_len -= len;
- }
-}
-
-static int __init iseries_get_location_code(u16 bus, HvAgentId agent,
- u8 *frame, char card[4])
-{
- int status = 0;
- int bus_vpd_len = 0;
- u8 *bus_vpd = kmalloc(BUS_VPDSIZE, GFP_KERNEL);
-
- if (bus_vpd == NULL) {
- printk("PCI: Bus VPD Buffer allocation failure.\n");
- return 0;
- }
- bus_vpd_len = HvCallPci_getBusVpd(bus, iseries_hv_addr(bus_vpd),
- BUS_VPDSIZE);
- if (bus_vpd_len == 0) {
- printk("PCI: Bus VPD Buffer zero length.\n");
- goto out_free;
- }
- /* printk("PCI: bus_vpd: %p, %d\n",bus_vpd, bus_vpd_len); */
- /* Make sure this is what I think it is */
- if (*bus_vpd != VPD_ID_STRING) {
- printk("PCI: Bus VPD Buffer missing starting tag.\n");
- goto out_free;
- }
- iseries_parse_vpd(bus_vpd, bus_vpd_len, agent, frame, card);
- status = 1;
-out_free:
- kfree(bus_vpd);
- return status;
-}
-
-/*
- * Prints the device information.
- * - Pass in pci_dev* pointer to the device.
- * - Pass in the device count
- *
- * Format:
- * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet
- * controller
- */
-static void __init iseries_device_information(struct pci_dev *pdev,
- u16 bus, HvSubBusNumber subbus)
-{
- u8 frame = 0;
- char card[4];
- HvAgentId agent;
-
- agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
- ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
-
- if (iseries_get_location_code(bus, agent, &frame, card)) {
- printk(KERN_INFO "PCI: %s, Vendor %04X Frame%3d, "
- "Card %4s 0x%04X\n", pci_name(pdev), pdev->vendor,
- frame, card, (int)(pdev->class >> 8));
- }
-}
-
-/*
- * iomm_table_allocate_entry
- *
- * Adds pci_dev entry in address translation table
- *
- * - Allocates the number of table entries required, based on the
- *   BAR size.
- * - Allocates starting at BASE_IO_MEMORY and increases.
- * - The size is rounded up to a multiple of the entry size.
- * - current_iomm_table_entry is incremented to keep track of the last entry.
- * - Builds the resource entry for allocated BARs.
- */
-static void __init iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
-{
- struct resource *bar_res = &dev->resource[bar_num];
- long bar_size = pci_resource_len(dev, bar_num);
- struct device_node *dn = pci_device_to_OF_node(dev);
-
- /*
- * No space to allocate, quick exit, skip Allocation.
- */
- if (bar_size == 0)
- return;
- /*
- * Set Resource values.
- */
- spin_lock(&iomm_table_lock);
- bar_res->start = BASE_IO_MEMORY +
- IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
- bar_res->end = bar_res->start + bar_size - 1;
- /*
- * Allocate the number of table entries needed for BAR.
- */
- while (bar_size > 0 ) {
- iomm_table[current_iomm_table_entry] = dn;
- ds_addr_table[current_iomm_table_entry] =
- iseries_ds_addr(dn) | (bar_num << 24);
- bar_size -= IOMM_TABLE_ENTRY_SIZE;
- ++current_iomm_table_entry;
- }
- max_io_memory = BASE_IO_MEMORY +
- IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
- spin_unlock(&iomm_table_lock);
-}
-
-/*
- * allocate_device_bars
- *
- * - Allocates ALL pci_dev BARs and updates the resources with the
- *   allocated values.  BARs with zero length get no table entry.
- *   The BAR size comes from pci_resource_len();
- *   iomm_table_allocate_entry() is called to allocate each entry.
- * - Loops through the BAR resources (0 - 5) and the ROM resource (6).
- */
-static void __init allocate_device_bars(struct pci_dev *dev)
-{
- int bar_num;
-
- for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
- iomm_table_allocate_entry(dev, bar_num);
-}
-
-/*
- * Log error information to system console.
- * Filter out the device not there errors.
- * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
- * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
- * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
- */
-static void pci_log_error(char *error, int bus, int subbus,
- int agent, int hv_res)
-{
- if (hv_res == 0x0302)
- return;
- printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
- error, bus, subbus, agent, hv_res);
-}
-
-/*
- * Look down the chain to find the matching device node
- */
-static struct device_node *find_device_node(int bus, int devfn)
-{
- struct device_node *node;
-
- for (node = NULL; (node = of_find_all_nodes(node)); ) {
- struct pci_dn *pdn = PCI_DN(node);
-
- if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
- return node;
- }
- return NULL;
-}
-
-/*
- * iSeries_pcibios_fixup_resources
- *
- * Fixes up all resources for devices
- */
-void __init iSeries_pcibios_fixup_resources(struct pci_dev *pdev)
-{
- const u32 *agent;
- const u32 *sub_bus;
- unsigned char bus = pdev->bus->number;
- struct device_node *node;
- int i;
-
- node = pci_device_to_OF_node(pdev);
- pr_debug("PCI: iSeries %s, pdev %p, node %p\n",
- pci_name(pdev), pdev, node);
- if (!node) {
- printk("PCI: %s disabled, device tree entry not found !\n",
- pci_name(pdev));
- for (i = 0; i <= PCI_ROM_RESOURCE; i++)
- pdev->resource[i].flags = 0;
- return;
- }
- sub_bus = of_get_property(node, "linux,subbus", NULL);
- agent = of_get_property(node, "linux,agent-id", NULL);
- if (agent && sub_bus) {
- u8 irq = iSeries_allocate_IRQ(bus, 0, *sub_bus);
- int err;
-
- err = HvCallXm_connectBusUnit(bus, *sub_bus, *agent, irq);
- if (err)
- pci_log_error("Connect Bus Unit",
- bus, *sub_bus, *agent, err);
- else {
- err = HvCallPci_configStore8(bus, *sub_bus,
- *agent, PCI_INTERRUPT_LINE, irq);
- if (err)
- pci_log_error("PciCfgStore Irq Failed!",
- bus, *sub_bus, *agent, err);
- else
- pdev->irq = irq;
- }
- }
-
- allocate_device_bars(pdev);
- if (likely(sub_bus))
- iseries_device_information(pdev, bus, *sub_bus);
- else
- printk(KERN_ERR "PCI: Device node %s has missing or invalid "
- "linux,subbus property\n", node->full_name);
-}
-
-/*
- * iSeries_pci_final_fixup(void)
- */
-void __init iSeries_pci_final_fixup(void)
-{
- /* Fix up at the device node and pci_dev relationship */
- mf_display_src(0xC9000100);
- iSeries_activate_IRQs();
- mf_display_src(0xC9000200);
-}
-
-/*
- * Config space read and write functions.
- * For now at least, we look for the device node for the bus and devfn
- * that we are asked to access. It may be possible to translate the devfn
- * to a subbus and deviceid more directly.
- */
-static u64 hv_cfg_read_func[4] = {
- HvCallPciConfigLoad8, HvCallPciConfigLoad16,
- HvCallPciConfigLoad32, HvCallPciConfigLoad32
-};
-
-static u64 hv_cfg_write_func[4] = {
- HvCallPciConfigStore8, HvCallPciConfigStore16,
- HvCallPciConfigStore32, HvCallPciConfigStore32
-};
-
-/*
- * Read PCI config space
- */
-static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int size, u32 *val)
-{
- struct device_node *node = find_device_node(bus->number, devfn);
- u64 fn;
- struct HvCallPci_LoadReturn ret;
-
- if (node == NULL)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (offset > 255) {
- *val = ~0;
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- fn = hv_cfg_read_func[(size - 1) & 3];
- HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);
-
- if (ret.rc != 0) {
- *val = ~0;
- return PCIBIOS_DEVICE_NOT_FOUND; /* or something */
- }
-
- *val = ret.value;
- return 0;
-}
-
-/*
- * Write PCI config space
- */
-
-static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int size, u32 val)
-{
- struct device_node *node = find_device_node(bus->number, devfn);
- u64 fn;
- u64 ret;
-
- if (node == NULL)
- return PCIBIOS_DEVICE_NOT_FOUND;
- if (offset > 255)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- fn = hv_cfg_write_func[(size - 1) & 3];
- ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);
-
- if (ret != 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return 0;
-}
-
-static struct pci_ops iSeries_pci_ops = {
- .read = iSeries_pci_read_config,
- .write = iSeries_pci_write_config
-};
-
-/*
- * Check Return Code
- * -> On Failure, print and log information.
- * Increment Retry Count, if exceeds max, panic partition.
- *
- * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
- * PCI: Device 23.90 ReadL Retry( 1)
- * PCI: Device 23.90 ReadL Retry Successful(1)
- */
-static int check_return_code(char *type, struct device_node *dn,
- int *retry, u64 ret)
-{
- if (ret != 0) {
- struct pci_dn *pdn = PCI_DN(dn);
-
- (*retry)++;
- printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
- type, pdn->busno, pdn->devfn,
- *retry, (int)ret);
- /*
- * Bump the retry and check for retry count exceeded.
- * If, Exceeded, panic the system.
- */
- if (((*retry) > PCI_RETRY_MAX) &&
- (limit_pci_retries > 0)) {
- mf_display_src(0xB6000103);
- panic_timeout = 0;
- panic("PCI: Hardware I/O Error, SRC B6000103, "
- "Automatic Reboot Disabled.\n");
- }
-		return -1;	/* Retry */
- }
- return 0;
-}
-
-/*
- * Translate the I/O Address into a device node, bar, and bar offset.
- * Note: Make sure the passed variables end up on the stack rather
- * than being globally visible.
- */
-static inline struct device_node *xlate_iomm_address(
- const volatile void __iomem *addr,
- u64 *dsaptr, u64 *bar_offset, const char *func)
-{
- unsigned long orig_addr;
- unsigned long base_addr;
- unsigned long ind;
- struct device_node *dn;
-
- orig_addr = (unsigned long __force)addr;
- if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
- static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
-
- if (__ratelimit(&ratelimit))
- printk(KERN_ERR
- "iSeries_%s: invalid access at IO address %p\n",
- func, addr);
- return NULL;
- }
- base_addr = orig_addr - BASE_IO_MEMORY;
- ind = base_addr / IOMM_TABLE_ENTRY_SIZE;
- dn = iomm_table[ind];
-
- if (dn != NULL) {
- *dsaptr = ds_addr_table[ind];
- *bar_offset = base_addr % IOMM_TABLE_ENTRY_SIZE;
- } else
- panic("PCI: Invalid PCI IO address detected!\n");
- return dn;
-}
-
-/*
- * Read MM I/O Instructions for the iSeries
- * On MM I/O error, all ones are returned and iSeries_pci_IoError is called;
- * otherwise, data is returned in big-endian format.
- */
-static u8 iseries_readb(const volatile void __iomem *addr)
-{
- u64 bar_offset;
- u64 dsa;
- int retry = 0;
- struct HvCallPci_LoadReturn ret;
- struct device_node *dn =
- xlate_iomm_address(addr, &dsa, &bar_offset, "read_byte");
-
- if (dn == NULL)
- return 0xff;
- do {
- HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, bar_offset, 0);
- } while (check_return_code("RDB", dn, &retry, ret.rc) != 0);
-
- return ret.value;
-}
-
-static u16 iseries_readw_be(const volatile void __iomem *addr)
-{
- u64 bar_offset;
- u64 dsa;
- int retry = 0;
- struct HvCallPci_LoadReturn ret;
- struct device_node *dn =
- xlate_iomm_address(addr, &dsa, &bar_offset, "read_word");
-
- if (dn == NULL)
- return 0xffff;
- do {
- HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
- bar_offset, 0);
- } while (check_return_code("RDW", dn, &retry, ret.rc) != 0);
-
- return ret.value;
-}
-
-static u32 iseries_readl_be(const volatile void __iomem *addr)
-{
- u64 bar_offset;
- u64 dsa;
- int retry = 0;
- struct HvCallPci_LoadReturn ret;
- struct device_node *dn =
- xlate_iomm_address(addr, &dsa, &bar_offset, "read_long");
-
- if (dn == NULL)
- return 0xffffffff;
- do {
- HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
- bar_offset, 0);
- } while (check_return_code("RDL", dn, &retry, ret.rc) != 0);
-
- return ret.value;
-}
-
-/*
- * Write MM I/O Instructions for the iSeries
- *
- */
-static void iseries_writeb(u8 data, volatile void __iomem *addr)
-{
- u64 bar_offset;
- u64 dsa;
- int retry = 0;
- u64 rc;
- struct device_node *dn =
- xlate_iomm_address(addr, &dsa, &bar_offset, "write_byte");
-
- if (dn == NULL)
- return;
- do {
- rc = HvCall4(HvCallPciBarStore8, dsa, bar_offset, data, 0);
- } while (check_return_code("WWB", dn, &retry, rc) != 0);
-}
-
-static void iseries_writew_be(u16 data, volatile void __iomem *addr)
-{
- u64 bar_offset;
- u64 dsa;
- int retry = 0;
- u64 rc;
- struct device_node *dn =
- xlate_iomm_address(addr, &dsa, &bar_offset, "write_word");
-
- if (dn == NULL)
- return;
- do {
- rc = HvCall4(HvCallPciBarStore16, dsa, bar_offset, data, 0);
- } while (check_return_code("WWW", dn, &retry, rc) != 0);
-}
-
-static void iseries_writel_be(u32 data, volatile void __iomem *addr)
-{
- u64 bar_offset;
- u64 dsa;
- int retry = 0;
- u64 rc;
- struct device_node *dn =
- xlate_iomm_address(addr, &dsa, &bar_offset, "write_long");
-
- if (dn == NULL)
- return;
- do {
- rc = HvCall4(HvCallPciBarStore32, dsa, bar_offset, data, 0);
- } while (check_return_code("WWL", dn, &retry, rc) != 0);
-}
-
-static u16 iseries_readw(const volatile void __iomem *addr)
-{
- return le16_to_cpu(iseries_readw_be(addr));
-}
-
-static u32 iseries_readl(const volatile void __iomem *addr)
-{
- return le32_to_cpu(iseries_readl_be(addr));
-}
-
-static void iseries_writew(u16 data, volatile void __iomem *addr)
-{
- iseries_writew_be(cpu_to_le16(data), addr);
-}
-
-static void iseries_writel(u32 data, volatile void __iomem *addr)
-{
-	iseries_writel_be(cpu_to_le32(data), addr);
-}
-
-static void iseries_readsb(const volatile void __iomem *addr, void *buf,
- unsigned long count)
-{
- u8 *dst = buf;
- while(count-- > 0)
- *(dst++) = iseries_readb(addr);
-}
-
-static void iseries_readsw(const volatile void __iomem *addr, void *buf,
- unsigned long count)
-{
- u16 *dst = buf;
- while(count-- > 0)
- *(dst++) = iseries_readw_be(addr);
-}
-
-static void iseries_readsl(const volatile void __iomem *addr, void *buf,
- unsigned long count)
-{
- u32 *dst = buf;
- while(count-- > 0)
- *(dst++) = iseries_readl_be(addr);
-}
-
-static void iseries_writesb(volatile void __iomem *addr, const void *buf,
- unsigned long count)
-{
- const u8 *src = buf;
- while(count-- > 0)
- iseries_writeb(*(src++), addr);
-}
-
-static void iseries_writesw(volatile void __iomem *addr, const void *buf,
- unsigned long count)
-{
- const u16 *src = buf;
- while(count-- > 0)
- iseries_writew_be(*(src++), addr);
-}
-
-static void iseries_writesl(volatile void __iomem *addr, const void *buf,
- unsigned long count)
-{
- const u32 *src = buf;
- while(count-- > 0)
- iseries_writel_be(*(src++), addr);
-}
-
-static void iseries_memset_io(volatile void __iomem *addr, int c,
- unsigned long n)
-{
- volatile char __iomem *d = addr;
-
- while (n-- > 0)
- iseries_writeb(c, d++);
-}
-
-static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
- unsigned long n)
-{
- char *d = dest;
- const volatile char __iomem *s = src;
-
- while (n-- > 0)
- *d++ = iseries_readb(s++);
-}
-
-static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
- unsigned long n)
-{
- const char *s = src;
- volatile char __iomem *d = dest;
-
- while (n-- > 0)
- iseries_writeb(*s++, d++);
-}
-
-/* We only set MMIO ops. The default PIO ops will default to
- * the MMIO ops + pci_io_base, which is 0 on iSeries as
- * expected, so both should work.
- *
- * Note that we don't implement the readq/writeq versions as
- * I don't know of an HV call for doing so. Thus, the default
- * operation will be used instead, which will fault as the value
- * returned by iSeries for MMIO addresses always hits a non-mapped
- * area. This is as good as the BUG() we used to have there.
- */
-static struct ppc_pci_io __initdata iseries_pci_io = {
- .readb = iseries_readb,
- .readw = iseries_readw,
- .readl = iseries_readl,
- .readw_be = iseries_readw_be,
- .readl_be = iseries_readl_be,
- .writeb = iseries_writeb,
- .writew = iseries_writew,
- .writel = iseries_writel,
- .writew_be = iseries_writew_be,
- .writel_be = iseries_writel_be,
- .readsb = iseries_readsb,
- .readsw = iseries_readsw,
- .readsl = iseries_readsl,
- .writesb = iseries_writesb,
- .writesw = iseries_writesw,
- .writesl = iseries_writesl,
- .memset_io = iseries_memset_io,
- .memcpy_fromio = iseries_memcpy_fromio,
- .memcpy_toio = iseries_memcpy_toio,
-};
-
-/*
- * iSeries_pcibios_init
- *
- * Description:
- * This function checks for all possible system PCI host bridges that connect
- * PCI buses. The system hypervisor is queried as to the guest partition
- * ownership status. A pci_controller is built for any bus which is partially
- * owned or fully owned by this guest partition.
- */
-void __init iSeries_pcibios_init(void)
-{
- struct pci_controller *phb;
- struct device_node *root = of_find_node_by_path("/");
- struct device_node *node = NULL;
-
- /* Install IO hooks */
- ppc_pci_io = iseries_pci_io;
-
- pci_probe_only = 1;
-
- /* iSeries has no IO space in the common sense, it needs to set
- * the IO base to 0
- */
- pci_io_base = 0;
-
- if (root == NULL) {
- printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
- "of device tree\n");
- return;
- }
- while ((node = of_get_next_child(root, node)) != NULL) {
- HvBusNumber bus;
- const u32 *busp;
-
- if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
- continue;
-
- busp = of_get_property(node, "bus-range", NULL);
- if (busp == NULL)
- continue;
- bus = *busp;
- printk("bus %d appears to exist\n", bus);
- phb = pcibios_alloc_controller(node);
- if (phb == NULL)
- continue;
- /* All legacy iSeries PHBs are in domain zero */
- phb->global_number = 0;
-
- phb->first_busno = bus;
- phb->last_busno = bus;
- phb->ops = &iSeries_pci_ops;
- phb->io_base_virt = (void __iomem *)_IO_BASE;
- phb->io_resource.flags = IORESOURCE_IO;
- phb->io_resource.start = BASE_IO_MEMORY;
- phb->io_resource.end = END_IO_MEMORY;
- phb->io_resource.name = "iSeries PCI IO";
- phb->mem_resources[0].flags = IORESOURCE_MEM;
- phb->mem_resources[0].start = BASE_IO_MEMORY;
- phb->mem_resources[0].end = END_IO_MEMORY;
- phb->mem_resources[0].name = "iSeries PCI MEM";
- }
-
- of_node_put(root);
-
- pci_devs_phb_init();
-}
-
diff --git a/arch/powerpc/platforms/iseries/pci.h b/arch/powerpc/platforms/iseries/pci.h
deleted file mode 100644
index d9cf974c2718..000000000000
--- a/arch/powerpc/platforms/iseries/pci.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef _PLATFORMS_ISERIES_PCI_H
-#define _PLATFORMS_ISERIES_PCI_H
-
-/*
- * Created by Allan Trautman on Tue Feb 20, 2001.
- *
- * Define some useful macros for the iSeries pci routines.
- * Copyright (C) 2001 Allan H Trautman, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the:
- * Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
- *
- * Change Activity:
- * Created Feb 20, 2001
- * Added device reset, March 22, 2001
- * Ported to ppc64, May 25, 2001
- * End Change Activity
- */
-
-/*
- * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
- * For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h
- */
-
-#define ISERIES_PCI_AGENTID(idsel, func) \
- (((idsel & 0x0F) << 4) | (func & 0x07))
-#define ISERIES_ENCODE_DEVICE(agentid) \
- ((0x10) | ((agentid & 0x20) >> 2) | (agentid & 0x07))
-
-#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus) ((subbus >> 5) & 0x7)
-#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7)
-
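A worked example of the packing above, with arbitrary values (idsel 0x3, function 0x2). The helper exists only to hold the compile-time checks (BUILD_BUG_ON needs <linux/bug.h>) and is not part of the original header.

static inline void iseries_agentid_example(void)
{
	/* ((0x3 & 0x0F) << 4) | (0x2 & 0x07) == 0x32 */
	BUILD_BUG_ON(ISERIES_PCI_AGENTID(0x3, 0x2) != 0x32);
	/* 0x10 | ((0x32 & 0x20) >> 2) | (0x32 & 0x07) == 0x10 | 0x08 | 0x02 */
	BUILD_BUG_ON(ISERIES_ENCODE_DEVICE(0x32) != 0x1a);
}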
-struct pci_dev;
-
-#ifdef CONFIG_PCI
-extern void iSeries_pcibios_init(void);
-extern void iSeries_pci_final_fixup(void);
-extern void iSeries_pcibios_fixup_resources(struct pci_dev *dev);
-#else
-static inline void iSeries_pcibios_init(void) { }
-static inline void iSeries_pci_final_fixup(void) { }
-static inline void iSeries_pcibios_fixup_resources(struct pci_dev *dev) {}
-#endif
-
-#endif /* _PLATFORMS_ISERIES_PCI_H */
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c
deleted file mode 100644
index 06763682db47..000000000000
--- a/arch/powerpc/platforms/iseries/proc.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/param.h> /* for HZ */
-#include <asm/paca.h>
-#include <asm/processor.h>
-#include <asm/time.h>
-#include <asm/lppaca.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_call_xm.h>
-
-#include "processor_vpd.h"
-#include "main_store.h"
-
-static int __init iseries_proc_create(void)
-{
- struct proc_dir_entry *e;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
-
- e = proc_mkdir("iSeries", 0);
- if (!e)
- return 1;
-
- return 0;
-}
-core_initcall(iseries_proc_create);
-
-static unsigned long startTitan = 0;
-static unsigned long startTb = 0;
-
-static int proc_titantod_show(struct seq_file *m, void *v)
-{
- unsigned long tb0, titan_tod;
-
- tb0 = get_tb();
- titan_tod = HvCallXm_loadTod();
-
- seq_printf(m, "Titan\n" );
- seq_printf(m, " time base = %016lx\n", tb0);
- seq_printf(m, " titan tod = %016lx\n", titan_tod);
- seq_printf(m, " xProcFreq = %016x\n",
- xIoHriProcessorVpd[0].xProcFreq);
- seq_printf(m, " xTimeBaseFreq = %016x\n",
- xIoHriProcessorVpd[0].xTimeBaseFreq);
- seq_printf(m, " tb_ticks_per_jiffy = %lu\n", tb_ticks_per_jiffy);
- seq_printf(m, " tb_ticks_per_usec = %lu\n", tb_ticks_per_usec);
-
- if (!startTitan) {
- startTitan = titan_tod;
- startTb = tb0;
- } else {
- unsigned long titan_usec = (titan_tod - startTitan) >> 12;
- unsigned long tb_ticks = (tb0 - startTb);
- unsigned long titan_jiffies = titan_usec / (1000000/HZ);
- unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
- unsigned long titan_jiff_rem_usec =
- titan_usec - titan_jiff_usec;
- unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
- unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
- unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
- unsigned long tb_jiff_rem_usec =
- tb_jiff_rem_ticks / tb_ticks_per_usec;
- unsigned long new_tb_ticks_per_jiffy =
- (tb_ticks * (1000000/HZ))/titan_usec;
-
- seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
- seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
- seq_printf(m, " titan jiffies = %lu.%04lu\n", titan_jiffies,
- titan_jiff_rem_usec);
- seq_printf(m, " tb jiffies = %lu.%04lu\n", tb_jiffies,
- tb_jiff_rem_usec);
- seq_printf(m, " new tb_ticks_per_jiffy = %lu\n",
- new_tb_ticks_per_jiffy);
- }
-
- return 0;
-}
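To make the arithmetic concrete: the >> 12 implies a Titan TOD tick is 1/4096 of a microsecond, so titan_usec is elapsed wall time in microseconds, and new_tb_ticks_per_jiffy rescales the observed timebase delta to one jiffy. A self-contained rework with made-up sample numbers (HZ assumed to be 100):

static unsigned long example_recalibrate(void)
{
	unsigned long titan_delta = 20000UL << 12;	/* 20,000 us worth of Titan ticks */
	unsigned long tb_delta = 10000000UL;		/* timebase ticks in the same period */
	unsigned long titan_usec = titan_delta >> 12;	/* back to 20,000 us */

	/* (10,000,000 * 10,000) / 20,000 = 5,000,000 ticks per 10 ms jiffy */
	return (tb_delta * (1000000 / 100)) / titan_usec;
}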
-
-static int proc_titantod_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_titantod_show, NULL);
-}
-
-static const struct file_operations proc_titantod_operations = {
- .open = proc_titantod_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init iseries_proc_init(void)
-{
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
-
- proc_create("iSeries/titanTod", S_IFREG|S_IRUGO, NULL,
- &proc_titantod_operations);
- return 0;
-}
-__initcall(iseries_proc_init);
diff --git a/arch/powerpc/platforms/iseries/processor_vpd.h b/arch/powerpc/platforms/iseries/processor_vpd.h
deleted file mode 100644
index 7ac5d0d0dbfa..000000000000
--- a/arch/powerpc/platforms/iseries/processor_vpd.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ISERIES_PROCESSOR_VPD_H
-#define _ISERIES_PROCESSOR_VPD_H
-
-#include <asm/types.h>
-
-/*
- * This struct maps Processor Vpd that is DMAd to SLIC by CSP
- */
-struct IoHriProcessorVpd {
- u8 xFormat; // VPD format indicator x00-x00
- u8 xProcStatus:8; // Processor State x01-x01
- u8 xSecondaryThreadCount; // Secondary thread cnt x02-x02
- u8 xSrcType:1; // Src Type x03-x03
- u8 xSrcSoft:1; // Src stay soft ...
- u8 xSrcParable:1; // Src parable ...
- u8 xRsvd1:5; // Reserved ...
- u16 xHvPhysicalProcIndex; // Hypervisor physical proc index x04-x05
- u16 xRsvd2; // Reserved x06-x07
- u32 xHwNodeId; // Hardware node id x08-x0B
- u32 xHwProcId; // Hardware processor id x0C-x0F
-
- u32 xTypeNum; // Card Type/CCIN number x10-x13
- u32 xModelNum; // Model/Feature number x14-x17
- u64 xSerialNum; // Serial number x18-x1F
- char xPartNum[12]; // Book Part or FPU number x20-x2B
- char xMfgID[4]; // Manufacturing ID x2C-x2F
-
- u32 xProcFreq; // Processor Frequency x30-x33
- u32 xTimeBaseFreq; // Time Base Frequency x34-x37
-
- u32 xChipEcLevel; // Chip EC Levels x38-x3B
- u32 xProcIdReg; // PIR SPR value x3C-x3F
- u32 xPVR; // PVR value x40-x43
- u8 xRsvd3[12]; // Reserved x44-x4F
-
- u32 xInstCacheSize; // Instruction cache size in KB x50-x53
- u32 xInstBlockSize; // Instruction cache block size x54-x57
- u32 xDataCacheOperandSize; // Data cache operand size x58-x5B
- u32 xInstCacheOperandSize; // Inst cache operand size x5C-x5F
-
- u32 xDataL1CacheSizeKB; // L1 data cache size in KB x60-x63
- u32 xDataL1CacheLineSize; // L1 data cache block size x64-x67
- u64 xRsvd4; // Reserved x68-x6F
-
- u32 xDataL2CacheSizeKB; // L2 data cache size in KB x70-x73
- u32 xDataL2CacheLineSize; // L2 data cache block size x74-x77
- u64 xRsvd5; // Reserved x78-x7F
-
- u32 xDataL3CacheSizeKB; // L3 data cache size in KB x80-x83
- u32 xDataL3CacheLineSize; // L3 data cache block size x84-x87
- u64 xRsvd6; // Reserved x88-x8F
-
- u64 xFruLabel; // Card Location Label x90-x97
- u8 xSlotsOnCard; // Slots on card (0=no slots) x98-x98
- u8 xPartLocFlag; // Location flag (0-pluggable 1-imbedded) x99-x99
- u16 xSlotMapIndex; // Index in slot map table x9A-x9B
- u8 xSmartCardPortNo; // Smart card port number x9C-x9C
- u8 xRsvd7; // Reserved x9D-x9D
- u16 xFrameIdAndRackUnit; // Frame ID and rack unit adr x9E-x9F
-
- u8 xRsvd8[24]; // Reserved xA0-xB7
-
- char xProcSrc[72]; // CSP format SRC xB8-xFF
-};
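Because the trailing comments document a fixed DMA layout, a few of the offsets can be spot-checked at compile time. This is a sketch added for illustration (using offsetof and BUILD_BUG_ON), not something the original header carried:

static inline void iohri_processor_vpd_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct IoHriProcessorVpd, xProcFreq) != 0x30);
	BUILD_BUG_ON(offsetof(struct IoHriProcessorVpd, xPVR) != 0x40);
	BUILD_BUG_ON(sizeof(struct IoHriProcessorVpd) != 0x100);	/* 0xB8 + 72 bytes of SRC */
}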
-
-extern struct IoHriProcessorVpd xIoHriProcessorVpd[];
-
-#endif /* _ISERIES_PROCESSOR_VPD_H */
diff --git a/arch/powerpc/platforms/iseries/release_data.h b/arch/powerpc/platforms/iseries/release_data.h
deleted file mode 100644
index 6ad7d843e8fc..000000000000
--- a/arch/powerpc/platforms/iseries/release_data.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ISERIES_RELEASE_DATA_H
-#define _ISERIES_RELEASE_DATA_H
-
-/*
- * This control block contains the critical information about the
- * release so that it can be changed in the future (ie, the virtual
- * address of the OS's NACA).
- */
-#include <asm/types.h>
-#include "naca.h"
-
-/*
- * When we IPL a secondary partition, we will check if the
- * secondary xMinPlicVrmIndex > the primary xVrmIndex.
- * If it is then this tells PLIC that this secondary is not
- * supported running on this "old" of a level of PLIC.
- *
- * Likewise, we will compare the primary xMinSlicVrmIndex to
- * the secondary xVrmIndex.
- * If the primary xMinSlicVrmDelta > secondary xVrmDelta then we
- * know that this PLIC does not support running an OS "that old".
- */
-
-#define HVREL_TAGSINACTIVE 0x8000
-#define HVREL_32BIT 0x4000
-#define HVREL_NOSHAREDPROCS 0x2000
-#define HVREL_NOHMT 0x1000
-
-struct HvReleaseData {
- u32 xDesc; /* Descriptor "HvRD" ebcdic x00-x03 */
- u16 xSize; /* Size of this control block x04-x05 */
- u16 xVpdAreasPtrOffset; /* Offset in NACA of ItVpdAreas x06-x07 */
- struct naca_struct *xSlicNacaAddr; /* Virt addr of SLIC NACA x08-x0F */
- u32 xMsNucDataOffset; /* Offset of Linux Mapping Data x10-x13 */
- u32 xRsvd1; /* Reserved x14-x17 */
- u16 xFlags;
- u16 xVrmIndex; /* VRM Index of OS image x1A-x1B */
- u16 xMinSupportedPlicVrmIndex; /* Min PLIC level (soft) x1C-x1D */
- u16 xMinCompatablePlicVrmIndex; /* Min PLIC level (hard) x1E-x1F */
- char xVrmName[12]; /* Displayable name x20-x2B */
- char xRsvd3[20]; /* Reserved x2C-x3F */
-};
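A minimal sketch of the rule the comment above describes, expressed against the fields of struct HvReleaseData; the actual check is made by PLIC at IPL time, not by this kernel code, so the helper name and shape are purely illustrative.

static inline int example_secondary_is_supported(const struct HvReleaseData *primary,
						 const struct HvReleaseData *secondary)
{
	/* the secondary needs a PLIC at least as new as its stated minimum */
	return secondary->xMinSupportedPlicVrmIndex <= primary->xVrmIndex;
}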
-
-extern const struct HvReleaseData hvReleaseData;
-
-#endif /* _ISERIES_RELEASE_DATA_H */
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
deleted file mode 100644
index 8fc62586a973..000000000000
--- a/arch/powerpc/platforms/iseries/setup.c
+++ /dev/null
@@ -1,722 +0,0 @@
-/*
- * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
- * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
- *
- * Description:
- * Architecture- / platform-specific boot-time initialization code for
- * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
- * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
- * <dan@net4x.com>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#undef DEBUG
-
-#include <linux/init.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/export.h>
-#include <linux/seq_file.h>
-#include <linux/kdev_t.h>
-#include <linux/kexec.h>
-#include <linux/major.h>
-#include <linux/root_dev.h>
-#include <linux/kernel.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-
-#include <asm/processor.h>
-#include <asm/machdep.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/cputable.h>
-#include <asm/sections.h>
-#include <asm/iommu.h>
-#include <asm/firmware.h>
-#include <asm/system.h>
-#include <asm/time.h>
-#include <asm/paca.h>
-#include <asm/cache.h>
-#include <asm/abs_addr.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/hv_call_event.h>
-#include <asm/iseries/hv_call_xm.h>
-#include <asm/iseries/it_lp_queue.h>
-#include <asm/iseries/mf.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/lpar_map.h>
-#include <asm/udbg.h>
-#include <asm/irq.h>
-
-#include "naca.h"
-#include "setup.h"
-#include "irq.h"
-#include "vpd_areas.h"
-#include "processor_vpd.h"
-#include "it_lp_naca.h"
-#include "main_store.h"
-#include "call_sm.h"
-#include "call_hpt.h"
-#include "pci.h"
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-/* Function Prototypes */
-static unsigned long build_iSeries_Memory_Map(void);
-static void iseries_shared_idle(void);
-static void iseries_dedicated_idle(void);
-
-
-struct MemoryBlock {
- unsigned long absStart;
- unsigned long absEnd;
- unsigned long logicalStart;
- unsigned long logicalEnd;
-};
-
-/*
- * Process the main store vpd to determine where the holes in memory are
- * and return the number of physical blocks and fill in the array of
- * block data.
- */
-static unsigned long iSeries_process_Condor_mainstore_vpd(
- struct MemoryBlock *mb_array, unsigned long max_entries)
-{
- unsigned long holeFirstChunk, holeSizeChunks;
- unsigned long numMemoryBlocks = 1;
- struct IoHriMainStoreSegment4 *msVpd =
- (struct IoHriMainStoreSegment4 *)xMsVpd;
- unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
- unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
- unsigned long holeSize = holeEnd - holeStart;
-
- printk("Mainstore_VPD: Condor\n");
- /*
- * Determine if absolute memory has any
- * holes so that we can interpret the
- * access map we get back from the hypervisor
- * correctly.
- */
- mb_array[0].logicalStart = 0;
- mb_array[0].logicalEnd = 0x100000000UL;
- mb_array[0].absStart = 0;
- mb_array[0].absEnd = 0x100000000UL;
-
- if (holeSize) {
- numMemoryBlocks = 2;
- holeStart = holeStart & 0x000fffffffffffffUL;
- holeStart = addr_to_chunk(holeStart);
- holeFirstChunk = holeStart;
- holeSize = addr_to_chunk(holeSize);
- holeSizeChunks = holeSize;
- printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
- holeFirstChunk, holeSizeChunks );
- mb_array[0].logicalEnd = holeFirstChunk;
- mb_array[0].absEnd = holeFirstChunk;
- mb_array[1].logicalStart = holeFirstChunk;
- mb_array[1].logicalEnd = 0x100000000UL - holeSizeChunks;
- mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
- mb_array[1].absEnd = 0x100000000UL;
- }
- return numMemoryBlocks;
-}
-
-#define MaxSegmentAreas 32
-#define MaxSegmentAdrRangeBlocks 128
-#define MaxAreaRangeBlocks 4
-
-static unsigned long iSeries_process_Regatta_mainstore_vpd(
- struct MemoryBlock *mb_array, unsigned long max_entries)
-{
- struct IoHriMainStoreSegment5 *msVpdP =
- (struct IoHriMainStoreSegment5 *)xMsVpd;
- unsigned long numSegmentBlocks = 0;
- u32 existsBits = msVpdP->msAreaExists;
- unsigned long area_num;
-
- printk("Mainstore_VPD: Regatta\n");
-
- for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
- unsigned long numAreaBlocks;
- struct IoHriMainStoreArea4 *currentArea;
-
- if (existsBits & 0x80000000) {
- unsigned long block_num;
-
- currentArea = &msVpdP->msAreaArray[area_num];
- numAreaBlocks = currentArea->numAdrRangeBlocks;
- printk("ms_vpd: processing area %2ld blocks=%ld",
- area_num, numAreaBlocks);
- for (block_num = 0; block_num < numAreaBlocks;
- ++block_num ) {
- /* Process an address range block */
- struct MemoryBlock tempBlock;
- unsigned long i;
-
- tempBlock.absStart =
- (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
- tempBlock.absEnd =
- (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
- tempBlock.logicalStart = 0;
- tempBlock.logicalEnd = 0;
- printk("\n block %ld absStart=%016lx absEnd=%016lx",
- block_num, tempBlock.absStart,
- tempBlock.absEnd);
-
- for (i = 0; i < numSegmentBlocks; ++i) {
- if (mb_array[i].absStart ==
- tempBlock.absStart)
- break;
- }
- if (i == numSegmentBlocks) {
- if (numSegmentBlocks == max_entries)
- panic("iSeries_process_mainstore_vpd: too many memory blocks");
- mb_array[numSegmentBlocks] = tempBlock;
- ++numSegmentBlocks;
- } else
- printk(" (duplicate)");
- }
- printk("\n");
- }
- existsBits <<= 1;
- }
- /* Now sort the blocks found into ascending sequence */
- if (numSegmentBlocks > 1) {
- unsigned long m, n;
-
- for (m = 0; m < numSegmentBlocks - 1; ++m) {
- for (n = numSegmentBlocks - 1; m < n; --n) {
- if (mb_array[n].absStart <
- mb_array[n-1].absStart) {
- struct MemoryBlock tempBlock;
-
- tempBlock = mb_array[n];
- mb_array[n] = mb_array[n-1];
- mb_array[n-1] = tempBlock;
- }
- }
- }
- }
- /*
- * Assign "logical" addresses to each block. These
- * addresses correspond to the hypervisor "bitmap" space.
- * Convert all addresses into units of 256K chunks.
- */
- {
- unsigned long i, nextBitmapAddress;
-
- printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
- nextBitmapAddress = 0;
- for (i = 0; i < numSegmentBlocks; ++i) {
- unsigned long length = mb_array[i].absEnd -
- mb_array[i].absStart;
-
- mb_array[i].logicalStart = nextBitmapAddress;
- mb_array[i].logicalEnd = nextBitmapAddress + length;
- nextBitmapAddress += length;
- printk(" Bitmap range: %016lx - %016lx\n"
- " Absolute range: %016lx - %016lx\n",
- mb_array[i].logicalStart,
- mb_array[i].logicalEnd,
- mb_array[i].absStart, mb_array[i].absEnd);
- mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
- 0x000fffffffffffffUL);
- mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
- 0x000fffffffffffffUL);
- mb_array[i].logicalStart =
- addr_to_chunk(mb_array[i].logicalStart);
- mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
- }
- }
-
- return numSegmentBlocks;
-}
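The conversions above all go through addr_to_chunk()/chunk_to_addr(), which are defined elsewhere. Assuming the 256K chunk size stated in the comments (an 18-bit shift, not visible in this file), they reduce to something like the sketch below.

#define EXAMPLE_CHUNK_SHIFT	18	/* 256K = 1 << 18; the real constant lives elsewhere */

static inline unsigned long example_addr_to_chunk(unsigned long addr)
{
	return addr >> EXAMPLE_CHUNK_SHIFT;
}

static inline unsigned long example_chunk_to_addr(unsigned long chunk)
{
	return chunk << EXAMPLE_CHUNK_SHIFT;
}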
-
-static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
- unsigned long max_entries)
-{
- unsigned long i;
- unsigned long mem_blocks = 0;
-
- if (mmu_has_feature(MMU_FTR_SLB))
- mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
- max_entries);
- else
- mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
- max_entries);
-
- printk("Mainstore_VPD: numMemoryBlocks = %ld\n", mem_blocks);
- for (i = 0; i < mem_blocks; ++i) {
- printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
- " abs chunks %016lx - %016lx\n",
- i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
- mb_array[i].absStart, mb_array[i].absEnd);
- }
- return mem_blocks;
-}
-
-static void __init iSeries_get_cmdline(void)
-{
- char *p, *q;
-
- /* copy the command line parameter from the primary VSP */
- HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
- HvLpDma_Direction_RemoteToLocal);
-
- p = cmd_line;
- q = cmd_line + 255;
- while(p < q) {
- if (!*p || *p == '\n')
- break;
- ++p;
- }
- *p = 0;
-}
-
-static void __init iSeries_init_early(void)
-{
- DBG(" -> iSeries_init_early()\n");
-
- /* Snapshot the timebase, for use in later recalibration */
- iSeries_time_init_early();
-
- /*
- * Initialize the DMA/TCE management
- */
- iommu_init_early_iSeries();
-
- /* Initialize machine-dependency vectors */
-#ifdef CONFIG_SMP
- smp_init_iSeries();
-#endif
-
- /* Associate Lp Event Queue 0 with processor 0 */
- HvCallEvent_setLpEventQueueInterruptProc(0, 0);
-
- mf_init();
-
- DBG(" <- iSeries_init_early()\n");
-}
-
-struct mschunks_map mschunks_map = {
- /* XXX We don't use these, but Piranha might need them. */
- .chunk_size = MSCHUNKS_CHUNK_SIZE,
- .chunk_shift = MSCHUNKS_CHUNK_SHIFT,
- .chunk_mask = MSCHUNKS_OFFSET_MASK,
-};
-EXPORT_SYMBOL(mschunks_map);
-
-static void mschunks_alloc(unsigned long num_chunks)
-{
- klimit = _ALIGN(klimit, sizeof(u32));
- mschunks_map.mapping = (u32 *)klimit;
- klimit += num_chunks * sizeof(u32);
- mschunks_map.num_chunks = num_chunks;
-}
-
-/*
- * The iSeries may have very large memories ( > 128 GB ) and a partition
- * may get memory in "chunks" that may be anywhere in the 2**52 real
- * address space. The chunks are 256K in size. To map this to the
- * memory model Linux expects, the AS/400 specific code builds a
- * translation table to translate what Linux thinks are "physical"
- * addresses to the actual real addresses. This allows us to make
- * it appear to Linux that we have contiguous memory starting at
- * physical address zero while in fact this could be far from the truth.
- * To avoid confusion, I'll let the words physical and/or real address
- * apply to the Linux addresses while I'll use "absolute address" to
- * refer to the actual hardware real address.
- *
- * build_iSeries_Memory_Map gets information from the Hypervisor and
- * looks at the Main Store VPD to determine the absolute addresses
- * of the memory that has been assigned to our partition and builds
- * a table used to translate Linux's physical addresses to these
- * absolute addresses. Absolute addresses are needed when
- * communicating with the hypervisor (e.g. to build HPT entries)
- *
- * Returns the physical memory size
- */
-
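As a hedged sketch of how the finished map is consulted (the real helper lives with the abs_addr definitions, not in this file): a Linux physical address is split into a chunk number and an offset within the chunk, the chunk number is translated through mschunks_map.mapping[], and the absolute chunk plus the offset gives the hardware address.

static inline unsigned long example_phys_to_abs(unsigned long pa)
{
	unsigned long chunk = pa >> MSCHUNKS_CHUNK_SHIFT;
	unsigned long offset = pa & MSCHUNKS_OFFSET_MASK;

	return ((unsigned long)mschunks_map.mapping[chunk] << MSCHUNKS_CHUNK_SHIFT)
		| offset;
}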
-static unsigned long __init build_iSeries_Memory_Map(void)
-{
- u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
- u32 nextPhysChunk;
- u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
- u32 totalChunks, moreChunks;
- u32 currChunk, thisChunk, absChunk;
- u32 currDword;
- u32 chunkBit;
- u64 map;
- struct MemoryBlock mb[32];
- unsigned long numMemoryBlocks, curBlock;
-
- /* Chunk size on iSeries is 256K bytes */
- totalChunks = (u32)HvLpConfig_getMsChunks();
- mschunks_alloc(totalChunks);
-
- /*
- * Get absolute address of our load area
- * and map it to physical address 0.
- * This guarantees that the load area ends up at physical 0;
- * otherwise, it might not be returned by PLIC as the first
- * chunks.
- */
-
- loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
- loadAreaSize = itLpNaca.xLoadAreaChunks;
-
- /*
- * Only add the pages already mapped here.
- * Otherwise we might add the HPT pages.
- * The rest of the pages of the load area
- * aren't in the HPT yet and can still
- * be assigned an arbitrary physical address.
- */
- if ((loadAreaSize * 64) > HvPagesToMap)
- loadAreaSize = HvPagesToMap / 64;
-
- loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;
-
- /*
- * TODO Do we need to do something if the HPT is in the 64MB load area?
- * This would be required if the itLpNaca.xLoadAreaChunks includes
- * the HPT size
- */
-
- printk("Mapping load area - physical addr = 0000000000000000\n"
- " absolute addr = %016lx\n",
- chunk_to_addr(loadAreaFirstChunk));
- printk("Load area size %dK\n", loadAreaSize * 256);
-
- for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
- mschunks_map.mapping[nextPhysChunk] =
- loadAreaFirstChunk + nextPhysChunk;
-
- /*
- * Get absolute address of our HPT and remember it so
- * we won't map it to any physical address
- */
- hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
- hptSizePages = (u32)HvCallHpt_getHptPages();
- hptSizeChunks = hptSizePages >>
- (MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
- hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
-
- printk("HPT absolute addr = %016lx, size = %dK\n",
- chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
-
- /*
- * Determine if absolute memory has any
- * holes so that we can interpret the
- * access map we get back from the hypervisor
- * correctly.
- */
- numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);
-
- /*
- * Process the main store access map from the hypervisor
- * to build up our physical -> absolute translation table
- */
- curBlock = 0;
- currChunk = 0;
- currDword = 0;
- moreChunks = totalChunks;
-
- while (moreChunks) {
- map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
- currDword);
- thisChunk = currChunk;
- while (map) {
- chunkBit = map >> 63;
- map <<= 1;
- if (chunkBit) {
- --moreChunks;
- while (thisChunk >= mb[curBlock].logicalEnd) {
- ++curBlock;
- if (curBlock >= numMemoryBlocks)
- panic("out of memory blocks");
- }
- if (thisChunk < mb[curBlock].logicalStart)
- panic("memory block error");
-
- absChunk = mb[curBlock].absStart +
- (thisChunk - mb[curBlock].logicalStart);
- if (((absChunk < hptFirstChunk) ||
- (absChunk > hptLastChunk)) &&
- ((absChunk < loadAreaFirstChunk) ||
- (absChunk > loadAreaLastChunk))) {
- mschunks_map.mapping[nextPhysChunk] =
- absChunk;
- ++nextPhysChunk;
- }
- }
- ++thisChunk;
- }
- ++currDword;
- currChunk += 64;
- }
-
- /*
- * main store size (in chunks) is
- * totalChunks - hptSizeChunks
- * which should be equal to
- * nextPhysChunk
- */
- return chunk_to_addr(nextPhysChunk);
-}
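One detail worth spelling out from the scan above: each 64-bit access-map dword covers 64 consecutive chunks, most significant bit first, so bit 63 of dword N corresponds to chunk N*64. The small illustration below uses the same shift-and-test idiom to count the chunks present in one dword.

static inline unsigned int example_chunks_in_dword(u64 map)
{
	unsigned int present = 0;

	while (map) {
		if (map >> 63)		/* MSB = lowest-numbered chunk of this dword */
			present++;
		map <<= 1;
	}
	return present;
}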
-
-/*
- * Pick an idle loop, set up the LP event queue and initialise PCI.
- */
-static void __init iSeries_setup_arch(void)
-{
- if (get_lppaca()->shared_proc) {
- ppc_md.idle_loop = iseries_shared_idle;
- printk(KERN_DEBUG "Using shared processor idle loop\n");
- } else {
- ppc_md.idle_loop = iseries_dedicated_idle;
- printk(KERN_DEBUG "Using dedicated idle loop\n");
- }
-
- /* Setup the Lp Event Queue */
- setup_hvlpevent_queue();
-
- printk("Max logical processors = %d\n",
- itVpdAreas.xSlicMaxLogicalProcs);
- printk("Max physical processors = %d\n",
- itVpdAreas.xSlicMaxPhysicalProcs);
-
- iSeries_pcibios_init();
-}
-
-static void iSeries_show_cpuinfo(struct seq_file *m)
-{
- seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
-}
-
-static void __init iSeries_progress(char * st, unsigned short code)
-{
- printk("Progress: [%04x] - %s\n", (unsigned)code, st);
- mf_display_progress(code);
-}
-
-static void __init iSeries_fixup_klimit(void)
-{
- /*
- * Change klimit to take into account any ram disk
- * that may be included
- */
- if (naca.xRamDisk)
- klimit = KERNELBASE + (u64)naca.xRamDisk +
- (naca.xRamDiskSize * HW_PAGE_SIZE);
-}
-
-static int __init iSeries_src_init(void)
-{
- /* clear the progress line */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- ppc_md.progress(" ", 0xffff);
- return 0;
-}
-
-late_initcall(iSeries_src_init);
-
-static inline void process_iSeries_events(void)
-{
- asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
-}
-
-static void yield_shared_processor(void)
-{
- unsigned long tb;
-
- HvCall_setEnabledInterrupts(HvCall_MaskIPI |
- HvCall_MaskLpEvent |
- HvCall_MaskLpProd |
- HvCall_MaskTimeout);
-
- tb = get_tb();
- /* Compute future tb value when yield should expire */
- HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
-
- /*
- * The decrementer stops during the yield. Force a fake decrementer
- * here and let the timer_interrupt code sort out the actual time.
- */
- get_lppaca()->int_dword.fields.decr_int = 1;
- ppc64_runlatch_on();
- process_iSeries_events();
-}
-
-static void iseries_shared_idle(void)
-{
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- while (!need_resched() && !hvlpevent_is_pending()) {
- local_irq_disable();
- ppc64_runlatch_off();
-
- /* Recheck with irqs off */
- if (!need_resched() && !hvlpevent_is_pending())
- yield_shared_processor();
-
- HMT_medium();
- local_irq_enable();
- }
-
- ppc64_runlatch_on();
- rcu_idle_exit();
- tick_nohz_idle_exit();
-
- if (hvlpevent_is_pending())
- process_iSeries_events();
-
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-}
-
-static void iseries_dedicated_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
- if (!need_resched()) {
- while (!need_resched()) {
- ppc64_runlatch_off();
- HMT_low();
-
- if (hvlpevent_is_pending()) {
- HMT_medium();
- ppc64_runlatch_on();
- process_iSeries_events();
- }
- }
-
- HMT_medium();
- }
-
- ppc64_runlatch_on();
- rcu_idle_exit();
- tick_nohz_idle_exit();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-}
-
-static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
- unsigned long flags, void *caller)
-{
- return (void __iomem *)address;
-}
-
-static void iseries_iounmap(volatile void __iomem *token)
-{
-}
-
-static int __init iseries_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(root, "IBM,iSeries"))
- return 0;
-
- hpte_init_iSeries();
- /* iSeries does not support 16M pages */
- cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE;
-
- return 1;
-}
-
-#ifdef CONFIG_KEXEC
-static int iseries_kexec_prepare(struct kimage *image)
-{
- return -ENOSYS;
-}
-#endif
-
-define_machine(iseries) {
- .name = "iSeries",
- .setup_arch = iSeries_setup_arch,
- .show_cpuinfo = iSeries_show_cpuinfo,
- .init_IRQ = iSeries_init_IRQ,
- .get_irq = iSeries_get_irq,
- .init_early = iSeries_init_early,
- .pcibios_fixup = iSeries_pci_final_fixup,
- .pcibios_fixup_resources= iSeries_pcibios_fixup_resources,
- .restart = mf_reboot,
- .power_off = mf_power_off,
- .halt = mf_power_off,
- .get_boot_time = iSeries_get_boot_time,
- .set_rtc_time = iSeries_set_rtc_time,
- .get_rtc_time = iSeries_get_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = iSeries_progress,
- .probe = iseries_probe,
- .ioremap = iseries_ioremap,
- .iounmap = iseries_iounmap,
-#ifdef CONFIG_KEXEC
- .machine_kexec_prepare = iseries_kexec_prepare,
-#endif
- /* XXX Implement enable_pmcs for iSeries */
-};
-
-void * __init iSeries_early_setup(void)
-{
- unsigned long phys_mem_size;
-
- /* Identify CPU type. This is done again by the common code later
- * on but calling this function multiple times is fine.
- */
- identify_cpu(0, mfspr(SPRN_PVR));
- initialise_paca(&boot_paca, 0);
-
- powerpc_firmware_features |= FW_FEATURE_ISERIES;
- powerpc_firmware_features |= FW_FEATURE_LPAR;
-
-#ifdef CONFIG_SMP
- /* On iSeries we know we can never have more than 64 cpus */
- nr_cpu_ids = min(nr_cpu_ids, 64);
-#endif
-
- iSeries_fixup_klimit();
-
- /*
- * Initialize the table which translate Linux physical addresses to
- * AS/400 absolute addresses
- */
- phys_mem_size = build_iSeries_Memory_Map();
-
- iSeries_get_cmdline();
-
- return (void *) __pa(build_flat_dt(phys_mem_size));
-}
-
-static void hvputc(char c)
-{
- if (c == '\n')
- hvputc('\r');
-
- HvCall_writeLogBuffer(&c, 1);
-}
-
-void __init udbg_init_iseries(void)
-{
- udbg_putc = hvputc;
-}
diff --git a/arch/powerpc/platforms/iseries/setup.h b/arch/powerpc/platforms/iseries/setup.h
deleted file mode 100644
index 729754bbb018..000000000000
--- a/arch/powerpc/platforms/iseries/setup.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
- * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
- *
- * Description:
- * Architecture- / platform-specific boot-time initialization code for
- * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
- * code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek
- * <dan@netx4.com>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef __ISERIES_SETUP_H__
-#define __ISERIES_SETUP_H__
-
-extern void *iSeries_early_setup(void);
-extern unsigned long iSeries_get_boot_time(void);
-extern int iSeries_set_rtc_time(struct rtc_time *tm);
-extern void iSeries_get_rtc_time(struct rtc_time *tm);
-
-extern void *build_flat_dt(unsigned long phys_mem_size);
-
-#endif /* __ISERIES_SETUP_H__ */
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
deleted file mode 100644
index 02df49fb59f0..000000000000
--- a/arch/powerpc/platforms/iseries/smp.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * SMP support for iSeries machines.
- *
- * Dave Engebretsen, Peter Bergner, and
- * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
- *
- * Plus various changes from other IBM teams...
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/cache.h>
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/cpu.h>
-
-#include <asm/ptrace.h>
-#include <linux/atomic.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/paca.h>
-#include <asm/iseries/hv_call.h>
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/cputable.h>
-#include <asm/system.h>
-
-static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
-{
- HvCall_sendIPI(&(paca[cpu]));
-}
-
-static int smp_iSeries_probe(void)
-{
- return cpumask_weight(cpu_possible_mask);
-}
-
-static int smp_iSeries_kick_cpu(int nr)
-{
- BUG_ON((nr < 0) || (nr >= NR_CPUS));
-
- /* Verify that our partition has a processor nr */
- if (lppaca_of(nr).dyn_proc_status >= 2)
- return -ENOENT;
-
- /* The processor is currently spinning, waiting
- * for the cpu_start field to become non-zero.
- * After we set cpu_start, the processor will
- * continue on to secondary_start in iSeries_head.S
- */
- paca[nr].cpu_start = 1;
-
- return 0;
-}
-
-static void __devinit smp_iSeries_setup_cpu(int nr)
-{
-}
-
-static struct smp_ops_t iSeries_smp_ops = {
- .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
- .cause_ipi = smp_iSeries_cause_ipi,
- .probe = smp_iSeries_probe,
- .kick_cpu = smp_iSeries_kick_cpu,
- .setup_cpu = smp_iSeries_setup_cpu,
-};
-
-/* This is called very early. */
-void __init smp_init_iSeries(void)
-{
- smp_ops = &iSeries_smp_ops;
-}
diff --git a/arch/powerpc/platforms/iseries/spcomm_area.h b/arch/powerpc/platforms/iseries/spcomm_area.h
deleted file mode 100644
index 598b7c14573a..000000000000
--- a/arch/powerpc/platforms/iseries/spcomm_area.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _ISERIES_SPCOMM_AREA_H
-#define _ISERIES_SPCOMM_AREA_H
-
-
-struct SpCommArea {
- u32 xDesc; // Descriptor (only in new formats) 000-003
- u8 xFormat; // Format (only in new formats) 004-004
- u8 xRsvd1[11]; // Reserved 005-00F
- u64 xRawTbAtIplStart; // Raw HW TB value when IPL is started 010-017
- u64 xRawTodAtIplStart; // Raw HW TOD value when IPL is started 018-01F
- u64 xBcdTimeAtIplStart; // BCD time when IPL is started 020-027
- u64 xBcdTimeAtOsStart; // BCD time when OS passed control 028-02F
- u8 xRsvd2[80]; // Reserved 030-07F
-};
-
-#endif /* _ISERIES_SPCOMM_AREA_H */
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
deleted file mode 100644
index 04be62d368a6..000000000000
--- a/arch/powerpc/platforms/iseries/vio.c
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
- * Legacy iSeries specific vio initialisation
- * that needs to be built in (not a module).
- *
- * © Copyright 2007 IBM Corporation
- * Author: Stephen Rothwell
- * Some parts collected from various other files
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/of.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/completion.h>
-#include <linux/proc_fs.h>
-#include <linux/export.h>
-
-#include <asm/firmware.h>
-#include <asm/vio.h>
-#include <asm/iseries/vio.h>
-#include <asm/iseries/iommu.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-
-#define FIRST_VTY 0
-#define NUM_VTYS 1
-#define FIRST_VSCSI (FIRST_VTY + NUM_VTYS)
-#define NUM_VSCSIS 1
-#define FIRST_VLAN (FIRST_VSCSI + NUM_VSCSIS)
-#define NUM_VLANS HVMAXARCHITECTEDVIRTUALLANS
-#define FIRST_VIODASD (FIRST_VLAN + NUM_VLANS)
-#define NUM_VIODASDS HVMAXARCHITECTEDVIRTUALDISKS
-#define FIRST_VIOCD (FIRST_VIODASD + NUM_VIODASDS)
-#define NUM_VIOCDS HVMAXARCHITECTEDVIRTUALCDROMS
-#define FIRST_VIOTAPE (FIRST_VIOCD + NUM_VIOCDS)
-#define NUM_VIOTAPES HVMAXARCHITECTEDVIRTUALTAPES
-
-struct vio_waitevent {
- struct completion com;
- int rc;
- u16 sub_result;
-};
-
-struct vio_resource {
- char rsrcname[10];
- char type[4];
- char model[3];
-};
-
-static struct property *new_property(const char *name, int length,
- const void *value)
-{
- struct property *np = kzalloc(sizeof(*np) + strlen(name) + 1 + length,
- GFP_KERNEL);
-
- if (!np)
- return NULL;
- np->name = (char *)(np + 1);
- np->value = np->name + strlen(name) + 1;
- strcpy(np->name, name);
- memcpy(np->value, value, length);
- np->length = length;
- return np;
-}
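new_property() packs everything into a single allocation: the struct, then the NUL-terminated name, then the value bytes, so one kfree() in free_property() releases all three. A hypothetical usage sketch (the "reg" name and the value are made up for illustration):

static void example_new_property_usage(void)
{
	u32 reg = 0x1000;	/* hypothetical value */
	struct property *p = new_property("reg", sizeof(reg), &reg);

	/*
	 * Layout of the single kzalloc() block:
	 *   [ struct property ][ "reg\0" ][ 4 value bytes ]
	 *                        ^p->name   ^p->value
	 */
	if (p)
		free_property(p);
}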
-
-static void free_property(struct property *np)
-{
- kfree(np);
-}
-
-static struct device_node *new_node(const char *path,
- struct device_node *parent)
-{
- struct device_node *np = kzalloc(sizeof(*np), GFP_KERNEL);
-
- if (!np)
- return NULL;
- np->full_name = kstrdup(path, GFP_KERNEL);
- if (!np->full_name) {
- kfree(np);
- return NULL;
- }
- of_node_set_flag(np, OF_DYNAMIC);
- kref_init(&np->kref);
- np->parent = of_node_get(parent);
- return np;
-}
-
-static void free_node(struct device_node *np)
-{
- struct property *next;
- struct property *prop;
-
- next = np->properties;
- while (next) {
- prop = next;
- next = prop->next;
- free_property(prop);
- }
- of_node_put(np->parent);
- kfree(np->full_name);
- kfree(np);
-}
-
-static int add_string_property(struct device_node *np, const char *name,
- const char *value)
-{
- struct property *nprop = new_property(name, strlen(value) + 1, value);
-
- if (!nprop)
- return 0;
- prom_add_property(np, nprop);
- return 1;
-}
-
-static int add_raw_property(struct device_node *np, const char *name,
- int length, const void *value)
-{
- struct property *nprop = new_property(name, length, value);
-
- if (!nprop)
- return 0;
- prom_add_property(np, nprop);
- return 1;
-}
-
-static struct device_node *do_device_node(struct device_node *parent,
- const char *name, u32 reg, u32 unit, const char *type,
- const char *compat, struct vio_resource *res)
-{
- struct device_node *np;
- char path[32];
-
- snprintf(path, sizeof(path), "/vdevice/%s@%08x", name, reg);
- np = new_node(path, parent);
- if (!np)
- return NULL;
- if (!add_string_property(np, "name", name) ||
- !add_string_property(np, "device_type", type) ||
- !add_string_property(np, "compatible", compat) ||
- !add_raw_property(np, "reg", sizeof(reg), &reg) ||
- !add_raw_property(np, "linux,unit_address",
- sizeof(unit), &unit)) {
- goto node_free;
- }
- if (res) {
- if (!add_raw_property(np, "linux,vio_rsrcname",
- sizeof(res->rsrcname), res->rsrcname) ||
- !add_raw_property(np, "linux,vio_type",
- sizeof(res->type), res->type) ||
- !add_raw_property(np, "linux,vio_model",
- sizeof(res->model), res->model))
- goto node_free;
- }
- np->name = of_get_property(np, "name", NULL);
- np->type = of_get_property(np, "device_type", NULL);
- of_attach_node(np);
-#ifdef CONFIG_PROC_DEVICETREE
- if (parent->pde) {
- struct proc_dir_entry *ent;
-
- ent = proc_mkdir(strrchr(np->full_name, '/') + 1, parent->pde);
- if (ent)
- proc_device_tree_add_node(np, ent);
- }
-#endif
- return np;
-
- node_free:
- free_node(np);
- return NULL;
-}
-
-/*
- * This is here so that we can dynamically add viodasd
- * devices without exposing all the above infrastructure.
- */
-struct vio_dev *vio_create_viodasd(u32 unit)
-{
- struct device_node *vio_root;
- struct device_node *np;
- struct vio_dev *vdev = NULL;
-
- vio_root = of_find_node_by_path("/vdevice");
- if (!vio_root)
- return NULL;
- np = do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
- "block", "IBM,iSeries-viodasd", NULL);
- of_node_put(vio_root);
- if (np) {
- vdev = vio_register_device_node(np);
- if (!vdev)
- free_node(np);
- }
- return vdev;
-}
-EXPORT_SYMBOL_GPL(vio_create_viodasd);
-
-static void __init handle_block_event(struct HvLpEvent *event)
-{
- struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
- struct vio_waitevent *pwe;
-
- if (event == NULL)
- /* Notification that a partition went away! */
- return;
- /* First, we should NEVER get an int here...only acks */
- if (hvlpevent_is_int(event)) {
- printk(KERN_WARNING "handle_viod_request: "
- "Yikes! got an int in viodasd event handler!\n");
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- return;
- }
-
- switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
- case vioblockopen:
- /*
- * Handle a response to an open request. We get all the
- * disk information in the response, so update it. The
- * correlation token contains a pointer to a waitevent
- * structure that has a completion in it. Update the
- * return code in the waitevent structure and post the
- * completion to wake up the guy who sent the request.
- */
- pwe = (struct vio_waitevent *)event->xCorrelationToken;
- pwe->rc = event->xRc;
- pwe->sub_result = bevent->sub_result;
- complete(&pwe->com);
- break;
- case vioblockclose:
- break;
- default:
- printk(KERN_WARNING "handle_viod_request: unexpected subtype!");
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- }
-}
-
-static void __init probe_disk(struct device_node *vio_root, u32 unit)
-{
- HvLpEvent_Rc hvrc;
- struct vio_waitevent we;
- u16 flags = 0;
-
-retry:
- init_completion(&we.com);
-
- /* Send the open event to OS/400 */
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_blockio | vioblockopen,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)&we, VIOVERSION << 16,
- ((u64)unit << 48) | ((u64)flags << 32),
- 0, 0, 0);
- if (hvrc != 0) {
- printk(KERN_WARNING "probe_disk: bad rc on HV open %d\n",
- (int)hvrc);
- return;
- }
-
- wait_for_completion(&we.com);
-
- if (we.rc != 0) {
- if (flags != 0)
- return;
- /* try again with read only flag set */
- flags = vioblockflags_ro;
- goto retry;
- }
-
- /* Send the close event to OS/400. We DON'T expect a response */
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_blockio | vioblockclose,
- HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- 0, VIOVERSION << 16,
- ((u64)unit << 48) | ((u64)flags << 32),
- 0, 0, 0);
- if (hvrc != 0) {
- printk(KERN_WARNING "probe_disk: "
- "bad rc sending event to OS/400 %d\n", (int)hvrc);
- return;
- }
-
- do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
- "block", "IBM,iSeries-viodasd", NULL);
-}
-
-static void __init get_viodasd_info(struct device_node *vio_root)
-{
- int rc;
- u32 unit;
-
- rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2);
- if (rc) {
- printk(KERN_WARNING "get_viodasd_info: "
- "error opening path to host partition %d\n",
- viopath_hostLp);
- return;
- }
-
- /* Initialize our request handler */
- vio_setHandler(viomajorsubtype_blockio, handle_block_event);
-
- for (unit = 0; unit < HVMAXARCHITECTEDVIRTUALDISKS; unit++)
- probe_disk(vio_root, unit);
-
- vio_clearHandler(viomajorsubtype_blockio);
- viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
-}
-
-static void __init handle_cd_event(struct HvLpEvent *event)
-{
- struct viocdlpevent *bevent;
- struct vio_waitevent *pwe;
-
- if (!event)
- /* Notification that a partition went away! */
- return;
-
- /* First, we should NEVER get an int here...only acks */
- if (hvlpevent_is_int(event)) {
- printk(KERN_WARNING "handle_cd_event: got an unexpected int\n");
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- return;
- }
-
- bevent = (struct viocdlpevent *)event;
-
- switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
- case viocdgetinfo:
- pwe = (struct vio_waitevent *)event->xCorrelationToken;
- pwe->rc = event->xRc;
- pwe->sub_result = bevent->sub_result;
- complete(&pwe->com);
- break;
-
- default:
- printk(KERN_WARNING "handle_cd_event: "
- "message with unexpected subtype %0x04X!\n",
- event->xSubtype & VIOMINOR_SUBTYPE_MASK);
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- }
-}
-
-static void __init get_viocd_info(struct device_node *vio_root)
-{
- HvLpEvent_Rc hvrc;
- u32 unit;
- struct vio_waitevent we;
- struct vio_resource *unitinfo;
- dma_addr_t unitinfo_dmaaddr;
- int ret;
-
- ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 2);
- if (ret) {
- printk(KERN_WARNING
- "get_viocd_info: error opening path to host partition %d\n",
- viopath_hostLp);
- return;
- }
-
- /* Initialize our request handler */
- vio_setHandler(viomajorsubtype_cdio, handle_cd_event);
-
- unitinfo = iseries_hv_alloc(
- sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
- &unitinfo_dmaaddr, GFP_ATOMIC);
- if (!unitinfo) {
- printk(KERN_WARNING
- "get_viocd_info: error allocating unitinfo\n");
- goto clear_handler;
- }
-
- memset(unitinfo, 0, sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS);
-
- init_completion(&we.com);
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_cdio | viocdgetinfo,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)&we, VIOVERSION << 16, unitinfo_dmaaddr, 0,
- sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(KERN_WARNING
- "get_viocd_info: cdrom error sending event. rc %d\n",
- (int)hvrc);
- goto hv_free;
- }
-
- wait_for_completion(&we.com);
-
- if (we.rc) {
- printk(KERN_WARNING "get_viocd_info: bad rc %d:0x%04X\n",
- we.rc, we.sub_result);
- goto hv_free;
- }
-
- for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALCDROMS) &&
- unitinfo[unit].rsrcname[0]; unit++) {
- if (!do_device_node(vio_root, "viocd", FIRST_VIOCD + unit, unit,
- "block", "IBM,iSeries-viocd", &unitinfo[unit]))
- break;
- }
-
- hv_free:
- iseries_hv_free(sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
- unitinfo, unitinfo_dmaaddr);
- clear_handler:
- vio_clearHandler(viomajorsubtype_cdio);
- viopath_close(viopath_hostLp, viomajorsubtype_cdio, 2);
-}
-
-/* Handle interrupt events for tape */
-static void __init handle_tape_event(struct HvLpEvent *event)
-{
- struct vio_waitevent *we;
- struct viotapelpevent *tevent = (struct viotapelpevent *)event;
-
- if (event == NULL)
- /* Notification that a partition went away! */
- return;
-
- we = (struct vio_waitevent *)event->xCorrelationToken;
- switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
- case viotapegetinfo:
- we->rc = tevent->sub_type_result;
- complete(&we->com);
- break;
- default:
- printk(KERN_WARNING "handle_tape_event: weird ack\n");
- }
-}
-
-static void __init get_viotape_info(struct device_node *vio_root)
-{
- HvLpEvent_Rc hvrc;
- u32 unit;
- struct vio_resource *unitinfo;
- dma_addr_t unitinfo_dmaaddr;
- size_t len = sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALTAPES;
- struct vio_waitevent we;
- int ret;
-
- init_completion(&we.com);
-
- ret = viopath_open(viopath_hostLp, viomajorsubtype_tape, 2);
- if (ret) {
- printk(KERN_WARNING "get_viotape_info: "
- "error on viopath_open to hostlp %d\n", ret);
- return;
- }
-
- vio_setHandler(viomajorsubtype_tape, handle_tape_event);
-
- unitinfo = iseries_hv_alloc(len, &unitinfo_dmaaddr, GFP_ATOMIC);
- if (!unitinfo)
- goto clear_handler;
-
- memset(unitinfo, 0, len);
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_tape | viotapegetinfo,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)&we, VIOVERSION << 16,
- unitinfo_dmaaddr, len, 0, 0);
- if (hvrc != HvLpEvent_Rc_Good) {
- printk(KERN_WARNING "get_viotape_info: hv error on op %d\n",
- (int)hvrc);
- goto hv_free;
- }
-
- wait_for_completion(&we.com);
-
- for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALTAPES) &&
- unitinfo[unit].rsrcname[0]; unit++) {
- if (!do_device_node(vio_root, "viotape", FIRST_VIOTAPE + unit,
- unit, "byte", "IBM,iSeries-viotape",
- &unitinfo[unit]))
- break;
- }
-
- hv_free:
- iseries_hv_free(len, unitinfo, unitinfo_dmaaddr);
- clear_handler:
- vio_clearHandler(viomajorsubtype_tape);
- viopath_close(viopath_hostLp, viomajorsubtype_tape, 2);
-}
-
-static int __init iseries_vio_init(void)
-{
- struct device_node *vio_root;
- int ret = -ENODEV;
-
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- goto out;
-
- iommu_vio_init();
-
- vio_root = of_find_node_by_path("/vdevice");
- if (!vio_root)
- goto out;
-
- if (viopath_hostLp == HvLpIndexInvalid) {
- vio_set_hostlp();
- /* If we don't have a host, bail out */
- if (viopath_hostLp == HvLpIndexInvalid)
- goto put_node;
- }
-
- get_viodasd_info(vio_root);
- get_viocd_info(vio_root);
- get_viotape_info(vio_root);
-
- ret = 0;
-
- put_node:
- of_node_put(vio_root);
- out:
- return ret;
-}
-arch_initcall(iseries_vio_init);
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
deleted file mode 100644
index 40dad0840eb3..000000000000
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ /dev/null
@@ -1,677 +0,0 @@
-/* -*- linux-c -*-
- *
- * iSeries Virtual I/O Message Path code
- *
- * Authors: Dave Boutcher <boutcher@us.ibm.com>
- * Ryan Arnold <ryanarn@us.ibm.com>
- * Colin Devilbiss <devilbis@us.ibm.com>
- *
- * (C) Copyright 2000-2005 IBM Corporation
- *
- * This code is used by the iSeries virtual disk, cd,
- * tape, and console to communicate with OS/400 in another
- * partition.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/proc_fs.h>
-#include <linux/dma-mapping.h>
-#include <linux/wait.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/prom.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/mf.h>
-#include <asm/iseries/vio.h>
-
-/* Status of the path to each other partition in the system.
- * This is overkill, since we will only ever establish connections
- * to our hosting partition and the primary partition on the system.
- * But this allows for other support in the future.
- */
-static struct viopathStatus {
- int isOpen; /* Did we open the path? */
- int isActive; /* Do we have a mon msg outstanding */
- int users[VIO_MAX_SUBTYPES];
- HvLpInstanceId mSourceInst;
- HvLpInstanceId mTargetInst;
- int numberAllocated;
-} viopathStatus[HVMAXARCHITECTEDLPS];
-
-static DEFINE_SPINLOCK(statuslock);
-
-/*
- * For each kind of event we allocate a buffer that is
- * guaranteed not to cross a page boundary
- */
-static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
- __attribute__((__aligned__(4096)));
-static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
-static int event_buffer_initialised;
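The 4096-byte alignment matters because each subtype is given its own 256-byte slice of event_buffer; since 256 divides the page size, a slice at offset i*256 in a page-aligned buffer can never straddle a page boundary. A hedged sketch of how such a slice would be addressed (the actual helper appears later in this file and is not shown here):

static inline void *example_event_buffer_slot(unsigned int subtype_index)
{
	/* subtype_index is assumed to be < VIO_MAX_SUBTYPES */
	return event_buffer + subtype_index * 256;
}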
-
-static void handleMonitorEvent(struct HvLpEvent *event);
-
-/*
- * We use this structure to handle asynchronous responses. The caller
- * blocks on the completion and the handler completes it. However,
- * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
- */
-struct alloc_parms {
- struct completion done;
- int number;
- atomic_t wait_atomic;
- int used_wait_atomic;
-};
-
-/* Put a sequence number in each mon msg. The value is not
- * important. Start at something other than 0 just for
- * readability. Wrapping this is OK.
- */
-static u8 viomonseq = 22;
-
-/* Our hosting logical partition. We get this at startup
- * time, and different modules access this variable directly.
- */
-HvLpIndex viopath_hostLp = HvLpIndexInvalid;
-EXPORT_SYMBOL(viopath_hostLp);
-HvLpIndex viopath_ourLp = HvLpIndexInvalid;
-EXPORT_SYMBOL(viopath_ourLp);
-
-/* For each kind of incoming event we set a pointer to a
- * routine to call.
- */
-static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
-
-#define VIOPATH_KERN_WARN KERN_WARNING "viopath: "
-#define VIOPATH_KERN_INFO KERN_INFO "viopath: "
-
-static int proc_viopath_show(struct seq_file *m, void *v)
-{
- char *buf;
- u16 vlanMap;
- dma_addr_t handle;
- HvLpEvent_Rc hvrc;
- DECLARE_COMPLETION_ONSTACK(done);
- struct device_node *node;
- const char *sysid;
-
- buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- handle = iseries_hv_map(buf, HW_PAGE_SIZE, DMA_FROM_DEVICE);
-
- hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
- HvLpEvent_Type_VirtualIo,
- viomajorsubtype_config | vioconfigget,
- HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
- viopath_sourceinst(viopath_hostLp),
- viopath_targetinst(viopath_hostLp),
- (u64)(unsigned long)&done, VIOVERSION << 16,
- ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
-
- if (hvrc != HvLpEvent_Rc_Good)
- printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
-
- wait_for_completion(&done);
-
- vlanMap = HvLpConfig_getVirtualLanIndexMap();
-
- buf[HW_PAGE_SIZE-1] = '\0';
- seq_printf(m, "%s", buf);
-
- iseries_hv_unmap(handle, HW_PAGE_SIZE, DMA_FROM_DEVICE);
- kfree(buf);
-
- seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
-
- node = of_find_node_by_path("/");
- sysid = NULL;
- if (node != NULL)
- sysid = of_get_property(node, "system-id", NULL);
-
- if (sysid == NULL)
- seq_printf(m, "SRLNBR=<UNKNOWN>\n");
- else
- /* Skip "IBM," on front of serial number, see dt.c */
- seq_printf(m, "SRLNBR=%s\n", sysid + 4);
-
- of_node_put(node);
-
- return 0;
-}
-
-static int proc_viopath_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_viopath_show, NULL);
-}
-
-static const struct file_operations proc_viopath_operations = {
- .open = proc_viopath_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init vio_proc_init(void)
-{
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
-
- proc_create("iSeries/config", 0, NULL, &proc_viopath_operations);
- return 0;
-}
-__initcall(vio_proc_init);
-
-/* See if a given LP is active. Allow for invalid lps to be passed in
- * and just return invalid
- */
-int viopath_isactive(HvLpIndex lp)
-{
- if (lp == HvLpIndexInvalid)
- return 0;
- if (lp < HVMAXARCHITECTEDLPS)
- return viopathStatus[lp].isActive;
- else
- return 0;
-}
-EXPORT_SYMBOL(viopath_isactive);
-
-/*
- * We cache the source and target instance ids for each
- * partition.
- */
-HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
-{
- return viopathStatus[lp].mSourceInst;
-}
-EXPORT_SYMBOL(viopath_sourceinst);
-
-HvLpInstanceId viopath_targetinst(HvLpIndex lp)
-{
- return viopathStatus[lp].mTargetInst;
-}
-EXPORT_SYMBOL(viopath_targetinst);
-
-/*
- * Send a monitor message. This is a message with the acknowledge
- * bit on that the other side will NOT explicitly acknowledge. When
- * the other side goes down, the hypervisor will acknowledge any
- * outstanding messages, so we will know when the other side dies.
- */
-static void sendMonMsg(HvLpIndex remoteLp)
-{
- HvLpEvent_Rc hvrc;
-
- viopathStatus[remoteLp].mSourceInst =
- HvCallEvent_getSourceLpInstanceId(remoteLp,
- HvLpEvent_Type_VirtualIo);
- viopathStatus[remoteLp].mTargetInst =
- HvCallEvent_getTargetLpInstanceId(remoteLp,
- HvLpEvent_Type_VirtualIo);
-
- /*
-	 * Deliberately ignore the return code here. If we call this
- * more than once, we don't care.
- */
- vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);
-
- hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
- viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
- HvLpEvent_AckType_DeferredAck,
- viopathStatus[remoteLp].mSourceInst,
- viopathStatus[remoteLp].mTargetInst,
- viomonseq++, 0, 0, 0, 0, 0);
-
- if (hvrc == HvLpEvent_Rc_Good)
- viopathStatus[remoteLp].isActive = 1;
- else {
- printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
- remoteLp);
- viopathStatus[remoteLp].isActive = 0;
- }
-}
-
-static void handleMonitorEvent(struct HvLpEvent *event)
-{
- HvLpIndex remoteLp;
- int i;
-
- /*
- * This handler is _also_ called as part of the loop
- * at the end of this routine, so it must be able to
- * ignore NULL events...
- */
- if (!event)
- return;
-
- /*
- * First see if this is just a normal monitor message from the
- * other partition
- */
- if (hvlpevent_is_int(event)) {
- remoteLp = event->xSourceLp;
- if (!viopathStatus[remoteLp].isActive)
- sendMonMsg(remoteLp);
- return;
- }
-
- /*
- * This path is for an acknowledgement; the other partition
- * died
- */
- remoteLp = event->xTargetLp;
- if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
- (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
- printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
- return;
- }
-
- printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
-
- viopathStatus[remoteLp].isActive = 0;
-
- /*
- * For each active handler, pass them a NULL
- * message to indicate that the other partition
- * died
- */
- for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
- if (vio_handler[i] != NULL)
- (*vio_handler[i])(NULL);
- }
-}
-
-int vio_setHandler(int subtype, vio_event_handler_t *beh)
-{
- subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
- if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
- return -EINVAL;
- if (vio_handler[subtype] != NULL)
- return -EBUSY;
- vio_handler[subtype] = beh;
- return 0;
-}
-EXPORT_SYMBOL(vio_setHandler);
-
-int vio_clearHandler(int subtype)
-{
- subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
- if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
- return -EINVAL;
- if (vio_handler[subtype] == NULL)
- return -EAGAIN;
- vio_handler[subtype] = NULL;
- return 0;
-}
-EXPORT_SYMBOL(vio_clearHandler);
-
-static void handleConfig(struct HvLpEvent *event)
-{
- if (!event)
- return;
- if (hvlpevent_is_int(event)) {
- printk(VIOPATH_KERN_WARN
- "unexpected config request from partition %d",
- event->xSourceLp);
-
- if (hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- return;
- }
-
- complete((struct completion *)event->xCorrelationToken);
-}
-
-/*
- * Initialization of the hosting partition
- */
-void vio_set_hostlp(void)
-{
- /*
- * If this has already been set then we DON'T want to either change
- * it or re-register the proc file system
- */
- if (viopath_hostLp != HvLpIndexInvalid)
- return;
-
- /*
- * Figure out our hosting partition. This isn't allowed to change
- * while we're active
- */
- viopath_ourLp = HvLpConfig_getLpIndex();
- viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);
-
- if (viopath_hostLp != HvLpIndexInvalid)
- vio_setHandler(viomajorsubtype_config, handleConfig);
-}
-EXPORT_SYMBOL(vio_set_hostlp);
-
-static void vio_handleEvent(struct HvLpEvent *event)
-{
- HvLpIndex remoteLp;
- int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
- >> VIOMAJOR_SUBTYPE_SHIFT;
-
- if (hvlpevent_is_int(event)) {
- remoteLp = event->xSourceLp;
- /*
- * The isActive is checked because if the hosting partition
- * went down and came back up it would not be active but it
- * would have different source and target instances, in which
- * case we'd want to reset them. This case really protects
- * against an unauthorized active partition sending interrupts
- * or acks to this linux partition.
- */
- if (viopathStatus[remoteLp].isActive
- && (event->xSourceInstanceId !=
- viopathStatus[remoteLp].mTargetInst)) {
- printk(VIOPATH_KERN_WARN
- "message from invalid partition. "
- "int msg rcvd, source inst (%d) doesn't match (%d)\n",
- viopathStatus[remoteLp].mTargetInst,
- event->xSourceInstanceId);
- return;
- }
-
- if (viopathStatus[remoteLp].isActive
- && (event->xTargetInstanceId !=
- viopathStatus[remoteLp].mSourceInst)) {
- printk(VIOPATH_KERN_WARN
- "message from invalid partition. "
- "int msg rcvd, target inst (%d) doesn't match (%d)\n",
- viopathStatus[remoteLp].mSourceInst,
- event->xTargetInstanceId);
- return;
- }
- } else {
- remoteLp = event->xTargetLp;
- if (event->xSourceInstanceId !=
- viopathStatus[remoteLp].mSourceInst) {
- printk(VIOPATH_KERN_WARN
- "message from invalid partition. "
- "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
- viopathStatus[remoteLp].mSourceInst,
- event->xSourceInstanceId);
- return;
- }
-
- if (event->xTargetInstanceId !=
- viopathStatus[remoteLp].mTargetInst) {
- printk(VIOPATH_KERN_WARN
- "message from invalid partition. "
-			       "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
- viopathStatus[remoteLp].mTargetInst,
- event->xTargetInstanceId);
- return;
- }
- }
-
- if (vio_handler[subtype] == NULL) {
- printk(VIOPATH_KERN_WARN
- "unexpected virtual io event subtype %d from partition %d\n",
- event->xSubtype, remoteLp);
- /* No handler. Ack if necessary */
- if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
- event->xRc = HvLpEvent_Rc_InvalidSubtype;
- HvCallEvent_ackLpEvent(event);
- }
- return;
- }
-
- /* This innocuous little line is where all the real work happens */
- (*vio_handler[subtype])(event);
-}
-
-static void viopath_donealloc(void *parm, int number)
-{
- struct alloc_parms *parmsp = parm;
-
- parmsp->number = number;
- if (parmsp->used_wait_atomic)
- atomic_set(&parmsp->wait_atomic, 0);
- else
- complete(&parmsp->done);
-}
-
-static int allocateEvents(HvLpIndex remoteLp, int numEvents)
-{
- struct alloc_parms parms;
-
- if (system_state != SYSTEM_RUNNING) {
- parms.used_wait_atomic = 1;
- atomic_set(&parms.wait_atomic, 1);
- } else {
- parms.used_wait_atomic = 0;
- init_completion(&parms.done);
- }
- mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250, /* It would be nice to put a real number here! */
- numEvents, &viopath_donealloc, &parms);
- if (system_state != SYSTEM_RUNNING) {
- while (atomic_read(&parms.wait_atomic))
- mb();
- } else
- wait_for_completion(&parms.done);
- return parms.number;
-}
-
-int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
-{
- int i;
- unsigned long flags;
- int tempNumAllocated;
-
- if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
- return -EINVAL;
-
- subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
- if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
- return -EINVAL;
-
- spin_lock_irqsave(&statuslock, flags);
-
- if (!event_buffer_initialised) {
- for (i = 0; i < VIO_MAX_SUBTYPES; i++)
- atomic_set(&event_buffer_available[i], 1);
- event_buffer_initialised = 1;
- }
-
- viopathStatus[remoteLp].users[subtype]++;
-
- if (!viopathStatus[remoteLp].isOpen) {
- viopathStatus[remoteLp].isOpen = 1;
- HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);
-
- /*
- * Don't hold the spinlock during an operation that
- * can sleep.
- */
- spin_unlock_irqrestore(&statuslock, flags);
- tempNumAllocated = allocateEvents(remoteLp, 1);
- spin_lock_irqsave(&statuslock, flags);
-
- viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
-
- if (viopathStatus[remoteLp].numberAllocated == 0) {
- HvCallEvent_closeLpEventPath(remoteLp,
- HvLpEvent_Type_VirtualIo);
-
- spin_unlock_irqrestore(&statuslock, flags);
- return -ENOMEM;
- }
-
- viopathStatus[remoteLp].mSourceInst =
- HvCallEvent_getSourceLpInstanceId(remoteLp,
- HvLpEvent_Type_VirtualIo);
- viopathStatus[remoteLp].mTargetInst =
- HvCallEvent_getTargetLpInstanceId(remoteLp,
- HvLpEvent_Type_VirtualIo);
- HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
- &vio_handleEvent);
- sendMonMsg(remoteLp);
- printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
- "setting sinst %d, tinst %d\n",
- remoteLp, viopathStatus[remoteLp].mSourceInst,
- viopathStatus[remoteLp].mTargetInst);
- }
-
- spin_unlock_irqrestore(&statuslock, flags);
- tempNumAllocated = allocateEvents(remoteLp, numReq);
- spin_lock_irqsave(&statuslock, flags);
- viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
- spin_unlock_irqrestore(&statuslock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(viopath_open);
-
-int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
-{
- unsigned long flags;
- int i;
- int numOpen;
- struct alloc_parms parms;
-
- if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
- return -EINVAL;
-
- subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
- if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
- return -EINVAL;
-
- spin_lock_irqsave(&statuslock, flags);
- /*
-	 * If viopath_close somehow gets called before a viopath_open,
-	 * the user count could drop to -1, which is a non-recoverable
-	 * state, so prevent that from happening.
- */
- if (viopathStatus[remoteLp].users[subtype] > 0)
- viopathStatus[remoteLp].users[subtype]--;
-
- spin_unlock_irqrestore(&statuslock, flags);
-
- parms.used_wait_atomic = 0;
- init_completion(&parms.done);
- mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
- numReq, &viopath_donealloc, &parms);
- wait_for_completion(&parms.done);
-
- spin_lock_irqsave(&statuslock, flags);
- for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
- numOpen += viopathStatus[remoteLp].users[i];
-
- if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
- printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
- remoteLp);
-
- HvCallEvent_closeLpEventPath(remoteLp,
- HvLpEvent_Type_VirtualIo);
- viopathStatus[remoteLp].isOpen = 0;
- viopathStatus[remoteLp].isActive = 0;
-
- for (i = 0; i < VIO_MAX_SUBTYPES; i++)
- atomic_set(&event_buffer_available[i], 0);
- event_buffer_initialised = 0;
- }
- spin_unlock_irqrestore(&statuslock, flags);
- return 0;
-}
-EXPORT_SYMBOL(viopath_close);
-
-void *vio_get_event_buffer(int subtype)
-{
- subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
- if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
- return NULL;
-
- if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
- return &event_buffer[subtype * 256];
- else
- return NULL;
-}
-EXPORT_SYMBOL(vio_get_event_buffer);
-
-void vio_free_event_buffer(int subtype, void *buffer)
-{
- subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
- if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
- printk(VIOPATH_KERN_WARN
- "unexpected subtype %d freeing event buffer\n", subtype);
- return;
- }
-
- if (atomic_read(&event_buffer_available[subtype]) != 0) {
- printk(VIOPATH_KERN_WARN
- "freeing unallocated event buffer, subtype %d\n",
- subtype);
- return;
- }
-
- if (buffer != &event_buffer[subtype * 256]) {
- printk(VIOPATH_KERN_WARN
- "freeing invalid event buffer, subtype %d\n", subtype);
- }
-
- atomic_set(&event_buffer_available[subtype], 1);
-}
-EXPORT_SYMBOL(vio_free_event_buffer);
-
-static const struct vio_error_entry vio_no_error =
- { 0, 0, "Non-VIO Error" };
-static const struct vio_error_entry vio_unknown_error =
- { 0, EIO, "Unknown Error" };
-
-static const struct vio_error_entry vio_default_errors[] = {
- {0x0001, EIO, "No Connection"},
- {0x0002, EIO, "No Receiver"},
- {0x0003, EIO, "No Buffer Available"},
- {0x0004, EBADRQC, "Invalid Message Type"},
- {0x0000, 0, NULL},
-};
-
-const struct vio_error_entry *vio_lookup_rc(
- const struct vio_error_entry *local_table, u16 rc)
-{
- const struct vio_error_entry *cur;
-
- if (!rc)
- return &vio_no_error;
- if (local_table)
- for (cur = local_table; cur->rc; ++cur)
- if (cur->rc == rc)
- return cur;
- for (cur = vio_default_errors; cur->rc; ++cur)
- if (cur->rc == rc)
- return cur;
- return &vio_unknown_error;
-}
-EXPORT_SYMBOL(vio_lookup_rc);
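
The vio_lookup_rc() routine removed above is a compact example of the error-translation pattern this driver used: consult an optional caller-supplied table first, then the table of common VIO return codes, and finally fall back to a catch-all entry so callers always get a usable errno and message. A standalone sketch of the same pattern follows; it reuses the four default entries shown above, but the helper and table names are illustrative rather than the removed kernel API.

#include <stdio.h>
#include <errno.h>

struct err_entry {
	unsigned short rc;   /* return code reported by the remote partition */
	int errnum;          /* errno handed back to the caller */
	const char *msg;     /* human-readable description */
};

static const struct err_entry no_error      = { 0, 0,   "Non-VIO Error" };
static const struct err_entry unknown_error = { 0, EIO, "Unknown Error" };

/* Generic codes; a driver may also pass its own table. */
static const struct err_entry default_errors[] = {
	{ 0x0001, EIO,     "No Connection" },
	{ 0x0002, EIO,     "No Receiver" },
	{ 0x0003, EIO,     "No Buffer Available" },
	{ 0x0004, EBADRQC, "Invalid Message Type" },
	{ 0x0000, 0,       NULL },	/* terminator */
};

static const struct err_entry *lookup_rc(const struct err_entry *local,
					 unsigned short rc)
{
	const struct err_entry *cur;

	if (!rc)
		return &no_error;
	if (local)
		for (cur = local; cur->rc; ++cur)
			if (cur->rc == rc)
				return cur;
	for (cur = default_errors; cur->rc; ++cur)
		if (cur->rc == rc)
			return cur;
	return &unknown_error;		/* nothing matched */
}

int main(void)
{
	const struct err_entry *e = lookup_rc(NULL, 0x0003);

	printf("rc 0x0003 -> errno %d (%s)\n", e->errnum, e->msg);
	return 0;
}
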
diff --git a/arch/powerpc/platforms/iseries/vpd_areas.h b/arch/powerpc/platforms/iseries/vpd_areas.h
deleted file mode 100644
index feb001f3a5fe..000000000000
--- a/arch/powerpc/platforms/iseries/vpd_areas.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _ISERIES_VPD_AREAS_H
-#define _ISERIES_VPD_AREAS_H
-
-/*
- * This file defines the address and length of each of the VPD areas passed
- * to the OS from PLIC (most of which come from the SP).
- */
-
-#include <asm/types.h>
-
-/* VPD Entry index is carved in stone - cannot be changed (easily). */
-#define ItVpdCecVpd 0
-#define ItVpdDynamicSpace 1
-#define ItVpdExtVpd 2
-#define ItVpdExtVpdOnPanel 3
-#define ItVpdFirstPaca 4
-#define ItVpdIoVpd 5
-#define ItVpdIplParms 6
-#define ItVpdMsVpd 7
-#define ItVpdPanelVpd 8
-#define ItVpdLpNaca 9
-#define ItVpdBackplaneAndMaybeClockCardVpd 10
-#define ItVpdRecoveryLogBuffer 11
-#define ItVpdSpCommArea 12
-#define ItVpdSpLogBuffer 13
-#define ItVpdSpLogBufferSave 14
-#define ItVpdSpCardVpd 15
-#define ItVpdFirstProcVpd 16
-#define ItVpdApModelVpd 17
-#define ItVpdClockCardVpd 18
-#define ItVpdBusExtCardVpd 19
-#define ItVpdProcCapacityVpd 20
-#define ItVpdInteractiveCapacityVpd 21
-#define ItVpdFirstSlotLabel 22
-#define ItVpdFirstLpQueue 23
-#define ItVpdFirstL3CacheVpd 24
-#define ItVpdFirstProcFruVpd 25
-
-#define ItVpdMaxEntries 26
-
-#define ItDmaMaxEntries 10
-
-#define ItVpdAreasMaxSlotLabels 192
-
-
-struct ItVpdAreas {
- u32 xSlicDesc; // Descriptor 000-003
- u16 xSlicSize; // Size of this control block 004-005
-	u16	xPlicAdjustVpdLens:1;	// Flag to indicate new interface 006-007
- u16 xRsvd1:15; // Reserved bits ...
- u16 xSlicVpdEntries; // Number of VPD entries 008-009
- u16 xSlicDmaEntries; // Number of DMA entries 00A-00B
- u16 xSlicMaxLogicalProcs; // Maximum logical processors 00C-00D
- u16 xSlicMaxPhysicalProcs; // Maximum physical processors 00E-00F
- u16 xSlicDmaToksOffset; // Offset into this of array 010-011
- u16 xSlicVpdAdrsOffset; // Offset into this of array 012-013
- u16 xSlicDmaLensOffset; // Offset into this of array 014-015
- u16 xSlicVpdLensOffset; // Offset into this of array 016-017
-	u16	xSlicMaxSlotLabels;	// Maximum number of slot labels 018-019
- u16 xSlicMaxLpQueues; // Maximum number of LP Queues 01A-01B
- u8 xRsvd2[4]; // Reserved 01C-01F
- u64 xRsvd3[12]; // Reserved 020-07F
- u32 xPlicDmaLens[ItDmaMaxEntries];// Array of DMA lengths 080-0A7
- u32 xPlicDmaToks[ItDmaMaxEntries];// Array of DMA tokens 0A8-0CF
- u32 xSlicVpdLens[ItVpdMaxEntries];// Array of VPD lengths 0D0-12F
- const void *xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF
-};
-
-extern const struct ItVpdAreas itVpdAreas;
-
-#endif /* _ISERIES_VPD_AREAS_H */
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 401e3f3f74c8..465ee8f5c086 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -620,7 +620,7 @@ void __init maple_pci_init(void)
}
/* Tell pci.c to not change any resource allocations. */
- pci_probe_only = 1;
+ pci_add_flags(PCI_PROBE_ONLY);
}
int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
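
This maple hunk is one instance of a conversion repeated across the rest of the series (pasemi, powermac, powernv, pseries): the standalone pci_probe_only global is retired in favour of the shared pci_flags bitmask manipulated with pci_add_flags()/pci_set_flags(). A minimal userspace analogue of the bitmask style is sketched below; the flag values and helper bodies are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative flag bits; the kernel's real PCI_* values differ. */
#define PROBE_ONLY		0x0001	/* keep firmware resource assignments */
#define REASSIGN_ALL_RSRC	0x0002	/* ignore firmware setup entirely */
#define CAN_SKIP_ISA_ALIGN	0x0004	/* no ISA-style alignment needed */

static unsigned int pci_flags;

static void add_flags(unsigned int f) { pci_flags |= f; }
static int  has_flag(unsigned int f)  { return (pci_flags & f) != 0; }

int main(void)
{
	/* Each platform switches on exactly the behaviours it wants. */
	add_flags(PROBE_ONLY | REASSIGN_ALL_RSRC);

	if (has_flag(PROBE_ONLY))
		printf("resource assignments are left as firmware set them\n");
	if (!has_flag(CAN_SKIP_ISA_ALIGN))
		printf("ISA alignment constraints still apply\n");
	return 0;
}

Collapsing per-behaviour globals into one word also lets a platform request several behaviours in a single call, as the pci-ioda.c hunk further down does with PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC.
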
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 0bcbfe7b2c55..3b7545a51aa9 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -262,7 +262,7 @@ static void __init maple_init_IRQ(void)
flags |= MPIC_BIG_ENDIAN;
/* XXX Maple specific bits */
- flags |= MPIC_U3_HT_IRQS | MPIC_WANTS_RESET;
+ flags |= MPIC_U3_HT_IRQS;
/* All U3/U4 are big-endian, older SLOF firmware doesn't encode this */
flags |= MPIC_BIG_ENDIAN;
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index b6a0ec45c695..aa862713258c 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -229,9 +229,6 @@ void __init pas_pci_init(void)
/* Setup the linkage between OF nodes and PHBs */
pci_devs_phb_init();
-
- /* Use the common resource allocation mechanism */
- pci_probe_only = 1;
}
void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 98b7a7c13176..e777ad471a48 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -224,7 +224,7 @@ static __init void pas_init_IRQ(void)
openpic_addr = of_read_number(opprop, naddr);
printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
- mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS;
+ mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS | MPIC_NO_RESET;
nmiprop = of_get_property(mpic_node, "nmi-source", NULL);
if (nmiprop)
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 54d227127c9f..da18b26dcc6f 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -279,7 +279,7 @@ static u32 core99_check(u8* datas)
static int sm_erase_bank(int bank)
{
- int stat, i;
+ int stat;
unsigned long timeout;
u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
@@ -301,11 +301,10 @@ static int sm_erase_bank(int bank)
out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
out_8(base, SM_FLASH_CMD_RESET);
- for (i=0; i<NVRAM_SIZE; i++)
- if (base[i] != 0xff) {
- printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
- return -ENXIO;
- }
+ if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
+ printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
+ return -ENXIO;
+ }
return 0;
}
@@ -336,17 +335,16 @@ static int sm_write_bank(int bank, u8* datas)
}
out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
out_8(base, SM_FLASH_CMD_RESET);
- for (i=0; i<NVRAM_SIZE; i++)
- if (base[i] != datas[i]) {
- printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
- return -ENXIO;
- }
+ if (memcmp(base, datas, NVRAM_SIZE)) {
+ printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
+ return -ENXIO;
+ }
return 0;
}
static int amd_erase_bank(int bank)
{
- int i, stat = 0;
+ int stat = 0;
unsigned long timeout;
u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
@@ -382,12 +380,11 @@ static int amd_erase_bank(int bank)
/* Reset */
out_8(base, 0xf0);
udelay(1);
-
- for (i=0; i<NVRAM_SIZE; i++)
- if (base[i] != 0xff) {
- printk(KERN_ERR "nvram: AMD flash erase failed !\n");
- return -ENXIO;
- }
+
+ if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
+ printk(KERN_ERR "nvram: AMD flash erase failed !\n");
+ return -ENXIO;
+ }
return 0;
}
@@ -429,11 +426,10 @@ static int amd_write_bank(int bank, u8* datas)
out_8(base, 0xf0);
udelay(1);
- for (i=0; i<NVRAM_SIZE; i++)
- if (base[i] != datas[i]) {
- printk(KERN_ERR "nvram: AMD flash write failed !\n");
- return -ENXIO;
- }
+ if (memcmp(base, datas, NVRAM_SIZE)) {
+ printk(KERN_ERR "nvram: AMD flash write failed !\n");
+ return -ENXIO;
+ }
return 0;
}
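
Both the Sharp/Micron and AMD paths above replace an open-coded byte-by-byte loop with memchr_inv() for the erase check (is the whole bank 0xff?) and memcmp() for the write check (does the bank match the source buffer?). memchr_inv() returns a pointer to the first byte that differs from the given value, or NULL when the region is uniform. Below is a small userspace sketch of the same checks, with a local stand-in for the kernel helper and a hypothetical bank size.

#include <stdio.h>
#include <string.h>

#define BANK_SIZE 8192		/* hypothetical NVRAM bank size */

/* Userspace stand-in for the kernel's memchr_inv(): first byte != c, or NULL. */
static const void *memchr_inv_local(const void *s, int c, size_t n)
{
	const unsigned char *p = s;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != (unsigned char)c)
			return p + i;
	return NULL;
}

static int verify_erase(const unsigned char *bank)
{
	/* Erased flash reads back as all 0xff. */
	if (memchr_inv_local(bank, 0xff, BANK_SIZE)) {
		fprintf(stderr, "erase verification failed\n");
		return -1;
	}
	return 0;
}

static int verify_write(const unsigned char *bank, const unsigned char *data)
{
	/* The written bank must match the source buffer exactly. */
	if (memcmp(bank, data, BANK_SIZE)) {
		fprintf(stderr, "write verification failed\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	static unsigned char bank[BANK_SIZE], data[BANK_SIZE];

	memset(bank, 0xff, sizeof(bank));
	printf("erase check: %d\n", verify_erase(bank));

	memcpy(bank, data, sizeof(bank));	/* simulate a successful write */
	printf("write check: %d\n", verify_write(bank, data));
	return 0;
}
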
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 31a7d3a7ce25..43bbe1bda939 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1059,9 +1059,6 @@ void __init pmac_pci_init(void)
}
/* pmac_check_ht_link(); */
- /* We can allocate missing resources if any */
- pci_probe_only = 0;
-
#else /* CONFIG_PPC64 */
init_p2pbridge();
init_second_ohare();
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 7761aabfc293..66ad93de1d55 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -61,7 +61,7 @@ static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
static int pmac_irq_cascade = -1;
-static struct irq_host *pmac_pic_host;
+static struct irq_domain *pmac_pic_host;
static void __pmac_retrigger(unsigned int irq_nr)
{
@@ -268,13 +268,13 @@ static struct irqaction gatwick_cascade_action = {
.name = "cascade",
};
-static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
+static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
{
/* We match all, we don't always have a node anyway */
return 1;
}
-static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
+static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
if (hw >= max_irqs)
@@ -288,21 +288,10 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq,
- unsigned int *out_flags)
-
-{
- *out_flags = IRQ_TYPE_NONE;
- *out_hwirq = *intspec;
- return 0;
-}
-
-static struct irq_host_ops pmac_pic_host_ops = {
+static const struct irq_domain_ops pmac_pic_host_ops = {
.match = pmac_pic_host_match,
.map = pmac_pic_host_map,
- .xlate = pmac_pic_host_xlate,
+ .xlate = irq_domain_xlate_onecell,
};
static void __init pmac_pic_probe_oldstyle(void)
@@ -352,9 +341,8 @@ static void __init pmac_pic_probe_oldstyle(void)
/*
* Allocate an irq host
*/
- pmac_pic_host = irq_alloc_host(master, IRQ_HOST_MAP_LINEAR, max_irqs,
- &pmac_pic_host_ops,
- max_irqs);
+ pmac_pic_host = irq_domain_add_linear(master, max_irqs,
+ &pmac_pic_host_ops, NULL);
BUG_ON(pmac_pic_host == NULL);
irq_set_default_host(pmac_pic_host);
@@ -469,7 +457,6 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
- flags |= MPIC_WANTS_RESET;
if (of_get_property(np, "big-endian", NULL))
flags |= MPIC_BIG_ENDIAN;
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 44d769258ebf..a81e5a88fbdf 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -125,7 +125,7 @@ static volatile u32 __iomem *psurge_start;
static int psurge_type = PSURGE_NONE;
/* irq for secondary cpus to report */
-static struct irq_host *psurge_host;
+static struct irq_domain *psurge_host;
int psurge_secondary_virq;
/*
@@ -176,7 +176,7 @@ static void smp_psurge_cause_ipi(int cpu, unsigned long data)
psurge_set_ipi(cpu);
}
-static int psurge_host_map(struct irq_host *h, unsigned int virq,
+static int psurge_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
@@ -184,7 +184,7 @@ static int psurge_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-struct irq_host_ops psurge_host_ops = {
+static const struct irq_domain_ops psurge_host_ops = {
.map = psurge_host_map,
};
@@ -192,8 +192,7 @@ static int psurge_secondary_ipi_init(void)
{
int rc = -ENOMEM;
- psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
- &psurge_host_ops, 0);
+ psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
if (psurge_host)
psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
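
The two PowerMac hunks above, like the ps3 hunk further down, are mechanical irq_host to irq_domain conversions: the ops table becomes a const struct irq_domain_ops, the hand-written one-cell .xlate is replaced by the generic irq_domain_xlate_onecell, and irq_alloc_host() calls turn into irq_domain_add_linear() or irq_domain_add_nomap(). Condensed into one place, the converted shape is roughly the sketch below; the my_* names are placeholders, and only the irq_domain calls mirror the hunks above.

/* Placeholder chip; a real driver supplies its mask/unmask callbacks. */
static struct irq_chip my_irq_chip = {
	.name = "my-pic",
};

static int my_pic_map(struct irq_domain *h, unsigned int virq,
		      irq_hw_number_t hw)
{
	/* Per-interrupt setup: chip, flow handler, chip data, ... */
	irq_set_chip_and_handler(virq, &my_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops my_pic_ops = {
	.map	= my_pic_map,
	/* Generic one-cell translation replaces the hand-written .xlate. */
	.xlate	= irq_domain_xlate_onecell,
};

static void __init my_pic_init(struct device_node *np, unsigned int nr_irqs)
{
	struct irq_domain *host;

	/* Was: irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, nr_irqs, &ops, nr_irqs) */
	host = irq_domain_add_linear(np, nr_irqs, &my_pic_ops, NULL);
	BUG_ON(host == NULL);
	irq_set_default_host(host);
}

Drivers with no fixed hwirq space, such as psurge above or the ps3 interrupt code below, use irq_domain_add_nomap() instead and create mappings on demand with irq_create_direct_mapping().
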
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index f31162cfdaa9..fbdd74dac3ac 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -204,11 +204,10 @@ static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
pr_devel(" -> OBR %s [%x] +%016llx\n",
bus->self ? pci_name(bus->self) : "root", flags, offset);
- for (i = 0; i < 2; i++) {
- r = bus->resource[i];
+ pci_bus_for_each_resource(bus, r, i) {
if (r && (r->flags & flags)) {
- bus->resource[i]->start += offset;
- bus->resource[i]->end += offset;
+ r->start += offset;
+ r->end += offset;
}
}
list_for_each_entry(dev, &bus->devices, bus_list)
@@ -288,12 +287,17 @@ static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
* assignment algorithm is going to be uber-trivial for now, we
* can try to be smarter later at filling out holes.
*/
- start = bus->self ? 0 : bus->resource[bres]->start;
-
- /* Don't hand out IO 0 */
- if ((flags & IORESOURCE_IO) && !bus->self)
- start += 0x1000;
-
+ if (bus->self) {
+ /* No offset for downstream bridges */
+ start = 0;
+ } else {
+ /* Offset from the root */
+ if (flags & IORESOURCE_IO)
+ /* Don't hand out IO 0 */
+ start = hose->io_resource.start + 0x1000;
+ else
+ start = hose->mem_resources[0].start;
+ }
while(!list_empty(&head)) {
w = list_first_entry(&head, struct resource_wrap, link);
list_del(&w->link);
@@ -321,13 +325,20 @@ static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
empty:
/* Only setup P2P's, not the PHB itself */
if (bus->self) {
- WARN_ON(bus->resource[bres] == NULL);
- bus->resource[bres]->start = 0;
- bus->resource[bres]->flags = (*size) ? flags : 0;
- bus->resource[bres]->end = (*size) ? (*size - 1) : 0;
+ struct resource *res = bus->resource[bres];
+
+ if (WARN_ON(res == NULL))
+ return;
- /* Clear prefetch bus resources for now */
- bus->resource[2]->flags = 0;
+ /*
+ * FIXME: We should probably export and call
+ * pci_bridge_check_ranges() to properly re-initialize
+ * the PCI portion of the flags here, and to detect
+ * what the bridge actually supports.
+ */
+ res->start = 0;
+ res->flags = (*size) ? flags : 0;
+ res->end = (*size) ? (*size - 1) : 0;
}
pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
@@ -1288,15 +1299,14 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
/* Setup MSI support */
pnv_pci_init_ioda_msis(phb);
- /* We set both probe_only and PCI_REASSIGN_ALL_RSRC. This is an
+ /* We set both PCI_PROBE_ONLY and PCI_REASSIGN_ALL_RSRC. This is an
* odd combination which essentially means that we skip all resource
* fixups and assignments in the generic code, and do it all
* ourselves here
*/
- pci_probe_only = 1;
ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb;
ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
- pci_add_flags(PCI_REASSIGN_ALL_RSRC);
+ pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC);
/* Reset IODA tables to a clean state */
rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index a70bc1e385eb..be3cfc5ceabb 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -31,6 +31,7 @@
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>
+#include <asm/firmware.h>
#include "powernv.h"
#include "pci.h"
@@ -52,32 +53,38 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
{
- unsigned int id;
+ unsigned long flags;
+ unsigned int id, rc;
+
+ spin_lock_irqsave(&phb->lock, flags);
- spin_lock(&phb->lock);
id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
if (id >= phb->msi_count && phb->msi_next)
id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
if (id >= phb->msi_count) {
- spin_unlock(&phb->lock);
- return 0;
+ rc = 0;
+ goto out;
}
__set_bit(id, phb->msi_map);
- spin_unlock(&phb->lock);
- return id + phb->msi_base;
+ rc = id + phb->msi_base;
+out:
+ spin_unlock_irqrestore(&phb->lock, flags);
+ return rc;
}
static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
{
+ unsigned long flags;
unsigned int id;
if (WARN_ON(hwirq < phb->msi_base ||
hwirq >= (phb->msi_base + phb->msi_count)))
return;
id = hwirq - phb->msi_base;
- spin_lock(&phb->lock);
+
+ spin_lock_irqsave(&phb->lock, flags);
__clear_bit(id, phb->msi_map);
- spin_unlock(&phb->lock);
+ spin_unlock_irqrestore(&phb->lock, flags);
}
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -555,10 +562,7 @@ void __init pnv_pci_init(void)
{
struct device_node *np;
- pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);
-
- /* We do not want to just probe */
- pci_probe_only = 0;
+ pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
/* OPAL absent, try POPAL first then RTAS detection of PHBs */
if (!firmware_has_feature(FW_FEATURE_OPAL)) {
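
The MSI allocator hunks above make two related changes: the PHB lock is now taken with spin_lock_irqsave(), the form to use when a lock may also be taken with interrupts disabled or from interrupt context (the exact motivating path is not spelled out in this hunk), and the failure path funnels through a single unlock. Stripped of the msi_next wrap-around hint, the allocate/free pair reduces to roughly the condensed sketch below, not the exact code in the file.

static unsigned int msi_id_alloc(struct pnv_phb *phb)
{
	unsigned long flags;
	unsigned int id, hwirq = 0;	/* 0 means "no MSI available" */

	spin_lock_irqsave(&phb->lock, flags);
	id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
	if (id < phb->msi_count) {
		__set_bit(id, phb->msi_map);
		hwirq = id + phb->msi_base;
	}
	spin_unlock_irqrestore(&phb->lock, flags);

	return hwirq;
}

static void msi_id_free(struct pnv_phb *phb, unsigned int hwirq)
{
	unsigned long flags;

	spin_lock_irqsave(&phb->lock, flags);
	__clear_bit(hwirq - phb->msi_base, phb->msi_map);
	spin_unlock_irqrestore(&phb->lock, flags);
}
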
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 467bd4ac6824..db1ad1c8f68f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -31,7 +31,6 @@
#include <asm/xics.h>
#include <asm/rtas.h>
#include <asm/opal.h>
-#include <asm/xics.h>
#include "powernv.h"
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 617efa12a3a5..2a4ff86cc21f 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -667,7 +667,7 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd,
static void dump_bmp(struct ps3_private* pd) {};
#endif /* defined(DEBUG) */
-static int ps3_host_map(struct irq_host *h, unsigned int virq,
+static int ps3_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
@@ -678,13 +678,13 @@ static int ps3_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int ps3_host_match(struct irq_host *h, struct device_node *np)
+static int ps3_host_match(struct irq_domain *h, struct device_node *np)
{
/* Match all */
return 1;
}
-static struct irq_host_ops ps3_host_ops = {
+static const struct irq_domain_ops ps3_host_ops = {
.map = ps3_host_map,
.match = ps3_host_match,
};
@@ -751,10 +751,9 @@ void __init ps3_init_IRQ(void)
{
int result;
unsigned cpu;
- struct irq_host *host;
+ struct irq_domain *host;
- host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &ps3_host_ops,
- PS3_INVALID_OUTLET);
+ host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
irq_set_default_host(host);
irq_set_virq_count(PS3_PLUG_MAX + 1);
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index ae7b6d41fed3..aadbe4f6d537 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -3,6 +3,7 @@ config PPC_PSERIES
bool "IBM pSeries & new (POWER5-based) iSeries"
select HAVE_PCSPKR_PLATFORM
select MPIC
+ select OF_DYNAMIC
select PCI_MSI
select PPC_XICS
select PPC_ICP_NATIVE
@@ -72,7 +73,7 @@ config IO_EVENT_IRQ
config LPARCFG
bool "LPAR Configuration Data"
- depends on PPC_PSERIES || PPC_ISERIES
+ depends on PPC_PSERIES
help
Provide system capacity information via human readable
<key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
@@ -122,7 +123,7 @@ config DTL
Say N if you are unsure.
config PSERIES_IDLE
- tristate "Cpuidle driver for pSeries platforms"
+ bool "Cpuidle driver for pSeries platforms"
depends on CPU_IDLE
depends on PPC_PSERIES
default y
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 236db46b4078..c222189f5bb2 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -6,7 +6,8 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
firmware.o power.o dlpar.o mobility.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCANLOG) += scanlog.o
-obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
+obj-$(CONFIG_EEH) += eeh.o eeh_dev.o eeh_cache.o eeh_driver.o \
+ eeh_event.o eeh_sysfs.o eeh_pseries.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
obj-$(CONFIG_PSERIES_MSI) += msi.o
@@ -18,7 +19,6 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
-obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 565869022e3d..309d38ef7322 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1,8 +1,8 @@
/*
- * eeh.c
* Copyright IBM Corporation 2001, 2005, 2006
* Copyright Dave Engebretsen & Todd Inglett 2001
* Copyright Linas Vepstas 2005, 2006
+ * Copyright 2001-2012 IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@
*/
#include <linux/delay.h>
-#include <linux/sched.h> /* for init_mm */
+#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/pci.h>
@@ -86,16 +86,8 @@
/* Time to wait for a PCI slot to report status, in milliseconds */
#define PCI_BUS_RESET_WAIT_MSEC (60*1000)
-/* RTAS tokens */
-static int ibm_set_eeh_option;
-static int ibm_set_slot_reset;
-static int ibm_read_slot_reset_state;
-static int ibm_read_slot_reset_state2;
-static int ibm_slot_error_detail;
-static int ibm_get_config_addr_info;
-static int ibm_get_config_addr_info2;
-static int ibm_configure_bridge;
-static int ibm_configure_pe;
+/* Platform dependent EEH operations */
+struct eeh_ops *eeh_ops = NULL;
int eeh_subsystem_enabled;
EXPORT_SYMBOL(eeh_subsystem_enabled);
@@ -103,14 +95,6 @@ EXPORT_SYMBOL(eeh_subsystem_enabled);
/* Lock to avoid races due to multiple reports of an error */
static DEFINE_RAW_SPINLOCK(confirm_error_lock);
-/* Buffer for reporting slot-error-detail rtas calls. Its here
- * in BSS, and not dynamically alloced, so that it ends up in
- * RMO where RTAS can access it.
- */
-static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
-static DEFINE_SPINLOCK(slot_errbuf_lock);
-static int eeh_error_buf_size;
-
/* Buffer for reporting pci register dumps. It's here in BSS, and
 * not dynamically allocated, so that it ends up in RMO where RTAS
* can access it.
@@ -118,74 +102,50 @@ static int eeh_error_buf_size;
#define EEH_PCI_REGS_LOG_LEN 4096
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
-/* System monitoring statistics */
-static unsigned long no_device;
-static unsigned long no_dn;
-static unsigned long no_cfg_addr;
-static unsigned long ignored_check;
-static unsigned long total_mmio_ffs;
-static unsigned long false_positives;
-static unsigned long slot_resets;
-
-#define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE)
+/*
+ * This struct maintains the global EEH statistics, which are
+ * also exported to user space through procfs.
+ */
+struct eeh_stats {
+ u64 no_device; /* PCI device not found */
+ u64 no_dn; /* OF node not found */
+ u64 no_cfg_addr; /* Config address not found */
+ u64 ignored_check; /* EEH check skipped */
+ u64 total_mmio_ffs; /* Total EEH checks */
+ u64 false_positives; /* Unnecessary EEH checks */
+ u64 slot_resets; /* PE reset */
+};
-/* --------------------------------------------------------------- */
-/* Below lies the EEH event infrastructure */
+static struct eeh_stats eeh_stats;
-static void rtas_slot_error_detail(struct pci_dn *pdn, int severity,
- char *driver_log, size_t loglen)
-{
- int config_addr;
- unsigned long flags;
- int rc;
-
- /* Log the error with the rtas logger */
- spin_lock_irqsave(&slot_errbuf_lock, flags);
- memset(slot_errbuf, 0, eeh_error_buf_size);
-
- /* Use PE configuration address, if present */
- config_addr = pdn->eeh_config_addr;
- if (pdn->eeh_pe_config_addr)
- config_addr = pdn->eeh_pe_config_addr;
-
- rc = rtas_call(ibm_slot_error_detail,
- 8, 1, NULL, config_addr,
- BUID_HI(pdn->phb->buid),
- BUID_LO(pdn->phb->buid),
- virt_to_phys(driver_log), loglen,
- virt_to_phys(slot_errbuf),
- eeh_error_buf_size,
- severity);
-
- if (rc == 0)
- log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
- spin_unlock_irqrestore(&slot_errbuf_lock, flags);
-}
+#define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE)
/**
- * gather_pci_data - copy assorted PCI config space registers to buff
- * @pdn: device to report data for
+ * eeh_gather_pci_data - Copy assorted PCI config space registers to buff
+ * @edev: device to report data for
* @buf: point to buffer in which to log
* @len: amount of room in buffer
*
* This routine captures assorted PCI configuration space data,
* and puts them into a buffer for RTAS error logging.
*/
-static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
+static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
{
- struct pci_dev *dev = pdn->pcidev;
+ struct device_node *dn = eeh_dev_to_of_node(edev);
+ struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
u32 cfg;
int cap, i;
int n = 0;
- n += scnprintf(buf+n, len-n, "%s\n", pdn->node->full_name);
- printk(KERN_WARNING "EEH: of node=%s\n", pdn->node->full_name);
+ n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
+ printk(KERN_WARNING "EEH: of node=%s\n", dn->full_name);
- rtas_read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
+ eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg);
n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg);
- rtas_read_config(pdn, PCI_COMMAND, 4, &cfg);
+ eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg);
n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg);
@@ -196,11 +156,11 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
/* Gather bridge-specific registers */
if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
- rtas_read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
+ eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg);
n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg);
- rtas_read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
+ eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg);
n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg);
}
@@ -208,11 +168,11 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
/* Dump out the PCI-X command and status regs */
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (cap) {
- rtas_read_config(pdn, cap, 4, &cfg);
+ eeh_ops->read_config(dn, cap, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg);
- rtas_read_config(pdn, cap+4, 4, &cfg);
+ eeh_ops->read_config(dn, cap+4, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg);
}
@@ -225,7 +185,7 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
"EEH: PCI-E capabilities and status follow:\n");
for (i=0; i<=8; i++) {
- rtas_read_config(pdn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
}
@@ -237,7 +197,7 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
"EEH: PCI-E AER capability register set follows:\n");
for (i=0; i<14; i++) {
- rtas_read_config(pdn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg);
}
@@ -246,111 +206,46 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
/* Gather status on devices under the bridge */
if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
- struct device_node *dn;
+ struct device_node *child;
- for_each_child_of_node(pdn->node, dn) {
- pdn = PCI_DN(dn);
- if (pdn)
- n += gather_pci_data(pdn, buf+n, len-n);
+ for_each_child_of_node(dn, child) {
+ if (of_node_to_eeh_dev(child))
+ n += eeh_gather_pci_data(of_node_to_eeh_dev(child), buf+n, len-n);
}
}
return n;
}
-void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
-{
- size_t loglen = 0;
- pci_regs_buf[0] = 0;
-
- rtas_pci_enable(pdn, EEH_THAW_MMIO);
- rtas_configure_bridge(pdn);
- eeh_restore_bars(pdn);
- loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
-
- rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
-}
-
-/**
- * read_slot_reset_state - Read the reset state of a device node's slot
- * @dn: device node to read
- * @rets: array to return results in
- */
-static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
-{
- int token, outputs;
- int config_addr;
-
- if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
- token = ibm_read_slot_reset_state2;
- outputs = 4;
- } else {
- token = ibm_read_slot_reset_state;
- rets[2] = 0; /* fake PE Unavailable info */
- outputs = 3;
- }
-
- /* Use PE configuration address, if present */
- config_addr = pdn->eeh_config_addr;
- if (pdn->eeh_pe_config_addr)
- config_addr = pdn->eeh_pe_config_addr;
-
- return rtas_call(token, 3, outputs, rets, config_addr,
- BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
-}
-
/**
- * eeh_wait_for_slot_status - returns error status of slot
- * @pdn pci device node
- * @max_wait_msecs maximum number to millisecs to wait
+ * eeh_slot_error_detail - Generate combined log including driver log and error log
+ * @edev: device to report error log for
+ * @severity: temporary or permanent error log
*
- * Return negative value if a permanent error, else return
- * Partition Endpoint (PE) status value.
- *
- * If @max_wait_msecs is positive, then this routine will
- * sleep until a valid status can be obtained, or until
- * the max allowed wait time is exceeded, in which case
- * a -2 is returned.
+ * This routine should be called to generate the combined log, which
+ * consists of the driver log and the error log. The driver log is
+ * gathered from the config space of the corresponding PCI device,
+ * while the error log is fetched through a platform-dependent call.
*/
-int
-eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs)
+void eeh_slot_error_detail(struct eeh_dev *edev, int severity)
{
- int rc;
- int rets[3];
- int mwait;
-
- while (1) {
- rc = read_slot_reset_state(pdn, rets);
- if (rc) return rc;
- if (rets[1] == 0) return -1; /* EEH is not supported */
-
- if (rets[0] != 5) return rets[0]; /* return actual status */
-
- if (rets[2] == 0) return -1; /* permanently unavailable */
+ size_t loglen = 0;
+ pci_regs_buf[0] = 0;
- if (max_wait_msecs <= 0) break;
+ eeh_pci_enable(edev, EEH_OPT_THAW_MMIO);
+ eeh_ops->configure_bridge(eeh_dev_to_of_node(edev));
+ eeh_restore_bars(edev);
+ loglen = eeh_gather_pci_data(edev, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
- mwait = rets[2];
- if (mwait <= 0) {
- printk (KERN_WARNING
- "EEH: Firmware returned bad wait value=%d\n", mwait);
- mwait = 1000;
- } else if (mwait > 300*1000) {
- printk (KERN_WARNING
- "EEH: Firmware is taking too long, time=%d\n", mwait);
- mwait = 300*1000;
- }
- max_wait_msecs -= mwait;
- msleep (mwait);
- }
-
- printk(KERN_WARNING "EEH: Timed out waiting for slot status\n");
- return -2;
+ eeh_ops->get_log(eeh_dev_to_of_node(edev), severity, pci_regs_buf, loglen);
}
/**
- * eeh_token_to_phys - convert EEH address token to phys address
- * @token i/o token, should be address in the form 0xA....
+ * eeh_token_to_phys - Convert EEH address token to phys address
+ * @token: I/O token, should be address in the form 0xA....
+ *
+ * This routine should be called to convert a virtual I/O address
+ * to a physical address.
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
@@ -365,36 +260,43 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
return pa | (token & (PAGE_SIZE-1));
}
-/**
- * Return the "partitionable endpoint" (pe) under which this device lies
+/**
+ * eeh_find_device_pe - Retrieve the PE for the given device
+ * @dn: device node
+ *
+ * Return the PE under which this device lies
*/
-struct device_node * find_device_pe(struct device_node *dn)
+struct device_node *eeh_find_device_pe(struct device_node *dn)
{
- while ((dn->parent) && PCI_DN(dn->parent) &&
- (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
+ while (dn->parent && of_node_to_eeh_dev(dn->parent) &&
+ (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
dn = dn->parent;
}
return dn;
}
-/** Mark all devices that are children of this device as failed.
- * Mark the device driver too, so that it can see the failure
- * immediately; this is critical, since some drivers poll
- * status registers in interrupts ... If a driver is polling,
- * and the slot is frozen, then the driver can deadlock in
- * an interrupt context, which is bad.
+/**
+ * __eeh_mark_slot - Mark all child devices as failed
+ * @parent: parent device
+ * @mode_flag: failure flag
+ *
+ * Mark all devices that are children of this device as failed.
+ * Mark the device driver too, so that it can see the failure
+ * immediately; this is critical, since some drivers poll
+ * status registers in interrupts ... If a driver is polling,
+ * and the slot is frozen, then the driver can deadlock in
+ * an interrupt context, which is bad.
*/
-
static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
{
struct device_node *dn;
for_each_child_of_node(parent, dn) {
- if (PCI_DN(dn)) {
+ if (of_node_to_eeh_dev(dn)) {
/* Mark the pci device driver too */
- struct pci_dev *dev = PCI_DN(dn)->pcidev;
+ struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
- PCI_DN(dn)->eeh_mode |= mode_flag;
+ of_node_to_eeh_dev(dn)->mode |= mode_flag;
if (dev && dev->driver)
dev->error_state = pci_channel_io_frozen;
@@ -404,92 +306,81 @@ static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
}
}
-void eeh_mark_slot (struct device_node *dn, int mode_flag)
+/**
+ * eeh_mark_slot - Mark the indicated device and its children as failed
+ * @dn: parent device
+ * @mode_flag: failure flag
+ *
+ * Mark the indicated device and its child devices as failed.
+ * The device drivers are marked as failed as well.
+ */
+void eeh_mark_slot(struct device_node *dn, int mode_flag)
{
struct pci_dev *dev;
- dn = find_device_pe (dn);
+ dn = eeh_find_device_pe(dn);
/* Back up one, since config addrs might be shared */
- if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
+ if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
dn = dn->parent;
- PCI_DN(dn)->eeh_mode |= mode_flag;
+ of_node_to_eeh_dev(dn)->mode |= mode_flag;
/* Mark the pci device too */
- dev = PCI_DN(dn)->pcidev;
+ dev = of_node_to_eeh_dev(dn)->pdev;
if (dev)
dev->error_state = pci_channel_io_frozen;
__eeh_mark_slot(dn, mode_flag);
}
+/**
+ * __eeh_clear_slot - Clear failure flag for the child devices
+ * @parent: parent device
+ * @mode_flag: flag to be cleared
+ *
+ * Clear failure flag for the child devices.
+ */
static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
{
struct device_node *dn;
for_each_child_of_node(parent, dn) {
- if (PCI_DN(dn)) {
- PCI_DN(dn)->eeh_mode &= ~mode_flag;
- PCI_DN(dn)->eeh_check_count = 0;
+ if (of_node_to_eeh_dev(dn)) {
+ of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
+ of_node_to_eeh_dev(dn)->check_count = 0;
__eeh_clear_slot(dn, mode_flag);
}
}
}
-void eeh_clear_slot (struct device_node *dn, int mode_flag)
+/**
+ * eeh_clear_slot - Clear failure flag for the indicated device and its children
+ * @dn: parent device
+ * @mode_flag: flag to be cleared
+ *
+ * Clear failure flag for the indicated device and its children.
+ */
+void eeh_clear_slot(struct device_node *dn, int mode_flag)
{
unsigned long flags;
raw_spin_lock_irqsave(&confirm_error_lock, flags);
- dn = find_device_pe (dn);
+ dn = eeh_find_device_pe(dn);
/* Back up one, since config addrs might be shared */
- if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
+ if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
dn = dn->parent;
- PCI_DN(dn)->eeh_mode &= ~mode_flag;
- PCI_DN(dn)->eeh_check_count = 0;
+ of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
+ of_node_to_eeh_dev(dn)->check_count = 0;
__eeh_clear_slot(dn, mode_flag);
raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
}
-void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
-{
- struct device_node *dn;
-
- for_each_child_of_node(parent, dn) {
- if (PCI_DN(dn)) {
-
- struct pci_dev *dev = PCI_DN(dn)->pcidev;
-
- if (dev && dev->driver)
- *freset |= dev->needs_freset;
-
- __eeh_set_pe_freset(dn, freset);
- }
- }
-}
-
-void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
-{
- struct pci_dev *dev;
- dn = find_device_pe(dn);
-
- /* Back up one, since config addrs might be shared */
- if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
- dn = dn->parent;
-
- dev = PCI_DN(dn)->pcidev;
- if (dev)
- *freset |= dev->needs_freset;
-
- __eeh_set_pe_freset(dn, freset);
-}
-
/**
- * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
- * @dn device node
- * @dev pci device, if known
+ * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze
+ * @dn: device node
+ * @dev: pci device, if known
*
* Check for an EEH failure for the given device node. Call this
* routine if the result of a read was all 0xff's and you want to
@@ -504,35 +395,34 @@ void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
int ret;
- int rets[3];
unsigned long flags;
- struct pci_dn *pdn;
+ struct eeh_dev *edev;
int rc = 0;
const char *location;
- total_mmio_ffs++;
+ eeh_stats.total_mmio_ffs++;
if (!eeh_subsystem_enabled)
return 0;
if (!dn) {
- no_dn++;
+ eeh_stats.no_dn++;
return 0;
}
- dn = find_device_pe(dn);
- pdn = PCI_DN(dn);
+ dn = eeh_find_device_pe(dn);
+ edev = of_node_to_eeh_dev(dn);
/* Access to IO BARs might get this far and still not want checking. */
- if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
- pdn->eeh_mode & EEH_MODE_NOCHECK) {
- ignored_check++;
+ if (!(edev->mode & EEH_MODE_SUPPORTED) ||
+ edev->mode & EEH_MODE_NOCHECK) {
+ eeh_stats.ignored_check++;
pr_debug("EEH: Ignored check (%x) for %s %s\n",
- pdn->eeh_mode, eeh_pci_name(dev), dn->full_name);
+ edev->mode, eeh_pci_name(dev), dn->full_name);
return 0;
}
- if (!pdn->eeh_config_addr && !pdn->eeh_pe_config_addr) {
- no_cfg_addr++;
+ if (!edev->config_addr && !edev->pe_config_addr) {
+ eeh_stats.no_cfg_addr++;
return 0;
}
@@ -544,16 +434,16 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
*/
raw_spin_lock_irqsave(&confirm_error_lock, flags);
rc = 1;
- if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
- pdn->eeh_check_count ++;
- if (pdn->eeh_check_count % EEH_MAX_FAILS == 0) {
+ if (edev->mode & EEH_MODE_ISOLATED) {
+ edev->check_count++;
+ if (edev->check_count % EEH_MAX_FAILS == 0) {
location = of_get_property(dn, "ibm,loc-code", NULL);
- printk (KERN_ERR "EEH: %d reads ignored for recovering device at "
+ printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
- pdn->eeh_check_count, location,
- dev->driver->name, eeh_pci_name(dev));
- printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n",
- dev->driver->name);
+ edev->check_count, location,
+ eeh_driver_name(dev), eeh_pci_name(dev));
+ printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
+ eeh_driver_name(dev));
dump_stack();
}
goto dn_unlock;
@@ -566,58 +456,39 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* function zero of a multi-function device.
* In any case they must share a common PHB.
*/
- ret = read_slot_reset_state(pdn, rets);
-
- /* If the call to firmware failed, punt */
- if (ret != 0) {
- printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
- ret, dn->full_name);
- false_positives++;
- pdn->eeh_false_positives ++;
- rc = 0;
- goto dn_unlock;
- }
+ ret = eeh_ops->get_state(dn, NULL);
/* Note that config-io to empty slots may fail;
- * they are empty when they don't have children. */
- if ((rets[0] == 5) && (rets[2] == 0) && (dn->child == NULL)) {
- false_positives++;
- pdn->eeh_false_positives ++;
- rc = 0;
- goto dn_unlock;
- }
-
- /* If EEH is not supported on this device, punt. */
- if (rets[1] != 1) {
- printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
- ret, dn->full_name);
- false_positives++;
- pdn->eeh_false_positives ++;
- rc = 0;
- goto dn_unlock;
- }
-
- /* If not the kind of error we know about, punt. */
- if (rets[0] != 1 && rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
- false_positives++;
- pdn->eeh_false_positives ++;
+ * they are empty when they don't have children.
+	 * We punt under any of the following conditions: failure to
+	 * get the PE's state, EEH not supported, the PE permanently
+	 * unavailable, or the PE actually in a good state.
+ */
+ if ((ret < 0) ||
+ (ret == EEH_STATE_NOT_SUPPORT) ||
+ (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
+ (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
+ eeh_stats.false_positives++;
+ edev->false_positives ++;
rc = 0;
goto dn_unlock;
}
- slot_resets++;
+ eeh_stats.slot_resets++;
/* Avoid repeated reports of this failure, including problems
* with other functions on this device, and functions under
- * bridges. */
- eeh_mark_slot (dn, EEH_MODE_ISOLATED);
+ * bridges.
+ */
+ eeh_mark_slot(dn, EEH_MODE_ISOLATED);
raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
- eeh_send_failure_event (dn, dev);
+ eeh_send_failure_event(edev);
/* Most EEH events are due to device driver bugs. Having
* a stack trace will help the device-driver authors figure
- * out what happened. So print that out. */
+ * out what happened. So print that out.
+ */
dump_stack();
return 1;
@@ -629,9 +500,9 @@ dn_unlock:
EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
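The state check above punts, counting a false positive, when the PE's state cannot be read, when EEH is not supported on the PE, or when both MMIO and DMA are still active (i.e. the PE is healthy); anything else is treated as a real freeze. A standalone sketch of the healthy-PE predicate, with illustrative flag values (the real EEH_STATE_* constants live in asm/eeh.h and are not part of this patch):

#include <stdio.h>

/* Illustrative values only -- the kernel's EEH_STATE_* constants differ. */
#define EEH_STATE_NOT_SUPPORT	(1 << 0)
#define EEH_STATE_MMIO_ACTIVE	(1 << 1)
#define EEH_STATE_DMA_ACTIVE	(1 << 2)

static int pe_is_healthy(int state)
{
	/* Mirrors the punt condition: both MMIO and DMA still active */
	return (state & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
	       (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
}

int main(void)
{
	printf("%d\n", pe_is_healthy(EEH_STATE_MMIO_ACTIVE |
				     EEH_STATE_DMA_ACTIVE));	/* 1: punt   */
	printf("%d\n", pe_is_healthy(EEH_STATE_MMIO_ACTIVE));	/* 0: frozen */
	return 0;
}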
/**
- * eeh_check_failure - check if all 1's data is due to EEH slot freeze
- * @token i/o token, should be address in the form 0xA....
- * @val value, should be all 1's (XXX why do we need this arg??)
+ * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
+ * @token: I/O token, should be address in the form 0xA....
+ * @val: value, should be all 1's (XXX why do we need this arg??)
*
* Check for an EEH failure at the given token address. Call this
* routine if the result of a read was all 0xff's and you want to
@@ -648,14 +519,14 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
/* Finding the phys addr + pci device; this is pretty quick. */
addr = eeh_token_to_phys((unsigned long __force) token);
- dev = pci_get_device_by_addr(addr);
+ dev = pci_addr_cache_get_device(addr);
if (!dev) {
- no_device++;
+ eeh_stats.no_device++;
return val;
}
dn = pci_device_to_OF_node(dev);
- eeh_dn_check_failure (dn, dev);
+ eeh_dn_check_failure(dn, dev);
pci_dev_put(dev);
return val;
@@ -663,115 +534,54 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
EXPORT_SYMBOL(eeh_check_failure);
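eeh_check_failure() itself is normally reached from the powerpc MMIO accessors rather than called directly: when a read returns all 1's, the accessor hands the token and value over for a slot-freeze check. A minimal sketch of that pattern, assuming a wrapper in the style of eeh_readl() (the real wrappers live in asm/eeh.h and may differ in detail):

/* Sketch only: the wrapper name and shape are assumptions, not the
 * kernel's exact accessors. */
static inline u32 example_eeh_readl(const volatile u32 __iomem *addr)
{
	u32 val = in_le32(addr);

	/* All 1's is the classic signature of a frozen slot */
	if (val == (u32)~0)
		val = eeh_check_failure(addr, val);

	return val;
}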
-/* ------------------------------------------------------------- */
-/* The code below deals with error recovery */
/**
- * rtas_pci_enable - enable MMIO or DMA transfers for this slot
- * @pdn pci device node
+ * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
+ * @edev: EEH device
+ *
+ * This routine should be called to re-enable frozen MMIO or DMA
+ * so that they work correctly again. It's useful when doing
+ * recovery or log collection on the indicated device.
*/
-
-int
-rtas_pci_enable(struct pci_dn *pdn, int function)
+int eeh_pci_enable(struct eeh_dev *edev, int function)
{
- int config_addr;
int rc;
+ struct device_node *dn = eeh_dev_to_of_node(edev);
- /* Use PE configuration address, if present */
- config_addr = pdn->eeh_config_addr;
- if (pdn->eeh_pe_config_addr)
- config_addr = pdn->eeh_pe_config_addr;
-
- rc = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
- config_addr,
- BUID_HI(pdn->phb->buid),
- BUID_LO(pdn->phb->buid),
- function);
-
+ rc = eeh_ops->set_option(dn, function);
if (rc)
printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n",
- function, rc, pdn->node->full_name);
+ function, rc, dn->full_name);
- rc = eeh_wait_for_slot_status (pdn, PCI_BUS_RESET_WAIT_MSEC);
- if ((rc == 4) && (function == EEH_THAW_MMIO))
+ rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC);
+ if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) &&
+ (function == EEH_OPT_THAW_MMIO))
return 0;
return rc;
}
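In the recovery path MMIO is thawed before DMA, so drivers can look at their registers before bus mastering resumes; the handle_eeh_events() changes later in this patch follow exactly that order. A condensed sketch of the sequence (error handling trimmed):

static int example_thaw(struct eeh_dev *edev)
{
	int rc;

	rc = eeh_pci_enable(edev, EEH_OPT_THAW_MMIO);	/* I/O and MMIO first */
	if (rc < 0)
		return rc;

	return eeh_pci_enable(edev, EEH_OPT_THAW_DMA);	/* then DMA */
}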
/**
- * rtas_pci_slot_reset - raises/lowers the pci #RST line
- * @pdn pci device node
- * @state: 1/0 to raise/lower the #RST
- *
- * Clear the EEH-frozen condition on a slot. This routine
- * asserts the PCI #RST line if the 'state' argument is '1',
- * and drops the #RST line if 'state is '0'. This routine is
- * safe to call in an interrupt context.
- *
- */
-
-static void
-rtas_pci_slot_reset(struct pci_dn *pdn, int state)
-{
- int config_addr;
- int rc;
-
- BUG_ON (pdn==NULL);
-
- if (!pdn->phb) {
- printk (KERN_WARNING "EEH: in slot reset, device node %s has no phb\n",
- pdn->node->full_name);
- return;
- }
-
- /* Use PE configuration address, if present */
- config_addr = pdn->eeh_config_addr;
- if (pdn->eeh_pe_config_addr)
- config_addr = pdn->eeh_pe_config_addr;
-
- rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
- config_addr,
- BUID_HI(pdn->phb->buid),
- BUID_LO(pdn->phb->buid),
- state);
-
- /* Fundamental-reset not supported on this PE, try hot-reset */
- if (rc == -8 && state == 3) {
- rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
- config_addr,
- BUID_HI(pdn->phb->buid),
- BUID_LO(pdn->phb->buid), 1);
- if (rc)
- printk(KERN_WARNING
- "EEH: Unable to reset the failed slot,"
- " #RST=%d dn=%s\n",
- rc, pdn->node->full_name);
- }
-}
-
-/**
* pcibios_set_pcie_slot_reset - Set PCI-E reset state
- * @dev: pci device struct
- * @state: reset state to enter
+ * @dev: pci device struct
+ * @state: reset state to enter
*
* Return value:
* 0 if success
- **/
+ */
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
struct device_node *dn = pci_device_to_OF_node(dev);
- struct pci_dn *pdn = PCI_DN(dn);
switch (state) {
case pcie_deassert_reset:
- rtas_pci_slot_reset(pdn, 0);
+ eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
break;
case pcie_hot_reset:
- rtas_pci_slot_reset(pdn, 1);
+ eeh_ops->reset(dn, EEH_RESET_HOT);
break;
case pcie_warm_reset:
- rtas_pci_slot_reset(pdn, 3);
+ eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
break;
default:
return -EINVAL;
@@ -781,13 +591,66 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
}
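pcibios_set_pcie_reset_state() is the arch backend behind the generic pci_set_pcie_reset_state() helper, so a driver that wants to reset its own adapter goes through that wrapper rather than touching eeh_ops directly. A minimal sketch (the surrounding re-initialization is the driver's business):

static void example_warm_reset(struct pci_dev *pdev)
{
	/* Assert a fundamental reset, then release it again */
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	msleep(100);	/* illustrative settle time, not a spec value */
	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
}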
/**
- * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
- * @pdn: pci device node to be reset.
+ * __eeh_set_pe_freset - Check the required reset for child devices
+ * @parent: parent device
+ * @freset: return value
+ *
+ * Each device might have its preferred reset type: fundamental or
+ * hot reset. The routine is used to collect the information from
+ * the child devices so that they could be reset accordingly.
+ */
+void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
+{
+ struct device_node *dn;
+
+ for_each_child_of_node(parent, dn) {
+ if (of_node_to_eeh_dev(dn)) {
+ struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
+
+ if (dev && dev->driver)
+ *freset |= dev->needs_freset;
+
+ __eeh_set_pe_freset(dn, freset);
+ }
+ }
+}
+
+/**
+ * eeh_set_pe_freset - Check the required reset for the indicated device and its children
+ * @dn: parent device
+ * @freset: return value
+ *
+ * Each device might have its preferred reset type: fundamental or
+ * hot reset. The routine is used to collect the information for
+ * the indicated device and its children so that the whole group of
+ * devices can be reset properly.
*/
+void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
+{
+ struct pci_dev *dev;
+ dn = eeh_find_device_pe(dn);
-static void __rtas_set_slot_reset(struct pci_dn *pdn)
+ /* Back up one, since config addrs might be shared */
+ if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
+ dn = dn->parent;
+
+ dev = of_node_to_eeh_dev(dn)->pdev;
+ if (dev)
+ *freset |= dev->needs_freset;
+
+ __eeh_set_pe_freset(dn, freset);
+}
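The fundamental-vs-hot decision is driven by the pci_dev's needs_freset flag: a driver that knows its adapter only recovers from a fundamental reset sets the flag, and eeh_set_pe_freset() then picks the stronger reset for the whole PE. A sketch of how a (hypothetical) driver would opt in at probe time:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* This adapter cannot be recovered by a hot reset alone */
	pdev->needs_freset = 1;

	/* ... usual probe work ... */
	return 0;
}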
+
+/**
+ * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
+ * @edev: EEH device to be reset.
+ *
+ * Assert the PCI #RST line for 1/4 second.
+ */
+static void eeh_reset_pe_once(struct eeh_dev *edev)
{
unsigned int freset = 0;
+ struct device_node *dn = eeh_dev_to_of_node(edev);
/* Determine type of EEH reset required for
* Partitionable Endpoint, a hot-reset (1)
@@ -795,58 +658,68 @@ static void __rtas_set_slot_reset(struct pci_dn *pdn)
* A fundamental reset required by any device under
* Partitionable Endpoint trumps hot-reset.
*/
- eeh_set_pe_freset(pdn->node, &freset);
+ eeh_set_pe_freset(dn, &freset);
if (freset)
- rtas_pci_slot_reset(pdn, 3);
+ eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
else
- rtas_pci_slot_reset(pdn, 1);
+ eeh_ops->reset(dn, EEH_RESET_HOT);
/* The PCI bus requires that the reset be held high for at least
- * a 100 milliseconds. We wait a bit longer 'just in case'. */
-
+ * a 100 milliseconds. We wait a bit longer 'just in case'.
+ */
#define PCI_BUS_RST_HOLD_TIME_MSEC 250
- msleep (PCI_BUS_RST_HOLD_TIME_MSEC);
+ msleep(PCI_BUS_RST_HOLD_TIME_MSEC);
/* We might get hit with another EEH freeze as soon as the
* pci slot reset line is dropped. Make sure we don't miss
- * these, and clear the flag now. */
- eeh_clear_slot (pdn->node, EEH_MODE_ISOLATED);
+ * these, and clear the flag now.
+ */
+ eeh_clear_slot(dn, EEH_MODE_ISOLATED);
- rtas_pci_slot_reset (pdn, 0);
+ eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
/* After a PCI slot has been reset, the PCI Express spec requires
* a 1.5 second idle time for the bus to stabilize, before starting
- * up traffic. */
+ * up traffic.
+ */
#define PCI_BUS_SETTLE_TIME_MSEC 1800
- msleep (PCI_BUS_SETTLE_TIME_MSEC);
+ msleep(PCI_BUS_SETTLE_TIME_MSEC);
}
-int rtas_set_slot_reset(struct pci_dn *pdn)
+/**
+ * eeh_reset_pe - Reset the indicated PE
+ * @edev: PCI device associated EEH device
+ *
+ * This routine should be called to reset the indicated device and its
+ * PE. A PE might include multiple PCI devices, and sometimes PCI bridges
+ * are involved as well.
+ */
+int eeh_reset_pe(struct eeh_dev *edev)
{
int i, rc;
+ struct device_node *dn = eeh_dev_to_of_node(edev);
/* Take three shots at resetting the bus */
for (i=0; i<3; i++) {
- __rtas_set_slot_reset(pdn);
+ eeh_reset_pe_once(edev);
- rc = eeh_wait_for_slot_status(pdn, PCI_BUS_RESET_WAIT_MSEC);
- if (rc == 0)
+ rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC);
+ if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
return 0;
if (rc < 0) {
printk(KERN_ERR "EEH: unrecoverable slot failure %s\n",
- pdn->node->full_name);
+ dn->full_name);
return -1;
}
printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n",
- i+1, pdn->node->full_name, rc);
+ i+1, dn->full_name, rc);
}
return -1;
}
-/* ------------------------------------------------------- */
/** Save and restore of PCI BARs
*
* Although firmware will set up BARs during boot, it doesn't
@@ -856,181 +729,122 @@ int rtas_set_slot_reset(struct pci_dn *pdn)
*/
/**
- * __restore_bars - Restore the Base Address Registers
- * @pdn: pci device node
+ * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
+ * @edev: PCI device associated EEH device
*
* Loads the PCI configuration space base address registers,
* the expansion ROM base address, the latency timer, and etc.
* from the saved values in the device node.
*/
-static inline void __restore_bars (struct pci_dn *pdn)
+static inline void eeh_restore_one_device_bars(struct eeh_dev *edev)
{
int i;
u32 cmd;
+ struct device_node *dn = eeh_dev_to_of_node(edev);
+
+ if (!edev->phb)
+ return;
- if (NULL==pdn->phb) return;
for (i=4; i<10; i++) {
- rtas_write_config(pdn, i*4, 4, pdn->config_space[i]);
+ eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
}
/* 12 == Expansion ROM Address */
- rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]);
+ eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
-#define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)])
+#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
- rtas_write_config (pdn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- rtas_write_config (pdn, PCI_LATENCY_TIMER, 1,
+ eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* max latency, min grant, interrupt pin and line */
- rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]);
+ eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
/* Restore PERR & SERR bits, some devices require it,
- don't touch the other command bits */
- rtas_read_config(pdn, PCI_COMMAND, 4, &cmd);
- if (pdn->config_space[1] & PCI_COMMAND_PARITY)
+ * don't touch the other command bits
+ */
+ eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
+ if (edev->config_space[1] & PCI_COMMAND_PARITY)
cmd |= PCI_COMMAND_PARITY;
else
cmd &= ~PCI_COMMAND_PARITY;
- if (pdn->config_space[1] & PCI_COMMAND_SERR)
+ if (edev->config_space[1] & PCI_COMMAND_SERR)
cmd |= PCI_COMMAND_SERR;
else
cmd &= ~PCI_COMMAND_SERR;
- rtas_write_config(pdn, PCI_COMMAND, 4, cmd);
+ eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
}
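The BYTE_SWAP() macro compensates for the fact that config_space[] is filled with 32-bit config reads stored in host (big-endian) order, so a single byte of the PCI header sits at a swapped offset inside its word of the saved buffer. A standalone check of the two offsets used above (standard PCI header offsets, arithmetic only):

#include <assert.h>

#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))

int main(void)
{
	/* PCI_CACHE_LINE_SIZE is config offset 0x0c: word 3, byte 0 of the
	 * register, which lands at byte 15 of the big-endian saved buffer. */
	assert(BYTE_SWAP(0x0c) == 15);
	/* PCI_LATENCY_TIMER is offset 0x0d: the next byte down. */
	assert(BYTE_SWAP(0x0d) == 14);
	return 0;
}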
/**
- * eeh_restore_bars - restore the PCI config space info
+ * eeh_restore_bars - Restore the PCI config space info
+ * @edev: EEH device
*
* This routine performs a recursive walk to the children
* of this device as well.
*/
-void eeh_restore_bars(struct pci_dn *pdn)
+void eeh_restore_bars(struct eeh_dev *edev)
{
struct device_node *dn;
- if (!pdn)
+ if (!edev)
return;
- if ((pdn->eeh_mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(pdn->class_code))
- __restore_bars (pdn);
+ if ((edev->mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(edev->class_code))
+ eeh_restore_one_device_bars(edev);
- for_each_child_of_node(pdn->node, dn)
- eeh_restore_bars (PCI_DN(dn));
+ for_each_child_of_node(eeh_dev_to_of_node(edev), dn)
+ eeh_restore_bars(of_node_to_eeh_dev(dn));
}
/**
- * eeh_save_bars - save device bars
+ * eeh_save_bars - Save device bars
+ * @edev: PCI device associated EEH device
*
* Save the values of the device bars. Unlike the restore
* routine, this routine is *not* recursive. This is because
* PCI devices are added individually; but, for the restore,
* an entire slot is reset at a time.
*/
-static void eeh_save_bars(struct pci_dn *pdn)
+static void eeh_save_bars(struct eeh_dev *edev)
{
int i;
+ struct device_node *dn;
- if (!pdn )
+ if (!edev)
return;
+ dn = eeh_dev_to_of_node(edev);
for (i = 0; i < 16; i++)
- rtas_read_config(pdn, i * 4, 4, &pdn->config_space[i]);
-}
-
-void
-rtas_configure_bridge(struct pci_dn *pdn)
-{
- int config_addr;
- int rc;
- int token;
-
- /* Use PE configuration address, if present */
- config_addr = pdn->eeh_config_addr;
- if (pdn->eeh_pe_config_addr)
- config_addr = pdn->eeh_pe_config_addr;
-
- /* Use new configure-pe function, if supported */
- if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE)
- token = ibm_configure_pe;
- else
- token = ibm_configure_bridge;
-
- rc = rtas_call(token, 3, 1, NULL,
- config_addr,
- BUID_HI(pdn->phb->buid),
- BUID_LO(pdn->phb->buid));
- if (rc) {
- printk (KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n",
- rc, pdn->node->full_name);
- }
+ eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
}
-/* ------------------------------------------------------------- */
-/* The code below deals with enabling EEH for devices during the
- * early boot sequence. EEH must be enabled before any PCI probing
- * can be done.
+/**
+ * eeh_early_enable - Early enable EEH on the indicated device
+ * @dn: device node
+ * @data: unused
+ *
+ * Enable EEH functionality on the specified PCI device. The function
+ * is expected to be called before real PCI probing is done. However,
+ * the PHBs have been initialized at this point.
*/
-
-#define EEH_ENABLE 1
-
-struct eeh_early_enable_info {
- unsigned int buid_hi;
- unsigned int buid_lo;
-};
-
-static int get_pe_addr (int config_addr,
- struct eeh_early_enable_info *info)
-{
- unsigned int rets[3];
- int ret;
-
- /* Use latest config-addr token on power6 */
- if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
- /* Make sure we have a PE in hand */
- ret = rtas_call (ibm_get_config_addr_info2, 4, 2, rets,
- config_addr, info->buid_hi, info->buid_lo, 1);
- if (ret || (rets[0]==0))
- return 0;
-
- ret = rtas_call (ibm_get_config_addr_info2, 4, 2, rets,
- config_addr, info->buid_hi, info->buid_lo, 0);
- if (ret)
- return 0;
- return rets[0];
- }
-
- /* Use older config-addr token on power5 */
- if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
- ret = rtas_call (ibm_get_config_addr_info, 4, 2, rets,
- config_addr, info->buid_hi, info->buid_lo, 0);
- if (ret)
- return 0;
- return rets[0];
- }
- return 0;
-}
-
-/* Enable eeh for the given device node. */
-static void *early_enable_eeh(struct device_node *dn, void *data)
+static void *eeh_early_enable(struct device_node *dn, void *data)
{
- unsigned int rets[3];
- struct eeh_early_enable_info *info = data;
int ret;
const u32 *class_code = of_get_property(dn, "class-code", NULL);
const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL);
const u32 *device_id = of_get_property(dn, "device-id", NULL);
const u32 *regs;
int enable;
- struct pci_dn *pdn = PCI_DN(dn);
+ struct eeh_dev *edev = of_node_to_eeh_dev(dn);
- pdn->class_code = 0;
- pdn->eeh_mode = 0;
- pdn->eeh_check_count = 0;
- pdn->eeh_freeze_count = 0;
- pdn->eeh_false_positives = 0;
+ edev->class_code = 0;
+ edev->mode = 0;
+ edev->check_count = 0;
+ edev->freeze_count = 0;
+ edev->false_positives = 0;
if (!of_device_is_available(dn))
return NULL;
@@ -1041,54 +855,56 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
/* There is nothing to check on PCI to ISA bridges */
if (dn->type && !strcmp(dn->type, "isa")) {
- pdn->eeh_mode |= EEH_MODE_NOCHECK;
+ edev->mode |= EEH_MODE_NOCHECK;
return NULL;
}
- pdn->class_code = *class_code;
+ edev->class_code = *class_code;
/* Ok... see if this device supports EEH. Some do, some don't,
- * and the only way to find out is to check each and every one. */
+ * and the only way to find out is to check each and every one.
+ */
regs = of_get_property(dn, "reg", NULL);
if (regs) {
/* First register entry is addr (00BBSS00) */
/* Try to enable eeh */
- ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
- regs[0], info->buid_hi, info->buid_lo,
- EEH_ENABLE);
+ ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE);
enable = 0;
if (ret == 0) {
- pdn->eeh_config_addr = regs[0];
+ edev->config_addr = regs[0];
/* If the newer, better, ibm,get-config-addr-info is supported,
- * then use that instead. */
- pdn->eeh_pe_config_addr = get_pe_addr(pdn->eeh_config_addr, info);
+ * then use that instead.
+ */
+ edev->pe_config_addr = eeh_ops->get_pe_addr(dn);
/* Some older systems (Power4) allow the
* ibm,set-eeh-option call to succeed even on nodes
* where EEH is not supported. Verify support
- * explicitly. */
- ret = read_slot_reset_state(pdn, rets);
- if ((ret == 0) && (rets[1] == 1))
+ * explicitly.
+ */
+ ret = eeh_ops->get_state(dn, NULL);
+ if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
enable = 1;
}
if (enable) {
eeh_subsystem_enabled = 1;
- pdn->eeh_mode |= EEH_MODE_SUPPORTED;
+ edev->mode |= EEH_MODE_SUPPORTED;
pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
- dn->full_name, pdn->eeh_config_addr,
- pdn->eeh_pe_config_addr);
+ dn->full_name, edev->config_addr,
+ edev->pe_config_addr);
} else {
/* This device doesn't support EEH, but it may have an
- * EEH parent, in which case we mark it as supported. */
- if (dn->parent && PCI_DN(dn->parent)
- && (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
+ * EEH parent, in which case we mark it as supported.
+ */
+ if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
+ (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
/* Parent supports EEH. */
- pdn->eeh_mode |= EEH_MODE_SUPPORTED;
- pdn->eeh_config_addr = PCI_DN(dn->parent)->eeh_config_addr;
+ edev->mode |= EEH_MODE_SUPPORTED;
+ edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
return NULL;
}
}
@@ -1097,11 +913,63 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
dn->full_name);
}
- eeh_save_bars(pdn);
+ eeh_save_bars(edev);
return NULL;
}
-/*
+/**
+ * eeh_ops_register - Register platform dependent EEH operations
+ * @ops: platform dependent EEH operations
+ *
+ * Register the platform dependent EEH operation callback
+ * functions. The platform should call this function before
+ * any other EEH operations.
+ */
+int __init eeh_ops_register(struct eeh_ops *ops)
+{
+ if (!ops->name) {
+ pr_warning("%s: Invalid EEH ops name for %p\n",
+ __func__, ops);
+ return -EINVAL;
+ }
+
+ if (eeh_ops && eeh_ops != ops) {
+ pr_warning("%s: EEH ops of platform %s already exist (%s)\n",
+ __func__, eeh_ops->name, ops->name);
+ return -EEXIST;
+ }
+
+ eeh_ops = ops;
+
+ return 0;
+}
+
+/**
+ * eeh_ops_unregister - Unregister platform dependent EEH operations
+ * @name: name of EEH platform operations
+ *
+ * Unregister the platform dependent EEH operation callback
+ * functions.
+ */
+int __exit eeh_ops_unregister(const char *name)
+{
+ if (!name || !strlen(name)) {
+ pr_warning("%s: Invalid EEH ops name\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (eeh_ops && !strcmp(eeh_ops->name, name)) {
+ eeh_ops = NULL;
+ return 0;
+ }
+
+ return -EEXIST;
+}
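With the ops table in place, a platform supplies its callbacks once at boot; the pseries backend added later in this patch is the first user. A minimal sketch of the shape of such a registration, showing only the callbacks referenced elsewhere in this patch (all names here are illustrative):

static int example_eeh_init(void)
{
	/* resolve firmware tokens, sanity checks, ... */
	return 0;
}

static struct eeh_ops example_eeh_ops = {
	.name	= "example",
	.init	= example_eeh_init,
	/* .set_option, .get_pe_addr, .get_state, .reset, .wait_state,
	 * .configure_bridge, .read_config and .write_config are filled
	 * in the same way. */
};

static int __init example_eeh_setup(void)
{
	return eeh_ops_register(&example_eeh_ops);
}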
+
+/**
+ * eeh_init - EEH initialization
+ *
* Initialize EEH by trying to enable it for all of the adapters in the system.
* As a side effect we can determine here if eeh is supported at all.
* Note that we leave EEH on so failed config cycles won't cause a machine
@@ -1116,51 +984,27 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
*/
void __init eeh_init(void)
{
- struct device_node *phb, *np;
- struct eeh_early_enable_info info;
-
- raw_spin_lock_init(&confirm_error_lock);
- spin_lock_init(&slot_errbuf_lock);
+ struct pci_controller *hose, *tmp;
+ struct device_node *phb;
+ int ret;
- np = of_find_node_by_path("/rtas");
- if (np == NULL)
+ /* call platform initialization function */
+ if (!eeh_ops) {
+ pr_warning("%s: Platform EEH operation not found\n",
+ __func__);
return;
-
- ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
- ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
- ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
- ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
- ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
- ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
- ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
- ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
- ibm_configure_pe = rtas_token("ibm,configure-pe");
-
- if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
+ } else if ((ret = eeh_ops->init())) {
+ pr_warning("%s: Failed to call platform init function (%d)\n",
+ __func__, ret);
return;
-
- eeh_error_buf_size = rtas_token("rtas-error-log-max");
- if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
- eeh_error_buf_size = 1024;
- }
- if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
- printk(KERN_WARNING "EEH: rtas-error-log-max is bigger than allocated "
- "buffer ! (%d vs %d)", eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
- eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
}
- /* Enable EEH for all adapters. Note that eeh requires buid's */
- for (phb = of_find_node_by_name(NULL, "pci"); phb;
- phb = of_find_node_by_name(phb, "pci")) {
- unsigned long buid;
-
- buid = get_phb_buid(phb);
- if (buid == 0 || PCI_DN(phb) == NULL)
- continue;
+ raw_spin_lock_init(&confirm_error_lock);
- info.buid_lo = BUID_LO(buid);
- info.buid_hi = BUID_HI(buid);
- traverse_pci_devices(phb, early_enable_eeh, &info);
+ /* Enable EEH for all adapters */
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ phb = hose->dn;
+ traverse_pci_devices(phb, eeh_early_enable, NULL);
}
if (eeh_subsystem_enabled)
@@ -1170,7 +1014,7 @@ void __init eeh_init(void)
}
/**
- * eeh_add_device_early - enable EEH for the indicated device_node
+ * eeh_add_device_early - Enable EEH for the indicated device_node
* @dn: device node for which to set up EEH
*
* This routine must be used to perform EEH initialization for PCI
@@ -1184,21 +1028,26 @@ void __init eeh_init(void)
static void eeh_add_device_early(struct device_node *dn)
{
struct pci_controller *phb;
- struct eeh_early_enable_info info;
- if (!dn || !PCI_DN(dn))
+ if (!dn || !of_node_to_eeh_dev(dn))
return;
- phb = PCI_DN(dn)->phb;
+ phb = of_node_to_eeh_dev(dn)->phb;
/* USB Bus children of PCI devices will not have BUID's */
if (NULL == phb || 0 == phb->buid)
return;
- info.buid_hi = BUID_HI(phb->buid);
- info.buid_lo = BUID_LO(phb->buid);
- early_enable_eeh(dn, &info);
+ eeh_early_enable(dn, NULL);
}
+/**
+ * eeh_add_device_tree_early - Enable EEH for the indicated device
+ * @dn: device node
+ *
+ * This routine must be used to perform EEH initialization for the
+ * indicated PCI device that was added after system boot (e.g.
+ * hotplug, dlpar).
+ */
void eeh_add_device_tree_early(struct device_node *dn)
{
struct device_node *sib;
@@ -1210,7 +1059,7 @@ void eeh_add_device_tree_early(struct device_node *dn)
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
/**
- * eeh_add_device_late - perform EEH initialization for the indicated pci device
+ * eeh_add_device_late - Perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH
*
* This routine must be used to complete EEH initialization for PCI
@@ -1219,7 +1068,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
static void eeh_add_device_late(struct pci_dev *dev)
{
struct device_node *dn;
- struct pci_dn *pdn;
+ struct eeh_dev *edev;
if (!dev || !eeh_subsystem_enabled)
return;
@@ -1227,20 +1076,29 @@ static void eeh_add_device_late(struct pci_dev *dev)
pr_debug("EEH: Adding device %s\n", pci_name(dev));
dn = pci_device_to_OF_node(dev);
- pdn = PCI_DN(dn);
- if (pdn->pcidev == dev) {
+ edev = pci_dev_to_eeh_dev(dev);
+ if (edev->pdev == dev) {
pr_debug("EEH: Already referenced !\n");
return;
}
- WARN_ON(pdn->pcidev);
+ WARN_ON(edev->pdev);
- pci_dev_get (dev);
- pdn->pcidev = dev;
+ pci_dev_get(dev);
+ edev->pdev = dev;
+ dev->dev.archdata.edev = edev;
pci_addr_cache_insert_device(dev);
eeh_sysfs_add_device(dev);
}
+/**
+ * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
+ * @bus: PCI bus
+ *
+ * This routine must be used to perform EEH initialization for PCI
+ * devices which are attached to the indicated PCI bus. The PCI bus
+ * is added after system boot through hotplug or dlpar.
+ */
void eeh_add_device_tree_late(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1257,7 +1115,7 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
/**
- * eeh_remove_device - undo EEH setup for the indicated pci device
+ * eeh_remove_device - Undo EEH setup for the indicated pci device
* @dev: pci device to be removed
*
* This routine should be called when a device is removed from
@@ -1268,25 +1126,35 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
*/
static void eeh_remove_device(struct pci_dev *dev)
{
- struct device_node *dn;
+ struct eeh_dev *edev;
+
if (!dev || !eeh_subsystem_enabled)
return;
+ edev = pci_dev_to_eeh_dev(dev);
/* Unregister the device with the EEH/PCI address search system */
pr_debug("EEH: Removing device %s\n", pci_name(dev));
- dn = pci_device_to_OF_node(dev);
- if (PCI_DN(dn)->pcidev == NULL) {
+ if (!edev || !edev->pdev) {
pr_debug("EEH: Not referenced !\n");
return;
}
- PCI_DN(dn)->pcidev = NULL;
- pci_dev_put (dev);
+ edev->pdev = NULL;
+ dev->dev.archdata.edev = NULL;
+ pci_dev_put(dev);
pci_addr_cache_remove_device(dev);
eeh_sysfs_remove_device(dev);
}
+/**
+ * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
+ * @dev: PCI device
+ *
+ * This routine must be called when a device is removed from the
+ * running system through hotplug or dlpar. The corresponding
+ * PCI address cache will be removed.
+ */
void eeh_remove_bus_device(struct pci_dev *dev)
{
struct pci_bus *bus = dev->subordinate;
@@ -1305,21 +1173,24 @@ static int proc_eeh_show(struct seq_file *m, void *v)
{
if (0 == eeh_subsystem_enabled) {
seq_printf(m, "EEH Subsystem is globally disabled\n");
- seq_printf(m, "eeh_total_mmio_ffs=%ld\n", total_mmio_ffs);
+ seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
} else {
seq_printf(m, "EEH Subsystem is enabled\n");
seq_printf(m,
- "no device=%ld\n"
- "no device node=%ld\n"
- "no config address=%ld\n"
- "check not wanted=%ld\n"
- "eeh_total_mmio_ffs=%ld\n"
- "eeh_false_positives=%ld\n"
- "eeh_slot_resets=%ld\n",
- no_device, no_dn, no_cfg_addr,
- ignored_check, total_mmio_ffs,
- false_positives,
- slot_resets);
+ "no device=%llu\n"
+ "no device node=%llu\n"
+ "no config address=%llu\n"
+ "check not wanted=%llu\n"
+ "eeh_total_mmio_ffs=%llu\n"
+ "eeh_false_positives=%llu\n"
+ "eeh_slot_resets=%llu\n",
+ eeh_stats.no_device,
+ eeh_stats.no_dn,
+ eeh_stats.no_cfg_addr,
+ eeh_stats.ignored_check,
+ eeh_stats.total_mmio_ffs,
+ eeh_stats.false_positives,
+ eeh_stats.slot_resets);
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index fc5ae767989e..e5ae1c687c66 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -1,5 +1,4 @@
/*
- * eeh_cache.c
* PCI address cache; allows the lookup of PCI devices based on I/O address
*
* Copyright IBM Corporation 2004
@@ -47,8 +46,7 @@
* than any hash algo I could think of for this problem, even
* with the penalty of slow pointer chases for d-cache misses).
*/
-struct pci_io_addr_range
-{
+struct pci_io_addr_range {
struct rb_node rb_node;
unsigned long addr_lo;
unsigned long addr_hi;
@@ -56,13 +54,12 @@ struct pci_io_addr_range
unsigned int flags;
};
-static struct pci_io_addr_cache
-{
+static struct pci_io_addr_cache {
struct rb_root rb_root;
spinlock_t piar_lock;
} pci_io_addr_cache_root;
-static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
+static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
{
struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
@@ -86,7 +83,7 @@ static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
}
/**
- * pci_get_device_by_addr - Get device, given only address
+ * pci_addr_cache_get_device - Get device, given only address
* @addr: mmio (PIO) phys address or i/o port number
*
* Given an mmio phys address, or a port number, find a pci device
@@ -95,13 +92,13 @@ static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
* from zero (that is, they do *not* have pci_io_addr added in).
* It is safe to call this function within an interrupt.
*/
-struct pci_dev *pci_get_device_by_addr(unsigned long addr)
+struct pci_dev *pci_addr_cache_get_device(unsigned long addr)
{
struct pci_dev *dev;
unsigned long flags;
spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- dev = __pci_get_device_by_addr(addr);
+ dev = __pci_addr_cache_get_device(addr);
spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
return dev;
}
@@ -166,7 +163,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
#ifdef DEBUG
printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
- alo, ahi, pci_name (dev));
+ alo, ahi, pci_name(dev));
#endif
rb_link_node(&piar->rb_node, parent, p);
@@ -178,7 +175,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
static void __pci_addr_cache_insert_device(struct pci_dev *dev)
{
struct device_node *dn;
- struct pci_dn *pdn;
+ struct eeh_dev *edev;
int i;
dn = pci_device_to_OF_node(dev);
@@ -187,13 +184,19 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
return;
}
+ edev = of_node_to_eeh_dev(dn);
+ if (!edev) {
+ pr_warning("PCI: no EEH dev found for dn=%s\n",
+ dn->full_name);
+ return;
+ }
+
/* Skip any devices for which EEH is not enabled. */
- pdn = PCI_DN(dn);
- if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
- pdn->eeh_mode & EEH_MODE_NOCHECK) {
+ if (!(edev->mode & EEH_MODE_SUPPORTED) ||
+ edev->mode & EEH_MODE_NOCHECK) {
#ifdef DEBUG
- printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
- pci_name(dev), pdn->node->full_name);
+ pr_info("PCI: skip building address cache for=%s - %s\n",
+ pci_name(dev), dn->full_name);
#endif
return;
}
@@ -284,6 +287,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
void __init pci_addr_cache_build(void)
{
struct device_node *dn;
+ struct eeh_dev *edev;
struct pci_dev *dev = NULL;
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
@@ -294,8 +298,14 @@ void __init pci_addr_cache_build(void)
dn = pci_device_to_OF_node(dev);
if (!dn)
continue;
+
+ edev = of_node_to_eeh_dev(dn);
+ if (!edev)
+ continue;
+
pci_dev_get(dev); /* matching put is in eeh_remove_device() */
- PCI_DN(dn)->pcidev = dev;
+ dev->dev.archdata.edev = edev;
+ edev->pdev = dev;
eeh_sysfs_add_device(dev);
}
diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c
new file mode 100644
index 000000000000..c4507d095900
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_dev.c
@@ -0,0 +1,102 @@
+/*
+ * This file implements dynamic creation of EEH devices, each of which is
+ * bound to an OF node and a PCI device simultaneously. The EEH devices
+ * provide the fundamental information the EEH core components need to
+ * work properly. We have to support several situations where EEH devices
+ * are created dynamically:
+ *
+ * 1) Before PCI enumeration starts, we need to create EEH devices for the
+ *    PCI-sensitive OF nodes.
+ * 2) When PCI enumeration is done, we need to bind each PCI device to its
+ *    associated EEH device.
+ * 3) DR (Dynamic Reconfiguration) may create PCI-sensitive OF nodes; an
+ *    EEH device will be created when such an OF node is detected from DR.
+ * 4) PCI hotplug needs to redo the binding between PCI device and EEH
+ *    device. If a PHB is newly inserted, we also need to create EEH
+ *    devices for it.
+ *
+ * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/export.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+/**
+ * eeh_dev_init - Create EEH device according to OF node
+ * @dn: device node
+ * @data: PHB
+ *
+ * It creates an EEH device for the given OF node. The function
+ * might be called during PCI enumeration, DR, or PHB hotplug.
+ */
+void * __devinit eeh_dev_init(struct device_node *dn, void *data)
+{
+ struct pci_controller *phb = data;
+ struct eeh_dev *edev;
+
+ /* Allocate EEH device */
+ edev = zalloc_maybe_bootmem(sizeof(*edev), GFP_KERNEL);
+ if (!edev) {
+ pr_warning("%s: out of memory\n", __func__);
+ return NULL;
+ }
+
+ /* Associate EEH device with OF node */
+ PCI_DN(dn)->edev = edev;
+ edev->dn = dn;
+ edev->phb = phb;
+
+ return NULL;
+}
+
+/**
+ * eeh_dev_phb_init_dynamic - Create EEH devices for devices included in PHB
+ * @phb: PHB
+ *
+ * Scan the PHB OF node and its children, then create the
+ * EEH devices accordingly.
+ */
+void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb)
+{
+ struct device_node *dn = phb->dn;
+
+ /* EEH device for PHB */
+ eeh_dev_init(dn, phb);
+
+ /* EEH devices for children OF nodes */
+ traverse_pci_devices(dn, eeh_dev_init, phb);
+}
+
+/**
+ * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs
+ *
+ * Scan all the existing PHBs and create EEH devices for their OF
+ * nodes and their child OF nodes.
+ */
+void __init eeh_dev_phb_init(void)
+{
+ struct pci_controller *phb, *tmp;
+
+ list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
+ eeh_dev_phb_init_dynamic(phb);
+}
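Once eeh_dev_phb_init() has created the EEH devices and the late-add path has bound them to PCI devices, the three objects can be reached from one another with the accessors used throughout this patch. A short sketch of the round trip:

static void example_lookups(struct device_node *dn)
{
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);	/* OF node -> EEH dev */
	struct pci_dev *pdev;

	if (!edev)
		return;

	pdev = eeh_dev_to_pci_dev(edev);		/* EEH dev -> PCI dev */
	if (pdev && pci_dev_to_eeh_dev(pdev) == edev)	/* ... and back */
		pr_debug("EEH: %s bound to %s\n",
			 dn->full_name, pci_name(pdev));
}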
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 1b6cb10589e0..baf92cd9dfab 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -33,8 +33,14 @@
#include <asm/prom.h>
#include <asm/rtas.h>
-
-static inline const char * pcid_name (struct pci_dev *pdev)
+/**
+ * eeh_pcid_name - Retrieve name of PCI device driver
+ * @pdev: PCI device
+ *
+ * This routine is used to retrieve the name of PCI device driver
+ * if that's valid.
+ */
+static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
if (pdev && pdev->dev.driver)
return pdev->dev.driver->name;
@@ -64,48 +70,59 @@ static void print_device_node_tree(struct pci_dn *pdn, int dent)
#endif
/**
- * eeh_disable_irq - disable interrupt for the recovering device
+ * eeh_disable_irq - Disable interrupt for the recovering device
+ * @dev: PCI device
+ *
+ * This routine must be called when a temporary or permanent error is
+ * reported to a particular PCI device, to disable that device's interrupt.
+ * If the device uses MSI or MSI-X, no real work is needed because EEH
+ * freezes DMA transfers for PCI devices hitting EEH errors, which
+ * effectively disables MSI and MSI-X as well.
*/
static void eeh_disable_irq(struct pci_dev *dev)
{
- struct device_node *dn = pci_device_to_OF_node(dev);
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
/* Don't disable MSI and MSI-X interrupts. They are
* effectively disabled by the DMA Stopped state
* when an EEH error occurs.
- */
+ */
if (dev->msi_enabled || dev->msix_enabled)
return;
if (!irq_has_action(dev->irq))
return;
- PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED;
+ edev->mode |= EEH_MODE_IRQ_DISABLED;
disable_irq_nosync(dev->irq);
}
/**
- * eeh_enable_irq - enable interrupt for the recovering device
+ * eeh_enable_irq - Enable interrupt for the recovering device
+ * @dev: PCI device
+ *
+ * This routine must be called to re-enable the interrupt when the
+ * failed device is resumed.
*/
static void eeh_enable_irq(struct pci_dev *dev)
{
- struct device_node *dn = pci_device_to_OF_node(dev);
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
- if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) {
- PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED;
+ if ((edev->mode) & EEH_MODE_IRQ_DISABLED) {
+ edev->mode &= ~EEH_MODE_IRQ_DISABLED;
enable_irq(dev->irq);
}
}
-/* ------------------------------------------------------- */
/**
- * eeh_report_error - report pci error to each device driver
+ * eeh_report_error - Report pci error to each device driver
+ * @dev: PCI device
+ * @userdata: return value
*
* Report an EEH error to each device driver, collect up and
* merge the device driver responses. Cumulative response
* passed back in "userdata".
*/
-
static int eeh_report_error(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
@@ -122,7 +139,7 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
!driver->err_handler->error_detected)
return 0;
- rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
+ rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
@@ -132,13 +149,14 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
}
/**
- * eeh_report_mmio_enabled - tell drivers that MMIO has been enabled
+ * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
+ * @dev: PCI device
+ * @userdata: return value
*
* Tells each device driver that IO ports, MMIO and config space I/O
* are now enabled. Collects up and merges the device driver responses.
* Cumulative response passed back in "userdata".
*/
-
static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
@@ -149,7 +167,7 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
!driver->err_handler->mmio_enabled)
return 0;
- rc = driver->err_handler->mmio_enabled (dev);
+ rc = driver->err_handler->mmio_enabled(dev);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
@@ -159,9 +177,15 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
}
/**
- * eeh_report_reset - tell device that slot has been reset
+ * eeh_report_reset - Tell device that slot has been reset
+ * @dev: PCI device
+ * @userdata: return value
+ *
+ * This routine must be called when EEH resets a particular PCI device,
+ * so that the associated device driver can take action, usually saving
+ * whatever state it needs in order to work again once the device has
+ * been recovered.
*/
-
static int eeh_report_reset(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
@@ -188,9 +212,14 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
}
/**
- * eeh_report_resume - tell device to resume normal operations
+ * eeh_report_resume - Tell device to resume normal operations
+ * @dev: PCI device
+ * @userdata: return value
+ *
+ * This routine must be called to notify the device driver that it
+ * may resume operations, so that it can do whatever initialization is
+ * needed to make the recovered device work again.
*/
-
static int eeh_report_resume(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
@@ -212,12 +241,13 @@ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
}
/**
- * eeh_report_failure - tell device driver that device is dead.
+ * eeh_report_failure - Tell device driver that device is dead.
+ * @dev: PCI device
+ * @userdata: return value
*
* This informs the device driver that the device is permanently
* dead, and that no further recovery attempts will be made on it.
*/
-
static int eeh_report_failure(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
@@ -238,65 +268,46 @@ static int eeh_report_failure(struct pci_dev *dev, void *userdata)
return 0;
}
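The eeh_report_*() callbacks above land in a driver's struct pci_error_handlers, hung off struct pci_driver through its err_handler field. A minimal sketch of the driver-side half (bodies reduced to the result codes the EEH core acts on):

static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	/* Quiesce the device and ask for a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
{
	/* Reinitialize after #RST and report whether recovery worked */
	return PCI_ERS_RESULT_RECOVERED;
}

static void example_resume(struct pci_dev *pdev)
{
	/* Restart I/O */
}

static struct pci_error_handlers example_err_handler = {
	.error_detected	= example_error_detected,
	.slot_reset	= example_slot_reset,
	.resume		= example_resume,
};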
-/* ------------------------------------------------------- */
/**
- * handle_eeh_events -- reset a PCI device after hard lockup.
- *
- * pSeries systems will isolate a PCI slot if the PCI-Host
- * bridge detects address or data parity errors, DMA's
- * occurring to wild addresses (which usually happen due to
- * bugs in device drivers or in PCI adapter firmware).
- * Slot isolations also occur if #SERR, #PERR or other misc
- * PCI-related errors are detected.
+ * eeh_reset_device - Perform actual reset of a pci slot
+ * @edev: PE associated EEH device
+ * @bus: PCI bus corresponding to the isolated slot
*
- * Recovery process consists of unplugging the device driver
- * (which generated hotplug events to userspace), then issuing
- * a PCI #RST to the device, then reconfiguring the PCI config
- * space for all bridges & devices under this slot, and then
- * finally restarting the device drivers (which cause a second
- * set of hotplug events to go out to userspace).
+ * This routine must be called to do reset on the indicated PE.
+ * During the reset, udev might be invoked because those affected
+ * PCI devices will be removed and then added.
*/
-
-/**
- * eeh_reset_device() -- perform actual reset of a pci slot
- * @bus: pointer to the pci bus structure corresponding
- * to the isolated slot. A non-null value will
- * cause all devices under the bus to be removed
- * and then re-added.
- * @pe_dn: pointer to a "Partionable Endpoint" device node.
- * This is the top-level structure on which pci
- * bus resets can be performed.
- */
-
-static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
+static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
{
struct device_node *dn;
int cnt, rc;
/* pcibios will clear the counter; save the value */
- cnt = pe_dn->eeh_freeze_count;
+ cnt = edev->freeze_count;
if (bus)
pcibios_remove_pci_devices(bus);
/* Reset the pci controller. (Asserts RST#; resets config space).
* Reconfigure bridges and devices. Don't try to bring the system
- * up if the reset failed for some reason. */
- rc = rtas_set_slot_reset(pe_dn);
+ * up if the reset failed for some reason.
+ */
+ rc = eeh_reset_pe(edev);
if (rc)
return rc;
- /* Walk over all functions on this device. */
- dn = pe_dn->node;
- if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
+ /* Walk over all functions on this device. */
+ dn = eeh_dev_to_of_node(edev);
+ if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
dn = dn->parent->child;
while (dn) {
- struct pci_dn *ppe = PCI_DN(dn);
+ struct eeh_dev *pedev = of_node_to_eeh_dev(dn);
+
/* On Power4, always true because eeh_pe_config_addr=0 */
- if (pe_dn->eeh_pe_config_addr == ppe->eeh_pe_config_addr) {
- rtas_configure_bridge(ppe);
- eeh_restore_bars(ppe);
+ if (edev->pe_config_addr == pedev->pe_config_addr) {
+ eeh_ops->configure_bridge(dn);
+ eeh_restore_bars(pedev);
}
dn = dn->sibling;
}
@@ -308,10 +319,10 @@ static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
* potentially weird things happen.
*/
if (bus) {
- ssleep (5);
+ ssleep(5);
pcibios_add_pci_devices(bus);
}
- pe_dn->eeh_freeze_count = cnt;
+ edev->freeze_count = cnt;
return 0;
}
@@ -321,23 +332,39 @@ static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
*/
#define MAX_WAIT_FOR_RECOVERY 150
-struct pci_dn * handle_eeh_events (struct eeh_event *event)
+/**
+ * eeh_handle_event - Reset a PCI device after hard lockup.
+ * @event: EEH event
+ *
+ * When the PHB detects address or data parity errors on a particular
+ * PCI slot, the associated PE is frozen. DMAs to wild addresses (which
+ * usually happen due to bugs in device drivers or in PCI adapter
+ * firmware) can cause EEH errors as well, and #SERR, #PERR or other
+ * miscellaneous PCI-related errors can also trigger them.
+ *
+ * Recovery process consists of unplugging the device driver (which
+ * generated hotplug events to userspace), then issuing a PCI #RST to
+ * the device, then reconfiguring the PCI config space for all bridges
+ * & devices under this slot, and then finally restarting the device
+ * drivers (which cause a second set of hotplug events to go out to
+ * userspace).
+ */
+struct eeh_dev *handle_eeh_events(struct eeh_event *event)
{
struct device_node *frozen_dn;
- struct pci_dn *frozen_pdn;
+ struct eeh_dev *frozen_edev;
struct pci_bus *frozen_bus;
int rc = 0;
enum pci_ers_result result = PCI_ERS_RESULT_NONE;
const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
- frozen_dn = find_device_pe(event->dn);
+ frozen_dn = eeh_find_device_pe(eeh_dev_to_of_node(event->edev));
if (!frozen_dn) {
-
- location = of_get_property(event->dn, "ibm,loc-code", NULL);
+ location = of_get_property(eeh_dev_to_of_node(event->edev), "ibm,loc-code", NULL);
location = location ? location : "unknown";
printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
"for location=%s pci addr=%s\n",
- location, eeh_pci_name(event->dev));
+ location, eeh_pci_name(eeh_dev_to_pci_dev(event->edev)));
return NULL;
}
@@ -350,9 +377,10 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
* which was always an EADS pci bridge. In the new style,
* there might not be any EADS bridges, and even when there are,
* the firmware marks them as "EEH incapable". So another
- * two-step is needed to find the pci bus.. */
+ * two-step is needed to find the pci bus..
+ */
if (!frozen_bus)
- frozen_bus = pcibios_find_pci_bus (frozen_dn->parent);
+ frozen_bus = pcibios_find_pci_bus(frozen_dn->parent);
if (!frozen_bus) {
printk(KERN_ERR "EEH: Cannot find PCI bus "
@@ -361,22 +389,21 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
return NULL;
}
- frozen_pdn = PCI_DN(frozen_dn);
- frozen_pdn->eeh_freeze_count++;
+ frozen_edev = of_node_to_eeh_dev(frozen_dn);
+ frozen_edev->freeze_count++;
+ pci_str = eeh_pci_name(eeh_dev_to_pci_dev(event->edev));
+ drv_str = eeh_pcid_name(eeh_dev_to_pci_dev(event->edev));
- pci_str = eeh_pci_name(event->dev);
- drv_str = pcid_name(event->dev);
-
- if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
+ if (frozen_edev->freeze_count > EEH_MAX_ALLOWED_FREEZES)
goto excess_failures;
printk(KERN_WARNING
"EEH: This PCI device has failed %d times in the last hour:\n",
- frozen_pdn->eeh_freeze_count);
+ frozen_edev->freeze_count);
- if (frozen_pdn->pcidev) {
- bus_pci_str = pci_name(frozen_pdn->pcidev);
- bus_drv_str = pcid_name(frozen_pdn->pcidev);
+ if (frozen_edev->pdev) {
+ bus_pci_str = pci_name(frozen_edev->pdev);
+ bus_drv_str = eeh_pcid_name(frozen_edev->pdev);
printk(KERN_WARNING
"EEH: Bus location=%s driver=%s pci addr=%s\n",
location, bus_drv_str, bus_pci_str);
@@ -395,9 +422,10 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
pci_walk_bus(frozen_bus, eeh_report_error, &result);
/* Get the current PCI slot state. This can take a long time,
- * sometimes over 3 seconds for certain systems. */
- rc = eeh_wait_for_slot_status (frozen_pdn, MAX_WAIT_FOR_RECOVERY*1000);
- if (rc < 0) {
+ * sometimes over 3 seconds for certain systems.
+ */
+ rc = eeh_ops->wait_state(eeh_dev_to_of_node(frozen_edev), MAX_WAIT_FOR_RECOVERY*1000);
+ if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
printk(KERN_WARNING "EEH: Permanent failure\n");
goto hard_fail;
}
@@ -406,14 +434,14 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
* don't post the error log until after all dev drivers
* have been informed.
*/
- eeh_slot_error_detail(frozen_pdn, EEH_LOG_TEMP_FAILURE);
+ eeh_slot_error_detail(frozen_edev, EEH_LOG_TEMP);
/* If all device drivers were EEH-unaware, then shut
* down all of the device drivers, and hope they
* go down willingly, without panicing the system.
*/
if (result == PCI_ERS_RESULT_NONE) {
- rc = eeh_reset_device(frozen_pdn, frozen_bus);
+ rc = eeh_reset_device(frozen_edev, frozen_bus);
if (rc) {
printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc);
goto hard_fail;
@@ -422,7 +450,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
/* If all devices reported they can proceed, then re-enable MMIO */
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
- rc = rtas_pci_enable(frozen_pdn, EEH_THAW_MMIO);
+ rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_MMIO);
if (rc < 0)
goto hard_fail;
@@ -436,7 +464,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
/* If all devices reported they can proceed, then re-enable DMA */
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
- rc = rtas_pci_enable(frozen_pdn, EEH_THAW_DMA);
+ rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_DMA);
if (rc < 0)
goto hard_fail;
@@ -454,7 +482,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
/* If any device called out for a reset, then reset the slot */
if (result == PCI_ERS_RESULT_NEED_RESET) {
- rc = eeh_reset_device(frozen_pdn, NULL);
+ rc = eeh_reset_device(frozen_edev, NULL);
if (rc) {
printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc);
goto hard_fail;
@@ -473,7 +501,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
/* Tell all device drivers that they can resume operations */
pci_walk_bus(frozen_bus, eeh_report_resume, NULL);
- return frozen_pdn;
+ return frozen_edev;
excess_failures:
/*
@@ -486,7 +514,7 @@ excess_failures:
"has failed %d times in the last hour "
"and has been permanently disabled.\n"
"Please try reseating this device or replacing it.\n",
- location, drv_str, pci_str, frozen_pdn->eeh_freeze_count);
+ location, drv_str, pci_str, frozen_edev->freeze_count);
goto perm_error;
hard_fail:
@@ -497,7 +525,7 @@ hard_fail:
location, drv_str, pci_str);
perm_error:
- eeh_slot_error_detail(frozen_pdn, EEH_LOG_PERM_FAILURE);
+ eeh_slot_error_detail(frozen_edev, EEH_LOG_PERM);
/* Notify all devices that they're about to go down. */
pci_walk_bus(frozen_bus, eeh_report_failure, NULL);
@@ -508,4 +536,3 @@ perm_error:
return NULL;
}
-/* ---------- end of file ---------- */
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index d2383cfb6dfd..4a4752565856 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -1,6 +1,4 @@
/*
- * eeh_event.c
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -46,7 +44,7 @@ DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
DEFINE_MUTEX(eeh_event_mutex);
/**
- * eeh_event_handler - dispatch EEH events.
+ * eeh_event_handler - Dispatch EEH events.
* @dummy - unused
*
* The detection of a frozen slot can occur inside an interrupt,
@@ -58,10 +56,10 @@ DEFINE_MUTEX(eeh_event_mutex);
static int eeh_event_handler(void * dummy)
{
unsigned long flags;
- struct eeh_event *event;
- struct pci_dn *pdn;
+ struct eeh_event *event;
+ struct eeh_dev *edev;
- daemonize ("eehd");
+ daemonize("eehd");
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&eeh_eventlist_lock, flags);
@@ -79,31 +77,37 @@ static int eeh_event_handler(void * dummy)
/* Serialize processing of EEH events */
mutex_lock(&eeh_event_mutex);
- eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
+ edev = event->edev;
+ eeh_mark_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
- eeh_pci_name(event->dev));
+ eeh_pci_name(edev->pdev));
+
+ edev = handle_eeh_events(event);
- pdn = handle_eeh_events(event);
+ eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
+ pci_dev_put(edev->pdev);
- eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
- pci_dev_put(event->dev);
kfree(event);
mutex_unlock(&eeh_event_mutex);
/* If there are no new errors after an hour, clear the counter. */
- if (pdn && pdn->eeh_freeze_count>0) {
- msleep_interruptible (3600*1000);
- if (pdn->eeh_freeze_count>0)
- pdn->eeh_freeze_count--;
+ if (edev && edev->freeze_count>0) {
+ msleep_interruptible(3600*1000);
+ if (edev->freeze_count>0)
+ edev->freeze_count--;
+
}
return 0;
}
/**
- * eeh_thread_launcher
+ * eeh_thread_launcher - Start kernel thread to handle EEH events
* @dummy - unused
+ *
+ * This routine is called to start the kernel thread that processes
+ * EEH events.
*/
static void eeh_thread_launcher(struct work_struct *dummy)
{
@@ -112,18 +116,18 @@ static void eeh_thread_launcher(struct work_struct *dummy)
}
/**
- * eeh_send_failure_event - generate a PCI error event
- * @dev pci device
+ * eeh_send_failure_event - Generate a PCI error event
+ * @edev: EEH device
*
* This routine can be called within an interrupt context;
* the actual event will be delivered in a normal context
* (from a workqueue).
*/
-int eeh_send_failure_event (struct device_node *dn,
- struct pci_dev *dev)
+int eeh_send_failure_event(struct eeh_dev *edev)
{
unsigned long flags;
struct eeh_event *event;
+ struct device_node *dn = eeh_dev_to_of_node(edev);
const char *location;
if (!mem_init_done) {
@@ -135,15 +139,14 @@ int eeh_send_failure_event (struct device_node *dn,
}
event = kmalloc(sizeof(*event), GFP_ATOMIC);
if (event == NULL) {
- printk (KERN_ERR "EEH: out of memory, event not handled\n");
+ printk(KERN_ERR "EEH: out of memory, event not handled\n");
return 1;
}
- if (dev)
- pci_dev_get(dev);
+ if (edev->pdev)
+ pci_dev_get(edev->pdev);
- event->dn = dn;
- event->dev = dev;
+ event->edev = edev;
/* We may or may not be called in an interrupt context */
spin_lock_irqsave(&eeh_eventlist_lock, flags);
@@ -154,5 +157,3 @@ int eeh_send_failure_event (struct device_node *dn,
return 0;
}
-
-/********************** END OF FILE ******************************/
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
new file mode 100644
index 000000000000..8752f79a6af8
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -0,0 +1,565 @@
+/*
+ * This file implements the platform dependent EEH operations on pseries.
+ * The pseries platform is built heavily on RTAS, so the platform dependent
+ * EEH operations are implemented with RTAS calls. The functions are
+ * derived from arch/powerpc/platforms/pseries/eeh.c, with the necessary
+ * cleanup applied.
+ *
+ * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
+ * Copyright IBM Corporation 2001, 2005, 2006
+ * Copyright Dave Engebretsen & Todd Inglett 2001
+ * Copyright Linas Vepstas 2005, 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <asm/eeh.h>
+#include <asm/eeh_event.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+#include <asm/rtas.h>
+
+/* RTAS tokens */
+static int ibm_set_eeh_option;
+static int ibm_set_slot_reset;
+static int ibm_read_slot_reset_state;
+static int ibm_read_slot_reset_state2;
+static int ibm_slot_error_detail;
+static int ibm_get_config_addr_info;
+static int ibm_get_config_addr_info2;
+static int ibm_configure_bridge;
+static int ibm_configure_pe;
+
+/*
+ * Buffer for reporting slot-error-detail rtas calls. It's here
+ * in BSS, and not dynamically allocated, so that it ends up in
+ * RMO where RTAS can access it.
+ */
+static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
+static DEFINE_SPINLOCK(slot_errbuf_lock);
+static int eeh_error_buf_size;
+
+/**
+ * pseries_eeh_init - EEH platform dependent initialization
+ *
+ * EEH platform dependent initialization on pseries.
+ */
+static int pseries_eeh_init(void)
+{
+ /* figure out EEH RTAS function call tokens */
+ ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
+ ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
+ ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
+ ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
+ ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
+ ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
+ ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
+ ibm_configure_pe = rtas_token("ibm,configure-pe");
+ ibm_configure_bridge = rtas_token("ibm,configure-bridge");
+
+ /* necessary sanity check */
+ if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
+ pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
+ __func__);
+ return -EINVAL;
+ } else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) {
+ pr_warning("%s: RTAS service <ibm, set-slot-reset> invalid\n",
+ __func__);
+ return -EINVAL;
+ } else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
+ ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) {
+ pr_warning("%s: RTAS service <ibm,read-slot-reset-state2> and "
+ "<ibm,read-slot-reset-state> invalid\n",
+ __func__);
+ return -EINVAL;
+ } else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) {
+ pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
+ __func__);
+ return -EINVAL;
+ } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
+ ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
+ pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
+ "<ibm,get-config-addr-info> invalid\n",
+ __func__);
+ return -EINVAL;
+ } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
+ ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
+ pr_warning("%s: RTAS service <ibm,configure-pe> and "
+ "<ibm,configure-bridge> invalid\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Initialize error log lock and size */
+ spin_lock_init(&slot_errbuf_lock);
+ eeh_error_buf_size = rtas_token("rtas-error-log-max");
+ if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
+ pr_warning("%s: unknown EEH error log size\n",
+ __func__);
+ eeh_error_buf_size = 1024;
+ } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
+ pr_warning("%s: EEH error log size %d exceeds the maximal %d\n",
+ __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
+ eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
+ }
+
+ return 0;
+}
+
+/**
+ * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
+ * @dn: device node
+ * @option: operation to be issued
+ *
+ * The function is used to control the EEH functionality globally.
+ * Currently, the following options are supported according to PAPR:
+ * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
+ */
+static int pseries_eeh_set_option(struct device_node *dn, int option)
+{
+ int ret = 0;
+ struct eeh_dev *edev;
+ const u32 *reg;
+ int config_addr;
+
+ edev = of_node_to_eeh_dev(dn);
+
+ /*
+ * When we're enabling or disabling EEH functionality on
+ * a particular PE, the PE config address is possibly
+ * unavailable. Therefore, we have to figure it out from
+ * the FDT node.
+ */
+ switch (option) {
+ case EEH_OPT_DISABLE:
+ case EEH_OPT_ENABLE:
+ reg = of_get_property(dn, "reg", NULL);
+ config_addr = reg[0];
+ break;
+
+ case EEH_OPT_THAW_MMIO:
+ case EEH_OPT_THAW_DMA:
+ config_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ config_addr = edev->pe_config_addr;
+ break;
+
+ default:
+ pr_err("%s: Invalid option %d\n",
+ __func__, option);
+ return -EINVAL;
+ }
+
+ ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
+ config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid), option);
+
+ return ret;
+}
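The set_option path above prefers the PE config address over the device config address whenever one is present; the same selection is repeated in most routines in this file. A minimal sketch of that pattern as a hypothetical helper (not part of the patch):

	/* Hypothetical helper: prefer the PE config address when available. */
	static int pseries_eeh_config_addr(struct eeh_dev *edev)
	{
		if (edev->pe_config_addr)
			return edev->pe_config_addr;
		return edev->config_addr;
	}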
+
+/**
+ * pseries_eeh_get_pe_addr - Retrieve PE address
+ * @dn: device node
+ *
+ * Retrieve the associated PE address. There are two RTAS function
+ * calls dedicated to this purpose: the new one is tried first and
+ * the old one is used as a fallback. Make sure the config address
+ * has been figured out from the FDT node before calling this
+ * function.
+ *
+ * Note that a return value of zero means the PE config address
+ * is invalid.
+ */
+static int pseries_eeh_get_pe_addr(struct device_node *dn)
+{
+ struct eeh_dev *edev;
+ int ret = 0;
+ int rets[3];
+
+ edev = of_node_to_eeh_dev(dn);
+
+ if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
+ /*
+ * First of all, we need to make sure there is one PE
+ * associated with the device. Otherwise, the PE address
+ * is meaningless.
+ */
+ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
+ edev->config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid), 1);
+ if (ret || (rets[0] == 0))
+ return 0;
+
+ /* Retrieve the associated PE config address */
+ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
+ edev->config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid), 0);
+ if (ret) {
+ pr_warning("%s: Failed to get PE address for %s\n",
+ __func__, dn->full_name);
+ return 0;
+ }
+
+ return rets[0];
+ }
+
+ if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
+ edev->config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid), 0);
+ if (ret) {
+ pr_warning("%s: Failed to get PE address for %s\n",
+ __func__, dn->full_name);
+ return 0;
+ }
+
+ return rets[0];
+ }
+
+ return ret;
+}
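As the kernel-doc above notes, edev->config_addr must already be filled in from the FDT "reg" property before RTAS is asked for the PE address. A sketch of a caller under that assumption, reusing the of_node_to_eeh_dev()/of_get_property() helpers used in this file (the helper name is hypothetical):

	/* Sketch only: fill the config address, then query the PE address. */
	static void example_fill_pe_addr(struct device_node *dn)
	{
		struct eeh_dev *edev = of_node_to_eeh_dev(dn);
		const u32 *regs = of_get_property(dn, "reg", NULL);

		if (!edev || !regs)
			return;

		edev->config_addr = regs[0];
		edev->pe_config_addr = pseries_eeh_get_pe_addr(dn);
	}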
+
+/**
+ * pseries_eeh_get_state - Retrieve PE state
+ * @dn: PE associated device node
+ * @state: return value
+ *
+ * Retrieve the state of the specified PE. On an RTAS compliant
+ * pseries platform there is already a dedicated RTAS call for
+ * this purpose. The associated PE config address might be
+ * available by the time this is called, so use it if possible.
+ * Furthermore, there are two RTAS calls for the purpose: try
+ * the new one first and fall back to the old one if the new
+ * one doesn't work properly.
+ */
+static int pseries_eeh_get_state(struct device_node *dn, int *state)
+{
+ struct eeh_dev *edev;
+ int config_addr;
+ int ret;
+ int rets[4];
+ int result;
+
+ /* Figure out PE config address if possible */
+ edev = of_node_to_eeh_dev(dn);
+ config_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ config_addr = edev->pe_config_addr;
+
+ if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
+ config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid));
+ } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
+ /* Fake PE unavailable info */
+ rets[2] = 0;
+ ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
+ config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid));
+ } else {
+ return EEH_STATE_NOT_SUPPORT;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Parse the result out */
+ result = 0;
+ if (rets[1]) {
+ switch (rets[0]) {
+ case 0:
+ result &= ~EEH_STATE_RESET_ACTIVE;
+ result |= EEH_STATE_MMIO_ACTIVE;
+ result |= EEH_STATE_DMA_ACTIVE;
+ break;
+ case 1:
+ result |= EEH_STATE_RESET_ACTIVE;
+ result |= EEH_STATE_MMIO_ACTIVE;
+ result |= EEH_STATE_DMA_ACTIVE;
+ break;
+ case 2:
+ result &= ~EEH_STATE_RESET_ACTIVE;
+ result &= ~EEH_STATE_MMIO_ACTIVE;
+ result &= ~EEH_STATE_DMA_ACTIVE;
+ break;
+ case 4:
+ result &= ~EEH_STATE_RESET_ACTIVE;
+ result &= ~EEH_STATE_MMIO_ACTIVE;
+ result &= ~EEH_STATE_DMA_ACTIVE;
+ result |= EEH_STATE_MMIO_ENABLED;
+ break;
+ case 5:
+ if (rets[2]) {
+ if (state) *state = rets[2];
+ result = EEH_STATE_UNAVAILABLE;
+ } else {
+ result = EEH_STATE_NOT_SUPPORT;
+ }
+ break;
+ default:
+ result = EEH_STATE_NOT_SUPPORT;
+ }
+ } else {
+ result = EEH_STATE_NOT_SUPPORT;
+ }
+
+ return result;
+}
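The returned value is either one of the NOT_SUPPORT/UNAVAILABLE sentinels or a bitmask built from the EEH_STATE_* flags shown above. A minimal sketch of how a caller might test it (the helper name is hypothetical):

	/* Sketch only: the PE is usable when both MMIO and DMA are active. */
	static inline bool eeh_state_fully_active(int state)
	{
		int mask = EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE;

		return (state & mask) == mask;
	}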
+
+/**
+ * pseries_eeh_reset - Reset the specified PE
+ * @dn: PE associated device node
+ * @option: reset option
+ *
+ * Reset the specified PE
+ */
+static int pseries_eeh_reset(struct device_node *dn, int option)
+{
+ struct eeh_dev *edev;
+ int config_addr;
+ int ret;
+
+ /* Figure out PE address */
+ edev = of_node_to_eeh_dev(dn);
+ config_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ config_addr = edev->pe_config_addr;
+
+ /* Reset PE through RTAS call */
+ ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
+ config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid), option);
+
+ /* If fundamental-reset not supported, try hot-reset */
+ if (option == EEH_RESET_FUNDAMENTAL &&
+ ret == -8) {
+ ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
+ config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid), EEH_RESET_HOT);
+ }
+
+ return ret;
+}
+
+/**
+ * pseries_eeh_wait_state - Wait for PE state
+ * @dn: PE associated device node
+ * @max_wait: maximal period in milliseconds
+ *
+ * Wait for the state of the associated PE. It might take some time
+ * to retrieve the PE's state.
+ */
+static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
+{
+ int ret;
+ int mwait;
+
+ /*
+ * According to PAPR, the state of a PE might be temporarily
+ * unavailable. In that case we have to wait for the time
+ * indicated by firmware. The maximal wait time of 5 minutes
+ * and the minimal wait time of 1 second are both taken from
+ * the original EEH implementation.
+ */
+#define EEH_STATE_MIN_WAIT_TIME (1000)
+#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
+
+ while (1) {
+ ret = pseries_eeh_get_state(dn, &mwait);
+
+ /*
+ * If the PE's state is temporarily unavailable,
+ * we have to wait for the specified time. Otherwise,
+ * the PE's state will be returned immediately.
+ */
+ if (ret != EEH_STATE_UNAVAILABLE)
+ return ret;
+
+ if (max_wait <= 0) {
+ pr_warning("%s: Timeout when getting PE's state (%d)\n",
+ __func__, max_wait);
+ return EEH_STATE_NOT_SUPPORT;
+ }
+
+ if (mwait <= 0) {
+ pr_warning("%s: Firmware returned bad wait value %d\n",
+ __func__, mwait);
+ mwait = EEH_STATE_MIN_WAIT_TIME;
+ } else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
+ pr_warning("%s: Firmware returned too long wait value %d\n",
+ __func__, mwait);
+ mwait = EEH_STATE_MAX_WAIT_TIME;
+ }
+
+ max_wait -= mwait;
+ msleep(mwait);
+ }
+
+ return EEH_STATE_NOT_SUPPORT;
+}
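The loop above clamps whatever hint the firmware returns to the [1 second, 5 minute] window before sleeping. The same clamp as a stand-alone sketch (hypothetical helper name, not part of the patch):

	/* Sketch only: bound a firmware wait hint, in milliseconds. */
	static inline int eeh_clamp_wait(int mwait)
	{
		if (mwait <= 0)
			return EEH_STATE_MIN_WAIT_TIME;
		if (mwait > EEH_STATE_MAX_WAIT_TIME)
			return EEH_STATE_MAX_WAIT_TIME;
		return mwait;
	}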
+
+/**
+ * pseries_eeh_get_log - Retrieve error log
+ * @dn: device node
+ * @severity: temporary or permanent error log
+ * @drv_log: driver log to be combined with retrieved error log
+ * @len: length of driver log
+ *
+ * Retrieve the temporary or permanent error log from the PE
+ * through the dedicated RTAS call.
+ */
+static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len)
+{
+ struct eeh_dev *edev;
+ int config_addr;
+ unsigned long flags;
+ int ret;
+
+ edev = of_node_to_eeh_dev(dn);
+ spin_lock_irqsave(&slot_errbuf_lock, flags);
+ memset(slot_errbuf, 0, eeh_error_buf_size);
+
+ /* Figure out the PE address */
+ config_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ config_addr = edev->pe_config_addr;
+
+ ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
+ BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid),
+ virt_to_phys(drv_log), len,
+ virt_to_phys(slot_errbuf), eeh_error_buf_size,
+ severity);
+ if (!ret)
+ log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
+ spin_unlock_irqrestore(&slot_errbuf_lock, flags);
+
+ return ret;
+}
+
+/**
+ * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
+ * @dn: PE associated device node
+ *
+ * The function will be called to reconfigure the bridges included
+ * in the specified PE so that the malfunctioning PE can be
+ * recovered.
+ */
+static int pseries_eeh_configure_bridge(struct device_node *dn)
+{
+ struct eeh_dev *edev;
+ int config_addr;
+ int ret;
+
+ /* Figure out the PE address */
+ edev = of_node_to_eeh_dev(dn);
+ config_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ config_addr = edev->pe_config_addr;
+
+ /* Use new configure-pe function, if supported */
+ if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+ config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid));
+ } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+ config_addr, BUID_HI(edev->phb->buid),
+ BUID_LO(edev->phb->buid));
+ } else {
+ return -EFAULT;
+ }
+
+ if (ret)
+ pr_warning("%s: Unable to configure bridge %d for %s\n",
+ __func__, ret, dn->full_name);
+
+ return ret;
+}
+
+/**
+ * pseries_eeh_read_config - Read PCI config space
+ * @dn: device node
+ * @where: PCI address
+ * @size: size to read
+ * @val: return value
+ *
+ * Read config space from the specified device
+ */
+static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
+{
+ struct pci_dn *pdn;
+
+ pdn = PCI_DN(dn);
+
+ return rtas_read_config(pdn, where, size, val);
+}
+
+/**
+ * pseries_eeh_write_config - Write PCI config space
+ * @dn: device node
+ * @where: PCI address
+ * @size: size to write
+ * @val: value to be written
+ *
+ * Write config space to the specified device
+ */
+static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
+{
+ struct pci_dn *pdn;
+
+ pdn = PCI_DN(dn);
+
+ return rtas_write_config(pdn, where, size, val);
+}
+
+static struct eeh_ops pseries_eeh_ops = {
+ .name = "pseries",
+ .init = pseries_eeh_init,
+ .set_option = pseries_eeh_set_option,
+ .get_pe_addr = pseries_eeh_get_pe_addr,
+ .get_state = pseries_eeh_get_state,
+ .reset = pseries_eeh_reset,
+ .wait_state = pseries_eeh_wait_state,
+ .get_log = pseries_eeh_get_log,
+ .configure_bridge = pseries_eeh_configure_bridge,
+ .read_config = pseries_eeh_read_config,
+ .write_config = pseries_eeh_write_config
+};
+
+/**
+ * eeh_pseries_init - Register platform dependent EEH operations
+ *
+ * EEH initialization on pseries platform. This function should be
+ * called before any EEH related functions.
+ */
+int __init eeh_pseries_init(void)
+{
+ return eeh_ops_register(&pseries_eeh_ops);
+}
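For orientation, the registration above is only half of the bring-up: the pseries setup changes later in this patch call eeh_pseries_init() before eeh_init() so that the ops are registered before the core starts probing. A rough sketch of that ordering (function name hypothetical):

	/* Sketch only: ordering of the EEH bring-up on pseries. */
	static void __init example_eeh_bringup(void)
	{
		eeh_pseries_init();	/* register pseries_eeh_ops with the core */
		eeh_init();		/* core EEH then probes devices via the ops */
	}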
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c
index eb744ee234da..243b3510d70f 100644
--- a/arch/powerpc/platforms/pseries/eeh_sysfs.c
+++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c
@@ -28,7 +28,7 @@
#include <asm/pci-bridge.h>
/**
- * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic
+ * EEH_SHOW_ATTR -- Create sysfs entry for eeh statistic
* @_name: name of file in sysfs directory
* @_memb: name of member in struct pci_dn to access
* @_format: printf format for display
@@ -41,24 +41,21 @@ static ssize_t eeh_show_##_name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
- struct device_node *dn = pci_device_to_OF_node(pdev); \
- struct pci_dn *pdn; \
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); \
\
- if (!dn || PCI_DN(dn) == NULL) \
- return 0; \
+ if (!edev) \
+ return 0; \
\
- pdn = PCI_DN(dn); \
- return sprintf(buf, _format "\n", pdn->_memb); \
+ return sprintf(buf, _format "\n", edev->_memb); \
} \
static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
-
-EEH_SHOW_ATTR(eeh_mode, eeh_mode, "0x%x");
-EEH_SHOW_ATTR(eeh_config_addr, eeh_config_addr, "0x%x");
-EEH_SHOW_ATTR(eeh_pe_config_addr, eeh_pe_config_addr, "0x%x");
-EEH_SHOW_ATTR(eeh_check_count, eeh_check_count, "%d");
-EEH_SHOW_ATTR(eeh_freeze_count, eeh_freeze_count, "%d");
-EEH_SHOW_ATTR(eeh_false_positives, eeh_false_positives, "%d");
+EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
+EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
+EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
+EEH_SHOW_ATTR(eeh_check_count, check_count, "%d");
+EEH_SHOW_ATTR(eeh_freeze_count, freeze_count, "%d");
+EEH_SHOW_ATTR(eeh_false_positives, false_positives, "%d");
void eeh_sysfs_add_device(struct pci_dev *pdev)
{
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
index 1a709bc48ce1..ef9d9d84c7d5 100644
--- a/arch/powerpc/platforms/pseries/io_event_irq.c
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -63,73 +63,9 @@ EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
static int ioei_check_exception_token;
-/* pSeries event log format */
-
-/* Two bytes ASCII section IDs */
-#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
-#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
-#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
-#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
-#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
-#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
-#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
-#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
-#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
-#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
-#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
-#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
-#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
-#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
-#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
-#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
-
-/* Vendor specific Platform Event Log Format, Version 6, section header */
-struct pseries_elog_section {
- uint16_t id; /* 0x00 2-byte ASCII section ID */
- uint16_t length; /* 0x02 Section length in bytes */
- uint8_t version; /* 0x04 Section version */
- uint8_t subtype; /* 0x05 Section subtype */
- uint16_t creator_component; /* 0x06 Creator component ID */
- uint8_t data[]; /* 0x08 Start of section data */
-};
-
static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
/**
- * Find data portion of a specific section in RTAS extended event log.
- * @elog: RTAS error/event log.
- * @sect_id: secsion ID.
- *
- * Return:
- * pointer to the section data of the specified section
- * NULL if not found
- */
-static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog,
- uint16_t sect_id)
-{
- struct rtas_ext_event_log_v6 *xelog =
- (struct rtas_ext_event_log_v6 *) elog->buffer;
- struct pseries_elog_section *sect;
- unsigned char *p, *log_end;
-
- /* Check that we understand the format */
- if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
- xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
- xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
- return NULL;
-
- log_end = elog->buffer + elog->extended_log_length;
- p = xelog->vendor_log;
- while (p < log_end) {
- sect = (struct pseries_elog_section *)p;
- if (sect->id == sect_id)
- return sect;
- p += sect->length;
- }
- return NULL;
-}
-
-/**
* Find the data portion of an IO Event section from event log.
* @elog: RTAS error/event log.
*
@@ -138,7 +74,7 @@ static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *el
*/
static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
{
- struct pseries_elog_section *sect;
+ struct pseries_errorlog *sect;
/* We should only ever get called for io-event interrupts, but if
* we do get called for another type then something went wrong so
@@ -152,7 +88,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
return NULL;
}
- sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
+ sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
if (unlikely(!sect)) {
printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
"log does not contain an IO Event section. "
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index c442f2b1980f..0915b1ad66ce 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -809,8 +809,7 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_query_response *query)
{
- struct device_node *dn;
- struct pci_dn *pcidn;
+ struct eeh_dev *edev;
u32 cfg_addr;
u64 buid;
int ret;
@@ -821,12 +820,12 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
- dn = pci_device_to_OF_node(dev);
- pcidn = PCI_DN(dn);
- cfg_addr = pcidn->eeh_config_addr;
- if (pcidn->eeh_pe_config_addr)
- cfg_addr = pcidn->eeh_pe_config_addr;
- buid = pcidn->phb->buid;
+ edev = pci_dev_to_eeh_dev(dev);
+ cfg_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ cfg_addr = edev->pe_config_addr;
+ buid = edev->phb->buid;
+
ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
@@ -839,8 +838,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_create_response *create, int page_shift,
int window_shift)
{
- struct device_node *dn;
- struct pci_dn *pcidn;
+ struct eeh_dev *edev;
u32 cfg_addr;
u64 buid;
int ret;
@@ -851,12 +849,11 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
- dn = pci_device_to_OF_node(dev);
- pcidn = PCI_DN(dn);
- cfg_addr = pcidn->eeh_config_addr;
- if (pcidn->eeh_pe_config_addr)
- cfg_addr = pcidn->eeh_pe_config_addr;
- buid = pcidn->phb->buid;
+ edev = pci_dev_to_eeh_dev(dev);
+ cfg_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ cfg_addr = edev->pe_config_addr;
+ buid = edev->phb->buid;
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 7bc73af6c7b9..5f3ef876ded2 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -41,6 +41,7 @@
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
+#include <asm/firmware.h>
#include "plpar_wrappers.h"
#include "pseries.h"
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 38d24e7e7bb1..109fdb75578d 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -217,7 +217,7 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
if (!dn)
return NULL;
- dn = find_device_pe(dn);
+ dn = eeh_find_device_pe(dn);
if (!dn)
return NULL;
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 55d4ec1bd1ac..8b7bafa489c2 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -84,7 +84,7 @@ void pcibios_remove_pci_devices(struct pci_bus *bus)
list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
pr_debug(" * Removing %s...\n", pci_name(dev));
eeh_remove_bus_device(dev);
- pci_remove_bus_device(dev);
+ pci_stop_and_remove_bus_device(dev);
}
}
EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
@@ -147,6 +147,9 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
pci_devs_phb_init_dynamic(phb);
+ /* Create EEH devices for the PHB */
+ eeh_dev_phb_init_dynamic(phb);
+
if (dn->child)
eeh_add_device_tree_early(dn);
diff --git a/arch/powerpc/platforms/pseries/phyp_dump.c b/arch/powerpc/platforms/pseries/phyp_dump.c
deleted file mode 100644
index 6e7742da0072..000000000000
--- a/arch/powerpc/platforms/pseries/phyp_dump.c
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Hypervisor-assisted dump
- *
- * Linas Vepstas, Manish Ahuja 2008
- * Copyright 2008 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/pfn.h>
-#include <linux/swap.h>
-#include <linux/sysfs.h>
-
-#include <asm/page.h>
-#include <asm/phyp_dump.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/rtas.h>
-
-/* Variables, used to communicate data between early boot and late boot */
-static struct phyp_dump phyp_dump_vars;
-struct phyp_dump *phyp_dump_info = &phyp_dump_vars;
-
-static int ibm_configure_kernel_dump;
-/* ------------------------------------------------- */
-/* RTAS interfaces to declare the dump regions */
-
-struct dump_section {
- u32 dump_flags;
- u16 source_type;
- u16 error_flags;
- u64 source_address;
- u64 source_length;
- u64 length_copied;
- u64 destination_address;
-};
-
-struct phyp_dump_header {
- u32 version;
- u16 num_of_sections;
- u16 status;
-
- u32 first_offset_section;
- u32 dump_disk_section;
- u64 block_num_dd;
- u64 num_of_blocks_dd;
- u32 offset_dd;
- u32 maxtime_to_auto;
- /* No dump disk path string used */
-
- struct dump_section cpu_data;
- struct dump_section hpte_data;
- struct dump_section kernel_data;
-};
-
-/* The dump header *must be* in low memory, so .bss it */
-static struct phyp_dump_header phdr;
-
-#define NUM_DUMP_SECTIONS 3
-#define DUMP_HEADER_VERSION 0x1
-#define DUMP_REQUEST_FLAG 0x1
-#define DUMP_SOURCE_CPU 0x0001
-#define DUMP_SOURCE_HPTE 0x0002
-#define DUMP_SOURCE_RMO 0x0011
-#define DUMP_ERROR_FLAG 0x2000
-#define DUMP_TRIGGERED 0x4000
-#define DUMP_PERFORMED 0x8000
-
-
-/**
- * init_dump_header() - initialize the header declaring a dump
- * Returns: length of dump save area.
- *
- * When the hypervisor saves crashed state, it needs to put
- * it somewhere. The dump header tells the hypervisor where
- * the data can be saved.
- */
-static unsigned long init_dump_header(struct phyp_dump_header *ph)
-{
- unsigned long addr_offset = 0;
-
- /* Set up the dump header */
- ph->version = DUMP_HEADER_VERSION;
- ph->num_of_sections = NUM_DUMP_SECTIONS;
- ph->status = 0;
-
- ph->first_offset_section =
- (u32)offsetof(struct phyp_dump_header, cpu_data);
- ph->dump_disk_section = 0;
- ph->block_num_dd = 0;
- ph->num_of_blocks_dd = 0;
- ph->offset_dd = 0;
-
- ph->maxtime_to_auto = 0; /* disabled */
-
- /* The first two sections are mandatory */
- ph->cpu_data.dump_flags = DUMP_REQUEST_FLAG;
- ph->cpu_data.source_type = DUMP_SOURCE_CPU;
- ph->cpu_data.source_address = 0;
- ph->cpu_data.source_length = phyp_dump_info->cpu_state_size;
- ph->cpu_data.destination_address = addr_offset;
- addr_offset += phyp_dump_info->cpu_state_size;
-
- ph->hpte_data.dump_flags = DUMP_REQUEST_FLAG;
- ph->hpte_data.source_type = DUMP_SOURCE_HPTE;
- ph->hpte_data.source_address = 0;
- ph->hpte_data.source_length = phyp_dump_info->hpte_region_size;
- ph->hpte_data.destination_address = addr_offset;
- addr_offset += phyp_dump_info->hpte_region_size;
-
- /* This section describes the low kernel region */
- ph->kernel_data.dump_flags = DUMP_REQUEST_FLAG;
- ph->kernel_data.source_type = DUMP_SOURCE_RMO;
- ph->kernel_data.source_address = PHYP_DUMP_RMR_START;
- ph->kernel_data.source_length = PHYP_DUMP_RMR_END;
- ph->kernel_data.destination_address = addr_offset;
- addr_offset += ph->kernel_data.source_length;
-
- return addr_offset;
-}
-
-static void print_dump_header(const struct phyp_dump_header *ph)
-{
-#ifdef DEBUG
- if (ph == NULL)
- return;
-
- printk(KERN_INFO "dump header:\n");
- /* setup some ph->sections required */
- printk(KERN_INFO "version = %d\n", ph->version);
- printk(KERN_INFO "Sections = %d\n", ph->num_of_sections);
- printk(KERN_INFO "Status = 0x%x\n", ph->status);
-
- /* No ph->disk, so all should be set to 0 */
- printk(KERN_INFO "Offset to first section 0x%x\n",
- ph->first_offset_section);
- printk(KERN_INFO "dump disk sections should be zero\n");
- printk(KERN_INFO "dump disk section = %d\n", ph->dump_disk_section);
- printk(KERN_INFO "block num = %lld\n", ph->block_num_dd);
- printk(KERN_INFO "number of blocks = %lld\n", ph->num_of_blocks_dd);
- printk(KERN_INFO "dump disk offset = %d\n", ph->offset_dd);
- printk(KERN_INFO "Max auto time= %d\n", ph->maxtime_to_auto);
-
- /*set cpu state and hpte states as well scratch pad area */
- printk(KERN_INFO " CPU AREA\n");
- printk(KERN_INFO "cpu dump_flags =%d\n", ph->cpu_data.dump_flags);
- printk(KERN_INFO "cpu source_type =%d\n", ph->cpu_data.source_type);
- printk(KERN_INFO "cpu error_flags =%d\n", ph->cpu_data.error_flags);
- printk(KERN_INFO "cpu source_address =%llx\n",
- ph->cpu_data.source_address);
- printk(KERN_INFO "cpu source_length =%llx\n",
- ph->cpu_data.source_length);
- printk(KERN_INFO "cpu length_copied =%llx\n",
- ph->cpu_data.length_copied);
-
- printk(KERN_INFO " HPTE AREA\n");
- printk(KERN_INFO "HPTE dump_flags =%d\n", ph->hpte_data.dump_flags);
- printk(KERN_INFO "HPTE source_type =%d\n", ph->hpte_data.source_type);
- printk(KERN_INFO "HPTE error_flags =%d\n", ph->hpte_data.error_flags);
- printk(KERN_INFO "HPTE source_address =%llx\n",
- ph->hpte_data.source_address);
- printk(KERN_INFO "HPTE source_length =%llx\n",
- ph->hpte_data.source_length);
- printk(KERN_INFO "HPTE length_copied =%llx\n",
- ph->hpte_data.length_copied);
-
- printk(KERN_INFO " SRSD AREA\n");
- printk(KERN_INFO "SRSD dump_flags =%d\n", ph->kernel_data.dump_flags);
- printk(KERN_INFO "SRSD source_type =%d\n", ph->kernel_data.source_type);
- printk(KERN_INFO "SRSD error_flags =%d\n", ph->kernel_data.error_flags);
- printk(KERN_INFO "SRSD source_address =%llx\n",
- ph->kernel_data.source_address);
- printk(KERN_INFO "SRSD source_length =%llx\n",
- ph->kernel_data.source_length);
- printk(KERN_INFO "SRSD length_copied =%llx\n",
- ph->kernel_data.length_copied);
-#endif
-}
-
-static ssize_t show_phyp_dump_active(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
-
- /* create filesystem entry so kdump is phyp-dump aware */
- return sprintf(buf, "%lx\n", phyp_dump_info->phyp_dump_at_boot);
-}
-
-static struct kobj_attribute pdl = __ATTR(phyp_dump_active, 0600,
- show_phyp_dump_active,
- NULL);
-
-static void register_dump_area(struct phyp_dump_header *ph, unsigned long addr)
-{
- int rc;
-
- /* Add addr value if not initialized before */
- if (ph->cpu_data.destination_address == 0) {
- ph->cpu_data.destination_address += addr;
- ph->hpte_data.destination_address += addr;
- ph->kernel_data.destination_address += addr;
- }
-
- /* ToDo Invalidate kdump and free memory range. */
-
- do {
- rc = rtas_call(ibm_configure_kernel_dump, 3, 1, NULL,
- 1, ph, sizeof(struct phyp_dump_header));
- } while (rtas_busy_delay(rc));
-
- if (rc) {
- printk(KERN_ERR "phyp-dump: unexpected error (%d) on "
- "register\n", rc);
- print_dump_header(ph);
- return;
- }
-
- rc = sysfs_create_file(kernel_kobj, &pdl.attr);
- if (rc)
- printk(KERN_ERR "phyp-dump: unable to create sysfs"
- " file (%d)\n", rc);
-}
-
-static
-void invalidate_last_dump(struct phyp_dump_header *ph, unsigned long addr)
-{
- int rc;
-
- /* Add addr value if not initialized before */
- if (ph->cpu_data.destination_address == 0) {
- ph->cpu_data.destination_address += addr;
- ph->hpte_data.destination_address += addr;
- ph->kernel_data.destination_address += addr;
- }
-
- do {
- rc = rtas_call(ibm_configure_kernel_dump, 3, 1, NULL,
- 2, ph, sizeof(struct phyp_dump_header));
- } while (rtas_busy_delay(rc));
-
- if (rc) {
- printk(KERN_ERR "phyp-dump: unexpected error (%d) "
- "on invalidate\n", rc);
- print_dump_header(ph);
- }
-}
-
-/* ------------------------------------------------- */
-/**
- * release_memory_range -- release memory previously memblock_reserved
- * @start_pfn: starting physical frame number
- * @nr_pages: number of pages to free.
- *
- * This routine will release memory that had been previously
- * memblock_reserved in early boot. The released memory becomes
- * available for genreal use.
- */
-static void release_memory_range(unsigned long start_pfn,
- unsigned long nr_pages)
-{
- struct page *rpage;
- unsigned long end_pfn;
- long i;
-
- end_pfn = start_pfn + nr_pages;
-
- for (i = start_pfn; i <= end_pfn; i++) {
- rpage = pfn_to_page(i);
- if (PageReserved(rpage)) {
- ClearPageReserved(rpage);
- init_page_count(rpage);
- __free_page(rpage);
- totalram_pages++;
- }
- }
-}
-
-/**
- * track_freed_range -- Counts the range being freed.
- * Once the counter goes to zero, it re-registers dump for
- * future use.
- */
-static void
-track_freed_range(unsigned long addr, unsigned long length)
-{
- static unsigned long scratch_area_size, reserved_area_size;
-
- if (addr < phyp_dump_info->init_reserve_start)
- return;
-
- if ((addr >= phyp_dump_info->init_reserve_start) &&
- (addr <= phyp_dump_info->init_reserve_start +
- phyp_dump_info->init_reserve_size))
- reserved_area_size += length;
-
- if ((addr >= phyp_dump_info->reserved_scratch_addr) &&
- (addr <= phyp_dump_info->reserved_scratch_addr +
- phyp_dump_info->reserved_scratch_size))
- scratch_area_size += length;
-
- if ((reserved_area_size == phyp_dump_info->init_reserve_size) &&
- (scratch_area_size == phyp_dump_info->reserved_scratch_size)) {
-
- invalidate_last_dump(&phdr,
- phyp_dump_info->reserved_scratch_addr);
- register_dump_area(&phdr,
- phyp_dump_info->reserved_scratch_addr);
- }
-}
-
-/* ------------------------------------------------- */
-/**
- * sysfs_release_region -- sysfs interface to release memory range.
- *
- * Usage:
- * "echo <start addr> <length> > /sys/kernel/release_region"
- *
- * Example:
- * "echo 0x40000000 0x10000000 > /sys/kernel/release_region"
- *
- * will release 256MB starting at 1GB.
- */
-static ssize_t store_release_region(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned long start_addr, length, end_addr;
- unsigned long start_pfn, nr_pages;
- ssize_t ret;
-
- ret = sscanf(buf, "%lx %lx", &start_addr, &length);
- if (ret != 2)
- return -EINVAL;
-
- track_freed_range(start_addr, length);
-
- /* Range-check - don't free any reserved memory that
- * wasn't reserved for phyp-dump */
- if (start_addr < phyp_dump_info->init_reserve_start)
- start_addr = phyp_dump_info->init_reserve_start;
-
- end_addr = phyp_dump_info->init_reserve_start +
- phyp_dump_info->init_reserve_size;
- if (start_addr+length > end_addr)
- length = end_addr - start_addr;
-
- /* Release the region of memory assed in by user */
- start_pfn = PFN_DOWN(start_addr);
- nr_pages = PFN_DOWN(length);
- release_memory_range(start_pfn, nr_pages);
-
- return count;
-}
-
-static ssize_t show_release_region(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- u64 second_addr_range;
-
- /* total reserved size - start of scratch area */
- second_addr_range = phyp_dump_info->init_reserve_size -
- phyp_dump_info->reserved_scratch_size;
- return sprintf(buf, "CPU:0x%llx-0x%llx: HPTE:0x%llx-0x%llx:"
- " DUMP:0x%llx-0x%llx, 0x%lx-0x%llx:\n",
- phdr.cpu_data.destination_address,
- phdr.cpu_data.length_copied,
- phdr.hpte_data.destination_address,
- phdr.hpte_data.length_copied,
- phdr.kernel_data.destination_address,
- phdr.kernel_data.length_copied,
- phyp_dump_info->init_reserve_start,
- second_addr_range);
-}
-
-static struct kobj_attribute rr = __ATTR(release_region, 0600,
- show_release_region,
- store_release_region);
-
-static int __init phyp_dump_setup(void)
-{
- struct device_node *rtas;
- const struct phyp_dump_header *dump_header = NULL;
- unsigned long dump_area_start;
- unsigned long dump_area_length;
- int header_len = 0;
- int rc;
-
- /* If no memory was reserved in early boot, there is nothing to do */
- if (phyp_dump_info->init_reserve_size == 0)
- return 0;
-
- /* Return if phyp dump not supported */
- if (!phyp_dump_info->phyp_dump_configured)
- return -ENOSYS;
-
- /* Is there dump data waiting for us? If there isn't,
- * then register a new dump area, and release all of
- * the rest of the reserved ram.
- *
- * The /rtas/ibm,kernel-dump rtas node is present only
- * if there is dump data waiting for us.
- */
- rtas = of_find_node_by_path("/rtas");
- if (rtas) {
- dump_header = of_get_property(rtas, "ibm,kernel-dump",
- &header_len);
- of_node_put(rtas);
- }
-
- ibm_configure_kernel_dump = rtas_token("ibm,configure-kernel-dump");
-
- print_dump_header(dump_header);
- dump_area_length = init_dump_header(&phdr);
- /* align down */
- dump_area_start = phyp_dump_info->init_reserve_start & PAGE_MASK;
-
- if (dump_header == NULL) {
- register_dump_area(&phdr, dump_area_start);
- return 0;
- }
-
- /* re-register the dump area, if old dump was invalid */
- if ((dump_header) && (dump_header->status & DUMP_ERROR_FLAG)) {
- invalidate_last_dump(&phdr, dump_area_start);
- register_dump_area(&phdr, dump_area_start);
- return 0;
- }
-
- if (dump_header) {
- phyp_dump_info->reserved_scratch_addr =
- dump_header->cpu_data.destination_address;
- phyp_dump_info->reserved_scratch_size =
- dump_header->cpu_data.source_length +
- dump_header->hpte_data.source_length +
- dump_header->kernel_data.source_length;
- }
-
- /* Should we create a dump_subsys, analogous to s390/ipl.c ? */
- rc = sysfs_create_file(kernel_kobj, &rr.attr);
- if (rc)
- printk(KERN_ERR "phyp-dump: unable to create sysfs file (%d)\n",
- rc);
-
- /* ToDo: re-register the dump area, for next time. */
- return 0;
-}
-machine_subsys_initcall(pseries, phyp_dump_setup);
-
-int __init early_init_dt_scan_phyp_dump(unsigned long node,
- const char *uname, int depth, void *data)
-{
- const unsigned int *sizes;
-
- phyp_dump_info->phyp_dump_configured = 0;
- phyp_dump_info->phyp_dump_is_active = 0;
-
- if (depth != 1 || strcmp(uname, "rtas") != 0)
- return 0;
-
- if (of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL))
- phyp_dump_info->phyp_dump_configured++;
-
- if (of_get_flat_dt_prop(node, "ibm,dump-kernel", NULL))
- phyp_dump_info->phyp_dump_is_active++;
-
- sizes = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
- NULL);
- if (!sizes)
- return 0;
-
- if (sizes[0] == 1)
- phyp_dump_info->cpu_state_size = *((unsigned long *)&sizes[1]);
-
- if (sizes[3] == 2)
- phyp_dump_info->hpte_region_size =
- *((unsigned long *)&sizes[4]);
- return 1;
-}
-
-/* Look for phyp_dump= cmdline option */
-static int __init early_phyp_dump_enabled(char *p)
-{
- phyp_dump_info->phyp_dump_at_boot = 1;
-
- if (!p)
- return 0;
-
- if (strncmp(p, "1", 1) == 0)
- phyp_dump_info->phyp_dump_at_boot = 1;
- else if (strncmp(p, "0", 1) == 0)
- phyp_dump_info->phyp_dump_at_boot = 0;
-
- return 0;
-}
-early_param("phyp_dump", early_phyp_dump_enabled);
-
-/* Look for phyp_dump_reserve_size= cmdline option */
-static int __init early_phyp_dump_reserve_size(char *p)
-{
- if (p)
- phyp_dump_info->reserve_bootvar = memparse(p, &p);
-
- return 0;
-}
-early_param("phyp_dump_reserve_size", early_phyp_dump_reserve_size);
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 085fd3f45ad2..a12e95af6933 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -96,6 +96,20 @@ out:
return index;
}
+static void check_and_cede_processor(void)
+{
+ /*
+ * Interrupts are soft-disabled at this point,
+ * but not hard disabled. So an interrupt might have
+ * occurred before entering NAP, and would be potentially
+ * lost (edge events, decrementer events, etc...) unless
+ * we first hard disable then check.
+ */
+ hard_irq_disable();
+ if (get_paca()->irq_happened == 0)
+ cede_processor();
+}
+
static int dedicated_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
@@ -108,7 +122,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
ppc64_runlatch_off();
HMT_medium();
- cede_processor();
+ check_and_cede_processor();
get_lppaca()->donate_dedicated_cpu = 0;
dev->last_residency =
@@ -132,7 +146,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
* processor. When returning here, external interrupts
* are enabled.
*/
- cede_processor();
+ check_and_cede_processor();
dev->last_residency =
(int)idle_loop_epilog(in_purr, kt_before);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 086d2ae4e06a..c4dfccd3a3d9 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -16,37 +16,15 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/* Change Activity:
- * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
- * End Change Activity
- */
-
-#include <linux/errno.h>
-#include <linux/threads.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
#include <linux/sched.h>
-#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/init.h>
-#include <linux/delay.h>
#include <linux/irq.h>
-#include <linux/random.h>
-#include <linux/sysrq.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/cache.h>
-#include <asm/prom.h>
-#include <asm/ptrace.h>
+#include <linux/of.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+
#include <asm/machdep.h>
#include <asm/rtas.h>
-#include <asm/udbg.h>
#include <asm/firmware.h>
#include "pseries.h"
@@ -57,7 +35,6 @@ static DEFINE_SPINLOCK(ras_log_buf_lock);
static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_PER_CPU(__u64, mce_data_buf);
-static int ras_get_sensor_state_token;
static int ras_check_exception_token;
#define EPOW_SENSOR_TOKEN 9
@@ -75,7 +52,6 @@ static int __init init_ras_IRQ(void)
{
struct device_node *np;
- ras_get_sensor_state_token = rtas_token("get-sensor-state");
ras_check_exception_token = rtas_token("check-exception");
/* Internal Errors */
@@ -95,26 +71,126 @@ static int __init init_ras_IRQ(void)
return 0;
}
-__initcall(init_ras_IRQ);
+subsys_initcall(init_ras_IRQ);
-/*
- * Handle power subsystem events (EPOW).
- *
- * Presently we just log the event has occurred. This should be fixed
- * to examine the type of power failure and take appropriate action where
- * the time horizon permits something useful to be done.
- */
+#define EPOW_SHUTDOWN_NORMAL 1
+#define EPOW_SHUTDOWN_ON_UPS 2
+#define EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS 3
+#define EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH 4
+
+static void handle_system_shutdown(char event_modifier)
+{
+ switch (event_modifier) {
+ case EPOW_SHUTDOWN_NORMAL:
+ pr_emerg("Firmware initiated power off");
+ orderly_poweroff(1);
+ break;
+
+ case EPOW_SHUTDOWN_ON_UPS:
+ pr_emerg("Loss of power reported by firmware, system is "
+ "running on UPS/battery");
+ break;
+
+ case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
+ pr_emerg("Loss of system critical functions reported by "
+ "firmware");
+ pr_emerg("Check RTAS error log for details");
+ orderly_poweroff(1);
+ break;
+
+ case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH:
+ pr_emerg("Ambient temperature too high reported by firmware");
+ pr_emerg("Check RTAS error log for details");
+ orderly_poweroff(1);
+ break;
+
+ default:
+ pr_err("Unknown power/cooling shutdown event (modifier %d)",
+ event_modifier);
+ }
+}
+
+struct epow_errorlog {
+ unsigned char sensor_value;
+ unsigned char event_modifier;
+ unsigned char extended_modifier;
+ unsigned char reserved;
+ unsigned char platform_reason;
+};
+
+#define EPOW_RESET 0
+#define EPOW_WARN_COOLING 1
+#define EPOW_WARN_POWER 2
+#define EPOW_SYSTEM_SHUTDOWN 3
+#define EPOW_SYSTEM_HALT 4
+#define EPOW_MAIN_ENCLOSURE 5
+#define EPOW_POWER_OFF 7
+
+void rtas_parse_epow_errlog(struct rtas_error_log *log)
+{
+ struct pseries_errorlog *pseries_log;
+ struct epow_errorlog *epow_log;
+ char action_code;
+ char modifier;
+
+ pseries_log = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EPOW);
+ if (pseries_log == NULL)
+ return;
+
+ epow_log = (struct epow_errorlog *)pseries_log->data;
+ action_code = epow_log->sensor_value & 0xF; /* bottom 4 bits */
+ modifier = epow_log->event_modifier & 0xF; /* bottom 4 bits */
+
+ switch (action_code) {
+ case EPOW_RESET:
+ pr_err("Non critical power or cooling issue cleared");
+ break;
+
+ case EPOW_WARN_COOLING:
+ pr_err("Non critical cooling issue reported by firmware");
+ pr_err("Check RTAS error log for details");
+ break;
+
+ case EPOW_WARN_POWER:
+ pr_err("Non critical power issue reported by firmware");
+ pr_err("Check RTAS error log for details");
+ break;
+
+ case EPOW_SYSTEM_SHUTDOWN:
+ handle_system_shutdown(epow_log->event_modifier);
+ break;
+
+ case EPOW_SYSTEM_HALT:
+ pr_emerg("Firmware initiated power off");
+ orderly_poweroff(1);
+ break;
+
+ case EPOW_MAIN_ENCLOSURE:
+ case EPOW_POWER_OFF:
+ pr_emerg("Critical power/cooling issue reported by firmware");
+ pr_emerg("Check RTAS error log for details");
+ pr_emerg("Immediate power off");
+ emergency_sync();
+ kernel_power_off();
+ break;
+
+ default:
+ pr_err("Unknown power/cooling event (action code %d)",
+ action_code);
+ }
+}
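The decode above only uses the bottom four bits of each field. A tiny illustrative helper with hypothetical example values (not part of the patch):

	/* Sketch only: same bottom-4-bit decode as rtas_parse_epow_errlog(). */
	static void example_epow_decode(const struct epow_errorlog *ep)
	{
		char action = ep->sensor_value & 0xF;		/* e.g. 0x23 -> 3 */
		char modifier = ep->event_modifier & 0xF;	/* e.g. 0x12 -> 2 */

		pr_debug("EPOW action %d, modifier %d\n", action, modifier);
	}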
+
+/* Handle environmental and power warning (EPOW) interrupts. */
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
- int status = 0xdeadbeef;
- int state = 0;
+ int status;
+ int state;
int critical;
- status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
- EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);
+ status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
if (state > 3)
- critical = 1; /* Time Critical */
+ critical = 1; /* Time Critical */
else
critical = 0;
@@ -123,18 +199,14 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
status = rtas_call(ras_check_exception_token, 6, 1, NULL,
RTAS_VECTOR_EXTERNAL_INTERRUPT,
virq_to_hw(irq),
- RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
+ RTAS_EPOW_WARNING,
critical, __pa(&ras_log_buf),
rtas_get_error_log_max());
- udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status, state);
- printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status, state);
-
- /* format and print the extended information */
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
+ rtas_parse_epow_errlog((struct rtas_error_log *)ras_log_buf);
+
spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
@@ -150,7 +222,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
{
struct rtas_error_log *rtas_elog;
- int status = 0xdeadbeef;
+ int status;
int fatal;
spin_lock(&ras_log_buf_lock);
@@ -158,7 +230,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
status = rtas_call(ras_check_exception_token, 6, 1, NULL,
RTAS_VECTOR_EXTERNAL_INTERRUPT,
virq_to_hw(irq),
- RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
+ RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
__pa(&ras_log_buf),
rtas_get_error_log_max());
@@ -173,24 +245,13 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
if (fatal) {
- udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
- printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
-
-#ifndef DEBUG_RTAS_POWER_OFF
- /* Don't actually power off when debugging so we can test
- * without actually failing while injecting errors.
- * Error data will not be logged to syslog.
- */
- ppc_md.power_off();
-#endif
+ pr_emerg("Fatal hardware error reported by firmware");
+ pr_emerg("Check RTAS error log for details");
+ pr_emerg("Immediate power off");
+ emergency_sync();
+ kernel_power_off();
} else {
- udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
- printk(KERN_WARNING
- "Warning: Recoverable hardware error <0x%lx 0x%x>\n",
- *((unsigned long *)&ras_log_buf), status);
+ pr_err("Recoverable hardware error reported by firmware");
}
spin_unlock(&ras_log_buf_lock);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index f79f1278dfca..51ecac920dd8 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -190,9 +190,8 @@ static void __init pseries_mpic_init_IRQ(void)
BUG_ON(openpic_addr == 0);
/* Setup the openpic driver */
- mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, 0,
- 16, 250, /* isu size, irq count */
- " MPIC ");
+ mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
+ MPIC_NO_RESET, 16, 0, " MPIC ");
BUG_ON(mpic == NULL);
/* Add ISUs */
@@ -261,8 +260,12 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
switch (action) {
case PSERIES_RECONFIG_ADD:
pci = np->parent->data;
- if (pci)
+ if (pci) {
update_dn_pci_info(np, pci->phb);
+
+ /* Create EEH device for the OF node */
+ eeh_dev_init(np, pci->phb);
+ }
break;
default:
err = NOTIFY_DONE;
@@ -380,8 +383,12 @@ static void __init pSeries_setup_arch(void)
fwnmi_init();
+ /* By default, only probe PCI (can be overridden by rtas_pci) */
+ pci_add_flags(PCI_PROBE_ONLY);
+
/* Find and initialize PCI host bridges */
init_pci_config_tokens();
+ eeh_pseries_init();
find_and_init_phbs();
pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
eeh_init();
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index b84a8b2238dd..47226e04126d 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -24,6 +24,7 @@
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/rtas.h>
+#include <asm/topology.h>
static u64 stream_id;
static struct device suspend_dev;
@@ -138,8 +139,11 @@ static ssize_t store_hibernate(struct device *dev,
ssleep(1);
} while (rc == -EAGAIN);
- if (!rc)
+ if (!rc) {
+ stop_topology_update();
rc = pm_suspend(PM_SUSPEND_MEM);
+ start_topology_update();
+ }
stream_id = 0;
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
index 57d22a2f4ba9..79d2225b7608 100644
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -25,6 +25,7 @@ config PPC_CHROMA
bool "PowerEN PCIe Chroma Card"
select EPAPR_BOOT
select PPC_WSP
+ select OF_DYNAMIC
default y
endmenu
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 576874392543..97fe82ee8633 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -346,7 +346,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
* For the moment only implement delivery to all cpus or one cpu.
* Get current irq_server for the given irq
*/
- ret = cache_hwirq_map(ics, d->irq, cpumask);
+ ret = cache_hwirq_map(ics, hw_irq, cpumask);
if (ret == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
index 19f353dfcd03..cb565bf93650 100644
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -30,7 +30,7 @@
static int opb_index = 0;
struct opb_pic {
- struct irq_host *host;
+ struct irq_domain *host;
void *regs;
int index;
spinlock_t lock;
@@ -179,7 +179,7 @@ static struct irq_chip opb_irq_chip = {
.irq_set_type = opb_set_irq_type
};
-static int opb_host_map(struct irq_host *host, unsigned int virq,
+static int opb_host_map(struct irq_domain *host, unsigned int virq,
irq_hw_number_t hwirq)
{
struct opb_pic *opb;
@@ -196,20 +196,9 @@ static int opb_host_map(struct irq_host *host, unsigned int virq,
return 0;
}
-static int opb_host_xlate(struct irq_host *host, struct device_node *dn,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type)
-{
- /* Interrupt size must == 2 */
- BUG_ON(intsize != 2);
- *out_hwirq = intspec[0];
- *out_type = intspec[1];
- return 0;
-}
-
-static struct irq_host_ops opb_host_ops = {
+static const struct irq_domain_ops opb_host_ops = {
.map = opb_host_map,
- .xlate = opb_host_xlate,
+ .xlate = irq_domain_xlate_twocell,
};
irqreturn_t opb_irq_handler(int irq, void *private)
@@ -263,13 +252,11 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn)
goto free_opb;
}
- /* Allocate an irq host so that Linux knows that despite only
+ /* Allocate an irq domain so that Linux knows that despite only
* having one interrupt to issue, we're the controller for multiple
* hardware IRQs, so later we can lookup their virtual IRQs. */
- opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR,
- OPB_NR_IRQS, &opb_host_ops, -1);
-
+ opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
if (!opb->host) {
printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
goto free_regs;
@@ -277,7 +264,6 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn)
opb->index = opb_index++;
spin_lock_init(&opb->lock);
- opb->host->host_data = opb;
/* Disable all interrupts by default */
opb_out(opb, OPB_MLSASIER, 0);
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
index 71bd105f3863..0ba103ae83a5 100644
--- a/arch/powerpc/platforms/wsp/smp.c
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -71,7 +71,7 @@ int __devinit smp_a2_kick_cpu(int nr)
static int __init smp_a2_probe(void)
{
- return cpus_weight(cpu_possible_map);
+ return num_possible_cpus();
}
static struct smp_ops_t a2_smp_ops = {
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index e0262cd0e2d3..763014cd1e62 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -468,15 +468,15 @@ static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
#define DUMP_REG(x) \
pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
-#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS
- /* WSP DD1 has a bogus class code by default in the PCI-E
- * root complex's built-in P2P bridge */
+ /*
+ * Some WSP variants have a bogus class code by default in the PCI-E
+ * root complex's built-in P2P bridge
+ */
val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
(val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
-#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
/* XXX Disable TCE caching, it doesn't work on DD1 */
@@ -682,7 +682,6 @@ static int __init wsp_setup_one_phb(struct device_node *np)
/* XXX Force re-assigning of everything for now */
pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
PCI_ENABLE_PROC_DOMAINS);
- pci_probe_only = 0;
/* Calculate how the TCE space is divided */
phb->dma32_base = 0;
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 7b4df37ac381..a84fecf63c4d 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -29,3 +29,7 @@ config SCOM_DEBUGFS
bool "Expose SCOM controllers via debugfs"
depends on PPC_SCOM
default n
+
+config GE_FPGA
+ bool
+ default n
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5e37b4717864..1bd7ecb24620 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -4,6 +4,8 @@ ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o
obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y)
+mpic-msgr-obj-$(CONFIG_MPIC_MSGR) += mpic_msgr.o
+obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) $(mpic-msgr-obj-y)
obj-$(CONFIG_PPC_EPAPR_HV_PIC) += ehv_pic.o
fsl-msi-obj-$(CONFIG_PCI_MSI) += fsl_msi.o
obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o
@@ -65,3 +67,5 @@ obj-$(CONFIG_PPC_SCOM) += scom.o
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-$(CONFIG_PPC_XICS) += xics/
+
+obj-$(CONFIG_GE_FPGA) += ge/
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 5d7d59a43c4c..d4fa03f2b6ac 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -54,7 +54,7 @@ cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */
immap_t __iomem *mpc8xx_immr;
static cpic8xx_t __iomem *cpic_reg;
-static struct irq_host *cpm_pic_host;
+static struct irq_domain *cpm_pic_host;
static void cpm_mask_irq(struct irq_data *d)
{
@@ -98,7 +98,7 @@ int cpm_get_irq(void)
return irq_linear_revmap(cpm_pic_host, cpm_vec);
}
-static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
+static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -123,7 +123,7 @@ static struct irqaction cpm_error_irqaction = {
.name = "error",
};
-static struct irq_host_ops cpm_pic_host_ops = {
+static const struct irq_domain_ops cpm_pic_host_ops = {
.map = cpm_pic_host_map,
};
@@ -164,8 +164,7 @@ unsigned int cpm_pic_init(void)
out_be32(&cpic_reg->cpic_cimr, 0);
- cpm_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
- 64, &cpm_pic_host_ops, 64);
+ cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
if (cpm_pic_host == NULL) {
printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
sirq = NO_IRQ;
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index bcab50e2a9eb..d3be961e2ae7 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -50,7 +50,7 @@
static intctl_cpm2_t __iomem *cpm2_intctl;
-static struct irq_host *cpm2_pic_host;
+static struct irq_domain *cpm2_pic_host;
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
@@ -214,7 +214,7 @@ unsigned int cpm2_get_irq(void)
return irq_linear_revmap(cpm2_pic_host, irq);
}
-static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
+static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -224,21 +224,9 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_flags)
-{
- *out_hwirq = intspec[0];
- if (intsize > 1)
- *out_flags = intspec[1];
- else
- *out_flags = IRQ_TYPE_NONE;
- return 0;
-}
-
-static struct irq_host_ops cpm2_pic_host_ops = {
+static const struct irq_domain_ops cpm2_pic_host_ops = {
.map = cpm2_pic_host_map,
- .xlate = cpm2_pic_host_xlate,
+ .xlate = irq_domain_xlate_onetwocell,
};
void cpm2_pic_init(struct device_node *node)
@@ -275,8 +263,7 @@ void cpm2_pic_init(struct device_node *node)
out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);
/* create a legacy host */
- cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
- 64, &cpm2_pic_host_ops, 64);
+ cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL);
if (cpm2_pic_host == NULL) {
printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
return;
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index b6731e4a6646..6e0e1005227f 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -182,13 +182,13 @@ unsigned int ehv_pic_get_irq(void)
return irq_linear_revmap(global_ehv_pic->irqhost, irq);
}
-static int ehv_pic_host_match(struct irq_host *h, struct device_node *node)
+static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node)
{
/* Exact match, unless ehv_pic node is NULL */
return h->of_node == NULL || h->of_node == node;
}
-static int ehv_pic_host_map(struct irq_host *h, unsigned int virq,
+static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct ehv_pic *ehv_pic = h->host_data;
@@ -217,7 +217,7 @@ static int ehv_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -248,7 +248,7 @@ static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops ehv_pic_host_ops = {
+static const struct irq_domain_ops ehv_pic_host_ops = {
.match = ehv_pic_host_match,
.map = ehv_pic_host_map,
.xlate = ehv_pic_host_xlate,
@@ -275,9 +275,8 @@ void __init ehv_pic_init(void)
return;
}
- ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
- NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0);
-
+ ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
+ &ehv_pic_host_ops, ehv_pic);
if (!ehv_pic->irqhost) {
of_node_put(np);
kfree(ehv_pic);
@@ -293,7 +292,6 @@ void __init ehv_pic_init(void)
of_node_put(np2);
}
- ehv_pic->irqhost->host_data = ehv_pic;
ehv_pic->hc_irq = ehv_pic_irq_chip;
ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
ehv_pic->coreint_flag = coreint_flag;
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index 116415899176..37a69097e022 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -24,6 +24,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of_platform.h>
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index 5f88797dce73..cedabd0f4bfe 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -21,6 +21,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_platform.h>
#include <asm/io.h>
@@ -200,6 +201,9 @@ static struct of_device_id mpc85xx_l2ctlr_of_match[] = {
{
.compatible = "fsl,p1022-l2-cache-controller",
},
+ {
+ .compatible = "fsl,mpc8548-l2-cache-controller",
+ },
{},
};
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index ecb5c1946d22..6e097de00e09 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -60,7 +60,7 @@ static struct irq_chip fsl_msi_chip = {
.name = "FSL-MSI",
};
-static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
+static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct fsl_msi *msi_data = h->host_data;
@@ -74,7 +74,7 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops fsl_msi_host_ops = {
+static const struct irq_domain_ops fsl_msi_host_ops = {
.map = fsl_msi_host_map,
};
@@ -387,8 +387,8 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
}
platform_set_drvdata(dev, msi);
- msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR,
- NR_MSI_IRQS, &fsl_msi_host_ops, 0);
+ msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
+ NR_MSI_IRQS, &fsl_msi_host_ops, msi);
if (msi->irqhost == NULL) {
dev_err(&dev->dev, "No memory for MSI irqhost\n");
@@ -410,6 +410,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
msi->msi_regs = ioremap(res.start, resource_size(&res));
if (!msi->msi_regs) {
+ err = -ENOMEM;
dev_err(&dev->dev, "could not map node %s\n",
dev->dev.of_node->full_name);
goto error_out;
@@ -420,8 +421,6 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
msi->feature = features->fsl_pic_ip;
- msi->irqhost->host_data = msi;
-
/*
* Remember the phandle, so that we can match with any PCI nodes
* that have an "fsl,msi" property.
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index f6c646a52541..8225f8653f78 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -26,7 +26,7 @@
#define FSL_PIC_IP_VMPIC 0x00000003
struct fsl_msi {
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
unsigned long cascade_irq;
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 3b61e8cf3421..6073288fed29 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -205,12 +205,12 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
if (paddr_hi == paddr_lo) {
pr_err("%s: No outbound window space\n", name);
- return ;
+ goto out;
}
if (paddr_lo == 0) {
pr_err("%s: No space for inbound window\n", name);
- return ;
+ goto out;
}
/* setup PCSRBAR/PEXCSRBAR */
@@ -357,6 +357,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
(u64)hose->dma_window_size);
}
+out:
iounmap(pci);
}
@@ -384,26 +385,36 @@ static void __init setup_pci_cmd(struct pci_controller *hose)
void fsl_pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
- int i;
-
- if ((bus->parent == hose->bus) &&
- ((fsl_pcie_bus_fixup &&
- early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) ||
- (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)))
- {
- for (i = 0; i < 4; ++i) {
+ int i, is_pcie = 0, no_link;
+
+ /* The root complex bridge comes up with bogus resources, so
+ * we copy the PHB ones in.
+ *
+ * With the current generic PCI code, the PHB bus no longer
+ * has bus->resource[0..4] set, so things are a bit more
+ * tricky.
+ */
+
+ if (fsl_pcie_bus_fixup)
+ is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
+ no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
+
+ if (bus->parent == hose->bus && (is_pcie || no_link)) {
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
struct resource *res = bus->resource[i];
- struct resource *par = bus->parent->resource[i];
- if (res) {
- res->start = 0;
- res->end = 0;
- res->flags = 0;
- }
- if (res && par) {
- res->start = par->start;
- res->end = par->end;
- res->flags = par->flags;
- }
+ struct resource *par;
+
+ if (!res)
+ continue;
+ if (i == 0)
+ par = &hose->io_resource;
+ else if (i < 4)
+ par = &hose->mem_resources[i-1];
+ else par = NULL;
+
+ res->start = par ? par->start : 0;
+ res->end = par ? par->end : 0;
+ res->flags = par ? par->flags : 0;
}
}
}
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index a4c4f4a932d8..5b6f556094dd 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -66,8 +66,8 @@
" li %0,%3\n" \
" b 2b\n" \
".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,3b\n" \
+ PPC_LONG_ALIGN "\n" \
+ PPC_LONG "1b,3b\n" \
".text" \
: "=r" (err), "=r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index 15485789e9db..14bd5221f28a 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -100,14 +100,8 @@
#define DOORBELL_DSR_TE 0x00000080
#define DOORBELL_DSR_QFI 0x00000010
#define DOORBELL_DSR_DIQI 0x00000001
-#define DOORBELL_TID_OFFSET 0x02
-#define DOORBELL_SID_OFFSET 0x04
-#define DOORBELL_INFO_OFFSET 0x06
#define DOORBELL_MESSAGE_SIZE 0x08
-#define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET))
-#define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET))
-#define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET))
struct rio_msg_regs {
u32 omr;
@@ -193,6 +187,13 @@ struct fsl_rmu {
int rxirq;
};
+struct rio_dbell_msg {
+ u16 pad1;
+ u16 tid;
+ u16 sid;
+ u16 info;
+};
+
/**
* fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
* @irq: Linux interrupt number
@@ -311,8 +312,8 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
/* XXX Need to check/dispatch until queue empty */
if (dsr & DOORBELL_DSR_DIQI) {
- u32 dmsg =
- (u32) fsl_dbell->dbell_ring.virt +
+ struct rio_dbell_msg *dmsg =
+ fsl_dbell->dbell_ring.virt +
(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
struct rio_dbell *dbell;
int found = 0;
@@ -320,25 +321,25 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
pr_debug
("RIO: processing doorbell,"
" sid %2.2x tid %2.2x info %4.4x\n",
- DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
+ dmsg->sid, dmsg->tid, dmsg->info);
for (i = 0; i < MAX_PORT_NUM; i++) {
if (fsl_dbell->mport[i]) {
list_for_each_entry(dbell,
&fsl_dbell->mport[i]->dbells, node) {
if ((dbell->res->start
- <= DBELL_INF(dmsg))
+ <= dmsg->info)
&& (dbell->res->end
- >= DBELL_INF(dmsg))) {
+ >= dmsg->info)) {
found = 1;
break;
}
}
if (found && dbell->dinb) {
dbell->dinb(fsl_dbell->mport[i],
- dbell->dev_id, DBELL_SID(dmsg),
- DBELL_TID(dmsg),
- DBELL_INF(dmsg));
+ dbell->dev_id, dmsg->sid,
+ dmsg->tid,
+ dmsg->info);
break;
}
}
@@ -348,8 +349,8 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
pr_debug
("RIO: spurious doorbell,"
" sid %2.2x tid %2.2x info %4.4x\n",
- DBELL_SID(dmsg), DBELL_TID(dmsg),
- DBELL_INF(dmsg));
+ dmsg->sid, dmsg->tid,
+ dmsg->info);
}
setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
@@ -657,7 +658,7 @@ fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
int ret = 0;
pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
- "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len);
+ "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
ret = -EINVAL;
goto out;
@@ -972,7 +973,8 @@ out:
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
- u32 phys_buf, virt_buf;
+ u32 phys_buf;
+ void *virt_buf;
void *buf = NULL;
int buf_idx;
@@ -982,7 +984,7 @@ void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
goto out2;
- virt_buf = (u32) rmu->msg_rx_ring.virt + (phys_buf
+ virt_buf = rmu->msg_rx_ring.virt + (phys_buf
- rmu->msg_rx_ring.phys);
buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
buf = rmu->msg_rx_ring.virt_buffer[buf_idx];
@@ -994,7 +996,7 @@ void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
}
/* Copy max message size, caller is expected to allocate that big */
- memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
+ memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);
/* Clear the available buffer */
rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;
diff --git a/arch/powerpc/sysdev/ge/Makefile b/arch/powerpc/sysdev/ge/Makefile
new file mode 100644
index 000000000000..8731ffcb79b9
--- /dev/null
+++ b/arch/powerpc/sysdev/ge/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_GE_FPGA) += ge_pic.o
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c
index 94594e58594c..2bcb78bb3a15 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/sysdev/ge/ge_pic.c
@@ -22,7 +22,7 @@
#include <asm/prom.h>
#include <asm/irq.h>
-#include "gef_pic.h"
+#include "ge_pic.h"
#define DEBUG
#undef DEBUG
@@ -50,7 +50,7 @@
static DEFINE_RAW_SPINLOCK(gef_pic_lock);
static void __iomem *gef_pic_irq_reg_base;
-static struct irq_host *gef_pic_irq_host;
+static struct irq_domain *gef_pic_irq_host;
static int gef_pic_cascade_irq;
/*
@@ -153,7 +153,7 @@ static struct irq_chip gef_pic_chip = {
/* When an interrupt is being configured, this call allows some flexibilty
* in deciding which irq_chip structure is used
*/
-static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
+static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
@@ -163,7 +163,7 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -177,7 +177,7 @@ static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops gef_pic_host_ops = {
+static const struct irq_domain_ops gef_pic_host_ops = {
.map = gef_pic_host_map,
.xlate = gef_pic_host_xlate,
};
@@ -211,10 +211,9 @@ void __init gef_pic_init(struct device_node *np)
return;
}
- /* Setup an irq_host structure */
- gef_pic_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
- GEF_PIC_NUM_IRQS,
- &gef_pic_host_ops, NO_IRQ);
+ /* Setup an irq_domain structure */
+ gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
+ &gef_pic_host_ops, NULL);
if (gef_pic_irq_host == NULL)
return;
diff --git a/arch/powerpc/platforms/86xx/gef_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h
index 6149916da3f4..6149916da3f4 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.h
+++ b/arch/powerpc/sysdev/ge/ge_pic.h
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index d18bb27e4df9..997df6a7ab5d 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -25,7 +25,7 @@ static unsigned char cached_8259[2] = { 0xff, 0xff };
static DEFINE_RAW_SPINLOCK(i8259_lock);
-static struct irq_host *i8259_host;
+static struct irq_domain *i8259_host;
/*
* Acknowledge the IRQ using either the PCI host bridge's interrupt
@@ -163,12 +163,12 @@ static struct resource pic_edgectrl_iores = {
.flags = IORESOURCE_BUSY,
};
-static int i8259_host_match(struct irq_host *h, struct device_node *node)
+static int i8259_host_match(struct irq_domain *h, struct device_node *node)
{
return h->of_node == NULL || h->of_node == node;
}
-static int i8259_host_map(struct irq_host *h, unsigned int virq,
+static int i8259_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);
@@ -185,7 +185,7 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
+static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -205,13 +205,13 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops i8259_host_ops = {
+static struct irq_domain_ops i8259_host_ops = {
.match = i8259_host_match,
.map = i8259_host_map,
.xlate = i8259_host_xlate,
};
-struct irq_host *i8259_get_host(void)
+struct irq_domain *i8259_get_host(void)
{
return i8259_host;
}
@@ -263,8 +263,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
raw_spin_unlock_irqrestore(&i8259_lock, flags);
/* create a legacy host */
- i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY,
- 0, &i8259_host_ops, 0);
+ i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
if (i8259_host == NULL) {
printk(KERN_ERR "i8259: failed to allocate irq host !\n");
return;
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 95da897f05a7..b50f97811c25 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -672,13 +672,13 @@ static struct irq_chip ipic_edge_irq_chip = {
.irq_set_type = ipic_set_irq_type,
};
-static int ipic_host_match(struct irq_host *h, struct device_node *node)
+static int ipic_host_match(struct irq_domain *h, struct device_node *node)
{
/* Exact match, unless ipic node is NULL */
return h->of_node == NULL || h->of_node == node;
}
-static int ipic_host_map(struct irq_host *h, unsigned int virq,
+static int ipic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct ipic *ipic = h->host_data;
@@ -692,26 +692,10 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int ipic_host_xlate(struct irq_host *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_flags)
-
-{
- /* interrupt sense values coming from the device tree equal either
- * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
- */
- *out_hwirq = intspec[0];
- if (intsize > 1)
- *out_flags = intspec[1];
- else
- *out_flags = IRQ_TYPE_NONE;
- return 0;
-}
-
-static struct irq_host_ops ipic_host_ops = {
+static struct irq_domain_ops ipic_host_ops = {
.match = ipic_host_match,
.map = ipic_host_map,
- .xlate = ipic_host_xlate,
+ .xlate = irq_domain_xlate_onetwocell,
};
struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
@@ -728,9 +712,8 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
if (ipic == NULL)
return NULL;
- ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
- NR_IPIC_INTS,
- &ipic_host_ops, 0);
+ ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
+ &ipic_host_ops, ipic);
if (ipic->irqhost == NULL) {
kfree(ipic);
return NULL;
@@ -738,8 +721,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
ipic->regs = ioremap(res.start, resource_size(&res));
- ipic->irqhost->host_data = ipic;
-
/* init hw */
ipic_write(ipic->regs, IPIC_SICNR, 0x0);
diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h
index 9391c57b0c51..90031d1282e1 100644
--- a/arch/powerpc/sysdev/ipic.h
+++ b/arch/powerpc/sysdev/ipic.h
@@ -43,7 +43,7 @@ struct ipic {
volatile u32 __iomem *regs;
/* The remapper for this IPIC */
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
};
struct ipic_info {
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 2ca0a85fcce9..d5f5416be310 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -17,7 +17,7 @@
extern int cpm_get_irq(struct pt_regs *regs);
-static struct irq_host *mpc8xx_pic_host;
+static struct irq_domain *mpc8xx_pic_host;
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
static sysconf8xx_t __iomem *siu_reg;
@@ -110,7 +110,7 @@ unsigned int mpc8xx_get_irq(void)
}
-static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
+static int mpc8xx_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -121,7 +121,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
}
-static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -142,7 +142,7 @@ static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct,
}
-static struct irq_host_ops mpc8xx_pic_host_ops = {
+static struct irq_domain_ops mpc8xx_pic_host_ops = {
.map = mpc8xx_pic_host_map,
.xlate = mpc8xx_pic_host_xlate,
};
@@ -171,8 +171,7 @@ int mpc8xx_pic_init(void)
goto out;
}
- mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
- 64, &mpc8xx_pic_host_ops, 64);
+ mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
if (mpc8xx_pic_host == NULL) {
printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
ret = -ENOMEM;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 4e9ccb1015de..9ac71ebd2c40 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -873,7 +873,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
mpic, d->irq, src, flow_type);
- if (src >= mpic->irq_count)
+ if (src >= mpic->num_sources)
return -EINVAL;
if (flow_type == IRQ_TYPE_NONE)
@@ -909,7 +909,7 @@ void mpic_set_vector(unsigned int virq, unsigned int vector)
DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
mpic, virq, src, vector);
- if (src >= mpic->irq_count)
+ if (src >= mpic->num_sources)
return;
vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
@@ -926,7 +926,7 @@ void mpic_set_destination(unsigned int virq, unsigned int cpuid)
DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
mpic, virq, src, cpuid);
- if (src >= mpic->irq_count)
+ if (src >= mpic->num_sources)
return;
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
@@ -965,13 +965,13 @@ static struct irq_chip mpic_irq_ht_chip = {
#endif /* CONFIG_MPIC_U3_HT_IRQS */
-static int mpic_host_match(struct irq_host *h, struct device_node *node)
+static int mpic_host_match(struct irq_domain *h, struct device_node *node)
{
/* Exact match, unless mpic node is NULL */
return h->of_node == NULL || h->of_node == node;
}
-static int mpic_host_map(struct irq_host *h, unsigned int virq,
+static int mpic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct mpic *mpic = h->host_data;
@@ -1006,7 +1006,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
- if (hw >= mpic->irq_count)
+ if (hw >= mpic->num_sources)
return -EINVAL;
mpic_msi_reserve_hwirq(mpic, hw);
@@ -1041,7 +1041,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
+static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -1121,13 +1121,13 @@ static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
BUG_ON(!(mpic->flags & MPIC_SECONDARY));
virq = mpic_get_one_irq(mpic);
- if (virq != NO_IRQ)
+ if (virq)
generic_handle_irq(virq);
chip->irq_eoi(&desc->irq_data);
}
-static struct irq_host_ops mpic_host_ops = {
+static struct irq_domain_ops mpic_host_ops = {
.match = mpic_host_match,
.map = mpic_host_map,
.xlate = mpic_host_xlate,
@@ -1149,6 +1149,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
u32 greg_feature;
const char *vers;
const u32 *psrc;
+ u32 last_irq;
/* Default MPIC search parameters */
static const struct of_device_id __initconst mpic_device_id[] = {
@@ -1182,6 +1183,16 @@ struct mpic * __init mpic_alloc(struct device_node *node,
}
}
+ /* Read extra device-tree properties into the flags variable */
+ if (of_get_property(node, "big-endian", NULL))
+ flags |= MPIC_BIG_ENDIAN;
+ if (of_get_property(node, "pic-no-reset", NULL))
+ flags |= MPIC_NO_RESET;
+ if (of_get_property(node, "single-cpu-affinity", NULL))
+ flags |= MPIC_SINGLE_DEST_CPU;
+ if (of_device_is_compatible(node, "fsl,mpic"))
+ flags |= MPIC_FSL;
+
mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
if (mpic == NULL)
goto err_of_node_put;
@@ -1189,15 +1200,16 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->name = name;
mpic->node = node;
mpic->paddr = phys_addr;
+ mpic->flags = flags;
mpic->hc_irq = mpic_irq_chip;
mpic->hc_irq.name = name;
- if (!(flags & MPIC_SECONDARY))
+ if (!(mpic->flags & MPIC_SECONDARY))
mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_U3_HT_IRQS
mpic->hc_ht_irq = mpic_irq_ht_chip;
mpic->hc_ht_irq.name = name;
- if (!(flags & MPIC_SECONDARY))
+ if (!(mpic->flags & MPIC_SECONDARY))
mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_U3_HT_IRQS */
@@ -1209,12 +1221,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->hc_tm = mpic_tm_chip;
mpic->hc_tm.name = name;
- mpic->flags = flags;
- mpic->isu_size = isu_size;
- mpic->irq_count = irq_count;
mpic->num_sources = 0; /* so far */
- if (flags & MPIC_LARGE_VECTORS)
+ if (mpic->flags & MPIC_LARGE_VECTORS)
intvec_top = 2047;
else
intvec_top = 255;
@@ -1233,12 +1242,6 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->ipi_vecs[3] = intvec_top - 1;
mpic->spurious_vec = intvec_top;
- /* Check for "big-endian" in device-tree */
- if (of_get_property(mpic->node, "big-endian", NULL) != NULL)
- mpic->flags |= MPIC_BIG_ENDIAN;
- if (of_device_is_compatible(mpic->node, "fsl,mpic"))
- mpic->flags |= MPIC_FSL;
-
/* Look for protected sources */
psrc = of_get_property(mpic->node, "protected-sources", &psize);
if (psrc) {
@@ -1254,11 +1257,11 @@ struct mpic * __init mpic_alloc(struct device_node *node,
}
#ifdef CONFIG_MPIC_WEIRD
- mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
+ mpic->hw_set = mpic_infos[MPIC_GET_REGSET(mpic->flags)];
#endif
/* default register type */
- if (flags & MPIC_BIG_ENDIAN)
+ if (mpic->flags & MPIC_BIG_ENDIAN)
mpic->reg_type = mpic_access_mmio_be;
else
mpic->reg_type = mpic_access_mmio_le;
@@ -1268,10 +1271,10 @@ struct mpic * __init mpic_alloc(struct device_node *node,
* only if the kernel includes DCR support.
*/
#ifdef CONFIG_PPC_DCR
- if (flags & MPIC_USES_DCR)
+ if (mpic->flags & MPIC_USES_DCR)
mpic->reg_type = mpic_access_dcr;
#else
- BUG_ON(flags & MPIC_USES_DCR);
+ BUG_ON(mpic->flags & MPIC_USES_DCR);
#endif
/* Map the global registers */
@@ -1283,10 +1286,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* When using a device-node, reset requests are only honored if the MPIC
* is allowed to reset.
*/
- if (of_get_property(mpic->node, "pic-no-reset", NULL))
- mpic->flags |= MPIC_NO_RESET;
-
- if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) {
+ if (!(mpic->flags & MPIC_NO_RESET)) {
printk(KERN_DEBUG "mpic: Resetting\n");
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
@@ -1297,31 +1297,17 @@ struct mpic * __init mpic_alloc(struct device_node *node,
}
/* CoreInt */
- if (flags & MPIC_ENABLE_COREINT)
+ if (mpic->flags & MPIC_ENABLE_COREINT)
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_COREINT);
- if (flags & MPIC_ENABLE_MCK)
+ if (mpic->flags & MPIC_ENABLE_MCK)
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_MCK);
/*
- * Read feature register. For non-ISU MPICs, num sources as well. On
- * ISU MPICs, sources are counted as ISUs are added
- */
- greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
- if (isu_size == 0) {
- if (flags & MPIC_BROKEN_FRR_NIRQS)
- mpic->num_sources = mpic->irq_count;
- else
- mpic->num_sources =
- ((greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
- >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
- }
-
- /*
* The MPIC driver will crash if there are more cores than we
* can initialize, so we may as well catch that problem here.
*/
@@ -1336,19 +1322,42 @@ struct mpic * __init mpic_alloc(struct device_node *node,
0x1000);
}
+ /*
+ * Read feature register. For non-ISU MPICs, num sources as well. On
+ * ISU MPICs, sources are counted as ISUs are added
+ */
+ greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
+
+ /*
+ * By default, the last source number comes from the MPIC, but the
+ * device-tree and board support code can override it on buggy hw.
+ * If we get passed an isu_size (multi-isu MPIC) then we use that
+ * as a default instead of the value read from the HW.
+ */
+ last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
+ >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;
+ if (isu_size)
+ last_irq = isu_size * MPIC_MAX_ISU - 1;
+ of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
+ if (irq_count)
+ last_irq = irq_count - 1;
+
/* Initialize main ISU if none provided */
- if (mpic->isu_size == 0) {
- mpic->isu_size = mpic->num_sources;
+ if (!isu_size) {
+ isu_size = last_irq + 1;
+ mpic->num_sources = isu_size;
mpic_map(mpic, mpic->paddr, &mpic->isus[0],
- MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+ MPIC_INFO(IRQ_BASE),
+ MPIC_INFO(IRQ_STRIDE) * isu_size);
}
+
+ mpic->isu_size = isu_size;
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
mpic->isu_mask = (1 << mpic->isu_shift) - 1;
- mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR,
- isu_size ? isu_size : mpic->num_sources,
- &mpic_host_ops,
- flags & MPIC_LARGE_VECTORS ? 2048 : 256);
+ mpic->irqhost = irq_domain_add_linear(mpic->node,
+ last_irq + 1,
+ &mpic_host_ops, mpic);
/*
* FIXME: The code leaks the MPIC object and mappings here; this
@@ -1357,8 +1366,6 @@ struct mpic * __init mpic_alloc(struct device_node *node,
if (mpic->irqhost == NULL)
return NULL;
- mpic->irqhost->host_data = mpic;
-
/* Display version */
switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
case 1:
@@ -1383,7 +1390,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->next = mpics;
mpics = mpic;
- if (!(flags & MPIC_SECONDARY)) {
+ if (!(mpic->flags & MPIC_SECONDARY)) {
mpic_primary = mpic;
irq_set_default_host(mpic->irqhost);
}
@@ -1450,10 +1457,6 @@ void __init mpic_init(struct mpic *mpic)
(mpic->ipi_vecs[0] + i));
}
- /* Initialize interrupt sources */
- if (mpic->irq_count == 0)
- mpic->irq_count = mpic->num_sources;
-
/* Do the HT PIC fixups on U3 broken mpic */
DBG("MPIC flags: %x\n", mpic->flags);
if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
new file mode 100644
index 000000000000..6e7fa386e76a
--- /dev/null
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2011-2012, Meador Inge, Mentor Graphics Corporation.
+ *
+ * Some ideas based on un-pushed work done by Vivek Mahajan, Jason Jin, and
+ * Mingkai Hu from Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/list.h>
+#include <linux/of_platform.h>
+#include <linux/errno.h>
+#include <asm/prom.h>
+#include <asm/hw_irq.h>
+#include <asm/ppc-pci.h>
+#include <asm/mpic_msgr.h>
+
+#define MPIC_MSGR_REGISTERS_PER_BLOCK 4
+#define MPIC_MSGR_STRIDE 0x10
+#define MPIC_MSGR_MER_OFFSET 0x100
+#define MSGR_INUSE 0
+#define MSGR_FREE 1
+
+static struct mpic_msgr **mpic_msgrs;
+static unsigned int mpic_msgr_count;
+
+static inline void _mpic_msgr_mer_write(struct mpic_msgr *msgr, u32 value)
+{
+ out_be32(msgr->mer, value);
+}
+
+static inline u32 _mpic_msgr_mer_read(struct mpic_msgr *msgr)
+{
+ return in_be32(msgr->mer);
+}
+
+static inline void _mpic_msgr_disable(struct mpic_msgr *msgr)
+{
+ u32 mer = _mpic_msgr_mer_read(msgr);
+
+ _mpic_msgr_mer_write(msgr, mer & ~(1 << msgr->num));
+}
+
+struct mpic_msgr *mpic_msgr_get(unsigned int reg_num)
+{
+ unsigned long flags;
+ struct mpic_msgr *msgr;
+
+ /* Assume busy until proven otherwise. */
+ msgr = ERR_PTR(-EBUSY);
+
+ if (reg_num >= mpic_msgr_count)
+ return ERR_PTR(-ENODEV);
+
+ raw_spin_lock_irqsave(&mpic_msgrs[reg_num]->lock, flags);
+ if (mpic_msgrs[reg_num]->in_use == MSGR_FREE) {
+ msgr = mpic_msgrs[reg_num];
+ msgr->in_use = MSGR_INUSE;
+ }
+ raw_spin_unlock_irqrestore(&mpic_msgrs[reg_num]->lock, flags);
+
+ return msgr;
+}
+EXPORT_SYMBOL_GPL(mpic_msgr_get);
+
+void mpic_msgr_put(struct mpic_msgr *msgr)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&msgr->lock, flags);
+ msgr->in_use = MSGR_FREE;
+ _mpic_msgr_disable(msgr);
+ raw_spin_unlock_irqrestore(&msgr->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mpic_msgr_put);
+
+void mpic_msgr_enable(struct mpic_msgr *msgr)
+{
+ unsigned long flags;
+ u32 mer;
+
+ raw_spin_lock_irqsave(&msgr->lock, flags);
+ mer = _mpic_msgr_mer_read(msgr);
+ _mpic_msgr_mer_write(msgr, mer | (1 << msgr->num));
+ raw_spin_unlock_irqrestore(&msgr->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mpic_msgr_enable);
+
+void mpic_msgr_disable(struct mpic_msgr *msgr)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&msgr->lock, flags);
+ _mpic_msgr_disable(msgr);
+ raw_spin_unlock_irqrestore(&msgr->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mpic_msgr_disable);
+
+/* The following three functions are used to compute the order and number of
+ * the message register blocks. They are clearly very inefficient. However,
+ * they are called *only* a few times during device initialization.
+ */
+static unsigned int mpic_msgr_number_of_blocks(void)
+{
+ unsigned int count;
+ struct device_node *aliases;
+
+ count = 0;
+ aliases = of_find_node_by_name(NULL, "aliases");
+
+ if (aliases) {
+ char buf[32];
+
+ for (;;) {
+ snprintf(buf, sizeof(buf), "mpic-msgr-block%d", count);
+ if (!of_find_property(aliases, buf, NULL))
+ break;
+
+ count += 1;
+ }
+ }
+
+ return count;
+}
+
+static unsigned int mpic_msgr_number_of_registers(void)
+{
+ return mpic_msgr_number_of_blocks() * MPIC_MSGR_REGISTERS_PER_BLOCK;
+}
+
+static int mpic_msgr_block_number(struct device_node *node)
+{
+ struct device_node *aliases;
+ unsigned int index, number_of_blocks;
+ char buf[64];
+
+ number_of_blocks = mpic_msgr_number_of_blocks();
+ aliases = of_find_node_by_name(NULL, "aliases");
+ if (!aliases)
+ return -1;
+
+ for (index = 0; index < number_of_blocks; ++index) {
+ struct property *prop;
+
+ snprintf(buf, sizeof(buf), "mpic-msgr-block%d", index);
+ prop = of_find_property(aliases, buf, NULL);
+ if (node == of_find_node_by_path(prop->value))
+ break;
+ }
+
+ return index == number_of_blocks ? -1 : index;
+}
+
+/* The probe function for a single message register block.
+ */
+static __devinit int mpic_msgr_probe(struct platform_device *dev)
+{
+ void __iomem *msgr_block_addr;
+ int block_number;
+ struct resource rsrc;
+ unsigned int i;
+ unsigned int irq_index;
+ struct device_node *np = dev->dev.of_node;
+ unsigned int receive_mask;
+ const unsigned int *prop;
+
+ if (!np) {
+ dev_err(&dev->dev, "Device OF-Node is NULL");
+ return -EFAULT;
+ }
+
+ /* Allocate the message register array upon the first device
+ * registered.
+ */
+ if (!mpic_msgrs) {
+ mpic_msgr_count = mpic_msgr_number_of_registers();
+ dev_info(&dev->dev, "Found %d message registers\n",
+ mpic_msgr_count);
+
+ mpic_msgrs = kcalloc(mpic_msgr_count, sizeof(*mpic_msgrs),
+ GFP_KERNEL);
+ if (!mpic_msgrs) {
+ dev_err(&dev->dev,
+ "No memory for message register blocks\n");
+ return -ENOMEM;
+ }
+ }
+ dev_info(&dev->dev, "Of-device full name %s\n", np->full_name);
+
+ /* IO map the message register block. */
+ of_address_to_resource(np, 0, &rsrc);
+ msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
+ if (!msgr_block_addr) {
+ dev_err(&dev->dev, "Failed to iomap MPIC message registers");
+ return -EFAULT;
+ }
+
+ /* Ensure the block has a defined order. */
+ block_number = mpic_msgr_block_number(np);
+ if (block_number < 0) {
+ dev_err(&dev->dev,
+ "Failed to find message register block alias\n");
+ return -ENODEV;
+ }
+ dev_info(&dev->dev, "Setting up message register block %d\n",
+ block_number);
+
+ /* Grab the receive mask which specifies what registers can receive
+ * interrupts.
+ */
+ prop = of_get_property(np, "mpic-msgr-receive-mask", NULL);
+ receive_mask = (prop) ? *prop : 0xF;
+
+ /* Build up the appropriate message register data structures. */
+ for (i = 0, irq_index = 0; i < MPIC_MSGR_REGISTERS_PER_BLOCK; ++i) {
+ struct mpic_msgr *msgr;
+ unsigned int reg_number;
+
+ msgr = kzalloc(sizeof(struct mpic_msgr), GFP_KERNEL);
+ if (!msgr) {
+ dev_err(&dev->dev, "No memory for message register\n");
+ return -ENOMEM;
+ }
+
+ reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i;
+ msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE;
+ msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET;
+ msgr->in_use = MSGR_FREE;
+ msgr->num = i;
+ raw_spin_lock_init(&msgr->lock);
+
+ if (receive_mask & (1 << i)) {
+ struct resource irq;
+
+ if (of_irq_to_resource(np, irq_index, &irq) == NO_IRQ) {
+ dev_err(&dev->dev,
+ "Missing interrupt specifier");
+ kfree(msgr);
+ return -EFAULT;
+ }
+ msgr->irq = irq.start;
+ irq_index += 1;
+ } else {
+ msgr->irq = NO_IRQ;
+ }
+
+ mpic_msgrs[reg_number] = msgr;
+ mpic_msgr_disable(msgr);
+ dev_info(&dev->dev, "Register %d initialized: irq %d\n",
+ reg_number, msgr->irq);
+
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mpic_msgr_ids[] = {
+ {
+ .compatible = "fsl,mpic-v3.1-msgr",
+ .data = NULL,
+ },
+ {}
+};
+
+static struct platform_driver mpic_msgr_driver = {
+ .driver = {
+ .name = "mpic-msgr",
+ .owner = THIS_MODULE,
+ .of_match_table = mpic_msgr_ids,
+ },
+ .probe = mpic_msgr_probe,
+};
+
+static __init int mpic_msgr_init(void)
+{
+ return platform_driver_register(&mpic_msgr_driver);
+}
+subsys_initcall(mpic_msgr_init);
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index 0f67cd79d481..bbf342c88314 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -32,7 +32,7 @@ void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
irq_hw_number_t hwirq;
- struct irq_host_ops *ops = mpic->irqhost->ops;
+ const struct irq_domain_ops *ops = mpic->irqhost->ops;
struct device_node *np;
int flags, index, i;
struct of_irq oirq;
@@ -54,7 +54,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
for (i = 100; i < 105; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
- for (i = 124; i < mpic->irq_count; i++)
+ for (i = 124; i < mpic->num_sources; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
@@ -83,7 +83,7 @@ int mpic_msi_init_allocator(struct mpic *mpic)
{
int rc;
- rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->irq_count,
+ rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources,
mpic->irqhost->of_node);
if (rc)
return rc;
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 14d130268e7a..8848e99a83f2 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -70,7 +70,7 @@ static u32 mv64x60_cached_low_mask;
static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
static u32 mv64x60_cached_gpp_mask;
-static struct irq_host *mv64x60_irq_host;
+static struct irq_domain *mv64x60_irq_host;
/*
* mv64x60_chip_low functions
@@ -208,7 +208,7 @@ static struct irq_chip *mv64x60_chips[] = {
[MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp,
};
-static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
+static int mv64x60_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
int level1;
@@ -223,7 +223,7 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops mv64x60_host_ops = {
+static struct irq_domain_ops mv64x60_host_ops = {
.map = mv64x60_host_map,
};
@@ -250,9 +250,8 @@ void __init mv64x60_init_irq(void)
paddr = of_translate_address(np, reg);
mv64x60_irq_reg_base = ioremap(paddr, reg[1]);
- mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
- MV64x60_NUM_IRQS,
- &mv64x60_host_ops, MV64x60_NUM_IRQS);
+ mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
+ &mv64x60_host_ops, NULL);
spin_lock_irqsave(&mv64x60_lock, flags);
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 4f05f7542346..56e8b3c3c890 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -1050,6 +1050,74 @@ static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
.check_link = ppc4xx_pciex_check_link_sdr,
};
+static int __init apm821xx_pciex_core_init(struct device_node *np)
+{
+ /* Return the number of pcie port */
+ return 1;
+}
+
+static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
+{
+ u32 val;
+
+ /*
+ * Do a software reset on PCIe ports.
+ * This fixes an issue where PCI drivers do not re-assign
+ * bus numbers for PCIe devices after U-Boot has already
+ * scanned and configured all the buses (e.g. PCIe NIC Intel Pro/1000
+ * PT quad port, SAS LSI 1064E).
+ */
+
+ mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
+ mdelay(10);
+
+ if (port->endpoint)
+ val = PTYPE_LEGACY_ENDPOINT << 20;
+ else
+ val = PTYPE_ROOT_PORT << 20;
+
+ val |= LNKW_X1 << 12;
+
+ mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
+ mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
+ mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
+
+ mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
+ mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
+ mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
+
+ mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
+ mdelay(50);
+ mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
+
+ mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
+ mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
+ (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
+
+ /* Poll for PHY reset */
+ val = PESDR0_460EX_RSTSTA - port->sdr_base;
+ if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
+ printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
+ return -EBUSY;
+ } else {
+ mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
+ (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
+ ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
+ PESDRx_RCSSET_RSTPYN);
+
+ port->has_ibpre = 1;
+ return 0;
+ }
+}
+
+static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
+ .want_sdr = true,
+ .core_init = apm821xx_pciex_core_init,
+ .port_init_hw = apm821xx_pciex_init_port_hw,
+ .setup_utl = ppc460ex_pciex_init_utl,
+ .check_link = ppc4xx_pciex_check_link_sdr,
+};
+
static int __init ppc460sx_pciex_core_init(struct device_node *np)
{
/* HSS drive amplitude */
@@ -1362,6 +1430,8 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
+ if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
+ ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
#endif /* CONFIG_44x */
#ifdef CONFIG_40x
if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 73034bd203c4..2fba6ef2f95e 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -245,13 +245,13 @@ static struct irq_chip qe_ic_irq_chip = {
.irq_mask_ack = qe_ic_mask_irq,
};
-static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node)
{
/* Exact match, unless qe_ic node is NULL */
return h->of_node == NULL || h->of_node == node;
}
-static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct qe_ic *qe_ic = h->host_data;
@@ -272,23 +272,10 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct,
- const u32 * intspec, unsigned int intsize,
- irq_hw_number_t * out_hwirq,
- unsigned int *out_flags)
-{
- *out_hwirq = intspec[0];
- if (intsize > 1)
- *out_flags = intspec[1];
- else
- *out_flags = IRQ_TYPE_NONE;
- return 0;
-}
-
-static struct irq_host_ops qe_ic_host_ops = {
+static struct irq_domain_ops qe_ic_host_ops = {
.match = qe_ic_host_match,
.map = qe_ic_host_map,
- .xlate = qe_ic_host_xlate,
+ .xlate = irq_domain_xlate_onetwocell,
};
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
@@ -339,8 +326,8 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
if (qe_ic == NULL)
return;
- qe_ic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
- NR_QE_IC_INTS, &qe_ic_host_ops, 0);
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
if (qe_ic->irqhost == NULL) {
kfree(qe_ic);
return;
@@ -348,7 +335,6 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
qe_ic->regs = ioremap(res.start, resource_size(&res));
- qe_ic->irqhost->host_data = qe_ic;
qe_ic->hc_irq = qe_ic_irq_chip;
qe_ic->virq_high = irq_of_parse_and_map(node, 0);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
index c1361d005a8a..c327872ed35c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.h
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h
@@ -79,7 +79,7 @@ struct qe_ic {
volatile u32 __iomem *regs;
/* The remapper for this QEIC */
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 4d18658116e5..188012c58f7f 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -51,7 +51,7 @@
u32 tsi108_pci_cfg_base;
static u32 tsi108_pci_cfg_phys;
u32 tsi108_csr_vir_base;
-static struct irq_host *pci_irq_host;
+static struct irq_domain *pci_irq_host;
extern u32 get_vir_csrbase(void);
extern u32 tsi108_read_reg(u32 reg_offset);
@@ -376,7 +376,7 @@ static struct irq_chip tsi108_pci_irq = {
.irq_unmask = tsi108_pci_irq_unmask,
};
-static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
+static int pci_irq_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -385,7 +385,7 @@ static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
+static int pci_irq_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{ unsigned int irq;
DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
@@ -397,7 +397,7 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops pci_irq_host_ops = {
+static struct irq_domain_ops pci_irq_domain_ops = {
.map = pci_irq_host_map,
.xlate = pci_irq_host_xlate,
};
@@ -419,10 +419,9 @@ void __init tsi108_pci_int_init(struct device_node *node)
{
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
- pci_irq_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY,
- 0, &pci_irq_host_ops, 0);
+ pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL);
if (pci_irq_host == NULL) {
- printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n");
+ printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
return;
}
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 063c901b1265..92033936a8f7 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -49,7 +49,7 @@ struct uic {
raw_spinlock_t lock;
/* The remapper for this UIC */
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
};
static void uic_unmask_irq(struct irq_data *d)
@@ -174,7 +174,7 @@ static struct irq_chip uic_irq_chip = {
.irq_set_type = uic_set_irq_type,
};
-static int uic_host_map(struct irq_host *h, unsigned int virq,
+static int uic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct uic *uic = h->host_data;
@@ -190,21 +190,9 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static int uic_host_xlate(struct irq_host *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type)
-
-{
- /* UIC intspecs must have 2 cells */
- BUG_ON(intsize != 2);
- *out_hwirq = intspec[0];
- *out_type = intspec[1];
- return 0;
-}
-
-static struct irq_host_ops uic_host_ops = {
+static struct irq_domain_ops uic_host_ops = {
.map = uic_host_map,
- .xlate = uic_host_xlate,
+ .xlate = irq_domain_xlate_twocell,
};
void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
@@ -270,13 +258,11 @@ static struct uic * __init uic_init_one(struct device_node *node)
}
uic->dcrbase = *dcrreg;
- uic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
- NR_UIC_INTS, &uic_host_ops, -1);
+ uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
+ uic);
if (! uic->irqhost)
return NULL; /* FIXME: panic? */
- uic->irqhost->host_data = uic;
-
/* Start with all interrupts disabled, level and non-critical */
mtdcr(uic->dcrbase + UIC_ER, 0);
mtdcr(uic->dcrbase + UIC_CR, 0);
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index d72eda6a4c05..ea5e204e3450 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -40,7 +40,7 @@ unsigned int xics_interrupt_server_size = 8;
DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
-struct irq_host *xics_host;
+struct irq_domain *xics_host;
static LIST_HEAD(ics_list);
@@ -212,16 +212,16 @@ void xics_migrate_irqs_away(void)
/* We can't set affinity on ISA interrupts */
if (virq < NUM_ISA_INTERRUPTS)
continue;
- if (!virq_is_host(virq, xics_host))
- continue;
- irq = (unsigned int)virq_to_hw(virq);
- /* We need to get IPIs still. */
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
- continue;
desc = irq_to_desc(virq);
/* We only need to migrate enabled IRQS */
if (!desc || !desc->action)
continue;
+ if (desc->irq_data.domain != xics_host)
+ continue;
+ irq = desc->irq_data.hwirq;
+ /* We need to get IPIs still. */
+ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+ continue;
chip = irq_desc_get_chip(desc);
if (!chip || !chip->irq_set_affinity)
continue;
@@ -301,7 +301,7 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
}
#endif /* CONFIG_SMP */
-static int xics_host_match(struct irq_host *h, struct device_node *node)
+static int xics_host_match(struct irq_domain *h, struct device_node *node)
{
struct ics *ics;
@@ -323,7 +323,7 @@ static struct irq_chip xics_ipi_chip = {
.irq_unmask = xics_ipi_unmask,
};
-static int xics_host_map(struct irq_host *h, unsigned int virq,
+static int xics_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct ics *ics;
@@ -351,7 +351,7 @@ static int xics_host_map(struct irq_host *h, unsigned int virq,
return -EINVAL;
}
-static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
+static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
@@ -366,7 +366,7 @@ static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static struct irq_host_ops xics_host_ops = {
+static struct irq_domain_ops xics_host_ops = {
.match = xics_host_match,
.map = xics_host_map,
.xlate = xics_host_xlate,
@@ -374,8 +374,7 @@ static struct irq_host_ops xics_host_ops = {
static void __init xics_init_host(void)
{
- xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
- XICS_IRQ_SPURIOUS);
+ xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL);
BUG_ON(xics_host == NULL);
irq_set_default_host(xics_host);
}
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 6183799754af..8d73c3c0bee6 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -40,7 +40,7 @@
#define XINTC_IVR 24 /* Interrupt Vector */
#define XINTC_MER 28 /* Master Enable */
-static struct irq_host *master_irqhost;
+static struct irq_domain *master_irqhost;
#define XILINX_INTC_MAXIRQS (32)
@@ -141,7 +141,7 @@ static struct irq_chip xilinx_intc_edge_irqchip = {
/**
* xilinx_intc_xlate - translate virq# from device tree interrupts property
*/
-static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
+static int xilinx_intc_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
@@ -161,7 +161,7 @@ static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
+static int xilinx_intc_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t irq)
{
irq_set_chip_data(virq, h->host_data);
@@ -177,15 +177,15 @@ static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
return 0;
}
-static struct irq_host_ops xilinx_intc_ops = {
+static struct irq_domain_ops xilinx_intc_ops = {
.map = xilinx_intc_map,
.xlate = xilinx_intc_xlate,
};
-struct irq_host * __init
+struct irq_domain * __init
xilinx_intc_init(struct device_node *np)
{
- struct irq_host * irq;
+ struct irq_domain * irq;
void * regs;
/* Find and map the intc registers */
@@ -200,12 +200,11 @@ xilinx_intc_init(struct device_node *np)
out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */
out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */
- /* Allocate and initialize an irq_host structure. */
- irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS,
- &xilinx_intc_ops, -1);
+ /* Allocate and initialize an irq_domain structure. */
+ irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops,
+ regs);
if (!irq)
panic(__FILE__ ": Cannot allocate IRQ host\n");
- irq->host_data = regs;
return irq;
}
diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index af3780e52e76..6845e91ba04a 100644
--- a/arch/powerpc/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
@@ -22,6 +22,7 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
+#include <linux/bug.h>
#include "nonstdio.h"
#include "ppc.h"
diff --git a/arch/powerpc/xmon/spu-opc.c b/arch/powerpc/xmon/spu-opc.c
index 530df3d6d7b2..7d37597c4bcd 100644
--- a/arch/powerpc/xmon/spu-opc.c
+++ b/arch/powerpc/xmon/spu-opc.c
@@ -19,6 +19,7 @@
51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
#include <linux/kernel.h>
+#include <linux/bug.h>
#include "spu.h"
/* This file holds the Spu opcode table */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index cb95eea74d3d..68a9cbbab450 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -39,7 +39,6 @@
#include <asm/irq_regs.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
-#include <asm/firmware.h>
#include <asm/setjmp.h>
#include <asm/reg.h>
@@ -1437,7 +1436,8 @@ static void excprint(struct pt_regs *fp)
printf(" current = 0x%lx\n", current);
#ifdef CONFIG_PPC64
- printf(" paca = 0x%lx\n", get_paca());
+ printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n",
+ local_paca, local_paca->soft_enabled, local_paca->irq_happened);
#endif
if (current) {
printf(" pid = %ld, comm = %s\n",
@@ -1634,25 +1634,6 @@ static void super_regs(void)
mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES)) {
- struct paca_struct *ptrPaca;
- struct lppaca *ptrLpPaca;
-
- /* Dump out relevant Paca data areas. */
- printf("Paca: \n");
- ptrPaca = get_paca();
-
- printf(" Local Processor Control Area (LpPaca): \n");
- ptrLpPaca = ptrPaca->lppaca_ptr;
- printf(" Saved Srr0=%.16lx Saved Srr1=%.16lx \n",
- ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
- printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n",
- ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
- printf(" Saved Gpr5=%.16lx \n",
- ptrLpPaca->gpr5_dword.saved_gpr5);
- }
-#endif
return;
}
@@ -2644,7 +2625,7 @@ static void dump_slb(void)
static void dump_stab(void)
{
int i;
- unsigned long *tmp = (unsigned long *)get_paca()->stab_addr;
+ unsigned long *tmp = (unsigned long *)local_paca->stab_addr;
printf("Segment table contents of cpu %x\n", smp_processor_id());
@@ -2855,10 +2836,6 @@ static void dump_tlb_book3e(void)
static void xmon_init(int enable)
{
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return;
-#endif
if (enable) {
__debugger = xmon;
__debugger_ipi = xmon_ipi;
@@ -2895,10 +2872,6 @@ static struct sysrq_key_op sysrq_xmon_op = {
static int __init setup_xmon_sysrq(void)
{
-#ifdef CONFIG_PPC_ISERIES
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return 0;
-#endif
register_sysrq_key('x', &sysrq_xmon_op);
return 0;
}