273 files changed, 7846 insertions, 3406 deletions
diff --git a/Documentation/acpi/acpi-lid.txt b/Documentation/acpi/acpi-lid.txt
new file mode 100644
index 000000000000..effe7af3a5af
--- /dev/null
+++ b/Documentation/acpi/acpi-lid.txt
@@ -0,0 +1,96 @@
+Special Usage Model of the ACPI Control Method Lid Device
+
+Copyright (C) 2016, Intel Corporation
+Author: Lv Zheng <lv.zheng@intel.com>
+
+
+Abstract:
+
+Platforms containing lids convey lid state (open/close) to OSPMs using a
+control method lid device. To implement this, the AML tables issue
+Notify(lid_device, 0x80) to notify the OSPMs whenever the lid state has
+changed. The _LID control method for the lid device must be implemented to
+report the "current" state of the lid as either "opened" or "closed".
+
+For most platforms, both the _LID method and the lid notifications are
+reliable. However, there are exceptions. In order to work with these
+buggy platforms, special restrictions and expectations should be
+taken into account. This document describes the restrictions and the
+expectations of the Linux ACPI lid device driver.
+
+1. Restrictions on the return value of the _LID control method
+
+The _LID control method is described as returning the "current" lid state.
+However, the word "current" is ambiguous: some buggy AML tables return
+the lid state as of the last lid notification instead of the lid state
+as of the last _LID evaluation. There is no difference when the
+_LID control method is evaluated at runtime; the problem is its
+initial return value. When the AML tables implement this control method
+with a cached value, the initial return value is likely not reliable.
+Some platforms always return "closed" as the initial lid state.
+
+2. Restrictions on the lid state change notifications
+
+Some buggy AML tables never notify when the lid device state is
+changed to "opened", so the "opened" notification is not guaranteed. It
+is guaranteed, however, that the AML tables always notify "closed" when
+the lid state is changed to "closed". The "closed" notification is
+normally used to trigger some system power saving operations on Windows.
+Since it is fully tested there, it is reliable from all AML tables.
+
+3. Expectations for the userspace users of the ACPI lid device driver
+
+The ACPI button driver exports the lid state to the userspace via the
+following file:
+  /proc/acpi/button/lid/LID0/state
+Reading this file actually evaluates the _LID control method described
+above, and given the previous explanation, it is not reliable enough on
+some platforms. So userspace programs are advised not to rely solely on
+this file to determine the actual lid state.
+
+The ACPI button driver emits the following input event to the userspace:
+  SW_LID
+The ACPI lid device driver is implemented to try to deliver the
+platform-triggered events to the userspace. However, given the fact that
+buggy firmware cannot make sure "opened"/"closed" events are paired, the
+ACPI button driver uses the following 3 modes in order not to trigger
+issues.
+
+If the userspace hasn't been prepared to ignore the unreliable "opened"
+events and the unreliable initial state notification, Linux users can use
+the following kernel parameters to handle the possible issues:
+A. button.lid_init_state=method:
+   When this option is specified, the ACPI button driver reports the
+   initial lid state using the return value of the _LID control method,
+   and whether the "opened"/"closed" events are paired fully relies on
+   the firmware implementation.
+   This option can be used to fix some platforms where the return value
+   of the _LID control method is reliable but the initial lid state
+   notification is missing.
+   This option is the default behavior during the period the userspace
+   isn't ready to handle the buggy AML tables.
+B. button.lid_init_state=open:
+   When this option is specified, the ACPI button driver always reports
+   the initial lid state as "opened", and whether the "opened"/"closed"
+   events are paired fully relies on the firmware implementation.
+   This may fix some platforms where the return value of the _LID
+   control method is not reliable and the initial lid state notification
+   is missing.
+
+If the userspace has been prepared to ignore the unreliable "opened"
+events and the unreliable initial state notification, Linux users should
+always use the following kernel parameter:
+C. button.lid_init_state=ignore:
+   When this option is specified, the ACPI button driver never reports
+   the initial lid state, and there is a compensation mechanism
+   implemented to ensure that the reliable "closed" notifications can
+   always be delivered to the userspace, by always pairing "closed"
+   input events with complementary "opened" input events. But there is
+   still no guarantee that the "opened" notifications can be delivered
+   to the userspace when the lid actually opens, given that some AML
+   tables do not send "opened" notifications reliably.
+   In this mode, if everything is correctly implemented by the platform
+   firmware, the old userspace programs should still work. Otherwise,
+   the new userspace programs are required to work with the ACPI button
+   driver.
+   This option will be the default behavior after the userspace is ready
+   to handle the buggy AML tables.
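[Editor's note: a minimal userspace sketch, not part of this patch, of the "prepared" userspace the document describes. It queries the live SW_LID switch state through the evdev EVIOCGSW ioctl instead of trusting the /proc file or the initial event. The /dev/input/event0 path is an assumption; real code should enumerate /dev/input/event* and check EVIOCGBIT for EV_SW/SW_LID support first.]

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/input.h>

	int main(void)
	{
		/* Bitmask sized to hold all switch codes. */
		unsigned long sw[SW_MAX / (8 * sizeof(long)) + 1] = { 0 };
		int fd = open("/dev/input/event0", O_RDONLY); /* assumed lid node */

		if (fd < 0 || ioctl(fd, EVIOCGSW(sizeof(sw)), sw) < 0)
			return 1;
		/* SW_LID is bit 0; a set bit means the lid is currently closed. */
		printf("lid %s\n", (sw[0] & (1UL << SW_LID)) ? "closed" : "open");
		return 0;
	}

Note that under button.lid_init_state=ignore the compensation events keep the SW_LID stream internally consistent, but the state read this way still ultimately reflects what the firmware reported.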
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt
index f35dad11f0de..5aafe0b351a1 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/acpi/gpio-properties.txt
@@ -28,8 +28,8 @@ index, like the ASL example below shows:
       ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
       Package ()
       {
-          Package () {"reset-gpio", Package() {^BTH, 1, 1, 0 }},
-          Package () {"shutdown-gpio", Package() {^BTH, 0, 0, 0 }},
+          Package () {"reset-gpios", Package() {^BTH, 1, 1, 0 }},
+          Package () {"shutdown-gpios", Package() {^BTH, 0, 0, 0 }},
       }
   })
 }
@@ -48,7 +48,7 @@ Since ACPI GpioIo() resource does not have a field saying whether it is
 active low or high, the "active_low" argument can be used here.  Setting
 it to 1 marks the GPIO as active low.
 
-In our Bluetooth example the "reset-gpio" refers to the second GpioIo()
+In our Bluetooth example the "reset-gpios" refers to the second GpioIo()
 resource, second pin in that resource with the GPIO number of 31.
 
 ACPI GPIO Mappings Provided by Drivers
@@ -83,8 +83,8 @@ static const struct acpi_gpio_params reset_gpio = { 1, 1, false };
 static const struct acpi_gpio_params shutdown_gpio = { 0, 0, false };
 
 static const struct acpi_gpio_mapping bluetooth_acpi_gpios[] = {
-  { "reset-gpio", &reset_gpio, 1 },
-  { "shutdown-gpio", &shutdown_gpio, 1 },
+  { "reset-gpios", &reset_gpio, 1 },
+  { "shutdown-gpios", &shutdown_gpio, 1 },
   { },
 };
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index ccc60324e738..405da11fc3e4 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -61,3 +61,5 @@ stable kernels.
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
 | Cavium         | ThunderX SMMUv2 | #27704          | N/A                     |
+|                |                 |                 |                         |
+| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585     |
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index e774128935d5..ef5fbe9a77c7 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -25,6 +25,12 @@ to deliver its interrupts via SPIs.
 - always-on : a boolean property. If present, the timer is powered through an
   always-on power domain, therefore it never loses context.
 
+- fsl,erratum-a008585 : A boolean property. Indicates the presence of
+  QorIQ erratum A-008585, which says that reading the counter is
+  unreliable unless the same value is returned by back-to-back reads.
+  This also affects writes to the tval register, due to the implicit
+  counter read.
+
 ** Optional properties:
 
 - arm,cpu-registers-not-fw-configured : Firmware does not initialize
diff --git a/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt b/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt
new file mode 100644
index 000000000000..f2233138eba9
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/event/rockchip-dfi.txt
@@ -0,0 +1,19 @@
+
+* Rockchip rk3399 DFI device
+
+Required properties:
+- compatible: Must be "rockchip,rk3399-dfi".
+- reg: physical base address of the DFI and length of the memory-mapped region
+- rockchip,pmu: phandle to the syscon managing the "pmu general register files"
+- clocks: phandles for the clocks specified in the "clock-names" property
+- clock-names : the name of the clock used by the DFI, must be "pclk_ddr_mon";
+
+Example:
+	dfi: dfi@0xff630000 {
+		compatible = "rockchip,rk3399-dfi";
+		reg = <0x00 0xff630000 0x00 0x4000>;
+		rockchip,pmu = <&pmugrf>;
+		clocks = <&cru PCLK_DDR_MON>;
+		clock-names = "pclk_ddr_mon";
+		status = "disabled";
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
new file mode 100644
index 000000000000..7a9e8603c150
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt
@@ -0,0 +1,209 @@
+* Rockchip rk3399 DMC (Dynamic Memory Controller) device
+
+Required properties:
+- compatible:		Must be "rockchip,rk3399-dmc".
+- devfreq-events:	Node used to get DDR load; refer to
+			Documentation/devicetree/bindings/devfreq/event/
+			rockchip-dfi.txt.
+- interrupts:		The interrupt number to the CPU. The interrupt
+			specifier format depends on the interrupt controller.
+			This is the DCF interrupt, which fires when a DDR
+			DVFS transition completes.
+- clocks:		Phandles for the clocks specified in the "clock-names"
+			property.
+- clock-names:		The name of the clock used by the DMC, must be
+			"dmc_clk" (see the example below).
+- operating-points-v2:	Refer to Documentation/devicetree/bindings/power/opp.txt
+			for details.
+- center-supply:	DMC supply node.
+- status:		Marks the node enabled/disabled.
+
+The following properties are DDR timings:
+
+- rockchip,dram_speed_bin :	For values, see include/dt-bindings/clock/ddr.h.
+				Selects the DDR3 CL-tRP-tRCD type; the default
+				value is "DDR3_DEFAULT". It must be selected
+				according to the "Speed Bin" in the DDR3
+				datasheet; DO NOT use a smaller "Speed Bin"
+				than the DDR3 part actually is.
+
+- rockchip,pd_idle :		Configures the PD_IDLE value, which defines the
+				power-down idle period: memories are placed
+				into power-down mode if the bus is idle for
+				PD_IDLE DFI clocks.
+
+- rockchip,sr_idle :		Configures the SR_IDLE value, which defines the
+				self-refresh idle period: memories are placed
+				into self-refresh mode if the bus is idle for
+				SR_IDLE*1024 DFI clocks (the DFI clock frequency
+				is half of the DRAM clock). The default value
+				is "0".
+
+- rockchip,sr_mc_gate_idle :	Defines the self-refresh-with-memory-and-
+				controller-clock-gating idle period: memories
+				are placed into self-refresh mode and the
+				memory controller clock is gated if the bus is
+				idle for sr_mc_gate_idle*1024 DFI clocks.
+
+- rockchip,srpd_lite_idle :	Defines the self-refresh power-down idle
+				period: memories are placed into self-refresh
+				power-down mode if the bus is idle for
+				srpd_lite_idle*1024 DFI clocks. This parameter
+				is for LPDDR4 only.
+
+- rockchip,standby_idle :	Defines the standby idle period: memories are
+				placed into self-refresh, and then the
+				controller, PI, PHY and DRAM clocks are gated,
+				if the bus is idle for standby_idle * DFI
+				clocks.
+
+- rockchip,dram_dll_disb_freq :	Defines the DDR3 DLL bypass frequency in
+				MHz; when the DDR frequency is less than
+				DRAM_DLL_DISB_FREQ, the DDR3 DLL will be
+				bypassed. Note: if the DLL is bypassed, the
+				ODT also stops working.
+
+- rockchip,phy_dll_disb_freq :	Defines the PHY DLL bypass frequency in
+				MHz; when the DDR frequency is less than
+				PHY_DLL_DISB_FREQ, the PHY DLL will be
+				bypassed. Note: the PHY DLL and PHY ODT are
+				independent.
+
+- rockchip,ddr3_odt_disb_freq :	When the DRAM type is DDR3, this parameter
+				defines the ODT disable frequency in MHz;
+				when the DDR frequency is less than
+				ddr3_odt_disb_freq, the ODT on both the DRAM
+				side and the controller side is disabled.
+
+- rockchip,ddr3_drv :		When the DRAM type is DDR3, this parameter
+				defines the DRAM-side drive strength in ohms;
+				the default value is DDR3_DS_40ohm.
+
+- rockchip,ddr3_odt :		When the DRAM type is DDR3, this parameter
+				defines the DRAM-side ODT strength in ohms;
+				the default value is DDR3_ODT_120ohm.
+
+- rockchip,phy_ddr3_ca_drv :	When the DRAM type is DDR3, this parameter
+				defines the PHY-side CA line (including the
+				command, address and clock lines) drive
+				strength. The default value is PHY_DRV_ODT_40.
+
+- rockchip,phy_ddr3_dq_drv :	When the DRAM type is DDR3, this parameter
+				defines the PHY-side DQ line (including the
+				DQS/DQ/DM lines) drive strength. The default
+				value is PHY_DRV_ODT_40.
+
+- rockchip,phy_ddr3_odt :	When the DRAM type is DDR3, this parameter
+				defines the PHY-side ODT strength. The default
+				value is PHY_DRV_ODT_240.
+
+- rockchip,lpddr3_odt_disb_freq : When the DRAM type is LPDDR3, this parameter
+				defines the ODT disable frequency in MHz;
+				when the DDR frequency is less than
+				lpddr3_odt_disb_freq, the ODT on both the DRAM
+				side and the controller side is disabled.
+
+- rockchip,lpddr3_drv :		When the DRAM type is LPDDR3, this parameter
+				defines the DRAM-side drive strength in ohms;
+				the default value is LP3_DS_34ohm.
+
+- rockchip,lpddr3_odt :		When the DRAM type is LPDDR3, this parameter
+				defines the DRAM-side ODT strength in ohms;
+				the default value is LP3_ODT_240ohm.
+
+- rockchip,phy_lpddr3_ca_drv :	When the DRAM type is LPDDR3, this parameter
+				defines the PHY-side CA line (including the
+				command, address and clock lines) drive
+				strength. The default value is PHY_DRV_ODT_40.
+
+- rockchip,phy_lpddr3_dq_drv :	When the DRAM type is LPDDR3, this parameter
+				defines the PHY-side DQ line (including the
+				DQS/DQ/DM lines) drive strength. The default
+				value is PHY_DRV_ODT_40.
+
+- rockchip,phy_lpddr3_odt :	When the DRAM type is LPDDR3, this parameter
+				defines the PHY-side ODT strength. The default
+				value is PHY_DRV_ODT_240.
+
+- rockchip,lpddr4_odt_disb_freq : When the DRAM type is LPDDR4, this parameter
+				defines the ODT disable frequency in MHz;
+				when the DDR frequency is less than
+				lpddr4_odt_disb_freq, the ODT on both the DRAM
+				side and the controller side is disabled.
+
+- rockchip,lpddr4_drv :		When the DRAM type is LPDDR4, this parameter
+				defines the DRAM-side drive strength in ohms;
+				the default value is LP4_PDDS_60ohm.
+
+- rockchip,lpddr4_dq_odt :	When the DRAM type is LPDDR4, this parameter
+				defines the DRAM-side ODT strength on the
+				DQS/DQ lines, in ohms; the default value is
+				LP4_DQ_ODT_40ohm.
+
+- rockchip,lpddr4_ca_odt :	When the DRAM type is LPDDR4, this parameter
+				defines the DRAM-side ODT strength on the CA
+				lines, in ohms; the default value is
+				LP4_CA_ODT_40ohm.
+
+- rockchip,phy_lpddr4_ca_drv :	When the DRAM type is LPDDR4, this parameter
+				defines the PHY-side CA line (including the
+				command address lines) drive strength. The
+				default value is PHY_DRV_ODT_40.
+
+- rockchip,phy_lpddr4_ck_cs_drv : When the DRAM type is LPDDR4, this parameter
+				defines the PHY-side clock line and CS line
+				drive strength. The default value is
+				PHY_DRV_ODT_80.
+
+- rockchip,phy_lpddr4_dq_drv :	When the DRAM type is LPDDR4, this parameter
+				defines the PHY-side DQ line (including the
+				DQS/DQ/DM lines) drive strength. The default
+				value is PHY_DRV_ODT_80.
+
+- rockchip,phy_lpddr4_odt :	When the DRAM type is LPDDR4, this parameter
+				defines the PHY-side ODT strength. The default
+				value is PHY_DRV_ODT_60.
+
+Example:
+	dmc_opp_table: dmc_opp_table {
+		compatible = "operating-points-v2";
+
+		opp00 {
+			opp-hz = /bits/ 64 <300000000>;
+			opp-microvolt = <900000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <666000000>;
+			opp-microvolt = <900000>;
+		};
+	};
+
+	dmc: dmc {
+		compatible = "rockchip,rk3399-dmc";
+		devfreq-events = <&dfi>;
+		interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru SCLK_DDRCLK>;
+		clock-names = "dmc_clk";
+		operating-points-v2 = <&dmc_opp_table>;
+		center-supply = <&ppvar_centerlogic>;
+		upthreshold = <15>;
+		downdifferential = <10>;
+		rockchip,ddr3_speed_bin = <21>;
+		rockchip,pd_idle = <0x40>;
+		rockchip,sr_idle = <0x2>;
+		rockchip,sr_mc_gate_idle = <0x3>;
+		rockchip,srpd_lite_idle = <0x4>;
+		rockchip,standby_idle = <0x2000>;
+		rockchip,dram_dll_dis_freq = <300>;
+		rockchip,phy_dll_dis_freq = <125>;
+		rockchip,auto_pd_dis_freq = <666>;
+		rockchip,ddr3_odt_dis_freq = <333>;
+		rockchip,ddr3_drv = <DDR3_DS_40ohm>;
+		rockchip,ddr3_odt = <DDR3_ODT_120ohm>;
+		rockchip,phy_ddr3_ca_drv = <PHY_DRV_ODT_40>;
+		rockchip,phy_ddr3_dq_drv = <PHY_DRV_ODT_40>;
+		rockchip,phy_ddr3_odt = <PHY_DRV_ODT_240>;
+		rockchip,lpddr3_odt_dis_freq = <333>;
+		rockchip,lpddr3_drv = <LP3_DS_34ohm>;
+		rockchip,lpddr3_odt = <LP3_ODT_240ohm>;
+		rockchip,phy_lpddr3_ca_drv = <PHY_DRV_ODT_40>;
+		rockchip,phy_lpddr3_dq_drv = <PHY_DRV_ODT_40>;
+		rockchip,phy_lpddr3_odt = <PHY_DRV_ODT_240>;
+		rockchip,lpddr4_odt_dis_freq = <333>;
+		rockchip,lpddr4_drv = <LP4_PDDS_60ohm>;
+		rockchip,lpddr4_dq_odt = <LP4_DQ_ODT_40ohm>;
+		rockchip,lpddr4_ca_odt = <LP4_CA_ODT_40ohm>;
+		rockchip,phy_lpddr4_ca_drv = <PHY_DRV_ODT_40>;
+		rockchip,phy_lpddr4_ck_cs_drv = <PHY_DRV_ODT_80>;
+		rockchip,phy_lpddr4_dq_drv = <PHY_DRV_ODT_80>;
+		rockchip,phy_lpddr4_odt = <PHY_DRV_ODT_60>;
+		status = "disabled";
+	};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a4f4d693e2c1..25037ded3683 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -698,6 +698,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			loops can be debugged more effectively on production
 			systems.
 
+	clocksource.arm_arch_timer.fsl-a008585=
+			[ARM64]
+			Format: <bool>
+			Enable/disable the workaround of Freescale/NXP
+			erratum A-008585.  This can be useful for KVM
+			guests, if the guest device tree doesn't show the
+			erratum.  If unspecified, the workaround is
+			enabled based on the device tree.
+
 	clearcpuid=BITNUM [X86]
 			Disable CPUID feature X for the kernel. See
 			arch/x86/include/asm/cpufeatures.h for the valid bit
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index 477927becacb..ea8d7b4e53f0 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -15,6 +15,8 @@ The updated API replacements are:
 
 DEFINE_STATIC_KEY_TRUE(key);
 DEFINE_STATIC_KEY_FALSE(key);
+DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 static_branch_likely()
 static_branch_unlikely()
 
@@ -140,6 +142,13 @@ static_branch_inc(), will change the branch back to true.  Likewise, if the
 key is initialized false, a 'static_branch_inc()', will change the branch to
 true.  And then a 'static_branch_dec()', will again make the branch false.
 
+Where an array of keys is required, it can be defined as:
+
+	DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+
+or:
+
+	DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 
 4) Architecture level code patching interface, 'jump labels'
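[Editor's note: the cpufeature.h hunk later in this diff (cpu_hwcap_keys / cpus_have_cap) is the pattern these arrays serve; here is a minimal sketch with hypothetical names. Because static_branch_unlikely() records the key's address in the jump table at build time, an array element can only be used that way when the index is a compile-time constant, so a bitmap test is kept as the fallback path.]

	#include <linux/jump_label.h>
	#include <linux/bitmap.h>

	#define NR_FEATURES 16

	/* One key per feature slot, all initially false (branch patched out). */
	static DEFINE_STATIC_KEY_ARRAY_FALSE(feature_keys, NR_FEATURES);
	static DECLARE_BITMAP(feature_bits, NR_FEATURES);

	static __always_inline bool feature_enabled(unsigned int num)
	{
		if (__builtin_constant_p(num))
			return static_branch_unlikely(&feature_keys[num]);
		return test_bit(num, feature_bits);
	}

	static void feature_enable(unsigned int num)
	{
		__set_bit(num, feature_bits);
		static_branch_enable(&feature_keys[num]);
	}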
diff --git a/MAINTAINERS b/MAINTAINERS
index 2a70dd97d8fa..46c526eb1bab 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -921,15 +921,17 @@ F:	arch/arm/include/asm/floppy.h
 
 ARM PMU PROFILING AND DEBUGGING
 M:	Will Deacon <will.deacon@arm.com>
-R:	Mark Rutland <mark.rutland@arm.com>
+M:	Mark Rutland <mark.rutland@arm.com>
 S:	Maintained
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 F:	arch/arm*/kernel/perf_*
 F:	arch/arm/oprofile/common.c
 F:	arch/arm*/kernel/hw_breakpoint.c
 F:	arch/arm*/include/asm/hw_breakpoint.h
 F:	arch/arm*/include/asm/perf_event.h
-F:	drivers/perf/arm_pmu.c
+F:	drivers/perf/*
 F:	include/linux/perf/arm_pmu.h
+F:	Documentation/devicetree/bindings/arm/pmu.txt
 
 ARM PORT
 M:	Russell King <linux@armlinux.org.uk>
@@ -3290,6 +3292,7 @@ L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 T:	git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
+F:	Documentation/cpu-freq/
 F:	drivers/cpufreq/
 F:	include/linux/cpufreq.h
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a9c4e48bb7ec..b2113c24850c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1,6 +1,7 @@
 config ARM
 	bool
 	default y
+	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 01986deef7c5..36cc7cc012f9 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -28,7 +28,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_CPUFREQ_DT=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index ea3566fb92e2..58459105cadc 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -135,7 +135,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_QORIQ_CPUFREQ=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
diff --git a/arch/arm/include/asm/clocksource.h b/arch/arm/include/asm/clocksource.h
new file mode 100644
index 000000000000..0b350a7e26f3
--- /dev/null
+++ b/arch/arm/include/asm/clocksource.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+struct arch_clocksource_data {
+	bool vdso_direct;	/* Usable for direct VDSO access? */
+};
+
+#endif
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 15063851cd10..b9423491b9d7 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -596,12 +596,6 @@ static struct attribute_group armv7_pmuv1_events_attr_group = {
 	.attrs = armv7_pmuv1_event_attrs,
 };
 
-static const struct attribute_group *armv7_pmuv1_attr_groups[] = {
-	&armv7_pmuv1_events_attr_group,
-	&armv7_pmu_format_attr_group,
-	NULL,
-};
-
 ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
 ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
 ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
@@ -653,12 +647,6 @@ static struct attribute_group armv7_pmuv2_events_attr_group = {
 	.attrs = armv7_pmuv2_event_attrs,
 };
 
-static const struct attribute_group *armv7_pmuv2_attr_groups[] = {
-	&armv7_pmuv2_events_attr_group,
-	&armv7_pmu_format_attr_group,
-	NULL,
-};
-
 /*
  * Perf Events' indices
  */
@@ -1208,7 +1196,10 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 	armv7pmu_init(cpu_pmu);
 	cpu_pmu->name		= "armv7_cortex_a8";
 	cpu_pmu->map_event	= armv7_a8_map_event;
-	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
+	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+		&armv7_pmuv1_events_attr_group;
+	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+		&armv7_pmu_format_attr_group;
 	return armv7_probe_num_events(cpu_pmu);
 }
 
@@ -1217,7 +1208,10 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
 	armv7pmu_init(cpu_pmu);
 	cpu_pmu->name		= "armv7_cortex_a9";
 	cpu_pmu->map_event	= armv7_a9_map_event;
-	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
+	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+		&armv7_pmuv1_events_attr_group;
+	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+		&armv7_pmu_format_attr_group;
 	return armv7_probe_num_events(cpu_pmu);
 }
 
@@ -1226,7 +1220,10 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
 	armv7pmu_init(cpu_pmu);
 	cpu_pmu->name		= "armv7_cortex_a5";
 	cpu_pmu->map_event	= armv7_a5_map_event;
-	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
+	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+		&armv7_pmuv1_events_attr_group;
+	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+		&armv7_pmu_format_attr_group;
 	return armv7_probe_num_events(cpu_pmu);
 }
 
@@ -1236,7 +1233,10 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->name		= "armv7_cortex_a15";
 	cpu_pmu->map_event	= armv7_a15_map_event;
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
-	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
+	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+		&armv7_pmuv2_events_attr_group;
+	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+		&armv7_pmu_format_attr_group;
 	return armv7_probe_num_events(cpu_pmu);
 }
 
@@ -1246,7 +1246,10 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->name		= "armv7_cortex_a7";
 	cpu_pmu->map_event	= armv7_a7_map_event;
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
-	cpu_pmu->pmu.attr_groups =
armv7_pmuv2_attr_groups; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv2_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; return armv7_probe_num_events(cpu_pmu); } @@ -1256,7 +1259,10 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->name = "armv7_cortex_a12"; cpu_pmu->map_event = armv7_a12_map_event; cpu_pmu->set_event_filter = armv7pmu_set_event_filter; - cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv2_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; return armv7_probe_num_events(cpu_pmu); } @@ -1264,7 +1270,10 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu) { int ret = armv7_a12_pmu_init(cpu_pmu); cpu_pmu->name = "armv7_cortex_a17"; - cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv7_pmuv2_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv7_pmu_format_attr_group; return ret; } diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index 994e971a8538..a0affd14086a 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -270,7 +270,7 @@ static bool tk_is_cntvct(const struct timekeeper *tk) if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) return false; - if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0) + if (!tk->tkr_mono.clock->archdata.vdso_direct) return false; return true; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index bc3f00f586f1..17c14a1d9112 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -4,6 +4,7 @@ config ARM64 select ACPI_GENERIC_GSI if ACPI select ACPI_REDUCED_HARDWARE_ONLY if ACPI select ACPI_MCFG if ACPI + select ARCH_CLOCKSOURCE_DATA select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE @@ -102,10 +103,8 @@ config ARM64 select NO_BOOTMEM select OF select OF_EARLY_FLATTREE - select OF_NUMA if NUMA && OF select OF_RESERVED_MEM select PCI_ECAM if ACPI - select PERF_USE_VMALLOC select POWER_RESET select POWER_SUPPLY select SPARSE_IRQ @@ -122,6 +121,9 @@ config ARCH_PHYS_ADDR_T_64BIT config MMU def_bool y +config DEBUG_RODATA + def_bool y + config ARM64_PAGE_SHIFT int default 16 if ARM64_64K_PAGES @@ -415,18 +417,13 @@ config ARM64_ERRATUM_845719 config ARM64_ERRATUM_843419 bool "Cortex-A53: 843419: A load or store might access an incorrect address" - depends on MODULES default y - select ARM64_MODULE_CMODEL_LARGE + select ARM64_MODULE_CMODEL_LARGE if MODULES help - This option builds kernel modules using the large memory model in - order to avoid the use of the ADRP instruction, which can cause - a subsequent memory access to use an incorrect address on Cortex-A53 - parts up to r0p4. - - Note that the kernel itself must be linked with a version of ld - which fixes potentially affected ADRP instructions through the - use of veneers. + This option links the kernel with '--fix-cortex-a53-843419' and + builds modules using the large memory model in order to avoid the use + of the ADRP instruction, which can cause a subsequent memory access + to use an incorrect address on Cortex-A53 parts up to r0p4. If unsure, say Y. @@ -582,7 +579,8 @@ config HOTPLUG_CPU # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" - depends on SMP + select ACPI_NUMA if ACPI + select OF_NUMA help Enable NUMA (Non Uniform Memory Access) support. 
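[Editor's note: the vdso.c hunk above now keys direct VDSO counter access off the new per-clocksource archdata flag rather than a clock-name string compare. A hedged sketch of the producer side follows; the arch timer driver in this series does the equivalent, but the names here are illustrative only.]

	#include <linux/clocksource.h>
	#include <asm/arch_timer.h>

	static cycle_t sys_counter_read(struct clocksource *cs)
	{
		return arch_counter_get_cntvct();
	}

	static struct clocksource sys_counter_cs = {
		.name	= "arch_sys_counter",
		.rating	= 400,
		.read	= sys_counter_read,
		.mask	= CLOCKSOURCE_MASK(56),
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	};

	static void __init sys_counter_register(u32 hz)
	{
		/* Opt in to direct VDSO access; an erratum workaround that
		 * forces out-of-line counter reads would leave this false. */
		sys_counter_cs.archdata.vdso_direct = true;
		clocksource_register_hz(&sys_counter_cs, hz);
	}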
@@ -603,11 +601,18 @@ config USE_PERCPU_NUMA_NODE_ID def_bool y depends on NUMA +config HAVE_SETUP_PER_CPU_AREA + def_bool y + depends on NUMA + +config NEED_PER_CPU_EMBED_FIRST_CHUNK + def_bool y + depends on NUMA + source kernel/Kconfig.preempt source kernel/Kconfig.hz config ARCH_SUPPORTS_DEBUG_PAGEALLOC - depends on !HIBERNATION def_bool y config ARCH_HAS_HOLES_MEMORYMODEL diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 0cc758cdd0dc..b661fe742615 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -49,16 +49,6 @@ config DEBUG_SET_MODULE_RONX If in doubt, say Y. -config DEBUG_RODATA - bool "Make kernel text and rodata read-only" - default y - help - If this is set, kernel text and rodata will be made read-only. This - is to help catch accidental or malicious attempts to change the - kernel's executable code. - - If in doubt, say Y. - config DEBUG_ALIGN_RODATA depends on DEBUG_RODATA bool "Align linker sections up to SECTION_SIZE" diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index be5d824ebdba..4a4318bc219a 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -159,7 +159,6 @@ config ARCH_TEGRA select CLKSRC_MMIO select CLKSRC_OF select GENERIC_CLOCKEVENTS - select HAVE_CLK select PINCTRL select RESET_CONTROLLER help diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 5b54f8c021d8..ab51aed6b6c1 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -18,6 +18,14 @@ ifneq ($(CONFIG_RELOCATABLE),) LDFLAGS_vmlinux += -pie -Bsymbolic endif +ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) + ifeq ($(call ld-option, --fix-cortex-a53-843419),) +$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum) + else +LDFLAGS_vmlinux += --fix-cortex-a53-843419 + endif +endif + KBUILD_DEFCONFIG := defconfig # Check for binutils support for specific extensions @@ -38,10 +46,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian AS += -EB LD += -EB +UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian AS += -EL LD += -EL +UTS_MACHINE := aarch64 endif CHECKFLAGS += -D__aarch64__ diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index f43d2c44c765..44e1d7f10add 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -1,4 +1,3 @@ -generic-y += bug.h generic-y += bugs.h generic-y += clkdev.h generic-y += cputime.h @@ -10,7 +9,6 @@ generic-y += dma-contiguous.h generic-y += early_ioremap.h generic-y += emergency-restart.h generic-y += errno.h -generic-y += ftrace.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h @@ -27,12 +25,10 @@ generic-y += mman.h generic-y += msgbuf.h generic-y += msi.h generic-y += mutex.h -generic-y += pci.h generic-y += poll.h generic-y += preempt.h generic-y += resource.h generic-y += rwsem.h -generic-y += sections.h generic-y += segment.h generic-y += sembuf.h generic-y += serial.h @@ -45,7 +41,6 @@ generic-y += swab.h generic-y += switch_to.h generic-y += termbits.h generic-y += termios.h -generic-y += topology.h generic-y += trace_clock.h generic-y += types.h generic-y += unaligned.h diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 5420cb0fcb3e..e517088d635f 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -12,7 +12,7 @@ #ifndef _ASM_ACPI_H #define _ASM_ACPI_H -#include <linux/mm.h> +#include <linux/memblock.h> #include <linux/psci.h> #include <asm/cputype.h> @@ -32,7 +32,11 @@ static 
inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) { - if (!page_is_ram(phys >> PAGE_SHIFT)) + /* + * EFI's reserve_regions() call adds memory with the WB attribute + * to memblock via early_init_dt_add_memory_arch(). + */ + if (!memblock_is_memory(phys)) return ioremap(phys, size); return ioremap_cache(phys, size); diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 8746ff6abd77..55101bd86b98 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -2,6 +2,7 @@ #define __ASM_ALTERNATIVE_H #include <asm/cpufeature.h> +#include <asm/insn.h> #ifndef __ASSEMBLY__ @@ -90,34 +91,55 @@ void apply_alternatives(void *start, size_t length); .endm /* - * Begin an alternative code sequence. + * Alternative sequences + * + * The code for the case where the capability is not present will be + * assembled and linked as normal. There are no restrictions on this + * code. + * + * The code for the case where the capability is present will be + * assembled into a special section to be used for dynamic patching. + * Code for that case must: + * + * 1. Be exactly the same length (in bytes) as the default code + * sequence. * - * The code that follows this macro will be assembled and linked as - * normal. There are no restrictions on this code. + * 2. Not contain a branch target that is used outside of the + * alternative sequence it is defined in (branches into an + * alternative sequence are not fixed up). + */ + +/* + * Begin an alternative code sequence. */ .macro alternative_if_not cap + .set .Lasm_alt_mode, 0 .pushsection .altinstructions, "a" altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f .popsection 661: .endm +.macro alternative_if cap + .set .Lasm_alt_mode, 1 + .pushsection .altinstructions, "a" + altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f + .popsection + .pushsection .altinstr_replacement, "ax" + .align 2 /* So GAS knows label 661 is suitably aligned */ +661: +.endm + /* - * Provide the alternative code sequence. - * - * The code that follows this macro is assembled into a special - * section to be used for dynamic patching. Code that follows this - * macro must: - * - * 1. Be exactly the same length (in bytes) as the default code - * sequence. - * - * 2. Not contain a branch target that is used outside of the - * alternative sequence it is defined in (branches into an - * alternative sequence are not fixed up). + * Provide the other half of the alternative code sequence. */ .macro alternative_else -662: .pushsection .altinstr_replacement, "ax" +662: + .if .Lasm_alt_mode==0 + .pushsection .altinstr_replacement, "ax" + .else + .popsection + .endif 663: .endm @@ -125,11 +147,25 @@ void apply_alternatives(void *start, size_t length); * Complete an alternative code sequence. */ .macro alternative_endif -664: .popsection +664: + .if .Lasm_alt_mode==0 + .popsection + .endif .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) .endm +/* + * Provides a trivial alternative or default sequence consisting solely + * of NOPs. The number of NOPs is chosen automatically to match the + * previous case. + */ +.macro alternative_else_nop_endif +alternative_else + nops (662b-661b) / AARCH64_INSN_SIZE +alternative_endif +.endm + #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) 
\ alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index fbe0ca31a99c..eaa5bbe3fa87 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -20,13 +20,55 @@ #define __ASM_ARCH_TIMER_H #include <asm/barrier.h> +#include <asm/sysreg.h> #include <linux/bug.h> #include <linux/init.h> +#include <linux/jump_label.h> #include <linux/types.h> #include <clocksource/arm_arch_timer.h> +#if IS_ENABLED(CONFIG_FSL_ERRATUM_A008585) +extern struct static_key_false arch_timer_read_ool_enabled; +#define needs_fsl_a008585_workaround() \ + static_branch_unlikely(&arch_timer_read_ool_enabled) +#else +#define needs_fsl_a008585_workaround() false +#endif + +u32 __fsl_a008585_read_cntp_tval_el0(void); +u32 __fsl_a008585_read_cntv_tval_el0(void); +u64 __fsl_a008585_read_cntvct_el0(void); + +/* + * The number of retries is an arbitrary value well beyond the highest number + * of iterations the loop has been observed to take. + */ +#define __fsl_a008585_read_reg(reg) ({ \ + u64 _old, _new; \ + int _retries = 200; \ + \ + do { \ + _old = read_sysreg(reg); \ + _new = read_sysreg(reg); \ + _retries--; \ + } while (unlikely(_old != _new) && _retries); \ + \ + WARN_ON_ONCE(!_retries); \ + _new; \ +}) + +#define arch_timer_reg_read_stable(reg) \ +({ \ + u64 _val; \ + if (needs_fsl_a008585_workaround()) \ + _val = __fsl_a008585_read_##reg(); \ + else \ + _val = read_sysreg(reg); \ + _val; \ +}) + /* * These register accessors are marked inline so the compiler can * nicely work out which register we want, and chuck away the rest of @@ -38,19 +80,19 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val) if (access == ARCH_TIMER_PHYS_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: - asm volatile("msr cntp_ctl_el0, %0" : : "r" (val)); + write_sysreg(val, cntp_ctl_el0); break; case ARCH_TIMER_REG_TVAL: - asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); + write_sysreg(val, cntp_tval_el0); break; } } else if (access == ARCH_TIMER_VIRT_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: - asm volatile("msr cntv_ctl_el0, %0" : : "r" (val)); + write_sysreg(val, cntv_ctl_el0); break; case ARCH_TIMER_REG_TVAL: - asm volatile("msr cntv_tval_el0, %0" : : "r" (val)); + write_sysreg(val, cntv_tval_el0); break; } } @@ -61,48 +103,38 @@ void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val) static __always_inline u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg) { - u32 val; - if (access == ARCH_TIMER_PHYS_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: - asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val)); - break; + return read_sysreg(cntp_ctl_el0); case ARCH_TIMER_REG_TVAL: - asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); - break; + return arch_timer_reg_read_stable(cntp_tval_el0); } } else if (access == ARCH_TIMER_VIRT_ACCESS) { switch (reg) { case ARCH_TIMER_REG_CTRL: - asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val)); - break; + return read_sysreg(cntv_ctl_el0); case ARCH_TIMER_REG_TVAL: - asm volatile("mrs %0, cntv_tval_el0" : "=r" (val)); - break; + return arch_timer_reg_read_stable(cntv_tval_el0); } } - return val; + BUG(); } static inline u32 arch_timer_get_cntfrq(void) { - u32 val; - asm volatile("mrs %0, cntfrq_el0" : "=r" (val)); - return val; + return read_sysreg(cntfrq_el0); } static inline u32 arch_timer_get_cntkctl(void) { - u32 cntkctl; - asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl)); - return cntkctl; + return 
read_sysreg(cntkctl_el1); } static inline void arch_timer_set_cntkctl(u32 cntkctl) { - asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); + write_sysreg(cntkctl, cntkctl_el1); } static inline u64 arch_counter_get_cntpct(void) @@ -116,12 +148,8 @@ static inline u64 arch_counter_get_cntpct(void) static inline u64 arch_counter_get_cntvct(void) { - u64 cval; - isb(); - asm volatile("mrs %0, cntvct_el0" : "=r" (cval)); - - return cval; + return arch_timer_reg_read_stable(cntvct_el0); } static inline int arch_timer_arch_init(void) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index d5025c69ca81..28bfe6132eb6 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -87,6 +87,15 @@ .endm /* + * NOP sequence + */ + .macro nops, num + .rept \num + nop + .endr + .endm + +/* * Emit an entry into the exception table */ .macro _asm_extable, from, to @@ -216,11 +225,26 @@ lr .req x30 // link register .macro mmid, rd, rn ldr \rd, [\rn, #MM_CONTEXT_ID] .endm +/* + * read_ctr - read CTR_EL0. If the system has mismatched + * cache line sizes, provide the system wide safe value + * from arm64_ftr_reg_ctrel0.sys_val + */ + .macro read_ctr, reg +alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE + mrs \reg, ctr_el0 // read CTR + nop +alternative_else + ldr_l \reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL +alternative_endif + .endm + /* - * dcache_line_size - get the minimum D-cache line size from the CTR register. + * raw_dcache_line_size - get the minimum D-cache line size on this CPU + * from the CTR register. */ - .macro dcache_line_size, reg, tmp + .macro raw_dcache_line_size, reg, tmp mrs \tmp, ctr_el0 // read CTR ubfm \tmp, \tmp, #16, #19 // cache line size encoding mov \reg, #4 // bytes per word @@ -228,9 +252,20 @@ lr .req x30 // link register .endm /* - * icache_line_size - get the minimum I-cache line size from the CTR register. + * dcache_line_size - get the safe D-cache line size across all CPUs */ - .macro icache_line_size, reg, tmp + .macro dcache_line_size, reg, tmp + read_ctr \tmp + ubfm \tmp, \tmp, #16, #19 // cache line size encoding + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm + +/* + * raw_icache_line_size - get the minimum I-cache line size on this CPU + * from the CTR register. 
+ */ + .macro raw_icache_line_size, reg, tmp mrs \tmp, ctr_el0 // read CTR and \tmp, \tmp, #0xf // cache line size encoding mov \reg, #4 // bytes per word @@ -238,6 +273,16 @@ lr .req x30 // link register .endm /* + * icache_line_size - get the safe I-cache line size across all CPUs + */ + .macro icache_line_size, reg, tmp + read_ctr \tmp + and \tmp, \tmp, #0xf // cache line size encoding + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm + +/* * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map */ .macro tcr_set_idmap_t0sz, valreg, tmpreg diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index b5890be8f257..7457ce082b5f 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -86,8 +86,8 @@ static inline int atomic_add_return##name(int i, atomic_t *v) \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ - __LL_SC_ATOMIC(add_return##name), \ + __LL_SC_ATOMIC(add_return##name) \ + __nops(1), \ /* LSE atomics */ \ " ldadd" #mb " %w[i], w30, %[v]\n" \ " add %w[i], %w[i], w30") \ @@ -112,8 +112,8 @@ static inline void atomic_and(int i, atomic_t *v) asm volatile(ARM64_LSE_ATOMIC_INSN( /* LL/SC */ - " nop\n" - __LL_SC_ATOMIC(and), + __LL_SC_ATOMIC(and) + __nops(1), /* LSE atomics */ " mvn %w[i], %w[i]\n" " stclr %w[i], %[v]") @@ -130,8 +130,8 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ - __LL_SC_ATOMIC(fetch_and##name), \ + __LL_SC_ATOMIC(fetch_and##name) \ + __nops(1), \ /* LSE atomics */ \ " mvn %w[i], %w[i]\n" \ " ldclr" #mb " %w[i], %w[i], %[v]") \ @@ -156,8 +156,8 @@ static inline void atomic_sub(int i, atomic_t *v) asm volatile(ARM64_LSE_ATOMIC_INSN( /* LL/SC */ - " nop\n" - __LL_SC_ATOMIC(sub), + __LL_SC_ATOMIC(sub) + __nops(1), /* LSE atomics */ " neg %w[i], %w[i]\n" " stadd %w[i], %[v]") @@ -174,9 +174,8 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ __LL_SC_ATOMIC(sub_return##name) \ - " nop", \ + __nops(2), \ /* LSE atomics */ \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], w30, %[v]\n" \ @@ -203,8 +202,8 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ - __LL_SC_ATOMIC(fetch_sub##name), \ + __LL_SC_ATOMIC(fetch_sub##name) \ + __nops(1), \ /* LSE atomics */ \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[i], %[v]") \ @@ -284,8 +283,8 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ - __LL_SC_ATOMIC64(add_return##name), \ + __LL_SC_ATOMIC64(add_return##name) \ + __nops(1), \ /* LSE atomics */ \ " ldadd" #mb " %[i], x30, %[v]\n" \ " add %[i], %[i], x30") \ @@ -310,8 +309,8 @@ static inline void atomic64_and(long i, atomic64_t *v) asm volatile(ARM64_LSE_ATOMIC_INSN( /* LL/SC */ - " nop\n" - __LL_SC_ATOMIC64(and), + __LL_SC_ATOMIC64(and) + __nops(1), /* LSE atomics */ " mvn %[i], %[i]\n" " stclr %[i], %[v]") @@ -328,8 +327,8 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ - __LL_SC_ATOMIC64(fetch_and##name), \ + __LL_SC_ATOMIC64(fetch_and##name) \ + __nops(1), \ /* LSE atomics */ \ " mvn %[i], %[i]\n" \ " ldclr" #mb " %[i], %[i], %[v]") \ @@ -354,8 +353,8 @@ static inline void atomic64_sub(long i, atomic64_t 
*v) asm volatile(ARM64_LSE_ATOMIC_INSN( /* LL/SC */ - " nop\n" - __LL_SC_ATOMIC64(sub), + __LL_SC_ATOMIC64(sub) + __nops(1), /* LSE atomics */ " neg %[i], %[i]\n" " stadd %[i], %[v]") @@ -372,9 +371,8 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ __LL_SC_ATOMIC64(sub_return##name) \ - " nop", \ + __nops(2), \ /* LSE atomics */ \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], x30, %[v]\n" \ @@ -401,8 +399,8 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ - __LL_SC_ATOMIC64(fetch_sub##name), \ + __LL_SC_ATOMIC64(fetch_sub##name) \ + __nops(1), \ /* LSE atomics */ \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %[i], %[v]") \ @@ -426,13 +424,8 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) asm volatile(ARM64_LSE_ATOMIC_INSN( /* LL/SC */ - " nop\n" __LL_SC_ATOMIC64(dec_if_positive) - " nop\n" - " nop\n" - " nop\n" - " nop\n" - " nop", + __nops(6), /* LSE atomics */ "1: ldr x30, %[v]\n" " subs %[ret], x30, #1\n" @@ -464,9 +457,8 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ - __LL_SC_CMPXCHG(name) \ - " nop", \ + __LL_SC_CMPXCHG(name) \ + __nops(2), \ /* LSE atomics */ \ " mov " #w "30, %" #w "[old]\n" \ " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \ @@ -517,10 +509,8 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ \ asm volatile(ARM64_LSE_ATOMIC_INSN( \ /* LL/SC */ \ - " nop\n" \ - " nop\n" \ - " nop\n" \ - __LL_SC_CMPXCHG_DBL(name), \ + __LL_SC_CMPXCHG_DBL(name) \ + __nops(3), \ /* LSE atomics */ \ " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\ " eor %[old1], %[old1], %[oldval1]\n" \ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 4eea7f618dce..4e0497f581a0 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -20,6 +20,9 @@ #ifndef __ASSEMBLY__ +#define __nops(n) ".rept " #n "\nnop\n.endr\n" +#define nops(n) asm volatile(__nops(n)) + #define sev() asm volatile("sev" : : : "memory") #define wfe() asm volatile("wfe" : : : "memory") #define wfi() asm volatile("wfi" : : : "memory") diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index c64268dbff64..2e5fb976a572 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -68,6 +68,7 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); +extern void __clean_dcache_area_poc(void *addr, size_t len); extern void __clean_dcache_area_pou(void *addr, size_t len); extern long __flush_cache_user_range(unsigned long start, unsigned long end); @@ -85,7 +86,7 @@ static inline void flush_cache_page(struct vm_area_struct *vma, */ extern void __dma_map_area(const void *, size_t, int); extern void __dma_unmap_area(const void *, size_t, int); -extern void __dma_flush_range(const void *, const void *); +extern void __dma_flush_area(const void *, size_t); /* * Copy user data from/to a page which is mapped into a different diff --git a/arch/arm64/include/asm/clocksource.h b/arch/arm64/include/asm/clocksource.h new file mode 100644 index 000000000000..0b350a7e26f3 --- /dev/null +++ b/arch/arm64/include/asm/clocksource.h @@ -0,0 
+1,8 @@ +#ifndef _ASM_CLOCKSOURCE_H +#define _ASM_CLOCKSOURCE_H + +struct arch_clocksource_data { + bool vdso_direct; /* Usable for direct VDSO access? */ +}; + +#endif diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index bd86a79491bc..91b26d26af8a 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -43,10 +43,8 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \ " cbnz %w1, 1b\n" \ " " #mb, \ /* LSE atomics */ \ - " nop\n" \ - " nop\n" \ " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \ - " nop\n" \ + __nops(3) \ " " #nop_lse) \ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \ : "r" (x) \ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 7099f26e3702..758d74fedfad 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -9,6 +9,8 @@ #ifndef __ASM_CPUFEATURE_H #define __ASM_CPUFEATURE_H +#include <linux/jump_label.h> + #include <asm/hwcap.h> #include <asm/sysreg.h> @@ -37,8 +39,9 @@ #define ARM64_WORKAROUND_CAVIUM_27456 12 #define ARM64_HAS_32BIT_EL0 13 #define ARM64_HYP_OFFSET_LOW 14 +#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15 -#define ARM64_NCAPS 15 +#define ARM64_NCAPS 16 #ifndef __ASSEMBLY__ @@ -63,7 +66,7 @@ struct arm64_ftr_bits { enum ftr_type type; u8 shift; u8 width; - s64 safe_val; /* safe value for discrete features */ + s64 safe_val; /* safe value for FTR_EXACT features */ }; /* @@ -72,13 +75,14 @@ struct arm64_ftr_bits { * @sys_val Safe value across the CPUs (system view) */ struct arm64_ftr_reg { - u32 sys_id; - const char *name; - u64 strict_mask; - u64 sys_val; - struct arm64_ftr_bits *ftr_bits; + const char *name; + u64 strict_mask; + u64 sys_val; + const struct arm64_ftr_bits *ftr_bits; }; +extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; + /* scope of capability check */ enum { SCOPE_SYSTEM, @@ -109,6 +113,7 @@ struct arm64_cpu_capabilities { }; extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); +extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; bool this_cpu_has_cap(unsigned int cap); @@ -121,16 +126,21 @@ static inline bool cpus_have_cap(unsigned int num) { if (num >= ARM64_NCAPS) return false; - return test_bit(num, cpu_hwcaps); + if (__builtin_constant_p(num)) + return static_branch_unlikely(&cpu_hwcap_keys[num]); + else + return test_bit(num, cpu_hwcaps); } static inline void cpus_set_cap(unsigned int num) { - if (num >= ARM64_NCAPS) + if (num >= ARM64_NCAPS) { pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n", num, ARM64_NCAPS); - else + } else { __set_bit(num, cpu_hwcaps); + static_branch_enable(&cpu_hwcap_keys[num]); + } } static inline int __attribute_const__ @@ -157,7 +167,7 @@ cpuid_feature_extract_unsigned_field(u64 features, int field) return cpuid_feature_extract_unsigned_field_width(features, field, 4); } -static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) +static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp) { return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); } @@ -170,7 +180,7 @@ cpuid_feature_extract_field(u64 features, int field, bool sign) cpuid_feature_extract_unsigned_field(features, field); } -static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val) +static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val) { return (s64)cpuid_feature_extract_field(val, ftrp->shift, ftrp->sign); } @@ -193,11 +203,11 @@ void __init setup_cpu_features(void); void update_cpu_capabilities(const struct 
arm64_cpu_capabilities *caps, const char *info); void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps); -void check_local_cpu_errata(void); -void __init enable_errata_workarounds(void); +void check_local_cpu_capabilities(void); -void verify_local_cpu_errata(void); -void verify_local_cpu_capabilities(void); +void update_cpu_errata_workarounds(void); +void __init enable_errata_workarounds(void); +void verify_local_cpu_errata_workarounds(void); u64 read_system_reg(u32 id); diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 9d9fd4b9a72e..26a68ddb11c1 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -93,11 +93,7 @@ #include <asm/sysreg.h> -#define read_cpuid(reg) ({ \ - u64 __val; \ - asm("mrs_s %0, " __stringify(SYS_ ## reg) : "=r" (__val)); \ - __val; \ -}) +#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg) /* * The CPU ID never changes at run time, so we might as well tell the diff --git a/arch/arm64/include/asm/dcc.h b/arch/arm64/include/asm/dcc.h index 65e0190e97c8..836b05630003 100644 --- a/arch/arm64/include/asm/dcc.h +++ b/arch/arm64/include/asm/dcc.h @@ -21,21 +21,16 @@ #define __ASM_DCC_H #include <asm/barrier.h> +#include <asm/sysreg.h> static inline u32 __dcc_getstatus(void) { - u32 ret; - - asm volatile("mrs %0, mdccsr_el0" : "=r" (ret)); - - return ret; + return read_sysreg(mdccsr_el0); } static inline char __dcc_getchar(void) { - char c; - - asm volatile("mrs %0, dbgdtrrx_el0" : "=r" (c)); + char c = read_sysreg(dbgdtrrx_el0); isb(); return c; @@ -47,8 +42,7 @@ static inline void __dcc_putchar(char c) * The typecast is to make absolutely certain that 'c' is * zero-extended. */ - asm volatile("msr dbgdtrtx_el0, %0" - : : "r" ((unsigned long)(unsigned char)c)); + write_sysreg((unsigned char)c, dbgdtrtx_el0); isb(); } diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index f772e15c4766..d14c478976d0 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -78,6 +78,23 @@ #define ESR_ELx_IL (UL(1) << 25) #define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1) + +/* ISS field definitions shared by different classes */ +#define ESR_ELx_WNR (UL(1) << 6) + +/* Shared ISS field definitions for Data/Instruction aborts */ +#define ESR_ELx_EA (UL(1) << 9) +#define ESR_ELx_S1PTW (UL(1) << 7) + +/* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */ +#define ESR_ELx_FSC (0x3F) +#define ESR_ELx_FSC_TYPE (0x3C) +#define ESR_ELx_FSC_EXTABT (0x10) +#define ESR_ELx_FSC_ACCESS (0x08) +#define ESR_ELx_FSC_FAULT (0x04) +#define ESR_ELx_FSC_PERM (0x0C) + +/* ISS field definitions for Data Aborts */ #define ESR_ELx_ISV (UL(1) << 24) #define ESR_ELx_SAS_SHIFT (22) #define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT) @@ -86,16 +103,9 @@ #define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT) #define ESR_ELx_SF (UL(1) << 15) #define ESR_ELx_AR (UL(1) << 14) -#define ESR_ELx_EA (UL(1) << 9) #define ESR_ELx_CM (UL(1) << 8) -#define ESR_ELx_S1PTW (UL(1) << 7) -#define ESR_ELx_WNR (UL(1) << 6) -#define ESR_ELx_FSC (0x3F) -#define ESR_ELx_FSC_TYPE (0x3C) -#define ESR_ELx_FSC_EXTABT (0x10) -#define ESR_ELx_FSC_ACCESS (0x08) -#define ESR_ELx_FSC_FAULT (0x04) -#define ESR_ELx_FSC_PERM (0x0C) + +/* ISS field definitions for exceptions taken in to Hyp */ #define ESR_ELx_CV (UL(1) << 24) #define ESR_ELx_COND_SHIFT (20) #define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT) @@ -109,6 +119,62 @@ ((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \ ((imm) & 0xffff)) +/* ISS 
field definitions for System instruction traps */
+#define ESR_ELx_SYS64_ISS_RES0_SHIFT	22
+#define ESR_ELx_SYS64_ISS_RES0_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_RES0_SHIFT)
+#define ESR_ELx_SYS64_ISS_DIR_MASK	0x1
+#define ESR_ELx_SYS64_ISS_DIR_READ	0x1
+#define ESR_ELx_SYS64_ISS_DIR_WRITE	0x0
+
+#define ESR_ELx_SYS64_ISS_RT_SHIFT	5
+#define ESR_ELx_SYS64_ISS_RT_MASK	(UL(0x1f) << ESR_ELx_SYS64_ISS_RT_SHIFT)
+#define ESR_ELx_SYS64_ISS_CRM_SHIFT	1
+#define ESR_ELx_SYS64_ISS_CRM_MASK	(UL(0xf) << ESR_ELx_SYS64_ISS_CRM_SHIFT)
+#define ESR_ELx_SYS64_ISS_CRN_SHIFT	10
+#define ESR_ELx_SYS64_ISS_CRN_MASK	(UL(0xf) << ESR_ELx_SYS64_ISS_CRN_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP1_SHIFT	14
+#define ESR_ELx_SYS64_ISS_OP1_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_OP1_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP2_SHIFT	17
+#define ESR_ELx_SYS64_ISS_OP2_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_OP2_SHIFT)
+#define ESR_ELx_SYS64_ISS_OP0_SHIFT	20
+#define ESR_ELx_SYS64_ISS_OP0_MASK	(UL(0x3) << ESR_ELx_SYS64_ISS_OP0_SHIFT)
+#define ESR_ELx_SYS64_ISS_SYS_MASK	(ESR_ELx_SYS64_ISS_OP0_MASK | \
+					 ESR_ELx_SYS64_ISS_OP1_MASK | \
+					 ESR_ELx_SYS64_ISS_OP2_MASK | \
+					 ESR_ELx_SYS64_ISS_CRN_MASK | \
+					 ESR_ELx_SYS64_ISS_CRM_MASK)
+#define ESR_ELx_SYS64_ISS_SYS_VAL(op0, op1, op2, crn, crm) \
+					(((op0) << ESR_ELx_SYS64_ISS_OP0_SHIFT) | \
+					 ((op1) << ESR_ELx_SYS64_ISS_OP1_SHIFT) | \
+					 ((op2) << ESR_ELx_SYS64_ISS_OP2_SHIFT) | \
+					 ((crn) << ESR_ELx_SYS64_ISS_CRN_SHIFT) | \
+					 ((crm) << ESR_ELx_SYS64_ISS_CRM_SHIFT))
+
+#define ESR_ELx_SYS64_ISS_SYS_OP_MASK	(ESR_ELx_SYS64_ISS_SYS_MASK | \
+					 ESR_ELx_SYS64_ISS_DIR_MASK)
+/*
+ * User space cache operations have the following sysreg encoding
+ * in System instructions.
+ * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 14 }, WRITE (L=0)
+ */
+#define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC	14
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVAU	11
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVAC	10
+#define ESR_ELx_SYS64_ISS_CRM_IC_IVAU	5
+
+#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK	(ESR_ELx_SYS64_ISS_OP0_MASK | \
+						 ESR_ELx_SYS64_ISS_OP1_MASK | \
+						 ESR_ELx_SYS64_ISS_OP2_MASK | \
+						 ESR_ELx_SYS64_ISS_CRN_MASK | \
+						 ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL \
+				(ESR_ELx_SYS64_ISS_SYS_VAL(1, 3, 1, 7, 0) | \
+				 ESR_ELx_SYS64_ISS_DIR_WRITE)
+
+#define ESR_ELx_SYS64_ISS_SYS_CTR	ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 1, 0, 0)
+#define ESR_ELx_SYS64_ISS_SYS_CTR_READ	(ESR_ELx_SYS64_ISS_SYS_CTR | \
+					 ESR_ELx_SYS64_ISS_DIR_READ)
+
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
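[Editor's note: these encodings exist so the kernel can trap and emulate EL0 reads of CTR_EL0 on systems with mismatched cache line sizes (see the read_ctr macro and arm64_ftr_reg_ctrel0 elsewhere in this diff). A hedged sketch of a trap handler keyed on ESR_ELx_SYS64_ISS_SYS_CTR_READ; the real handler lives in the arm64 traps code, which is not part of this excerpt.]

	static int ctr_read_handler(unsigned int esr, struct pt_regs *regs)
	{
		int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >>
			 ESR_ELx_SYS64_ISS_RT_SHIFT;

		/* Hand userspace the system-wide safe CTR_EL0 value. */
		regs->regs[rt] = arm64_ftr_reg_ctrel0.sys_val;
		regs->pc += 4;		/* skip the trapped MRS */
		return 0;
	}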
*/ #define AARCH64_DBG_READ(N, REG, VAL) do {\ - asm volatile("mrs %0, dbg" REG #N "_el1" : "=r" (VAL));\ + VAL = read_sysreg(dbg##REG##N##_el1);\ } while (0) #define AARCH64_DBG_WRITE(N, REG, VAL) do {\ - asm volatile("msr dbg" REG #N "_el1, %0" :: "r" (VAL));\ + write_sysreg(VAL, dbg##REG##N##_el1);\ } while (0) struct task_struct; @@ -141,8 +142,6 @@ static inline void ptrace_hw_copy_thread(struct task_struct *task) } #endif -extern struct pmu perf_ops_bp; - /* Determine number of BRP registers available. */ static inline int get_num_brps(void) { diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 1dbaa901d7e5..bc853663dd51 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -246,7 +246,8 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ { return (val); } -__AARCH64_INSN_FUNCS(adr_adrp, 0x1F000000, 0x10000000) +__AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000) +__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000) __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000) __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) @@ -318,6 +319,11 @@ __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000) bool aarch64_insn_is_nop(u32 insn); bool aarch64_insn_is_branch_imm(u32 insn); +static inline bool aarch64_insn_is_adr_adrp(u32 insn) +{ + return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn); +} + int aarch64_insn_read(void *addr, u32 *insnp); int aarch64_insn_write(void *addr, u32 insn); enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); @@ -398,6 +404,9 @@ int aarch64_insn_patch_text_nosync(void *addr, u32 insn); int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt); int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); +s32 aarch64_insn_adrp_get_offset(u32 insn); +u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset); + bool aarch32_insn_is_wide(u32 insn); #define A32_RN_OFFSET 16 diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 9b6e408cfa51..0bba427bb4c2 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -40,25 +40,25 @@ #define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 val, volatile void __iomem *addr) { - asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr)); + asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr)); } #define __raw_writew __raw_writew static inline void __raw_writew(u16 val, volatile void __iomem *addr) { - asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr)); + asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr)); } #define __raw_writel __raw_writel static inline void __raw_writel(u32 val, volatile void __iomem *addr) { - asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr)); + asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr)); } #define __raw_writeq __raw_writeq static inline void __raw_writeq(u64 val, volatile void __iomem *addr) { - asm volatile("str %0, [%1]" : : "r" (val), "r" (addr)); + asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr)); } #define __raw_readb __raw_readb @@ -184,17 +184,6 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) #define iowrite64be(v,p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); }) -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * 
access - */ -#define xlate_dev_mem_ptr(p) __va(p) - -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - #include <asm-generic/io.h> /* diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index b6bb83400cd8..dff109871f2a 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -99,14 +99,10 @@ .macro kern_hyp_va reg alternative_if_not ARM64_HAS_VIRT_HOST_EXTN and \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK -alternative_else - nop -alternative_endif -alternative_if_not ARM64_HYP_OFFSET_LOW - nop -alternative_else +alternative_else_nop_endif +alternative_if ARM64_HYP_OFFSET_LOW and \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK -alternative_endif +alternative_else_nop_endif .endm #else diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 31b73227b41f..ba62df8c6e35 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -214,7 +214,7 @@ static inline void *phys_to_virt(phys_addr_t x) #ifndef CONFIG_SPARSEMEM_VMEMMAP #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #else #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) #define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) @@ -222,11 +222,15 @@ static inline void *phys_to_virt(phys_addr_t x) #define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) -#define virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \ +#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \ + PHYS_OFFSET) >> PAGE_SHIFT) #endif #endif +#define _virt_addr_is_linear(kaddr) (((u64)(kaddr)) >= PAGE_OFFSET) +#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \ + _virt_addr_valid(kaddr)) + #include <asm-generic/memory_model.h> #endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index b1892a0dbcb0..a50185375f09 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -27,22 +27,17 @@ #include <asm-generic/mm_hooks.h> #include <asm/cputype.h> #include <asm/pgtable.h> +#include <asm/sysreg.h> #include <asm/tlbflush.h> -#ifdef CONFIG_PID_IN_CONTEXTIDR -static inline void contextidr_thread_switch(struct task_struct *next) -{ - asm( - " msr contextidr_el1, %0\n" - " isb" - : - : "r" (task_pid_nr(next))); -} -#else static inline void contextidr_thread_switch(struct task_struct *next) { + if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR)) + return; + + write_sysreg(task_pid_nr(next), contextidr_el1); + isb(); } -#endif /* * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. 
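The contextidr_thread_switch() rewrite above replaces an #ifdef'd pair of definitions with a single function whose body constant-folds away when the option is off. A minimal userspace model of the IS_ENABLED() trick this relies on; the real include/linux/kconfig.h version also copes with =m (module) options, which this sketch omits:

#include <stdio.h>

/* Cut-down model of IS_ENABLED(): a config option is either #defined
 * to 1 or not defined at all. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) ___is_defined(option)

#define CONFIG_PID_IN_CONTEXTIDR 1	/* delete this line to compile the body out */

static void contextidr_thread_switch(int pid)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;		/* branch is constant, the write below is elided */

	printf("msr contextidr_el1, %d; isb\n", pid);
}

int main(void)
{
	contextidr_thread_switch(42);
	return 0;
}

Either way the compiler sees valid C for both configurations, which is the point: unlike #ifdef, the disabled path still gets type-checked.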
@@ -51,11 +46,8 @@ static inline void cpu_set_reserved_ttbr0(void) { unsigned long ttbr = virt_to_phys(empty_zero_page); - asm( - " msr ttbr0_el1, %0 // set TTBR0\n" - " isb" - : - : "r" (ttbr)); + write_sysreg(ttbr, ttbr0_el1); + isb(); } /* @@ -81,13 +73,11 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz) if (!__cpu_uses_extended_idmap()) return; - asm volatile ( - " mrs %0, tcr_el1 ;" - " bfi %0, %1, %2, %3 ;" - " msr tcr_el1, %0 ;" - " isb" - : "=&r" (tcr) - : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); + tcr = read_sysreg(tcr_el1); + tcr &= ~TCR_T0SZ_MASK; + tcr |= t0sz << TCR_T0SZ_OFFSET; + write_sysreg(tcr, tcr_el1); + isb(); } #define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)) diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index c3ae239db3ee..eb0c2bd90de9 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -208,6 +208,7 @@ #define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET) #define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x)) #define TCR_TxSZ_WIDTH 6 +#define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET) #define TCR_IRGN0_SHIFT 8 #define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 39f5252673f7..2142c7726e76 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -70,12 +70,13 @@ #define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) +#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY #define __P010 PAGE_COPY #define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_EXEC +#define __P100 PAGE_EXECONLY #define __P101 PAGE_READONLY_EXEC #define __P110 PAGE_COPY_EXEC #define __P111 PAGE_COPY_EXEC @@ -84,7 +85,7 @@ #define __S001 PAGE_READONLY #define __S010 PAGE_SHARED #define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC +#define __S100 PAGE_EXECONLY #define __S101 PAGE_READONLY_EXEC #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index e20bd431184a..ffbb9a520563 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -73,7 +73,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) -#define pte_user(pte) (!!(pte_val(pte) & PTE_USER)) +#define pte_ng(pte) (!!(pte_val(pte) & PTE_NG)) #ifdef CONFIG_ARM64_HW_AFDBM #define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY)) @@ -84,8 +84,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) -#define pte_valid_not_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) +#define pte_valid_global(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_NG)) == PTE_VALID) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) @@ -155,6 +155,16 @@ static inline pte_t pte_mknoncont(pte_t pte) return 
clear_pte_bit(pte, __pgprot(PTE_CONT)); } +static inline pte_t pte_clear_rdonly(pte_t pte) +{ + return clear_pte_bit(pte, __pgprot(PTE_RDONLY)); +} + +static inline pte_t pte_mkpresent(pte_t pte) +{ + return set_pte_bit(pte, __pgprot(PTE_VALID)); +} + static inline pmd_t pmd_mkcont(pmd_t pmd) { return __pmd(pmd_val(pmd) | PMD_SECT_CONT); @@ -168,7 +178,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) * Only if the new pte is valid and kernel, otherwise TLB maintenance * or update_mmu_cache() have the necessary barriers. */ - if (pte_valid_not_user(pte)) { + if (pte_valid_global(pte)) { dsb(ishst); isb(); } @@ -202,7 +212,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_val(pte) &= ~PTE_RDONLY; else pte_val(pte) |= PTE_RDONLY; - if (pte_user(pte) && pte_exec(pte) && !pte_special(pte)) + if (pte_ng(pte) && pte_exec(pte) && !pte_special(pte)) __sync_icache_dcache(pte, addr); } diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index ace0a96e7d6e..df2e53d3a969 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -37,7 +37,6 @@ #include <asm/ptrace.h> #include <asm/types.h> -#ifdef __KERNEL__ #define STACK_TOP_MAX TASK_SIZE_64 #ifdef CONFIG_COMPAT #define AARCH32_VECTORS_BASE 0xffff0000 @@ -49,7 +48,6 @@ extern phys_addr_t arm64_dma_phys_limit; #define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1) -#endif /* __KERNEL__ */ struct debug_info { /* Have we suspended stepping by a debugger? */ diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h new file mode 100644 index 000000000000..4e7e7067afdb --- /dev/null +++ b/arch/arm64/include/asm/sections.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2016 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_SECTIONS_H +#define __ASM_SECTIONS_H + +#include <asm-generic/sections.h> + +extern char __alt_instructions[], __alt_instructions_end[]; +extern char __exception_text_start[], __exception_text_end[]; +extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; +extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; +extern char __hyp_text_start[], __hyp_text_end[]; +extern char __idmap_text_start[], __idmap_text_end[]; +extern char __irqentry_text_start[], __irqentry_text_end[]; +extern char __mmuoff_data_start[], __mmuoff_data_end[]; + +#endif /* __ASM_SECTIONS_H */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 89206b568cd4..cae331d553f8 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -66,8 +66,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) ARM64_LSE_ATOMIC_INSN( /* LL/SC */ " stxr %w1, %w0, %2\n" -" nop\n" -" nop\n", + __nops(2), /* LSE atomics */ " mov %w1, %w0\n" " cas %w0, %w0, %2\n" @@ -99,9 +98,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) /* LSE atomics */ " mov %w2, %w5\n" " ldadda %w2, %w0, %3\n" -" nop\n" -" nop\n" -" nop\n" + __nops(3) ) /* Did we get the lock? */ @@ -165,8 +162,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) " stlrh %w1, %0", /* LSE atomics */ " mov %w1, #1\n" - " nop\n" - " staddlh %w1, %0") + " staddlh %w1, %0\n" + __nops(1)) : "=Q" (lock->owner), "=&r" (tmp) : : "memory"); @@ -212,7 +209,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " cbnz %w0, 1b\n" " stxr %w0, %w2, %1\n" " cbnz %w0, 2b\n" - " nop", + __nops(1), /* LSE atomics */ "1: mov %w0, wzr\n" "2: casa %w0, %w2, %1\n" @@ -241,8 +238,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) /* LSE atomics */ " mov %w0, wzr\n" " casa %w0, %w2, %1\n" - " nop\n" - " nop") + __nops(2)) : "=&r" (tmp), "+Q" (rw->lock) : "r" (0x80000000) : "memory"); @@ -290,8 +286,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " add %w0, %w0, #1\n" " tbnz %w0, #31, 1b\n" " stxr %w1, %w0, %2\n" - " nop\n" - " cbnz %w1, 2b", + " cbnz %w1, 2b\n" + __nops(1), /* LSE atomics */ "1: wfe\n" "2: ldxr %w0, %2\n" @@ -317,9 +313,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " cbnz %w1, 1b", /* LSE atomics */ " movn %w0, #0\n" - " nop\n" - " nop\n" - " staddl %w0, %2") + " staddl %w0, %2\n" + __nops(2)) : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) : : "memory"); @@ -344,7 +339,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) " tbnz %w1, #31, 1f\n" " casa %w0, %w1, %2\n" " sbc %w1, %w1, %w0\n" - " nop\n" + __nops(1) "1:") : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) : diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 024d623f662e..b8a313fd7a09 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -47,4 +47,7 @@ int swsusp_arch_resume(void); int arch_hibernation_header_save(void *addr, unsigned int max_size); int arch_hibernation_header_restore(void *addr); +/* Used to resume on the CPU we hibernated on */ +int hibernate_resume_nonboot_cpu_disable(void); + #endif diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index cc06794b7346..e8d46e8e6079 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -100,6 +100,7 @@ /* SCTLR_EL1 specific flags. 
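The sysreg.h hunk below rewrites write_sysreg() with a "%x0" template and the "rZ" constraint; as its new comment notes, that combination lets the compiler substitute XZR when the value is a constant zero rather than burning a scratch register on materialising 0. An arm64-only userspace sketch of the same pattern (a sketch, not the kernel header: it needs an AArch64 compiler, GNU C, and optimisation enabled for the xzr form to appear; FPCR is used purely because it is EL0-accessible and harmless to zero in a demo):

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

#define write_sysreg(v, r) do {					\
	unsigned long __val = (unsigned long)(v);		\
	asm volatile("msr " __stringify(r) ", %x0"		\
		     : : "rZ" (__val));				\
} while (0)

#define read_sysreg(r) ({					\
	unsigned long __val;					\
	asm volatile("mrs %0, " __stringify(r)			\
		     : "=r" (__val));				\
	__val;							\
})

int main(void)
{
	write_sysreg(0, fpcr);	/* emitted as: msr fpcr, xzr */
	printf("fpcr = %#lx\n", read_sysreg(fpcr));
	return 0;
}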
*/ #define SCTLR_EL1_UCI (1 << 26) #define SCTLR_EL1_SPAN (1 << 23) +#define SCTLR_EL1_UCT (1 << 15) #define SCTLR_EL1_SED (1 << 8) #define SCTLR_EL1_CP15BEN (1 << 5) @@ -253,16 +254,6 @@ asm( " .endm\n" ); -static inline void config_sctlr_el1(u32 clear, u32 set) -{ - u32 val; - - asm volatile("mrs %0, sctlr_el1" : "=r" (val)); - val &= ~clear; - val |= set; - asm volatile("msr sctlr_el1, %0" : : "r" (val)); -} - /* * Unlike read_cpuid, calls to read_sysreg are never expected to be * optimized away or replaced with synthetic values. @@ -273,12 +264,41 @@ __val; \ }) +/* + * The "Z" constraint normally means a zero immediate, but when combined with + * the "%x0" template means XZR. + */ #define write_sysreg(v, r) do { \ u64 __val = (u64)v; \ - asm volatile("msr " __stringify(r) ", %0" \ - : : "r" (__val)); \ + asm volatile("msr " __stringify(r) ", %x0" \ + : : "rZ" (__val)); \ +} while (0) + +/* + * For registers without architectural names, or simply unsupported by + * GAS. + */ +#define read_sysreg_s(r) ({ \ + u64 __val; \ + asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + +#define write_sysreg_s(v, r) do { \ + u64 __val = (u64)v; \ + asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val)); \ } while (0) +static inline void config_sctlr_el1(u32 clear, u32 set) +{ + u32 val; + + val = read_sysreg(sctlr_el1); + val &= ~clear; + val |= set; + write_sysreg(val, sctlr_el1); +} + #endif #endif /* __ASM_SYSREG_H */ diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 57f110bea6a8..bc812435bc76 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h @@ -56,12 +56,6 @@ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); __show_ratelimited; \ }) -#define UDBG_UNDEFINED (1 << 0) -#define UDBG_SYSCALL (1 << 1) -#define UDBG_BADABORT (1 << 2) -#define UDBG_SEGV (1 << 3) -#define UDBG_BUS (1 << 4) - #endif /* __ASSEMBLY__ */ #endif /* __ASM_SYSTEM_MISC_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index abd64bd1f6d9..e9ea5a6bd449 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -75,6 +75,9 @@ static inline struct thread_info *current_thread_info(void) __attribute_const__; /* * struct thread_info can be accessed directly via sp_el0. + * + * We don't use read_sysreg() as we want the compiler to cache the value where + * possible. */ static inline struct thread_info *current_thread_info(void) { diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index b460ae28e346..deab52374119 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -25,6 +25,24 @@ #include <asm/cputype.h> /* + * Raw TLBI operations. + * + * Where necessary, use the __tlbi() macro to avoid asm() + * boilerplate. Drivers and most kernel code should use the TLB + * management routines in preference to the macro below. + * + * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending + * on whether a particular TLBI operation takes an argument or + * not. The macro handles invoking the asm with or without the + * register argument as appropriate. + */ +#define __TLBI_0(op, arg) asm ("tlbi " #op) +#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0" : : "r" (arg)) +#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg) + +#define __tlbi(op, ...) 
__TLBI_N(op, ##__VA_ARGS__, 1, 0) + +/* * TLB Management * ============== * @@ -66,7 +84,7 @@ static inline void local_flush_tlb_all(void) { dsb(nshst); - asm("tlbi vmalle1"); + __tlbi(vmalle1); dsb(nsh); isb(); } @@ -74,7 +92,7 @@ static inline void local_flush_tlb_all(void) static inline void flush_tlb_all(void) { dsb(ishst); - asm("tlbi vmalle1is"); + __tlbi(vmalle1is); dsb(ish); isb(); } @@ -84,7 +102,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) unsigned long asid = ASID(mm) << 48; dsb(ishst); - asm("tlbi aside1is, %0" : : "r" (asid)); + __tlbi(aside1is, asid); dsb(ish); } @@ -94,7 +112,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); dsb(ishst); - asm("tlbi vale1is, %0" : : "r" (addr)); + __tlbi(vale1is, addr); dsb(ish); } @@ -122,9 +140,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, dsb(ishst); for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { if (last_level) - asm("tlbi vale1is, %0" : : "r"(addr)); + __tlbi(vale1is, addr); else - asm("tlbi vae1is, %0" : : "r"(addr)); + __tlbi(vae1is, addr); } dsb(ish); } @@ -149,7 +167,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end dsb(ishst); for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) - asm("tlbi vaae1is, %0" : : "r"(addr)); + __tlbi(vaae1is, addr); dsb(ish); isb(); } @@ -163,7 +181,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, { unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); - asm("tlbi vae1is, %0" : : "r" (addr)); + __tlbi(vae1is, addr); dsb(ish); } diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 9cd03f3e812f..02e9035b0685 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -19,6 +19,7 @@ #define __ASM_TRAP_H #include <linux/list.h> +#include <asm/sections.h> struct pt_regs; @@ -39,9 +40,6 @@ void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr); #ifdef CONFIG_FUNCTION_GRAPH_TRACER static inline int __in_irqentry_text(unsigned long ptr) { - extern char __irqentry_text_start[]; - extern char __irqentry_text_end[]; - return ptr >= (unsigned long)&__irqentry_text_start && ptr < (unsigned long)&__irqentry_text_end; } @@ -54,8 +52,6 @@ static inline int __in_irqentry_text(unsigned long ptr) static inline int in_exception_text(unsigned long ptr) { - extern char __exception_text_start[]; - extern char __exception_text_end[]; int in; in = ptr >= (unsigned long)&__exception_text_start && diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 1788545f25bc..fea10736b11f 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -45,6 +45,8 @@ #ifndef __ASSEMBLY__ #include <asm/ptrace.h> +#include <asm/sections.h> +#include <asm/sysreg.h> /* * __boot_cpu_mode records what mode CPUs were booted in. 
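The argument-counting dispatch behind the __tlbi() macro introduced in the tlbflush.h hunk above is easy to verify in userspace once the asm is swapped for a printf. The ", ##__VA_ARGS__" comma-swallowing form is a GNU extension, so this sketch needs gcc or clang:

#include <stdio.h>

/* Same three helpers as the kernel's, with printf standing in for the
 * tlbi instruction. The trailing "1, 0" arguments shift so that n ends
 * up as 1 when a TLBI argument was supplied and 0 when it wasn't. */
#define __TLBI_0(op, arg)	printf("tlbi " #op "\n")
#define __TLBI_1(op, arg)	printf("tlbi " #op ", %#lx\n", (unsigned long)(arg))
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)	__TLBI_N(op, ##__VA_ARGS__, 1, 0)

int main(void)
{
	__tlbi(vmalle1is);		/* no argument  -> __TLBI_N(op, 1, 0)         -> __TLBI_0 */
	__tlbi(vale1is, 0x1234);	/* one argument -> __TLBI_N(op, 0x1234, 1, 0) -> __TLBI_1 */
	return 0;
}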
@@ -75,10 +77,7 @@ static inline bool is_hyp_mode_mismatched(void) static inline bool is_kernel_in_hyp_mode(void) { - u64 el; - - asm("mrs %0, CurrentEL" : "=r" (el)); - return el == CurrentEL_EL2; + return read_sysreg(CurrentEL) == CurrentEL_EL2; } #ifdef CONFIG_ARM64_VHE @@ -87,14 +86,6 @@ extern void verify_cpu_run_el(void); static inline void verify_cpu_run_el(void) {} #endif -/* The section containing the hypervisor idmap text */ -extern char __hyp_idmap_text_start[]; -extern char __hyp_idmap_text_end[]; - -/* The section containing the hypervisor text */ -extern char __hyp_text_start[]; -extern char __hyp_text_end[]; - #endif /* __ASSEMBLY__ */ #endif /* ! __ASM__VIRT_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 14f7b651c787..7d66bbaafc0c 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -10,6 +10,8 @@ CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_return_address.o = -pg +CFLAGS_setup.o = -DUTS_MACHINE='"$(UTS_MACHINE)"' + # Object file lists. arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c index f85149cc7c71..f01fab637dab 100644 --- a/arch/arm64/kernel/acpi_numa.c +++ b/arch/arm64/kernel/acpi_numa.c @@ -105,8 +105,10 @@ int __init arm64_acpi_numa_init(void) int ret; ret = acpi_numa_init(); - if (ret) + if (ret) { + pr_info("Failed to initialise from firmware\n"); return ret; + } return srat_disabled() ? -EINVAL : 0; } diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index d2ee1b21a10d..06d650f61da7 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -25,14 +25,13 @@ #include <asm/alternative.h> #include <asm/cpufeature.h> #include <asm/insn.h> +#include <asm/sections.h> #include <linux/stop_machine.h> #define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f) #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) -extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; - struct alt_region { struct alt_instr *begin; struct alt_instr *end; @@ -59,6 +58,8 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) BUG(); } +#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) + static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr) { u32 insn; @@ -80,6 +81,25 @@ static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr) offset = target - (unsigned long)insnptr; insn = aarch64_set_branch_offset(insn, offset); } + } else if (aarch64_insn_is_adrp(insn)) { + s32 orig_offset, new_offset; + unsigned long target; + + /* + * If we're replacing an adrp instruction, which uses PC-relative + * immediate addressing, adjust the offset to reflect the new + * PC. adrp operates on 4K aligned addresses. 
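Because adrp computes align_down(PC, 4K) + (imm << 12), an adrp copied out of an alternatives section would resolve against the wrong page unless its immediate is rebased, which is what the three assignments that follow do. The arithmetic can be sanity-checked standalone; every address below is invented for illustration, and orig_offset stands in for what aarch64_insn_adrp_get_offset() would decode from the instruction:

#include <stdio.h>

#define SZ_4K 0x1000UL
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

int main(void)
{
	unsigned long altinsnptr = 0xffff000008a51234UL;	/* where the adrp was written */
	unsigned long insnptr    = 0xffff000008123abcUL;	/* where it will actually run */
	long orig_offset         = 0x7000;			/* page-granular, from the insn */

	unsigned long target = align_down(altinsnptr, SZ_4K) + orig_offset;
	long new_offset = (long)(target - align_down(insnptr, SZ_4K));

	printf("target     = %#lx\n", target);		/* 0xffff000008a58000 */
	printf("new offset = %#lx\n", new_offset);	/* 0x935000 */
	return 0;
}

The same reasoning is why the branch that follows BUG()s on any other PC-relative literal user: there is no generic way to rebase an arbitrary instruction.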
+ */ + orig_offset = aarch64_insn_adrp_get_offset(insn); + target = align_down(altinsnptr, SZ_4K) + orig_offset; + new_offset = target - align_down(insnptr, SZ_4K); + insn = aarch64_insn_adrp_set_offset(insn, new_offset); + } else if (aarch64_insn_uses_literal(insn)) { + /* + * Disallow patching unhandled instructions using PC relative + * literal addresses + */ + BUG(); } return insn; @@ -124,8 +144,8 @@ static int __apply_alternatives_multi_stop(void *unused) { static int patched = 0; struct alt_region region = { - .begin = __alt_instructions, - .end = __alt_instructions_end, + .begin = (struct alt_instr *)__alt_instructions, + .end = (struct alt_instr *)__alt_instructions_end, }; /* We always have a CPU 0 at this point (__init) */ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 05070b72fc28..4a2f0f0fef32 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -23,6 +23,7 @@ #include <linux/dma-mapping.h> #include <linux/kvm_host.h> #include <linux/suspend.h> +#include <asm/cpufeature.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/smp_plat.h> @@ -145,5 +146,6 @@ int main(void) DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); + DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val)); return 0; } diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index b8629d52fba9..9617301f76b5 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -39,7 +39,7 @@ static inline enum cache_type get_cache_type(int level) if (level > MAX_CACHE_LEVEL) return CACHE_TYPE_NOCACHE; - asm volatile ("mrs %x0, clidr_el1" : "=r" (clidr)); + clidr = read_sysreg(clidr_el1); return CLIDR_CTYPE(clidr, level); } @@ -55,11 +55,9 @@ u64 __attribute_const__ cache_get_ccsidr(u64 csselr) WARN_ON(preemptible()); - /* Put value into CSSELR */ - asm volatile("msr csselr_el1, %x0" : : "r" (csselr)); + write_sysreg(csselr, csselr_el1); isb(); - /* Read result out of CCSIDR */ - asm volatile("mrs %x0, ccsidr_el1" : "=r" (ccsidr)); + ccsidr = read_sysreg(ccsidr_el1); return ccsidr; } diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 82b0fc2e637b..0150394f4cab 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -30,6 +30,21 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) entry->midr_range_max); } +static bool +has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, + int scope) +{ + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) != + (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask); +} + +static void cpu_enable_trap_ctr_access(void *__unused) +{ + /* Clear SCTLR_EL1.UCT */ + config_sctlr_el1(SCTLR_EL1_UCT, 0); +} + #define MIDR_RANGE(model, min, max) \ .def_scope = SCOPE_LOCAL_CPU, \ .matches = is_affected_midr_range, \ @@ -108,6 +123,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { }, #endif { + .desc = "Mismatched cache line size", + .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE, + .matches = has_mismatched_cache_line_size, + .def_scope = SCOPE_LOCAL_CPU, + .enable = cpu_enable_trap_ctr_access, + }, + { } }; @@ -116,7 +138,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { * and the related information is freed soon after. 
If the new CPU requires * an errata not detected at boot, fail this CPU. */ -void verify_local_cpu_errata(void) +void verify_local_cpu_errata_workarounds(void) { const struct arm64_cpu_capabilities *caps = arm64_errata; @@ -131,7 +153,7 @@ void verify_local_cpu_errata(void) } } -void check_local_cpu_errata(void) +void update_cpu_errata_workarounds(void) { update_cpu_capabilities(arm64_errata, "enabling workaround for"); } diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index c7cfb8fe06f9..e137ceaf5016 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -17,6 +17,7 @@ */ #include <linux/acpi.h> +#include <linux/cache.h> #include <linux/errno.h> #include <linux/of.h> #include <linux/string.h> @@ -28,7 +29,7 @@ extern const struct cpu_operations smp_spin_table_ops; extern const struct cpu_operations acpi_parking_protocol_ops; extern const struct cpu_operations cpu_psci_ops; -const struct cpu_operations *cpu_ops[NR_CPUS]; +const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = { &smp_spin_table_ops, diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 62272eac1352..d577f263cc4a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -46,6 +46,9 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); +DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS); +EXPORT_SYMBOL(cpu_hwcap_keys); + #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ { \ .sign = SIGNED, \ @@ -74,7 +77,7 @@ static bool __maybe_unused cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused); -static struct arm64_ftr_bits ftr_id_aa64isar0[] = { +static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0), @@ -87,7 +90,7 @@ static struct arm64_ftr_bits ftr_id_aa64isar0[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_aa64pfr0[] = { +static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), @@ -101,7 +104,7 @@ static struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { +static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), @@ -119,7 +122,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { +static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0), @@ -130,7 +133,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { +static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 
ID_AA64MMFR2_IESB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0), @@ -139,7 +142,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_ctr[] = { +static const struct arm64_ftr_bits ftr_ctr[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ @@ -147,15 +150,21 @@ static struct arm64_ftr_bits ftr_ctr[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ /* * Linux can handle differing I-cache policies. Userspace JITs will - * make use of *minLine + * make use of *minLine. + * If we have differing I-cache policies, report it as the weakest - AIVIVT. */ - ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */ + ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT), /* L1Ip */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */ ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_mmfr0[] = { +struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = { + .name = "SYS_CTR_EL0", + .ftr_bits = ftr_ctr +}; + +static const struct arm64_ftr_bits ftr_id_mmfr0[] = { S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */ ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */ @@ -167,7 +176,7 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { +static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0), @@ -178,14 +187,14 @@ static struct arm64_ftr_bits ftr_id_aa64dfr0[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_mvfr2[] = { +static const struct arm64_ftr_bits ftr_mvfr2[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0), /* RAZ */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */ ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_dczid[] = { +static const struct arm64_ftr_bits ftr_dczid[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0), /* RAZ */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */ @@ -193,7 +202,7 @@ static struct arm64_ftr_bits ftr_dczid[] = { }; -static struct arm64_ftr_bits ftr_id_isar5[] = { +static const struct arm64_ftr_bits ftr_id_isar5[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0), /* RAZ */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0), @@ -204,14 +213,14 @@ static struct arm64_ftr_bits ftr_id_isar5[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_mmfr4[] = { +static const struct arm64_ftr_bits ftr_id_mmfr4[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0), /* RAZ */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* RAZ */ ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_pfr0[] = { +static const struct arm64_ftr_bits ftr_id_pfr0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0), /* RAZ */ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */ ARM64_FTR_BITS(FTR_STRICT, 
FTR_EXACT, 8, 4, 0), /* State2 */ @@ -220,7 +229,7 @@ static struct arm64_ftr_bits ftr_id_pfr0[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_id_dfr0[] = { +static const struct arm64_ftr_bits ftr_id_dfr0[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), @@ -238,7 +247,7 @@ static struct arm64_ftr_bits ftr_id_dfr0[] = { * 0. Covers the following 32bit registers: * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1] */ -static struct arm64_ftr_bits ftr_generic_32bits[] = { +static const struct arm64_ftr_bits ftr_generic_32bits[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), @@ -250,29 +259,32 @@ static struct arm64_ftr_bits ftr_generic_32bits[] = { ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_generic[] = { +static const struct arm64_ftr_bits ftr_generic[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0), ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_generic32[] = { +static const struct arm64_ftr_bits ftr_generic32[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0), ARM64_FTR_END, }; -static struct arm64_ftr_bits ftr_aa64raz[] = { +static const struct arm64_ftr_bits ftr_aa64raz[] = { ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0), ARM64_FTR_END, }; -#define ARM64_FTR_REG(id, table) \ - { \ - .sys_id = id, \ +#define ARM64_FTR_REG(id, table) { \ + .sys_id = id, \ + .reg = &(struct arm64_ftr_reg){ \ .name = #id, \ .ftr_bits = &((table)[0]), \ - } + }} -static struct arm64_ftr_reg arm64_ftr_regs[] = { +static const struct __ftr_reg_entry { + u32 sys_id; + struct arm64_ftr_reg *reg; +} arm64_ftr_regs[] = { /* Op1 = 0, CRn = 0, CRm = 1 */ ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0), @@ -315,7 +327,7 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = { ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2), /* Op1 = 3, CRn = 0, CRm = 0 */ - ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr), + { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 }, ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid), /* Op1 = 3, CRn = 14, CRm = 0 */ @@ -324,7 +336,7 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = { static int search_cmp_ftr_reg(const void *id, const void *regp) { - return (int)(unsigned long)id - (int)((const struct arm64_ftr_reg *)regp)->sys_id; + return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id; } /* @@ -339,14 +351,20 @@ static int search_cmp_ftr_reg(const void *id, const void *regp) */ static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id) { - return bsearch((const void *)(unsigned long)sys_id, + const struct __ftr_reg_entry *ret; + + ret = bsearch((const void *)(unsigned long)sys_id, arm64_ftr_regs, ARRAY_SIZE(arm64_ftr_regs), sizeof(arm64_ftr_regs[0]), search_cmp_ftr_reg); + if (ret) + return ret->reg; + return NULL; } -static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val) +static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg, + s64 ftr_val) { u64 mask = arm64_ftr_mask(ftrp); @@ -355,7 +373,8 @@ static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val return reg; } -static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur) +static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, + s64 cur) { s64 ret = 0; @@ -376,27 +395,13 @@ static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur) 
return ret; } -static int __init sort_cmp_ftr_regs(const void *a, const void *b) -{ - return ((const struct arm64_ftr_reg *)a)->sys_id - - ((const struct arm64_ftr_reg *)b)->sys_id; -} - -static void __init swap_ftr_regs(void *a, void *b, int size) -{ - struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a; - *(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b; - *(struct arm64_ftr_reg *)b = tmp; -} - static void __init sort_ftr_regs(void) { - /* Keep the array sorted so that we can do the binary search */ - sort(arm64_ftr_regs, - ARRAY_SIZE(arm64_ftr_regs), - sizeof(arm64_ftr_regs[0]), - sort_cmp_ftr_regs, - swap_ftr_regs); + int i; + + /* Check that the array is sorted so that we can do the binary search */ + for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++) + BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id); } /* @@ -407,7 +412,7 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) { u64 val = 0; u64 strict_mask = ~0x0ULL; - struct arm64_ftr_bits *ftrp; + const struct arm64_ftr_bits *ftrp; struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg); BUG_ON(!reg); @@ -464,7 +469,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) { - struct arm64_ftr_bits *ftrp; + const struct arm64_ftr_bits *ftrp; for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) { s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val); @@ -1004,23 +1009,33 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) * cannot do anything to fix it up and could cause unexpected failures. So * we park the CPU. */ -void verify_local_cpu_capabilities(void) +static void verify_local_cpu_capabilities(void) { + verify_local_cpu_errata_workarounds(); + verify_local_cpu_features(arm64_features); + verify_local_elf_hwcaps(arm64_elf_hwcaps); + if (system_supports_32bit_el0()) + verify_local_elf_hwcaps(compat_elf_hwcaps); +} +void check_local_cpu_capabilities(void) +{ + /* + * All secondary CPUs should conform to the early CPU features + * in use by the kernel based on boot CPU. + */ check_early_cpu_features(); /* - * If we haven't computed the system capabilities, there is nothing - * to verify. + * If we haven't finalised the system capabilities, this CPU gets + * a chance to update the errata work arounds. + * Otherwise, this CPU should verify that it has all the system + * advertised capabilities. 
*/ if (!sys_caps_initialised) - return; - - verify_local_cpu_errata(); - verify_local_cpu_features(arm64_features); - verify_local_elf_hwcaps(arm64_elf_hwcaps); - if (system_supports_32bit_el0()) - verify_local_elf_hwcaps(compat_elf_hwcaps); + update_cpu_errata_workarounds(); + else + verify_local_cpu_capabilities(); } static void __init setup_feature_capabilities(void) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index ed1b84fe6925..b3d5b3e8fbcb 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -363,8 +363,6 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) } cpuinfo_detect_icache_policy(info); - - check_local_cpu_errata(); } void cpuinfo_store_cpu(void) diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 91fff48d0f57..73ae90ef434c 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -46,16 +46,14 @@ static void mdscr_write(u32 mdscr) { unsigned long flags; local_dbg_save(flags); - asm volatile("msr mdscr_el1, %0" :: "r" (mdscr)); + write_sysreg(mdscr, mdscr_el1); local_dbg_restore(flags); } NOKPROBE_SYMBOL(mdscr_write); static u32 mdscr_read(void) { - u32 mdscr; - asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr)); - return mdscr; + return read_sysreg(mdscr_el1); } NOKPROBE_SYMBOL(mdscr_read); @@ -132,36 +130,18 @@ NOKPROBE_SYMBOL(disable_debug_monitors); /* * OS lock clearing. */ -static void clear_os_lock(void *unused) +static int clear_os_lock(unsigned int cpu) { - asm volatile("msr oslar_el1, %0" : : "r" (0)); -} - -static int os_lock_notify(struct notifier_block *self, - unsigned long action, void *data) -{ - if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) - clear_os_lock(NULL); - return NOTIFY_OK; + write_sysreg(0, oslar_el1); + isb(); + return 0; } -static struct notifier_block os_lock_nb = { - .notifier_call = os_lock_notify, -}; - static int debug_monitors_init(void) { - cpu_notifier_register_begin(); - - /* Clear the OS lock. */ - on_each_cpu(clear_os_lock, NULL, 1); - isb(); - - /* Register hotplug handler. 
*/ - __register_cpu_notifier(&os_lock_nb); - - cpu_notifier_register_done(); - return 0; + return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, + "CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING", + clear_os_lock, NULL); } postcore_initcall(debug_monitors_init); @@ -254,7 +234,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr, return 0; if (user_mode(regs)) { - send_user_sigtrap(TRAP_HWBKPT); + send_user_sigtrap(TRAP_TRACE); /* * ptrace will disable single step unless explicitly @@ -382,7 +362,7 @@ NOKPROBE_SYMBOL(aarch32_break_handler); static int __init debug_traps_init(void) { hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP, - TRAP_HWBKPT, "single-step handler"); + TRAP_TRACE, "single-step handler"); hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP, TRAP_BRKPT, "ptrace BRK handler"); return 0; @@ -435,8 +415,10 @@ NOKPROBE_SYMBOL(kernel_active_single_step); /* ptrace API */ void user_enable_single_step(struct task_struct *task) { - set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); - set_regs_spsr_ss(task_pt_regs(task)); + struct thread_info *ti = task_thread_info(task); + + if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP)) + set_regs_spsr_ss(task_pt_regs(task)); } NOKPROBE_SYMBOL(user_enable_single_step); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 441420ca7d08..223d54a4d66b 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -104,7 +104,7 @@ str x20, [sp, #S_ORIG_ADDR_LIMIT] mov x20, #TASK_SIZE_64 str x20, [tsk, #TI_ADDR_LIMIT] - ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO) + /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ .endif /* \el == 0 */ mrs x22, elr_el1 mrs x23, spsr_el1 @@ -150,13 +150,7 @@ ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 #ifdef CONFIG_ARM64_ERRATUM_845719 -alternative_if_not ARM64_WORKAROUND_845719 - nop - nop -#ifdef CONFIG_PID_IN_CONTEXTIDR - nop -#endif -alternative_else +alternative_if ARM64_WORKAROUND_845719 tbz x22, #4, 1f #ifdef CONFIG_PID_IN_CONTEXTIDR mrs x29, contextidr_el1 @@ -165,7 +159,7 @@ alternative_else msr contextidr_el1, xzr #endif 1: -alternative_endif +alternative_else_nop_endif #endif .endif msr elr_el1, x21 // set up the return data @@ -707,18 +701,13 @@ ret_fast_syscall_trace: * Ok, we need to do extra processing, enter the slow path. */ work_pending: - tbnz x1, #TIF_NEED_RESCHED, work_resched - /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ mov x0, sp // 'regs' - enable_irq // enable interrupts for do_notify_resume() bl do_notify_resume - b ret_to_user -work_resched: #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off // the IRQs are off here, inform the tracing code + bl trace_hardirqs_on // enabled while in userspace #endif - bl schedule - + ldr x1, [tsk, #TI_FLAGS] // re-check for single-step + b finish_ret_to_user /* * "slow" syscall return path. */ @@ -727,6 +716,7 @@ ret_to_user: ldr x1, [tsk, #TI_FLAGS] and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending +finish_ret_to_user: enable_step_tsk x1, x2 kernel_exit 0 ENDPROC(ret_to_user) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 3e7b050e99dc..427f6d3f084c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -208,13 +208,23 @@ efi_header_end: __INIT + /* + * The following callee saved general purpose registers are used on the + * primary lowlevel boot path: + * + * Register Scope Purpose + * x21 stext() .. 
start_kernel() FDT pointer passed at boot in x0 + * x23 stext() .. start_kernel() physical misalignment/KASLR offset + * x28 __create_page_tables() callee preserved temp register + * x19/x20 __primary_switch() callee preserved temp registers + */ ENTRY(stext) bl preserve_boot_args - bl el2_setup // Drop to EL1, w20=cpu_boot_mode - adrp x24, __PHYS_OFFSET - and x23, x24, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0 + bl el2_setup // Drop to EL1, w0=cpu_boot_mode + adrp x23, __PHYS_OFFSET + and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0 bl set_cpu_boot_mode_flag - bl __create_page_tables // x25=TTBR0, x26=TTBR1 + bl __create_page_tables /* * The following calls CPU setup code, see arch/arm64/mm/proc.S for * details. @@ -222,9 +232,7 @@ ENTRY(stext) * the TCR will have been set. */ bl __cpu_setup // initialise processor - adr_l x27, __primary_switch // address to jump to after - // MMU has been enabled - b __enable_mmu + b __primary_switch ENDPROC(stext) /* @@ -311,23 +319,21 @@ ENDPROC(preserve_boot_args) * been enabled */ __create_page_tables: - adrp x25, idmap_pg_dir - adrp x26, swapper_pg_dir mov x28, lr /* * Invalidate the idmap and swapper page tables to avoid potential * dirty cache lines being evicted. */ - mov x0, x25 - add x1, x26, #SWAPPER_DIR_SIZE + adrp x0, idmap_pg_dir + adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE bl __inval_cache_range /* * Clear the idmap and swapper page tables. */ - mov x0, x25 - add x6, x26, #SWAPPER_DIR_SIZE + adrp x0, idmap_pg_dir + adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE 1: stp xzr, xzr, [x0], #16 stp xzr, xzr, [x0], #16 stp xzr, xzr, [x0], #16 @@ -340,7 +346,7 @@ __create_page_tables: /* * Create the identity mapping. */ - mov x0, x25 // idmap_pg_dir + adrp x0, idmap_pg_dir adrp x3, __idmap_text_start // __pa(__idmap_text_start) #ifndef CONFIG_ARM64_VA_BITS_48 @@ -390,7 +396,7 @@ __create_page_tables: /* * Map the kernel image (starting with PHYS_OFFSET). */ - mov x0, x26 // swapper_pg_dir + adrp x0, swapper_pg_dir mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text) add x5, x5, x23 // add KASLR displacement create_pgd_entry x0, x5, x3, x6 @@ -405,8 +411,8 @@ __create_page_tables: * accesses (MMU disabled), invalidate the idmap and swapper page * tables again to remove any speculatively loaded cache lines. */ - mov x0, x25 - add x1, x26, #SWAPPER_DIR_SIZE + adrp x0, idmap_pg_dir + adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE dmb sy bl __inval_cache_range @@ -416,14 +422,27 @@ ENDPROC(__create_page_tables) /* * The following fragment of code is executed with the MMU enabled. + * + * x0 = __PHYS_OFFSET */ - .set initial_sp, init_thread_union + THREAD_START_SP __primary_switched: - mov x28, lr // preserve LR + adrp x4, init_thread_union + add sp, x4, #THREAD_SIZE + msr sp_el0, x4 // Save thread_info + adr_l x8, vectors // load VBAR_EL1 with virtual msr vbar_el1, x8 // vector table address isb + stp xzr, x30, [sp, #-16]! 
+ mov x29, sp + + str_l x21, __fdt_pointer, x5 // Save FDT pointer + + ldr_l x4, kimage_vaddr // Save the offset between + sub x4, x4, x0 // the kernel virtual and + str_l x4, kimage_voffset, x5 // physical mappings + // Clear BSS adr_l x0, __bss_start mov x1, xzr @@ -432,17 +451,6 @@ __primary_switched: bl __pi_memset dsb ishst // Make zero page visible to PTW - adr_l sp, initial_sp, x4 - mov x4, sp - and x4, x4, #~(THREAD_SIZE - 1) - msr sp_el0, x4 // Save thread_info - str_l x21, __fdt_pointer, x5 // Save FDT pointer - - ldr_l x4, kimage_vaddr // Save the offset between - sub x4, x4, x24 // the kernel virtual and - str_l x4, kimage_voffset, x5 // physical mappings - - mov x29, #0 #ifdef CONFIG_KASAN bl kasan_early_init #endif @@ -454,8 +462,8 @@ __primary_switched: bl kaslr_early_init // parse FDT for KASLR options cbz x0, 0f // KASLR disabled? just proceed orr x23, x23, x0 // record KASLR offset - ret x28 // we must enable KASLR, return - // to __enable_mmu() + ldp x29, x30, [sp], #16 // we must enable KASLR, return + ret // to __primary_switch() 0: #endif b start_kernel @@ -465,7 +473,7 @@ ENDPROC(__primary_switched) * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ - .section ".text","ax" + .section ".idmap.text","ax" ENTRY(kimage_vaddr) .quad _text - TEXT_OFFSET @@ -490,7 +498,7 @@ CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2 CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1 CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 msr sctlr_el1, x0 - mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 + mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 isb ret @@ -586,7 +594,7 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems cbz x2, install_el2_stub - mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 + mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 isb ret @@ -601,7 +609,7 @@ install_el2_stub: PSR_MODE_EL1h) msr spsr_el2, x0 msr elr_el2, lr - mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 + mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 eret ENDPROC(el2_setup) @@ -611,27 +619,39 @@ ENDPROC(el2_setup) */ set_cpu_boot_mode_flag: adr_l x1, __boot_cpu_mode - cmp w20, #BOOT_CPU_MODE_EL2 + cmp w0, #BOOT_CPU_MODE_EL2 b.ne 1f add x1, x1, #4 -1: str w20, [x1] // This CPU has booted in EL1 +1: str w0, [x1] // This CPU has booted in EL1 dmb sy dc ivac, x1 // Invalidate potentially stale cache line ret ENDPROC(set_cpu_boot_mode_flag) /* + * These values are written with the MMU off, but read with the MMU on. + * Writers will invalidate the corresponding address, discarding up to a + * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures + * sufficient alignment that the CWG doesn't overlap another section. + */ + .pushsection ".mmuoff.data.write", "aw" +/* * We need to find out the CPU boot mode long after boot, so we need to * store it in a writable variable. * * This is not in .bss, because we set it sufficiently early that the boot-time * zeroing of .bss would clobber it. */ - .pushsection .data..cacheline_aligned - .align L1_CACHE_SHIFT ENTRY(__boot_cpu_mode) .long BOOT_CPU_MODE_EL2 .long BOOT_CPU_MODE_EL1 +/* + * The booting CPU updates the failed status @__early_cpu_boot_status, + * with MMU turned off. + */ +ENTRY(__early_cpu_boot_status) + .long 0 + .popsection /* @@ -639,7 +659,7 @@ ENTRY(__boot_cpu_mode) * cores are held until we're ready for them to initialise. 
*/ ENTRY(secondary_holding_pen) - bl el2_setup // Drop to EL1, w20=cpu_boot_mode + bl el2_setup // Drop to EL1, w0=cpu_boot_mode bl set_cpu_boot_mode_flag mrs x0, mpidr_el1 mov_q x1, MPIDR_HWID_BITMASK @@ -666,12 +686,10 @@ secondary_startup: /* * Common entry point for secondary CPUs. */ - adrp x25, idmap_pg_dir - adrp x26, swapper_pg_dir bl __cpu_setup // initialise processor - - adr_l x27, __secondary_switch // address to jump to after enabling the MMU - b __enable_mmu + bl __enable_mmu + ldr x8, =__secondary_switched + br x8 ENDPROC(secondary_startup) __secondary_switched: @@ -706,33 +724,27 @@ ENDPROC(__secondary_switched) dc ivac, \tmp1 // Invalidate potentially stale cache line .endm - .pushsection .data..cacheline_aligned - .align L1_CACHE_SHIFT -ENTRY(__early_cpu_boot_status) - .long 0 - .popsection - /* * Enable the MMU. * * x0 = SCTLR_EL1 value for turning on the MMU. - * x27 = *virtual* address to jump to upon completion * - * Other registers depend on the function called upon completion. + * Returns to the caller via x30/lr. This requires the caller to be covered + * by the .idmap.text section. * * Checks if the selected granule size is supported by the CPU. * If it isn't, park the CPU */ - .section ".idmap.text", "ax" ENTRY(__enable_mmu) - mrs x22, sctlr_el1 // preserve old SCTLR_EL1 value mrs x1, ID_AA64MMFR0_EL1 ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED b.ne __no_granule_support update_early_cpu_boot_status 0, x1, x2 - msr ttbr0_el1, x25 // load TTBR0 - msr ttbr1_el1, x26 // load TTBR1 + adrp x1, idmap_pg_dir + adrp x2, swapper_pg_dir + msr ttbr0_el1, x1 // load TTBR0 + msr ttbr1_el1, x2 // load TTBR1 isb msr sctlr_el1, x0 isb @@ -744,29 +756,7 @@ ENTRY(__enable_mmu) ic iallu dsb nsh isb -#ifdef CONFIG_RANDOMIZE_BASE - mov x19, x0 // preserve new SCTLR_EL1 value - blr x27 - - /* - * If we return here, we have a KASLR displacement in x23 which we need - * to take into account by discarding the current kernel mapping and - * creating a new one. - */ - msr sctlr_el1, x22 // disable the MMU - isb - bl __create_page_tables // recreate kernel mapping - - tlbi vmalle1 // Remove any stale TLB entries - dsb nsh - - msr sctlr_el1, x19 // re-enable the MMU - isb - ic iallu // flush instructions fetched - dsb nsh // via old mapping - isb -#endif - br x27 + ret ENDPROC(__enable_mmu) __no_granule_support: @@ -775,11 +765,11 @@ __no_granule_support: 1: wfe wfi - b 1b + b 1b ENDPROC(__no_granule_support) -__primary_switch: #ifdef CONFIG_RELOCATABLE +__relocate_kernel: /* * Iterate over each entry in the relocation table, and apply the * relocations in place. @@ -801,14 +791,46 @@ __primary_switch: add x13, x13, x23 // relocate str x13, [x11, x23] b 0b +1: ret +ENDPROC(__relocate_kernel) +#endif -1: +__primary_switch: +#ifdef CONFIG_RANDOMIZE_BASE + mov x19, x0 // preserve new SCTLR_EL1 value + mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value #endif + + bl __enable_mmu +#ifdef CONFIG_RELOCATABLE + bl __relocate_kernel +#ifdef CONFIG_RANDOMIZE_BASE ldr x8, =__primary_switched - br x8 -ENDPROC(__primary_switch) + adrp x0, __PHYS_OFFSET + blr x8 -__secondary_switch: - ldr x8, =__secondary_switched + /* + * If we return here, we have a KASLR displacement in x23 which we need + * to take into account by discarding the current kernel mapping and + * creating a new one. 
+ */ + msr sctlr_el1, x20 // disable the MMU + isb + bl __create_page_tables // recreate kernel mapping + + tlbi vmalle1 // Remove any stale TLB entries + dsb nsh + + msr sctlr_el1, x19 // re-enable the MMU + isb + ic iallu // flush instructions fetched + dsb nsh // via old mapping + isb + + bl __relocate_kernel +#endif +#endif + ldr x8, =__primary_switched + adrp x0, __PHYS_OFFSET br x8 -ENDPROC(__secondary_switch) +ENDPROC(__primary_switch) diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index 46f29b6560ec..e56d848b6466 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -36,8 +36,8 @@ .macro break_before_make_ttbr_switch zero_page, page_table msr ttbr1_el1, \zero_page isb - tlbi vmalle1is - dsb ish + tlbi vmalle1 + dsb nsh msr ttbr1_el1, \page_table isb .endm @@ -96,7 +96,7 @@ ENTRY(swsusp_arch_suspend_exit) add x1, x10, #PAGE_SIZE /* Clean the copied page to PoU - based on flush_icache_range() */ - dcache_line_size x2, x3 + raw_dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x10, x3 2: dc cvau, x4 /* clean D line / unified line */ diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 65d81f965e74..d55a7b09959b 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -15,9 +15,9 @@ * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(x) "hibernate: " x +#include <linux/cpu.h> #include <linux/kvm_host.h> #include <linux/mm.h> -#include <linux/notifier.h> #include <linux/pm.h> #include <linux/sched.h> #include <linux/suspend.h> @@ -26,6 +26,7 @@ #include <asm/barrier.h> #include <asm/cacheflush.h> +#include <asm/cputype.h> #include <asm/irqflags.h> #include <asm/memory.h> #include <asm/mmu_context.h> @@ -34,6 +35,7 @@ #include <asm/pgtable-hwdef.h> #include <asm/sections.h> #include <asm/smp.h> +#include <asm/smp_plat.h> #include <asm/suspend.h> #include <asm/sysreg.h> #include <asm/virt.h> @@ -54,12 +56,6 @@ extern int in_suspend; /* Do we need to reset el2? */ #define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) -/* - * Start/end of the hibernate exit code, this must be copied to a 'safe' - * location in memory, and executed from there. - */ -extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[]; - /* temporary el2 vectors in the __hibernate_exit_text section. */ extern char hibernate_el2_vectors[]; @@ -67,6 +63,12 @@ extern char hibernate_el2_vectors[]; extern char __hyp_stub_vectors[]; /* + * The logical cpu number we should resume on, initialised to a non-cpu + * number. + */ +static int sleep_cpu = -EINVAL; + +/* * Values that may not change over hibernate/resume. We put the build number * and date in here so that we guarantee not to resume with a different * kernel. @@ -88,6 +90,8 @@ static struct arch_hibernate_hdr { * re-configure el2. */ phys_addr_t __hyp_stub_vectors; + + u64 sleep_cpu_mpidr; } resume_hdr; static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i) @@ -130,12 +134,22 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size) else hdr->__hyp_stub_vectors = 0; + /* Save the mpidr of the cpu we called cpu_suspend() on... 
*/ + if (sleep_cpu < 0) { + pr_err("Failing to hibernate on an unknown CPU.\n"); + return -ENODEV; + } + hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu); + pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu, + hdr->sleep_cpu_mpidr); + return 0; } EXPORT_SYMBOL(arch_hibernation_header_save); int arch_hibernation_header_restore(void *addr) { + int ret; struct arch_hibernate_hdr_invariants invariants; struct arch_hibernate_hdr *hdr = addr; @@ -145,6 +159,24 @@ int arch_hibernation_header_restore(void *addr) return -EINVAL; } + sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr); + pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu, + hdr->sleep_cpu_mpidr); + if (sleep_cpu < 0) { + pr_crit("Hibernated on a CPU not known to this kernel!\n"); + sleep_cpu = -EINVAL; + return -EINVAL; + } + if (!cpu_online(sleep_cpu)) { + pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n"); + ret = cpu_up(sleep_cpu); + if (ret) { + pr_err("Failed to bring hibernate-CPU up!\n"); + sleep_cpu = -EINVAL; + return ret; + } + } + resume_hdr = *hdr; return 0; @@ -241,6 +273,7 @@ out: return rc; } +#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start)) int swsusp_arch_suspend(void) { @@ -256,10 +289,16 @@ int swsusp_arch_suspend(void) local_dbg_save(flags); if (__cpu_suspend_enter(&state)) { + sleep_cpu = smp_processor_id(); ret = swsusp_save(); } else { - /* Clean kernel to PoC for secondary core startup */ - __flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START); + /* Clean kernel core startup/idle code to PoC */ + dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end); + dcache_clean_range(__idmap_text_start, __idmap_text_end); + + /* Clean kvm setup code to PoC? */ + if (el2_reset_needed()) + dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); /* * Tell the hibernation core that we've just restored @@ -267,6 +306,7 @@ */ in_suspend = 0; + sleep_cpu = -EINVAL; __cpu_suspend_exit(); } @@ -275,6 +315,33 @@ return ret; } +static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr) +{ + pte_t pte = *src_pte; + + if (pte_valid(pte)) { + /* + * Resume will overwrite areas that may be marked + * read only (code, rodata). Clear the RDONLY bit from + * the temporary mappings we use during restore. + */ + set_pte(dst_pte, pte_clear_rdonly(pte)); + } else if (debug_pagealloc_enabled() && !pte_none(pte)) { + /* + * debug_pagealloc will remove the PTE_VALID bit if + * the page isn't in use by the resume kernel. It may have + * been in use by the original kernel, in which case we need + * to put it back in our copy to do the restore. + * + * Before marking this entry valid, check that the pfn should + * be mapped. + */ + BUG_ON(!pfn_valid(pte_pfn(pte))); + + set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte))); + } +} + static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start, unsigned long end) { @@ -290,13 +357,7 @@ static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start, src_pte = pte_offset_kernel(src_pmd, start); do { - if (!pte_none(*src_pte)) - /* - * Resume will overwrite areas that may be marked - * read only (code, rodata). Clear the RDONLY bit from - * the temporary mappings we use during restore. - */
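The new _copy_pte() above relies on two pte helpers that this hunk does not show. A minimal sketch of plausible definitions, assuming the usual arm64 PTE_RDONLY and PTE_VALID attribute bits from pgtable-hwdef.h (the series defines the real helpers elsewhere):

#include <asm/pgtable.h>

/* Sketch only, not the patch's own code: drop the read-only attribute
 * so the restore path may write through the temporary mapping. */
static inline pte_t pte_clear_rdonly(pte_t pte)
{
	return __pte(pte_val(pte) & ~PTE_RDONLY);
}

/* Sketch only: mark a debug_pagealloc'd entry valid again. */
static inline pte_t pte_mkpresent(pte_t pte)
{
	return __pte(pte_val(pte) | PTE_VALID);
}

With helpers of this shape, the copied tables differ from the live ones only in the RDONLY/VALID bits that the restore path must override.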
- set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY)); + _copy_pte(dst_pte, src_pte, addr); } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); return 0; @@ -483,27 +544,12 @@ out: return rc; } -static int check_boot_cpu_online_pm_callback(struct notifier_block *nb, - unsigned long action, void *ptr) +int hibernate_resume_nonboot_cpu_disable(void) { - if (action == PM_HIBERNATION_PREPARE && - cpumask_first(cpu_online_mask) != 0) { - pr_warn("CPU0 is offline.\n"); - return notifier_from_errno(-ENODEV); + if (sleep_cpu < 0) { + pr_err("Failing to resume from hibernate on an unknown CPU.\n"); + return -ENODEV; } - return NOTIFY_OK; -} - -static int __init check_boot_cpu_online_init(void) -{ - /* - * Set this pm_notifier callback with a lower priority than - * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be - * called earlier to disable cpu hotplug before the cpu online check. - */ - pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX); - - return 0; + return freeze_secondary_cpus(sleep_cpu); } -core_initcall(check_boot_cpu_online_init); diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 26a6bf77d272..948b73148d56 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -857,7 +857,7 @@ void hw_breakpoint_thread_switch(struct task_struct *next) /* * CPU initialisation. */ -static void hw_breakpoint_reset(void *unused) +static int hw_breakpoint_reset(unsigned int cpu) { int i; struct perf_event **slots; @@ -888,28 +888,14 @@ static void hw_breakpoint_reset(void *unused) write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL); } } -} -static int hw_breakpoint_reset_notify(struct notifier_block *self, - unsigned long action, - void *hcpu) -{ - if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) { - local_irq_disable(); - hw_breakpoint_reset(NULL); - local_irq_enable(); - } - return NOTIFY_OK; + return 0; } -static struct notifier_block hw_breakpoint_reset_nb = { - .notifier_call = hw_breakpoint_reset_notify, -}; - #ifdef CONFIG_CPU_PM -extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)); +extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)); #else -static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) +static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)) { } #endif @@ -919,36 +905,34 @@ static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) */ static int __init arch_hw_breakpoint_init(void) { + int ret; + core_num_brps = get_num_brps(); core_num_wrps = get_num_wrps(); pr_info("found %d breakpoint and %d watchpoint registers.\n", core_num_brps, core_num_wrps); - cpu_notifier_register_begin(); - - /* - * Reset the breakpoint resources. We assume that a halting - * debugger will leave the world in a nice state for us. - */ - smp_call_function(hw_breakpoint_reset, NULL, 1); - hw_breakpoint_reset(NULL); - /* Register debug fault handlers. */ hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP, TRAP_HWBKPT, "hw-breakpoint handler"); hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP, TRAP_HWBKPT, "hw-watchpoint handler"); - /* Register hotplug notifier. */ - __register_cpu_notifier(&hw_breakpoint_reset_nb); - - cpu_notifier_register_done(); + /* + * Reset the breakpoint resources. We assume that a halting + * debugger will leave the world in a nice state for us. + */
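The hunk that follows registers hw_breakpoint_reset through the cpuhp state machine, replacing the removed notifier block. The general shape of such a conversion, as a sketch with illustrative names and a dynamic state rather than the patch's dedicated CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING constant:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Illustrative per-CPU callback: runs on each CPU as it comes online. */
static int example_cpu_online(unsigned int cpu)
{
	/* reinitialise per-CPU hardware state for @cpu here */
	return 0;	/* non-zero would fail the hotplug operation */
}

static int __init example_init(void)
{
	int ret;

	/* Invokes the callback on every CPU already online, then on each
	 * CPU that comes online later; no notifier boilerplate needed. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/example:online",
				example_cpu_online, NULL);
	return ret < 0 ? ret : 0;	/* DYN states return the state number */
}
device_initcall(example_init);

The teardown argument (NULL here) would undo the startup work when a CPU goes down; hw_breakpoint has nothing to undo, so the patch below passes NULL as well.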
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, + "CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING", + hw_breakpoint_reset, NULL); + if (ret) + pr_err("failed to register CPU hotplug notifier: %d\n", ret); /* Register cpu_suspend hw breakpoint restore hook */ cpu_suspend_set_dbg_restorer(hw_breakpoint_reset); - return 0; + return ret; } arch_initcall(arch_hw_breakpoint_init); diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 63f9432d05e8..6f2ac4fc66ca 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -96,7 +96,7 @@ static void __kprobes *patch_map(void *addr, int fixmap) if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) page = vmalloc_to_page(addr); - else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) + else if (!module) page = pfn_to_page(PHYS_PFN(__pa(addr))); else return addr; @@ -1202,6 +1202,19 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset) BUG(); } +s32 aarch64_insn_adrp_get_offset(u32 insn) +{ + BUG_ON(!aarch64_insn_is_adrp(insn)); + return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12; +} + +u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset) +{ + BUG_ON(!aarch64_insn_is_adrp(insn)); + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, + offset >> 12); +} + /* * Extract the Op/CR data from a msr/mrs instruction. */ diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index b05469173ba5..769f24ef628c 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -6,6 +6,7 @@ * published by the Free Software Foundation. */ +#include <linux/cache.h> #include <linux/crc32.h> #include <linux/init.h> #include <linux/libfdt.h> @@ -20,7 +21,7 @@ #include <asm/pgtable.h> #include <asm/sections.h> -u64 __read_mostly module_alloc_base; +u64 __ro_after_init module_alloc_base; u16 __initdata memstart_offset_seed; static __init u64 get_kaslr_seed(void *fdt) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 838ccf123307..a9310a69fffd 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -24,6 +24,7 @@ #include <asm/sysreg.h> #include <asm/virt.h> +#include <linux/acpi.h> #include <linux/of.h> #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> @@ -190,13 +191,23 @@ #define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED /* PMUv3 HW events mapping. */ + +/* + * ARMv8 architecturally defined events; not all of these may + * be supported on any given implementation. Undefined events will + * be disabled at run-time. + */ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE, [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND, }; /* ARM Cortex-A53 HW events mapping.
*/ @@ -258,6 +269,15 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL, + [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE, + [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL, + + [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL, + [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB, + + [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL, + [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB, + [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED, @@ -523,12 +543,6 @@ static struct attribute_group armv8_pmuv3_format_attr_group = { .attrs = armv8_pmuv3_format_attrs, }; -static const struct attribute_group *armv8_pmuv3_attr_groups[] = { - &armv8_pmuv3_events_attr_group, - &armv8_pmuv3_format_attr_group, - NULL, -}; - /* * Perf Events' indices */ @@ -905,9 +919,22 @@ static void armv8pmu_reset(void *info) static int armv8_pmuv3_map_event(struct perf_event *event) { - return armpmu_map_event(event, &armv8_pmuv3_perf_map, - &armv8_pmuv3_perf_cache_map, - ARMV8_PMU_EVTYPE_EVENT); + int hw_event_id; + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + + hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map, + &armv8_pmuv3_perf_cache_map, + ARMV8_PMU_EVTYPE_EVENT); + if (hw_event_id < 0) + return hw_event_id; + + /* disable micro/arch events not supported by this PMU */ + if ((hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS) && + !test_bit(hw_event_id, armpmu->pmceid_bitmap)) { + return -EOPNOTSUPP; + } + + return hw_event_id; } static int armv8_a53_map_event(struct perf_event *event) @@ -985,7 +1012,10 @@ static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) armv8_pmu_init(cpu_pmu); cpu_pmu->name = "armv8_pmuv3"; cpu_pmu->map_event = armv8_pmuv3_map_event; - cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv8_pmuv3_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv8_pmuv3_format_attr_group; return armv8pmu_probe_pmu(cpu_pmu); } @@ -994,7 +1024,10 @@ static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) armv8_pmu_init(cpu_pmu); cpu_pmu->name = "armv8_cortex_a53"; cpu_pmu->map_event = armv8_a53_map_event; - cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv8_pmuv3_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv8_pmuv3_format_attr_group; return armv8pmu_probe_pmu(cpu_pmu); } @@ -1003,7 +1036,10 @@ static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) armv8_pmu_init(cpu_pmu); cpu_pmu->name = "armv8_cortex_a57"; cpu_pmu->map_event = armv8_a57_map_event; - cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv8_pmuv3_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv8_pmuv3_format_attr_group; return armv8pmu_probe_pmu(cpu_pmu); } @@ -1012,7 +1048,10 @@ static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) armv8_pmu_init(cpu_pmu); cpu_pmu->name = "armv8_cortex_a72"; cpu_pmu->map_event = armv8_a57_map_event; - cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups; + 
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv8_pmuv3_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv8_pmuv3_format_attr_group; return armv8pmu_probe_pmu(cpu_pmu); } @@ -1021,7 +1060,10 @@ static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) armv8_pmu_init(cpu_pmu); cpu_pmu->name = "armv8_cavium_thunder"; cpu_pmu->map_event = armv8_thunder_map_event; - cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv8_pmuv3_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv8_pmuv3_format_attr_group; return armv8pmu_probe_pmu(cpu_pmu); } @@ -1030,7 +1072,10 @@ static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) armv8_pmu_init(cpu_pmu); cpu_pmu->name = "armv8_brcm_vulcan"; cpu_pmu->map_event = armv8_vulcan_map_event; - cpu_pmu->pmu.attr_groups = armv8_pmuv3_attr_groups; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = + &armv8_pmuv3_events_attr_group; + cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = + &armv8_pmuv3_format_attr_group; return armv8pmu_probe_pmu(cpu_pmu); } @@ -1044,21 +1089,32 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = { {}, }; +/* + * Non-DT systems have their micro/arch events probed at run-time. + * A fairly complete list of generic events is provided and ones that + * aren't supported by the current PMU are disabled. + */ +static const struct pmu_probe_info armv8_pmu_probe_table[] = { + PMU_PROBE(0, 0, armv8_pmuv3_init), /* enable all defined counters */ + { /* sentinel value */ } +}; + static int armv8_pmu_device_probe(struct platform_device *pdev) { - return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL); + if (acpi_disabled) + return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, + NULL); + + return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, + armv8_pmu_probe_table); } static struct platform_driver armv8_pmu_driver = { .driver = { - .name = "armv8-pmu", + .name = ARMV8_PMU_PDEV_NAME, .of_match_table = armv8_pmu_of_device_ids, }, .probe = armv8_pmu_device_probe, }; -static int __init register_armv8_pmu_driver(void) -{ - return platform_driver_register(&armv8_pmu_driver); -} -device_initcall(register_armv8_pmu_driver); +builtin_platform_driver(armv8_pmu_driver); diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c index 37e47a9d617e..d1731bf977ef 100644 --- a/arch/arm64/kernel/probes/decode-insn.c +++ b/arch/arm64/kernel/probes/decode-insn.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> +#include <linux/kallsyms.h> #include <asm/kprobes.h> #include <asm/insn.h> #include <asm/sections.h> @@ -122,7 +123,7 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) static bool __kprobes is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end) { - while (scan_start > scan_end) { + while (scan_start >= scan_end) { /* * atomic region starts from exclusive load and ends with * exclusive store. */
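The hunk's context ends inside is_probed_address_atomic(); for orientation, the backward scan the comment describes looks roughly like the sketch below, using the aarch64_insn_is_load_ex()/aarch64_insn_is_store_ex() decoders from asm/insn.h (a paraphrase, not the patch's verbatim body):

#include <linux/kprobes.h>
#include <asm/insn.h>

/* Sketch: walk backwards from the probe point. Seeing an exclusive
 * store first means any ldxr/stxr window has already closed; seeing an
 * exclusive load first means the probe would land inside one, where a
 * single-step exception would clear the exclusive monitor and make the
 * sequence retry forever. */
static bool in_exclusive_region(kprobe_opcode_t *scan_start,
				kprobe_opcode_t *scan_end)
{
	while (scan_start >= scan_end) {
		if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
			return false;
		if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
			return true;
		scan_start--;
	}
	return false;
}

The >= comparison mirrors the one-line fix above: the previous > test never examined the scan_end instruction itself.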
@@ -142,33 +143,30 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) { enum kprobe_insn decoded; kprobe_opcode_t insn = le32_to_cpu(*addr); - kprobe_opcode_t *scan_start = addr - 1; - kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) - struct module *mod; -#endif - - if (addr >= (kprobe_opcode_t *)_text && - scan_end < (kprobe_opcode_t *)_text) - scan_end = (kprobe_opcode_t *)_text; -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) - else { - preempt_disable(); - mod = __module_address((unsigned long)addr); - if (mod && within_module_init((unsigned long)addr, mod) && - !within_module_init((unsigned long)scan_end, mod)) - scan_end = (kprobe_opcode_t *)mod->init_layout.base; - else if (mod && within_module_core((unsigned long)addr, mod) && - !within_module_core((unsigned long)scan_end, mod)) - scan_end = (kprobe_opcode_t *)mod->core_layout.base; - preempt_enable(); + kprobe_opcode_t *scan_end = NULL; + unsigned long size = 0, offset = 0; + + /* + * If there's a symbol defined in front of and near enough to + * the probe address, assume it is the entry point to this + * code and use it to further limit how far back we search + * when determining if we're in an atomic sequence. If we could + * not find any symbol, skip the atomic test altogether as we + * could otherwise end up searching irrelevant text/literals. + * KPROBES depends on KALLSYMS so this last case should never + * happen. + */ + if (kallsyms_lookup_size_offset((unsigned long) addr, &size, &offset)) { + if (offset < (MAX_ATOMIC_CONTEXT_SIZE*sizeof(kprobe_opcode_t))) + scan_end = addr - (offset / sizeof(kprobe_opcode_t)); + else + scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; } -#endif decoded = arm_probe_decode_insn(insn, asi); - if (decoded == INSN_REJECTED || - is_probed_address_atomic(scan_start, scan_end)) - return INSN_REJECTED; + if (decoded != INSN_REJECTED && scan_end) + if (is_probed_address_atomic(addr - 1, scan_end)) + return INSN_REJECTED; return decoded; } diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index c6b0f40620d8..f5077ea7af6d 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -19,7 +19,7 @@ #include <linux/kasan.h> #include <linux/kernel.h> #include <linux/kprobes.h> -#include <linux/module.h> +#include <linux/extable.h> #include <linux/slab.h> #include <linux/stop_machine.h> #include <linux/stringify.h> @@ -31,7 +31,7 @@ #include <asm/insn.h> #include <asm/uaccess.h> #include <asm/irq.h> -#include <asm-generic/sections.h> +#include <asm/sections.h> #include "decode-insn.h" @@ -166,13 +166,18 @@ static void __kprobes set_current_kprobe(struct kprobe *p) } /* - * The D-flag (Debug mask) is set (masked) upon debug exception entry. - * Kprobes needs to clear (unmask) D-flag -ONLY- in case of recursive - * probe i.e. when probe hit from kprobe handler context upon - * executing the pre/post handlers. In this case we return with - * D-flag clear so that single-stepping can be carried-out. - * - * Leave D-flag set in all other cases. + * When PSTATE.D is set (masked), then software step exceptions cannot be + * generated. + * SPSR's D bit shows the value of PSTATE.D immediately before the + * exception was taken. PSTATE.D is set while entering into any exception + * mode; however, software clears it for any normal (non-debug-exception) + * mode in the exception entry. Therefore, when we enter the kprobe + breakpoint handler from any normal mode, the SPSR.D bit is already + cleared; it is set, however, when we enter from any debug exception + mode. + Since we always need to generate a single-step exception after a kprobe + breakpoint exception, we need to clear it unconditionally once + we are sure that the current breakpoint exception is for a kprobe. */
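In effect, the rewritten helper that follows only ever needs the unmask direction and the reentry special case goes away. A sketch of what the mask==0 path boils down to (illustrative name, not the patch's):

#include <asm/ptrace.h>

/* Sketch: clear the saved D bit so that, once this pt_regs is
 * restored, the pending single-step exception can be taken. */
static void clear_saved_debug_mask(struct pt_regs *regs)
{
	regs->pstate &= ~PSR_D_BIT;
}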
static void __kprobes spsr_set_debug_flag(struct pt_regs *regs, int mask) @@ -245,10 +250,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, set_ss_context(kcb, slot); /* mark pending ss */ - if (kcb->kprobe_status == KPROBE_REENTER) - spsr_set_debug_flag(regs, 0); - else - WARN_ON(regs->pstate & PSR_D_BIT); + spsr_set_debug_flag(regs, 0); /* IRQs and single stepping do not mix well. */ kprobes_save_local_irqflag(kcb, regs); @@ -333,8 +335,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) BUG(); kernel_disable_single_step(); - if (kcb->kprobe_status == KPROBE_REENTER) - spsr_set_debug_flag(regs, 1); if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); @@ -457,9 +457,6 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) kprobes_restore_local_irqflag(kcb, regs); kernel_disable_single_step(); - if (kcb->kprobe_status == KPROBE_REENTER) - spsr_set_debug_flag(regs, 1); - post_kprobe_handler(kcb, regs); } @@ -543,9 +540,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) bool arch_within_kprobe_blacklist(unsigned long addr) { - extern char __idmap_text_start[], __idmap_text_end[]; - extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; - if ((addr >= (unsigned long)__kprobes_text_start && addr < (unsigned long)__kprobes_text_end) || (addr >= (unsigned long)__entry_text_start && diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 6cd2612236dc..a4f5f766af08 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -202,7 +202,7 @@ void show_regs(struct pt_regs * regs) static void tls_thread_flush(void) { - asm ("msr tpidr_el0, xzr"); + write_sysreg(0, tpidr_el0); if (is_compat_task()) { current->thread.tp_value = 0; @@ -213,7 +213,7 @@ static void tls_thread_flush(void) * with a stale shadow state during context switch. */ barrier(); - asm ("msr tpidrro_el0, xzr"); + write_sysreg(0, tpidrro_el0); } } @@ -253,7 +253,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, * Read the current TLS pointer from tpidr_el0 as it may be * out-of-sync with the saved value. */ - asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p))); + *task_user_tls(p) = read_sysreg(tpidr_el0); if (stack_start) { if (is_compat_thread(task_thread_info(p))) @@ -289,17 +289,15 @@ static void tls_thread_switch(struct task_struct *next) { unsigned long tpidr, tpidrro; - asm("mrs %0, tpidr_el0" : "=r" (tpidr)); + tpidr = read_sysreg(tpidr_el0); *task_user_tls(current) = tpidr; tpidr = *task_user_tls(next); tpidrro = is_compat_thread(task_thread_info(next)) ? next->thread.tp_value : 0;
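This hunk, like several others in the series (sys_compat.c, kvm/sys_regs.c), swaps open-coded mrs/msr assembly for the read_sysreg()/write_sysreg() accessors. Their definitions are essentially the following (paraphrased from asm/sysreg.h as a sketch; consult the tree for the authoritative version):

#include <linux/stringify.h>
#include <linux/types.h>

/* One mrs instruction wrapped in a typed expression. */
#define read_sysreg(r) ({					\
	u64 __val;						\
	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
	__val;							\
})

/* One msr instruction; "rZ" lets the compiler encode 0 as xzr. */
#define write_sysreg(v, r) do {					\
	u64 __val = (u64)(v);					\
	asm volatile("msr " __stringify(r) ", %x0"		\
		     : : "rZ" (__val));				\
} while (0)

Besides readability, the macro form lets the compiler schedule the surrounding code and drops the hand-written register constraints that each call site used to repeat.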
- asm( - " msr tpidr_el0, %0\n" - " msr tpidrro_el0, %1" - : : "r" (tpidr), "r" (tpidrro)); + write_sysreg(tpidr, tpidr_el0); + write_sysreg(tpidrro, tpidrro_el0); } /* Restore the UAO state depending on next's addr_limit */ diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index 51b73cdde287..ce704a4aeadd 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -34,7 +34,7 @@ ENTRY(arm64_relocate_new_kernel) /* Setup the list loop variables. */ mov x17, x1 /* x17 = kimage_start */ mov x16, x0 /* x16 = kimage_head */ - dcache_line_size x15, x0 /* x15 = dcache line size */ + raw_dcache_line_size x15, x0 /* x15 = dcache line size */ mov x14, xzr /* x14 = entry ptr */ mov x13, xzr /* x13 = copy dest */ diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 536dce22fe76..f534f492a268 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -206,10 +206,15 @@ static void __init request_standard_resources(void) for_each_memblock(memory, region) { res = alloc_bootmem_low(sizeof(*res)); - res->name = "System RAM"; + if (memblock_is_nomap(region)) { + res->name = "reserved"; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + } else { + res->name = "System RAM"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + } res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; - res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); @@ -228,7 +233,7 @@ void __init setup_arch(char **cmdline_p) { pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id()); - sprintf(init_utsname()->machine, ELF_PLATFORM); + sprintf(init_utsname()->machine, UTS_MACHINE); init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index a8eafdbc7cb8..404dd67080b9 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -402,15 +402,31 @@ static void do_signal(struct pt_regs *regs) asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int thread_flags) { - if (thread_flags & _TIF_SIGPENDING) - do_signal(regs); - - if (thread_flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } - - if (thread_flags & _TIF_FOREIGN_FPSTATE) - fpsimd_restore_current_state(); + /* + * The assembly code enters us with IRQs off, but it hasn't + * informed the tracing code of that for efficiency reasons. + * Update the trace code with the current status.
+ */ + trace_hardirqs_off(); + do { + if (thread_flags & _TIF_NEED_RESCHED) { + schedule(); + } else { + local_irq_enable(); + + if (thread_flags & _TIF_SIGPENDING) + do_signal(regs); + + if (thread_flags & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + + if (thread_flags & _TIF_FOREIGN_FPSTATE) + fpsimd_restore_current_state(); + } + local_irq_disable(); + thread_flags = READ_ONCE(current_thread_info()->flags); + } while (thread_flags & _TIF_WORK_MASK); } diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index ccf79d849e0a..b8799e7c79de 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -73,10 +73,9 @@ ENTRY(__cpu_suspend_enter) str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP] /* find the mpidr_hash */ - ldr x1, =sleep_save_stash - ldr x1, [x1] + ldr_l x1, sleep_save_stash mrs x7, mpidr_el1 - ldr x9, =mpidr_hash + adr_l x9, mpidr_hash ldr x10, [x9, #MPIDR_HASH_MASK] /* * Following code relies on the struct mpidr_hash @@ -95,36 +94,30 @@ ENTRY(__cpu_suspend_enter) mov x0, #1 ret ENDPROC(__cpu_suspend_enter) - .ltorg + .pushsection ".idmap.text", "ax" ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly + bl __cpu_setup /* enable the MMU early - so we can access sleep_save_stash by va */ - adr_l lr, __enable_mmu /* __cpu_setup will return here */ - adr_l x27, _resume_switched /* __enable_mmu will branch here */ - adrp x25, idmap_pg_dir - adrp x26, swapper_pg_dir - b __cpu_setup -ENDPROC(cpu_resume) - - .pushsection ".idmap.text", "ax" -_resume_switched: + bl __enable_mmu ldr x8, =_cpu_resume br x8 -ENDPROC(_resume_switched) +ENDPROC(cpu_resume) .ltorg .popsection ENTRY(_cpu_resume) mrs x1, mpidr_el1 - adrp x8, mpidr_hash - add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address - /* retrieve mpidr_hash members to compute the hash */ + adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address + + /* retrieve mpidr_hash members to compute the hash */ ldr x2, [x8, #MPIDR_HASH_MASK] ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2 - /* x7 contains hash index, let's use it to grab context pointer */ + + /* x7 contains hash index, let's use it to grab context pointer */ ldr_l x0, sleep_save_stash ldr x0, [x0, x7, lsl #3] add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 3ff173e92582..d3f151cfd4a1 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -233,7 +233,7 @@ asmlinkage void secondary_start_kernel(void) * this CPU ticks all of those. If it doesn't, the CPU will * fail to come online. */ - verify_local_cpu_capabilities(); + check_local_cpu_capabilities(); if (cpu_ops[cpu]->cpu_postboot) cpu_ops[cpu]->cpu_postboot(); @@ -431,8 +431,19 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_prepare_boot_cpu(void) { set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + /* + * Initialise the static keys early as they may be enabled by the + * cpufeature code. + */ + jump_label_init(); cpuinfo_store_boot_cpu(); save_boot_cpu_run_el(); + /* + * Run the errata workaround checks on the boot CPU, once we have + * initialised the cpu feature infrastructure from + * cpuinfo_store_boot_cpu() above. + */ + update_cpu_errata_workarounds(); }
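The jump_label_init() call added above is what makes it safe for the cpufeature code to flip static keys this early. The consuming pattern looks like this (sketch; key and function names are illustrative):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_feature);

/* Called once a CPU feature has been detected. */
void example_feature_detected(void)
{
	static_branch_enable(&example_feature);	/* patches the branch site */
}

int example_hot_path(void)
{
	/* Compiles to a nop until the key is enabled, then to a jump;
	 * no load or compare on the hot path. */
	if (static_branch_unlikely(&example_feature))
		return 1;
	return 0;
}

Enabling a key before jump_label_init() has run would patch text that is not yet prepared, hence the move to the front of smp_prepare_boot_cpu().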
static u64 __init of_get_cpu_mpidr(struct device_node *dn) @@ -613,6 +624,7 @@ static void __init of_parse_and_init_cpus(void) } bootcpu_valid = true; + early_map_cpu_to_node(0, of_node_to_nid(dn)); /* * cpu_logical_map has already been diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 18a71bcd26ee..9a00eee9acc8 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -29,7 +29,8 @@ #include <asm/smp_plat.h> extern void secondary_holding_pen(void); -volatile unsigned long secondary_holding_pen_release = INVALID_HWID; +volatile unsigned long __section(".mmuoff.data.read") +secondary_holding_pen_release = INVALID_HWID; static phys_addr_t cpu_release_addr[NR_CPUS]; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index d9751a4769e7..c2efddfca18c 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -43,6 +43,9 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) unsigned long fp = frame->fp; unsigned long irq_stack_ptr; + if (!tsk) + tsk = current; + /* * Switching between stacks is valid when tracing current and in * non-preemptible context. @@ -67,7 +70,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); #ifdef CONFIG_FUNCTION_GRAPH_TRACER - if (tsk && tsk->ret_stack && + if (tsk->ret_stack && (frame->pc == (unsigned long)return_to_handler)) { /* * This is a case where function graph tracer has @@ -152,6 +155,27 @@ static int save_trace(struct stackframe *frame, void *d) return trace->nr_entries >= trace->max_entries; } +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +{ + struct stack_trace_data data; + struct stackframe frame; + + data.trace = trace; + data.skip = trace->skip; + data.no_sched_functions = 0; + + frame.fp = regs->regs[29]; + frame.sp = regs->sp; + frame.pc = regs->pc; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + frame.graph = current->curr_ret_stack; +#endif + + walk_stackframe(current, &frame, save_trace, &data); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { struct stack_trace_data data; diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index b616e365cee3..ad734142070d 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -23,8 +23,8 @@ unsigned long *sleep_save_stash; * time the notifier runs debug exceptions might have been enabled already, * with HW breakpoint register contents still in an unknown state. */ -static void (*hw_breakpoint_restore)(void *); -void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) +static int (*hw_breakpoint_restore)(unsigned int); +void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)) { /* Prevent multiple restore hook initializations */ if (WARN_ON(hw_breakpoint_restore)) @@ -34,6 +34,8 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) void notrace __cpu_suspend_exit(void) { + unsigned int cpu = smp_processor_id(); + /* * We are resuming from reset with the idmap active in TTBR0_EL1. * We must uninstall the idmap and restore the expected MMU @@ -45,7 +47,7 @@ * Restore per-cpu offset before any kernel * subsystem relying on it has a chance to run.
*/ - set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + set_my_cpu_offset(per_cpu_offset(cpu)); /* * Restore HW breakpoint registers to sane values @@ -53,7 +55,7 @@ void notrace __cpu_suspend_exit(void) * through local_dbg_restore. */ if (hw_breakpoint_restore) - hw_breakpoint_restore(NULL); + hw_breakpoint_restore(cpu); } /* diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 28c511b06edf..abaf582fc7a8 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -94,7 +94,7 @@ long compat_arm_syscall(struct pt_regs *regs) * See comment in tls_thread_flush. */ barrier(); - asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0])); + write_sysreg(regs->regs[0], tpidrro_el0); return 0; default: diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index e04f83873af7..5ff020f8fb7f 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -142,6 +142,11 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) unsigned long irq_stack_ptr; int skip; + pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); + + if (!tsk) + tsk = current; + /* * Switching between stacks is valid when tracing current and in * non-preemptible context. @@ -151,11 +156,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) else irq_stack_ptr = 0; - pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); - - if (!tsk) - tsk = current; - if (tsk == current) { frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; @@ -447,36 +447,29 @@ void cpu_enable_cache_maint_trap(void *__unused) : "=r" (res) \ : "r" (address), "i" (-EFAULT) ) -asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) +static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) { unsigned long address; - int ret; - - /* if this is a write with: Op0=1, Op2=1, Op1=3, CRn=7 */ - if ((esr & 0x01fffc01) == 0x0012dc00) { - int rt = (esr >> 5) & 0x1f; - int crm = (esr >> 1) & 0x0f; + int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; + int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; + int ret = 0; - address = (rt == 31) ? 0 : regs->regs[rt]; + address = (rt == 31) ? 
0 : regs->regs[rt]; - switch (crm) { - case 11: /* DC CVAU, gets promoted */ - __user_cache_maint("dc civac", address, ret); - break; - case 10: /* DC CVAC, gets promoted */ - __user_cache_maint("dc civac", address, ret); - break; - case 14: /* DC CIVAC */ - __user_cache_maint("dc civac", address, ret); - break; - case 5: /* IC IVAU */ - __user_cache_maint("ic ivau", address, ret); - break; - default: - force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); - return; - } - } else { + switch (crm) { + case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */ + __user_cache_maint("dc civac", address, ret); + break; + case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */ + __user_cache_maint("dc civac", address, ret); + break; + case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC: /* DC CIVAC */ + __user_cache_maint("dc civac", address, ret); + break; + case ESR_ELx_SYS64_ISS_CRM_IC_IVAU: /* IC IVAU */ + __user_cache_maint("ic ivau", address, ret); + break; + default: force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); return; } @@ -487,6 +480,48 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) regs->pc += 4; } +static void ctr_read_handler(unsigned int esr, struct pt_regs *regs) +{ + int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; + + regs->regs[rt] = arm64_ftr_reg_ctrel0.sys_val; + regs->pc += 4; +} + +struct sys64_hook { + unsigned int esr_mask; + unsigned int esr_val; + void (*handler)(unsigned int esr, struct pt_regs *regs); +}; + +static struct sys64_hook sys64_hooks[] = { + { + .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK, + .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL, + .handler = user_cache_maint_handler, + }, + { + /* Trap read access to CTR_EL0 */ + .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK, + .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ, + .handler = ctr_read_handler, + }, + {}, +}; + +asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) +{ + struct sys64_hook *hook; + + for (hook = sys64_hooks; hook->handler; hook++) + if ((hook->esr_mask & esr) == hook->esr_val) { + hook->handler(esr, regs); + return; + } + + force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); +} + long compat_arm_syscall(struct pt_regs *regs); asmlinkage long do_ni_syscall(struct pt_regs *regs) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 076312b17d4f..a2c2478e7d78 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -18,12 +18,13 @@ * Author: Will Deacon <will.deacon@arm.com> */ -#include <linux/kernel.h> +#include <linux/cache.h> #include <linux/clocksource.h> #include <linux/elf.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/gfp.h> +#include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/signal.h> @@ -37,8 +38,7 @@ #include <asm/vdso_datapage.h> extern char vdso_start, vdso_end; -static unsigned long vdso_pages; -static struct page **vdso_pagelist; +static unsigned long vdso_pages __ro_after_init; /* * The vDSO data page. @@ -53,9 +53,9 @@ struct vdso_data *vdso_data = &vdso_data_store.data; /* * Create and map the vectors page for AArch32 tasks. 
*/ -static struct page *vectors_page[1]; +static struct page *vectors_page[1] __ro_after_init; -static int alloc_vectors_page(void) +static int __init alloc_vectors_page(void) { extern char __kuser_helper_start[], __kuser_helper_end[]; extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; @@ -88,7 +88,7 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr = AARCH32_VECTORS_BASE; - static struct vm_special_mapping spec = { + static const struct vm_special_mapping spec = { .name = "[vectors]", .pages = vectors_page, @@ -110,11 +110,19 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) } #endif /* CONFIG_COMPAT */ -static struct vm_special_mapping vdso_spec[2]; +static struct vm_special_mapping vdso_spec[2] __ro_after_init = { + { + .name = "[vvar]", + }, + { + .name = "[vdso]", + }, +}; static int __init vdso_init(void) { int i; + struct page **vdso_pagelist; if (memcmp(&vdso_start, "\177ELF", 4)) { pr_err("vDSO is not a valid ELF object!\n"); @@ -138,16 +146,8 @@ static int __init vdso_init(void) for (i = 0; i < vdso_pages; i++) vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i); - /* Populate the special mapping structures */ - vdso_spec[0] = (struct vm_special_mapping) { - .name = "[vvar]", - .pages = vdso_pagelist, - }; - - vdso_spec[1] = (struct vm_special_mapping) { - .name = "[vdso]", - .pages = &vdso_pagelist[1], - }; + vdso_spec[0].pages = &vdso_pagelist[0]; + vdso_spec[1].pages = &vdso_pagelist[1]; return 0; } @@ -201,7 +201,7 @@ up_fail: */ void update_vsyscall(struct timekeeper *tk) { - u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); + u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct; ++vdso_data->tb_seq_count; smp_wmb(); diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 659963d40bb4..5ce9b2929e0d 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -185,6 +185,25 @@ SECTIONS _data = .; _sdata = .; RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + + /* + * Data written with the MMU off but read with the MMU on requires + * cache lines to be invalidated, discarding up to a Cache Writeback + * Granule (CWG) of data from the cache. Keep the section that + * requires this type of maintenance to be in its own Cache Writeback + * Granule (CWG) area so the cache maintenance operations don't + * interfere with adjacent data. + */ + .mmuoff.data.write : ALIGN(SZ_2K) { + __mmuoff_data_start = .; + *(.mmuoff.data.write) + } + . = ALIGN(SZ_2K); + .mmuoff.data.read : { + *(.mmuoff.data.read) + __mmuoff_data_end = .; + } + PECOFF_EDATA_PADDING _edata = .; diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 7ce931565151..2726635dceba 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -46,10 +46,6 @@ alternative_if_not ARM64_HAS_VIRT_HOST_EXTN hvc #0 ldr lr, [sp], #16 ret -alternative_else +alternative_else_nop_endif b __vhe_hyp_call - nop - nop - nop -alternative_endif ENDPROC(__kvm_call_hyp) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e51367d159d0..f302fdb3a030 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -36,6 +36,7 @@ #include <asm/kvm_host.h> #include <asm/kvm_mmu.h> #include <asm/perf_event.h> +#include <asm/sysreg.h> #include <trace/events/kvm.h> @@ -67,11 +68,9 @@ static u32 get_ccsidr(u32 csselr) /* Make sure no one else changes CSSELR during this! */
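The warning closing this hunk exists because CSSELR_EL1 is an indirection register: it selects which cache CCSIDR_EL1 describes, so the select and the read must not be separated by anything that could reprogram CSSELR. A sketch of the same pattern with an explicit flags variable (illustrative name; the real get_ccsidr() follows below):

#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/sysreg.h>

/* Sketch: select a cache level/type, then read its geometry, with
 * interrupts masked across the CSSELR/CCSIDR pair. */
static u32 read_cache_geometry(u32 csselr)
{
	unsigned long flags;
	u32 ccsidr;

	local_irq_save(flags);
	write_sysreg(csselr, csselr_el1);
	isb();			/* order the select before the read */
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_restore(flags);

	return ccsidr;
}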
local_irq_disable(); - /* Put value into CSSELR */ - asm volatile("msr csselr_el1, %x0" : : "r" (csselr)); + write_sysreg(csselr, csselr_el1); isb(); - /* Read result out of CCSIDR */ - asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr)); + ccsidr = read_sysreg(ccsidr_el1); local_irq_enable(); return ccsidr; @@ -174,9 +173,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, if (p->is_write) { return ignore_write(vcpu, p); } else { - u32 val; - asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); - p->regval = val; + p->regval = read_sysreg(dbgauthstatus_el1); return true; } } @@ -429,10 +426,7 @@ static void reset_wcr(struct kvm_vcpu *vcpu, static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { - u64 amair; - - asm volatile("mrs %0, amair_el1\n" : "=r" (amair)); - vcpu_sys_reg(vcpu, AMAIR_EL1) = amair; + vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1); } static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) @@ -456,8 +450,9 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 pmcr, val; - asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr)); - /* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) is reset to UNKNOWN + pmcr = read_sysreg(pmcr_el0); + /* + * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN * except PMCR.E resetting to zero. */ val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) @@ -557,9 +552,9 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return false; if (!(p->Op2 & 1)) - asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid)); + pmceid = read_sysreg(pmceid0_el0); else - asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid)); + pmceid = read_sysreg(pmceid1_el0); p->regval = pmceid; @@ -1833,11 +1828,7 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, static void get_##reg(struct kvm_vcpu *v, \ const struct sys_reg_desc *r) \ { \ - u64 val; \ - \ - asm volatile("mrs %0, " __stringify(reg) "\n" \ - : "=r" (val)); \ - ((struct sys_reg_desc *)r)->val = val; \ + ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \ } FUNCTION_INVARIANT(midr_el1) diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c index ed90578fa120..46af7186bca6 100644 --- a/arch/arm64/kvm/sys_regs_generic_v8.c +++ b/arch/arm64/kvm/sys_regs_generic_v8.c @@ -26,6 +26,7 @@ #include <asm/kvm_host.h> #include <asm/kvm_emulate.h> #include <asm/kvm_coproc.h> +#include <asm/sysreg.h> #include <linux/init.h> #include "sys_regs.h" @@ -43,10 +44,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu, static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { - u64 actlr; - - asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr)); - vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr; + vcpu_sys_reg(vcpu, ACTLR_EL1) = read_sysreg(actlr_el1); } /* diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S index 4c1e700840b6..c3cd65e31814 100644 --- a/arch/arm64/lib/copy_page.S +++ b/arch/arm64/lib/copy_page.S @@ -29,14 +29,11 @@ * x1 - src */ ENTRY(copy_page) -alternative_if_not ARM64_HAS_NO_HW_PREFETCH - nop - nop -alternative_else +alternative_if ARM64_HAS_NO_HW_PREFETCH # Prefetch two cache lines ahead.
prfm pldl1strm, [x1, #128] prfm pldl1strm, [x1, #256] -alternative_endif +alternative_else_nop_endif ldp x2, x3, [x1] ldp x4, x5, [x1, #16] @@ -52,11 +49,9 @@ alternative_endif 1: subs x18, x18, #128 -alternative_if_not ARM64_HAS_NO_HW_PREFETCH - nop -alternative_else +alternative_if ARM64_HAS_NO_HW_PREFETCH prfm pldl1strm, [x1, #384] -alternative_endif +alternative_else_nop_endif stnp x2, x3, [x0] ldp x2, x3, [x1] diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 07d7352d7c38..58b5a906ff78 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -105,19 +105,20 @@ ENTRY(__clean_dcache_area_pou) ENDPROC(__clean_dcache_area_pou) /* - * __inval_cache_range(start, end) - * - start - start address of region - * - end - end address of region + * __dma_inv_area(start, size) + * - start - virtual start address of region + * - size - size in question */ -ENTRY(__inval_cache_range) +__dma_inv_area: + add x1, x1, x0 /* FALLTHROUGH */ /* - * __dma_inv_range(start, end) - * - start - virtual start address of region - * - end - virtual end address of region + * __inval_cache_range(start, end) + * - start - start address of region + * - end - end address of region */ -__dma_inv_range: +ENTRY(__inval_cache_range) dcache_line_size x2, x3 sub x3, x2, #1 tst x1, x3 // end cache line aligned? @@ -136,46 +137,43 @@ __dma_inv_range: dsb sy ret ENDPIPROC(__inval_cache_range) -ENDPROC(__dma_inv_range) +ENDPROC(__dma_inv_area) + +/* + * __clean_dcache_area_poc(kaddr, size) + * + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * are cleaned to the PoC. + * + * - kaddr - kernel address + * - size - size in question + */ +ENTRY(__clean_dcache_area_poc) + /* FALLTHROUGH */ /* - * __dma_clean_range(start, end) + * __dma_clean_area(start, size) * - start - virtual start address of region - * - end - virtual end address of region + * - size - size in question */ -__dma_clean_range: - dcache_line_size x2, x3 - sub x3, x2, #1 - bic x0, x0, x3 -1: -alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE - dc cvac, x0 -alternative_else - dc civac, x0 -alternative_endif - add x0, x0, x2 - cmp x0, x1 - b.lo 1b - dsb sy +__dma_clean_area: + dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret -ENDPROC(__dma_clean_range) +ENDPIPROC(__clean_dcache_area_poc) +ENDPROC(__dma_clean_area) /* - * __dma_flush_range(start, end) + * __dma_flush_area(start, size) + * + * clean & invalidate D / U line + * * - start - virtual start address of region - * - end - virtual end address of region + * - size - size in question */ -ENTRY(__dma_flush_range) - dcache_line_size x2, x3 - sub x3, x2, #1 - bic x0, x0, x3 -1: dc civac, x0 // clean & invalidate D / U line - add x0, x0, x2 - cmp x0, x1 - b.lo 1b - dsb sy +ENTRY(__dma_flush_area) + dcache_by_line_op civac, sy, x0, x1, x2, x3 ret -ENDPIPROC(__dma_flush_range) +ENDPIPROC(__dma_flush_area) /* * __dma_map_area(start, size, dir) @@ -184,10 +182,9 @@ ENDPIPROC(__dma_flush_range) * - dir - DMA direction */ ENTRY(__dma_map_area) - add x1, x1, x0 cmp w2, #DMA_FROM_DEVICE - b.eq __dma_inv_range - b __dma_clean_range + b.eq __dma_inv_area + b __dma_clean_area ENDPIPROC(__dma_map_area) /* @@ -197,8 +194,7 @@ ENDPIPROC(__dma_map_area) * - dir - DMA direction */ ENTRY(__dma_unmap_area) - add x1, x1, x0 cmp w2, #DMA_TO_DEVICE - b.ne __dma_inv_range + b.ne __dma_inv_area ret ENDPIPROC(__dma_unmap_area) diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index c4284c432ae8..bdacead5b802 100644 --- a/arch/arm64/mm/dma-mapping.c +++ 
b/arch/arm64/mm/dma-mapping.c @@ -20,6 +20,7 @@ #include <linux/gfp.h> #include <linux/acpi.h> #include <linux/bootmem.h> +#include <linux/cache.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/genalloc.h> @@ -30,7 +31,7 @@ #include <asm/cacheflush.h> -static int swiotlb __read_mostly; +static int swiotlb __ro_after_init; static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, bool coherent) @@ -168,7 +169,7 @@ static void *__dma_alloc(struct device *dev, size_t size, return ptr; /* remove any dirty cache lines on the kernel alias */ - __dma_flush_range(ptr, ptr + size); + __dma_flush_area(ptr, size); /* create a coherent mapping */ page = virt_to_page(ptr); @@ -387,7 +388,7 @@ static int __init atomic_pool_init(void) void *page_addr = page_address(page); memset(page_addr, 0, atomic_pool_size); - __dma_flush_range(page_addr, page_addr + atomic_pool_size); + __dma_flush_area(page_addr, atomic_pool_size); atomic_pool = gen_pool_create(PAGE_SHIFT, -1); if (!atomic_pool) @@ -548,7 +549,7 @@ fs_initcall(dma_debug_do_init); /* Thankfully, all cache ops are by VA so we can ignore phys here */ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys) { - __dma_flush_range(virt, virt + PAGE_SIZE); + __dma_flush_area(virt, PAGE_SIZE); } static void *__iommu_alloc_attrs(struct device *dev, size_t size, diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 81acd4706878..c9f118cd812b 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -2,7 +2,7 @@ * Based on arch/arm/mm/extable.c */ -#include <linux/module.h> +#include <linux/extable.h> #include <linux/uaccess.h> int fixup_exception(struct pt_regs *regs) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 05d2bd776c69..53d9159662fe 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -18,7 +18,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#include <linux/module.h> +#include <linux/extable.h> #include <linux/signal.h> #include <linux/mm.h> #include <linux/hardirq.h> @@ -251,8 +251,7 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr, good_area: /* * Check that the permissions on the VMA allow for the fault which - * occurred. If we encountered a write or exec fault, we must have - * appropriate permissions, otherwise we allow any permission. + * occurred. 
*/ if (!(vma->vm_flags & vm_flags)) { fault = VM_FAULT_BADACCESS; @@ -288,7 +287,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, struct task_struct *tsk; struct mm_struct *mm; int fault, sig, code; - unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; + unsigned long vm_flags = VM_READ | VM_WRITE; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; if (notify_page_fault(regs, esr)) diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 43a76b07eb32..8377329d8c97 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -25,8 +25,6 @@ #include <asm/cachetype.h> #include <asm/tlbflush.h> -#include "mm.h" - void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index bbb7ee76e319..21c489bdeb4e 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -23,6 +23,7 @@ #include <linux/swap.h> #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/cache.h> #include <linux/mman.h> #include <linux/nodemask.h> #include <linux/initrd.h> @@ -34,6 +35,7 @@ #include <linux/dma-contiguous.h> #include <linux/efi.h> #include <linux/swiotlb.h> +#include <linux/vmalloc.h> #include <asm/boot.h> #include <asm/fixmap.h> @@ -47,16 +49,14 @@ #include <asm/tlb.h> #include <asm/alternative.h> -#include "mm.h" - /* * We need to be able to catch inadvertent references to memstart_addr * that occur (potentially in generic code) before arm64_memblock_init() * executes, which assigns it its actual value. So use a default value * that cannot be mistaken for a real physical address. */ -s64 memstart_addr __read_mostly = -1; -phys_addr_t arm64_dma_phys_limit __read_mostly; +s64 memstart_addr __ro_after_init = -1; +phys_addr_t arm64_dma_phys_limit __ro_after_init; #ifdef CONFIG_BLK_DEV_INITRD static int __init early_initrd(char *p) @@ -485,7 +485,12 @@ void free_initmem(void) { free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)), 0, "unused kernel"); - fixup_init(); + /* + * Unmap the __init region but leave the VM area in place. This + * prevents the region from being reused for kernel modules, which + * is not supported by kallsyms. + */ + unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); } #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h deleted file mode 100644 index 71fe98985455..000000000000 --- a/arch/arm64/mm/mm.h +++ /dev/null @@ -1,2 +0,0 @@ - -void fixup_init(void); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 4989948d1feb..05615a3fdc6f 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -17,6 +17,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/cache.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -42,11 +43,9 @@ #include <asm/memblock.h> #include <asm/mmu_context.h> -#include "mm.h" - u64 idmap_t0sz = TCR_T0SZ(VA_BITS); -u64 kimage_voffset __read_mostly; +u64 kimage_voffset __ro_after_init; EXPORT_SYMBOL(kimage_voffset); /* @@ -399,16 +398,6 @@ void mark_rodata_ro(void) section_size, PAGE_KERNEL_RO); } -void fixup_init(void) -{ - /* - * Unmap the __init region but leave the VM area in place. This - * prevents the region from being reused for kernel modules, which - * is not supported by kallsyms. 
- */ - unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); -} - static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, pgprot_t prot, struct vm_struct *vma) { diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 5bb15eab6f00..778a985c8a70 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -17,6 +17,8 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#define pr_fmt(fmt) "NUMA: " fmt + #include <linux/acpi.h> #include <linux/bootmem.h> #include <linux/memblock.h> @@ -24,6 +26,7 @@ #include <linux/of.h> #include <asm/acpi.h> +#include <asm/sections.h> struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; EXPORT_SYMBOL(node_data); @@ -38,10 +41,9 @@ static __init int numa_parse_early_param(char *opt) { if (!opt) return -EINVAL; - if (!strncmp(opt, "off", 3)) { - pr_info("%s\n", "NUMA turned off"); + if (!strncmp(opt, "off", 3)) numa_off = true; - } + return 0; } early_param("numa", numa_parse_early_param); @@ -93,7 +95,6 @@ void numa_clear_node(unsigned int cpu) */ static void __init setup_node_to_cpumask_map(void) { - unsigned int cpu; int node; /* setup nr_node_ids if not done yet */ @@ -106,11 +107,8 @@ static void __init setup_node_to_cpumask_map(void) cpumask_clear(node_to_cpumask_map[node]); } - for_each_possible_cpu(cpu) - set_cpu_numa_node(cpu, NUMA_NO_NODE); - /* cpumask_of_node() will now work */ - pr_debug("NUMA: Node to cpumask map for %d nodes\n", nr_node_ids); + pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); } /* @@ -118,18 +116,77 @@ static void __init setup_node_to_cpumask_map(void) */ void numa_store_cpu_info(unsigned int cpu) { - map_cpu_to_node(cpu, numa_off ? 0 : cpu_to_node_map[cpu]); + map_cpu_to_node(cpu, cpu_to_node_map[cpu]); } void __init early_map_cpu_to_node(unsigned int cpu, int nid) { /* fallback to node 0 */ - if (nid < 0 || nid >= MAX_NUMNODES) + if (nid < 0 || nid >= MAX_NUMNODES || numa_off) nid = 0; cpu_to_node_map[cpu] = nid; + + /* + * We should set the numa node of cpu0 as soon as possible, because it + * has already been set up online before. cpu_to_node(0) will soon be + * called. + */ + if (!cpu) + set_cpu_numa_node(cpu, nid); +} + +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(__per_cpu_offset); + +static int __init early_cpu_to_node(int cpu) +{ + return cpu_to_node_map[cpu]; +} + +static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) +{ + return node_distance(from, to); } +static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, + size_t align) +{ + int nid = early_cpu_to_node(cpu); + + return memblock_virt_alloc_try_nid(size, align, + __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); +} + +static void __init pcpu_fc_free(void *ptr, size_t size) +{ + memblock_free_early(__pa(ptr), size); +} + +void __init setup_per_cpu_areas(void) +{ + unsigned long delta; + unsigned int cpu; + int rc; + + /* + * Always reserve area for module percpu variables. That's + * what the legacy allocator did. 
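The early_map_cpu_to_node() change above folds the numa_off fallback into map time and seeds CPU 0's node immediately, since cpu_to_node(0) is called before the secondary CPUs come up. An illustrative call sequence (CPU and node numbers made up):

    early_map_cpu_to_node(1, 3);   /* firmware says CPU 1 is on node 3;
                                      recorded as node 0 instead if numa_off */
    ...
    numa_store_cpu_info(1);        /* at CPU bring-up:
                                      map_cpu_to_node(1, cpu_to_node_map[1]) */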
+ */ + rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, + pcpu_cpu_distance, + pcpu_fc_alloc, pcpu_fc_free); + if (rc < 0) + panic("Failed to initialize percpu areas."); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) + __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; +} +#endif + /** * numa_add_memblk - Set node id to memblk * @nid: NUMA node ID of the new memblk @@ -145,13 +202,13 @@ int __init numa_add_memblk(int nid, u64 start, u64 end) ret = memblock_set_node(start, (end - start), &memblock.memory, nid); if (ret < 0) { - pr_err("NUMA: memblock [0x%llx - 0x%llx] failed to add on node %d\n", + pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", start, (end - 1), nid); return ret; } node_set(nid, numa_nodes_parsed); - pr_info("NUMA: Adding memblock [0x%llx - 0x%llx] on node %d\n", + pr_info("Adding memblock [0x%llx - 0x%llx] on node %d\n", start, (end - 1), nid); return ret; } @@ -166,19 +223,18 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) void *nd; int tnid; - pr_info("NUMA: Initmem setup node %d [mem %#010Lx-%#010Lx]\n", - nid, start_pfn << PAGE_SHIFT, - (end_pfn << PAGE_SHIFT) - 1); + pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", + nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); nd = __va(nd_pa); /* report and initialize */ - pr_info("NUMA: NODE_DATA [mem %#010Lx-%#010Lx]\n", + pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n", nd_pa, nd_pa + nd_size - 1); tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); if (tnid != nid) - pr_info("NUMA: NODE_DATA(%d) on node %d\n", nid, tnid); + pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); node_data[nid] = nd; memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); @@ -235,8 +291,7 @@ static int __init numa_alloc_distance(void) numa_distance[i * numa_distance_cnt + j] = i == j ? 
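Once pcpu_embed_first_chunk() above has placed each CPU's chunk, __per_cpu_offset[cpu] holds the delta between a per-CPU symbol's link-time address and that CPU's private copy. Conceptually (my_per_cpu is a hypothetical macro sketching what the generic per_cpu() accessor boils down to):

    #define my_per_cpu(var, cpu) \
            (*(typeof(var) *)((char *)&(var) + __per_cpu_offset[cpu]))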
LOCAL_DISTANCE : REMOTE_DISTANCE; - pr_debug("NUMA: Initialized distance table, cnt=%d\n", - numa_distance_cnt); + pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt); return 0; } @@ -257,20 +312,20 @@ static int __init numa_alloc_distance(void) void __init numa_set_distance(int from, int to, int distance) { if (!numa_distance) { - pr_warn_once("NUMA: Warning: distance table not allocated yet\n"); + pr_warn_once("Warning: distance table not allocated yet\n"); return; } if (from >= numa_distance_cnt || to >= numa_distance_cnt || from < 0 || to < 0) { - pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n", + pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", from, to, distance); return; } if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) { - pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", from, to, distance); return; } @@ -297,7 +352,7 @@ static int __init numa_register_nodes(void) /* Check that valid nid is set to memblks */ for_each_memblock(memory, mblk) if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) { - pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", + pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", mblk->nid, mblk->base, mblk->base + mblk->size - 1); return -EINVAL; @@ -335,8 +390,10 @@ static int __init numa_init(int (*init_func)(void)) if (ret < 0) return ret; - if (nodes_empty(numa_nodes_parsed)) + if (nodes_empty(numa_nodes_parsed)) { + pr_info("No NUMA configuration found\n"); return -EINVAL; + } ret = numa_register_nodes(); if (ret < 0) @@ -344,10 +401,6 @@ static int __init numa_init(int (*init_func)(void)) setup_node_to_cpumask_map(); - /* init boot processor */ - cpu_to_node_map[0] = 0; - map_cpu_to_node(0, 0); - return 0; } @@ -367,10 +420,8 @@ static int __init dummy_numa_init(void) if (numa_off) pr_info("NUMA disabled\n"); /* Forced off on command line. */ - else - pr_info("No NUMA configuration found\n"); - pr_info("NUMA: Faking a node at [mem %#018Lx-%#018Lx]\n", - 0LLU, PFN_PHYS(max_pfn) - 1); + pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", + 0LLU, PFN_PHYS(max_pfn) - 1); for_each_memblock(memory, mblk) { ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index ca6d268e3313..8def55e7249b 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -139,4 +139,43 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) __pgprot(0), __pgprot(PTE_VALID)); } -#endif +#ifdef CONFIG_HIBERNATION +/* + * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function + * is used to determine if a linear map page has been marked as not-valid by + * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit. + * This is based on kern_addr_valid(), which almost does what we need. + * + * Because this is only called on the kernel linear map, p?d_sect() implies + * p?d_present(). When debug_pagealloc is enabled, sections mappings are + * disabled. 
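For reference, the distance table these messages refer to is a flat cnt x cnt byte matrix indexed as numa_distance[from * numa_distance_cnt + to], which is why the validation above rejects anything that does not fit in a u8 or a diagonal entry other than LOCAL_DISTANCE. Illustrative calls (values made up; LOCAL_DISTANCE is 10):

    numa_set_distance(0, 1, 20);    /* valid remote distance */
    numa_set_distance(0, 0, 10);    /* diagonal must be LOCAL_DISTANCE */
    numa_set_distance(0, 1, 300);   /* rejected: does not fit in a u8 */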
+ */ +bool kernel_page_present(struct page *page) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + unsigned long addr = (unsigned long)page_address(page); + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + return false; + + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return false; + if (pud_sect(*pud)) + return true; + + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return false; + if (pmd_sect(*pmd)) + return true; + + pte = pte_offset_kernel(pmd, addr); + return pte_valid(*pte); +} +#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_DEBUG_PAGEALLOC */ diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index ae11d4e03d0e..371c5f03a170 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -26,8 +26,6 @@ #include <asm/page.h> #include <asm/tlbflush.h> -#include "mm.h" - static struct kmem_cache *pgd_cache; pgd_t *pgd_alloc(struct mm_struct *mm) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 9d37e967fa19..352c73b6a59e 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -83,6 +83,7 @@ ENDPROC(cpu_do_suspend) * * x0: Address of context pointer */ + .pushsection ".idmap.text", "ax" ENTRY(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] @@ -120,6 +121,7 @@ ENTRY(cpu_do_resume) isb ret ENDPROC(cpu_do_resume) + .popsection #endif /* @@ -134,17 +136,12 @@ ENTRY(cpu_do_switch_mm) bfi x0, x1, #48, #16 // set the ASID msr ttbr0_el1, x0 // set TTBR0 isb -alternative_if_not ARM64_WORKAROUND_CAVIUM_27456 - ret - nop - nop - nop -alternative_else +alternative_if ARM64_WORKAROUND_CAVIUM_27456 ic iallu dsb nsh isb +alternative_else_nop_endif ret -alternative_endif ENDPROC(cpu_do_switch_mm) .pushsection ".idmap.text", "ax" @@ -181,6 +178,7 @@ ENDPROC(idmap_cpu_replace_ttbr1) * Initialise the processor for turning the MMU on. Return in x0 the * value of the SCTLR_EL1 register. */ + .pushsection ".idmap.text", "ax" ENTRY(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh @@ -266,3 +264,4 @@ ENDPROC(__cpu_setup) crval: .word 0xfcffffff // clear .word 0x34d5d91d // set + .popsection diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile index 3242e591fa82..26b78d86f25a 100644 --- a/arch/x86/kernel/acpi/Makefile +++ b/arch/x86/kernel/acpi/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_ACPI) += boot.o obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o obj-$(CONFIG_ACPI_APEI) += apei.o +obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_msr.o ifneq ($(CONFIG_ACPI_PROCESSOR),) obj-y += cstate.o diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 90d84c3eee53..ccd27fe9ca81 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1031,8 +1031,8 @@ static int __init acpi_parse_madt_lapic_entries(void) return ret; } - x2count = madt_proc[0].count; - count = madt_proc[1].count; + count = madt_proc[0].count; + x2count = madt_proc[1].count; } if (!count && !x2count) { printk(KERN_ERR PREFIX "No LAPIC entries present\n"); @@ -1513,7 +1513,7 @@ void __init acpi_boot_table_init(void) * If acpi_disabled, bail out */ if (acpi_disabled) - return; + return; /* * Initialize the ACPI boot-time table parser. diff --git a/arch/x86/kernel/acpi/cppc_msr.c b/arch/x86/kernel/acpi/cppc_msr.c new file mode 100644 index 000000000000..6fb478bf82fd --- /dev/null +++ b/arch/x86/kernel/acpi/cppc_msr.c @@ -0,0 +1,58 @@ +/* + * cppc_msr.c: MSR Interface for CPPC + * Copyright (c) 2016, Intel Corporation. 
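A simplified sketch of the hibernation-side caller that motivates kernel_page_present() above, modeled on the safe-copy logic in kernel/power/snapshot.c (abbreviated, not verbatim):

    if (kernel_page_present(s_page)) {
            do_copy_page(dst, page_address(s_page));
    } else {
            /* debug_pagealloc unmapped it: map, copy, unmap again */
            kernel_map_pages(s_page, 1, 1);
            do_copy_page(dst, page_address(s_page));
            kernel_map_pages(s_page, 1, 0);
    }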
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#include <acpi/cppc_acpi.h> +#include <asm/msr.h> + +/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */ + +bool cpc_ffh_supported(void) +{ + return true; +} + +int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) +{ + int err; + + err = rdmsrl_safe_on_cpu(cpunum, reg->address, val); + if (!err) { + u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, + reg->bit_offset); + + *val &= mask; + *val >>= reg->bit_offset; + } + return err; +} + +int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) +{ + u64 rd_val; + int err; + + err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val); + if (!err) { + u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, + reg->bit_offset); + + val <<= reg->bit_offset; + val &= mask; + rd_val &= ~mask; + rd_val |= val; + err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val); + } + return err; +} diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 445ce28475b3..696c6f74a9c7 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -227,7 +227,6 @@ config ACPI_MCFG config ACPI_CPPC_LIB bool depends on ACPI_PROCESSOR - depends on !ACPI_CPU_FREQ_PSS select MAILBOX select PCC help @@ -462,6 +461,9 @@ source "drivers/acpi/nfit/Kconfig" source "drivers/acpi/apei/Kconfig" source "drivers/acpi/dptf/Kconfig" +config ACPI_WATCHDOG + bool + config ACPI_EXTLOG tristate "Extended Error Log support" depends on X86_MCE && X86_LOCAL_APIC diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 5ae9d85c5159..3a1fa8f03749 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -56,6 +56,7 @@ acpi-$(CONFIG_ACPI_NUMA) += numa.o acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o acpi-y += acpi_lpat.o acpi-$(CONFIG_ACPI_GENERIC_GSI) += gsi.o +acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o # These are (potentially) separate modules diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 1daf9c46df8e..d58fbf7f04e6 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -42,6 +42,7 @@ struct apd_private_data; struct apd_device_desc { unsigned int flags; unsigned int fixed_clk_rate; + struct property_entry *properties; int (*setup)(struct apd_private_data *pdata); }; @@ -71,22 +72,35 @@ static int acpi_apd_setup(struct apd_private_data *pdata) } #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE -static struct apd_device_desc cz_i2c_desc = { +static const struct apd_device_desc cz_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 133000000, }; -static struct apd_device_desc cz_uart_desc = { +static struct property_entry uart_properties[] = { + PROPERTY_ENTRY_U32("reg-io-width", 4), + PROPERTY_ENTRY_U32("reg-shift", 2), + PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"), + { }, +}; + +static const struct apd_device_desc cz_uart_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 48000000, + .properties = uart_properties, }; #endif #ifdef CONFIG_ARM64 -static struct apd_device_desc xgene_i2c_desc = { +static const struct apd_device_desc xgene_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 100000000, }; + +static const 
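A worked example of the mask arithmetic in cpc_read_ffh()/cpc_write_ffh() above, using illustrative register-descriptor values (bit_offset = 8, bit_width = 8):

    u64 msr   = 0x1234;
    u64 mask  = GENMASK_ULL(8 + 8 - 1, 8);   /* 0xff00 */
    u64 field = (msr & mask) >> 8;           /* read path: 0x12 */

    /* write path: clear the field in the old value, OR in the new one */
    msr = (msr & ~mask) | ((field << 8) & mask);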
struct apd_device_desc vulcan_spi_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 133000000, +}; #endif #else @@ -125,6 +139,12 @@ static int acpi_apd_create_device(struct acpi_device *adev, goto err_out; } + if (dev_desc->properties) { + ret = device_add_properties(&adev->dev, dev_desc->properties); + if (ret) + goto err_out; + } + adev->driver_data = pdata; pdev = acpi_create_platform_device(adev); if (!IS_ERR_OR_NULL(pdev)) @@ -149,6 +169,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { #endif #ifdef CONFIG_ARM64 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, + { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, #endif { } }; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 357a0b8f860b..552010288135 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -75,6 +75,7 @@ struct lpss_device_desc { const char *clk_con_id; unsigned int prv_offset; size_t prv_size_override; + struct property_entry *properties; void (*setup)(struct lpss_private_data *pdata); }; @@ -163,11 +164,19 @@ static const struct lpss_device_desc lpt_i2c_dev_desc = { .prv_offset = 0x800, }; +static struct property_entry uart_properties[] = { + PROPERTY_ENTRY_U32("reg-io-width", 4), + PROPERTY_ENTRY_U32("reg-shift", 2), + PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"), + { }, +}; + static const struct lpss_device_desc lpt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, + .properties = uart_properties, }; static const struct lpss_device_desc lpt_sdio_dev_desc = { @@ -189,6 +198,7 @@ static const struct lpss_device_desc byt_uart_dev_desc = { .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, + .properties = uart_properties, }; static const struct lpss_device_desc bsw_uart_dev_desc = { @@ -197,6 +207,7 @@ static const struct lpss_device_desc bsw_uart_dev_desc = { .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, + .properties = uart_properties, }; static const struct lpss_device_desc byt_spi_dev_desc = { @@ -440,6 +451,12 @@ static int acpi_lpss_create_device(struct acpi_device *adev, goto err_out; } + if (dev_desc->properties) { + ret = device_add_properties(&adev->dev, dev_desc->properties); + if (ret) + goto err_out; + } + adev->driver_data = pdata; pdev = acpi_create_platform_device(adev); if (!IS_ERR_OR_NULL(pdev)) { diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 159f7f19abce..b200ae1f3c6f 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/dma-mapping.h> +#include <linux/pci.h> #include <linux/platform_device.h> #include "internal.h" @@ -30,6 +31,22 @@ static const struct acpi_device_id forbidden_id_list[] = { {"", 0}, }; +static void acpi_platform_fill_resource(struct acpi_device *adev, + const struct resource *src, struct resource *dest) +{ + struct device *parent; + + *dest = *src; + + /* + * If the device has parent we need to take its resources into + * account as well because this device might consume part of those. + */ + parent = acpi_get_first_physical_node(adev->parent); + if (parent && dev_is_pci(parent)) + dest->parent = pci_find_resource(to_pci_dev(parent), dest); +} + /** * acpi_create_platform_device - Create platform device for ACPI device node * @adev: ACPI device node to create a platform device for. 
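The property_entry arrays added above surface the same keys a DT node would provide, so the UART driver can stay firmware-agnostic. On the consumer side the values come back through the unified property API; a hedged sketch, not the exact 8250 driver code:

    u32 val;

    if (!device_property_read_u32(dev, "reg-shift", &val))
            port->regshift = val;
    if (!device_property_read_u32(dev, "reg-io-width", &val) && val == 4)
            port->iotype = UPIO_MEM32;   /* 32-bit register accesses */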
@@ -70,7 +87,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) } count = 0; list_for_each_entry(rentry, &resource_list, node) - resources[count++] = *rentry->res; + acpi_platform_fill_resource(adev, rentry->res, + &resources[count++]); acpi_dev_free_resource_list(&resource_list); } diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c new file mode 100644 index 000000000000..13caebd679f5 --- /dev/null +++ b/drivers/acpi/acpi_watchdog.c @@ -0,0 +1,123 @@ +/* + * ACPI watchdog table parsing support. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) "ACPI: watchdog: " fmt + +#include <linux/acpi.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> + +#include "internal.h" + +/** + * Returns true if this system should prefer ACPI based watchdog instead of + * the native one (which are typically the same hardware). + */ +bool acpi_has_watchdog(void) +{ + struct acpi_table_header hdr; + + if (acpi_disabled) + return false; + + return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr)); +} +EXPORT_SYMBOL_GPL(acpi_has_watchdog); + +void __init acpi_watchdog_init(void) +{ + const struct acpi_wdat_entry *entries; + const struct acpi_table_wdat *wdat; + struct list_head resource_list; + struct resource_entry *rentry; + struct platform_device *pdev; + struct resource *resources; + size_t nresources = 0; + acpi_status status; + int i; + + status = acpi_get_table(ACPI_SIG_WDAT, 0, + (struct acpi_table_header **)&wdat); + if (ACPI_FAILURE(status)) { + /* It is fine if there is no WDAT */ + return; + } + + /* Watchdog disabled by BIOS */ + if (!(wdat->flags & ACPI_WDAT_ENABLED)) + return; + + /* Skip legacy PCI WDT devices */ + if (wdat->pci_segment != 0xff || wdat->pci_bus != 0xff || + wdat->pci_device != 0xff || wdat->pci_function != 0xff) + return; + + INIT_LIST_HEAD(&resource_list); + + entries = (struct acpi_wdat_entry *)(wdat + 1); + for (i = 0; i < wdat->entries; i++) { + const struct acpi_generic_address *gas; + struct resource_entry *rentry; + struct resource res; + bool found; + + gas = &entries[i].register_region; + + res.start = gas->address; + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + res.flags = IORESOURCE_MEM; + res.end = res.start + ALIGN(gas->access_width, 4); + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + res.flags = IORESOURCE_IO; + res.end = res.start + gas->access_width; + } else { + pr_warn("Unsupported address space: %u\n", + gas->space_id); + goto fail_free_resource_list; + } + + found = false; + resource_list_for_each_entry(rentry, &resource_list) { + if (resource_contains(rentry->res, &res)) { + found = true; + break; + } + } + + if (!found) { + rentry = resource_list_create_entry(NULL, 0); + if (!rentry) + goto fail_free_resource_list; + + *rentry->res = res; + resource_list_add_tail(rentry, &resource_list); + nresources++; + } + } + + resources = kcalloc(nresources, sizeof(*resources), GFP_KERNEL); + if (!resources) + goto fail_free_resource_list; + + i = 0; + resource_list_for_each_entry(rentry, &resource_list) + resources[i++] = *rentry->res; + + pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE, + resources, nresources); + if (IS_ERR(pdev)) + pr_err("Failed to create platform 
device\n"); + + kfree(resources); + +fail_free_resource_list: + resource_list_free(&resource_list); +} diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 227bb7bb19d7..32d93edbc479 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -175,6 +175,7 @@ acpi-y += \ utresrc.o \ utstate.o \ utstring.o \ + utstrtoul64.o \ utxface.o \ utxfinit.o \ utxferror.o \ diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index ca2c0607104b..0bd6307e1f3c 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h @@ -44,7 +44,9 @@ #ifndef _ACAPPS #define _ACAPPS -#include <stdio.h> +#ifdef ACPI_USE_STANDARD_HEADERS +#include <sys/stat.h> +#endif /* ACPI_USE_STANDARD_HEADERS */ /* Common info for tool signons */ @@ -81,13 +83,13 @@ /* Macros for usage messages */ #define ACPI_USAGE_HEADER(usage) \ - acpi_os_printf ("Usage: %s\nOptions:\n", usage); + printf ("Usage: %s\nOptions:\n", usage); #define ACPI_USAGE_TEXT(description) \ - acpi_os_printf (description); + printf (description); #define ACPI_OPTION(name, description) \ - acpi_os_printf (" %-20s%s\n", name, description); + printf (" %-20s%s\n", name, description); /* Check for unexpected exceptions */ diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index f6404ea928cb..94737f8845ac 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -155,7 +155,7 @@ acpi_status acpi_db_disassemble_method(char *name); void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op); -void acpi_db_batch_execute(char *count_arg); +void acpi_db_evaluate_predefined_names(void); /* * dbnames - namespace commands diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 77af91cf46d4..92fa47c6498c 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -86,6 +86,9 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info); acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); acpi_status +acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked); + +acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); acpi_status diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index fded776236e2..750fa824d42c 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -317,6 +317,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE); ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE); ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE); ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE); +ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_emit_external_opcodes, FALSE); ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm); ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing); @@ -382,6 +383,7 @@ ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]); ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL); ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL); +ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE); /* Print buffer */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 13331d70dea0..dff1207a6078 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -484,6 +484,7 @@ struct acpi_gpe_event_info { u8 flags; /* Misc info about this GPE */ u8 gpe_number; /* This GPE */ u8 runtime_count; /* References to a run GPE */ + u8 disable_for_dispatch; /* Masked during dispatching */ }; /* Information about a GPE 
register pair, one per each status/enable pair in an array */ @@ -494,6 +495,7 @@ struct acpi_gpe_register_info { u16 base_gpe_number; /* Base GPE number for this register */ u8 enable_for_wake; /* GPEs to keep enabled when sleeping */ u8 enable_for_run; /* GPEs to keep enabled when running */ + u8 mask_for_run; /* GPEs to keep masked when running */ u8 enable_mask; /* Current mask of enabled GPEs */ }; diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index f33a4ba8e0cb..bb7fca1c8ba3 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -130,6 +130,9 @@ acpi_status acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node); acpi_status +acpi_ns_execute_table(u32 table_index, struct acpi_namespace_node *start_node); + +acpi_status acpi_ns_one_complete_parse(u32 pass_number, u32 table_index, struct acpi_namespace_node *start_node); @@ -296,6 +299,11 @@ u8 acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for); acpi_status +acpi_ns_get_node_unlocked(struct acpi_namespace_node *prefix_node, + const char *external_pathname, + u32 flags, struct acpi_namespace_node **out_node); + +acpi_status acpi_ns_get_node(struct acpi_namespace_node *prefix_node, const char *external_pathname, u32 flags, struct acpi_namespace_node **out_node); diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index fc305775c3d7..939d41113970 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -78,6 +78,8 @@ extern const u8 acpi_gbl_long_op_index[]; */ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info); +acpi_status acpi_ps_execute_table(struct acpi_evaluate_info *info); + /* * psargs - Parse AML opcode arguments */ diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index cd5a135fcf29..e85953b6fa0e 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -123,6 +123,14 @@ acpi_tb_install_standard_table(acpi_physical_address address, void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc); +acpi_status +acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node); + +acpi_status +acpi_tb_install_and_load_table(struct acpi_table_header *table, + acpi_physical_address address, + u8 flags, u8 override, u32 *table_index); + void acpi_tb_terminate(void); acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index); @@ -155,10 +163,6 @@ void acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc, u8 override, u32 *table_index); -acpi_status -acpi_tb_install_fixed_table(acpi_physical_address address, - char *signature, u32 *table_index); - acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address); /* diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index a7dbb2b882cf..0a1b53c9ee0e 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -114,13 +114,25 @@ extern const char *acpi_gbl_pt_decode[]; /* * Common error message prefixes */ +#ifndef ACPI_MSG_ERROR #define ACPI_MSG_ERROR "ACPI Error: " +#endif +#ifndef ACPI_MSG_EXCEPTION #define ACPI_MSG_EXCEPTION "ACPI Exception: " +#endif +#ifndef ACPI_MSG_WARNING #define ACPI_MSG_WARNING "ACPI Warning: " +#endif +#ifndef ACPI_MSG_INFO #define ACPI_MSG_INFO "ACPI: " +#endif +#ifndef ACPI_MSG_BIOS_ERROR #define ACPI_MSG_BIOS_ERROR "ACPI BIOS Error (bug): " +#endif +#ifndef ACPI_MSG_BIOS_WARNING #define ACPI_MSG_BIOS_WARNING "ACPI BIOS Warning (bug): " +#endif 
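The #ifndef guards added above make the message prefixes overridable: a host environment can pre-define them before acutils.h is pulled in. For example (hypothetical host header, not part of the patch):

    /* in a host-specific header, included ahead of acutils.h */
    #define ACPI_MSG_ERROR      "firmware error: "
    #define ACPI_MSG_WARNING    "firmware warning: "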
/* * Common message suffix @@ -184,14 +196,15 @@ void acpi_ut_strlwr(char *src_string); int acpi_ut_stricmp(char *string1, char *string2); -acpi_status -acpi_ut_strtoul64(char *string, - u32 base, u32 max_integer_byte_width, u64 *ret_integer); - -/* Values for max_integer_byte_width above */ +acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *ret_integer); -#define ACPI_MAX32_BYTE_WIDTH 4 -#define ACPI_MAX64_BYTE_WIDTH 8 +/* + * Values for Flags above + * Note: LIMIT values correspond to acpi_gbl_integer_byte_width values (4/8) + */ +#define ACPI_STRTOUL_32BIT 0x04 /* 4 bytes */ +#define ACPI_STRTOUL_64BIT 0x08 /* 8 bytes */ +#define ACPI_STRTOUL_BASE16 0x10 /* Default: Base10/16 */ /* * utglobal - Global data structures and procedures @@ -221,6 +234,8 @@ const char *acpi_ut_get_event_name(u32 event_id); char acpi_ut_hex_to_ascii_char(u64 integer, u32 position); +acpi_status acpi_ut_ascii_to_hex_byte(char *two_ascii_chars, u8 *return_byte); + u8 acpi_ut_ascii_char_to_hex(int hex_char); u8 acpi_ut_valid_object_type(acpi_object_type type); @@ -318,6 +333,11 @@ acpi_ut_ptr_exit(u32 line_number, const char *module_name, u32 component_id, u8 *ptr); void +acpi_ut_str_exit(u32 line_number, + const char *function_name, + const char *module_name, u32 component_id, const char *string); + +void acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id); void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset); @@ -707,25 +727,6 @@ const struct ah_device_id *acpi_ah_match_hardware_id(char *hid); const char *acpi_ah_match_uuid(u8 *data); /* - * utprint - printf/vprintf output functions - */ -const char *acpi_ut_scan_number(const char *string, u64 *number_ptr); - -const char *acpi_ut_print_number(char *string, u64 number); - -int -acpi_ut_vsnprintf(char *string, - acpi_size size, const char *format, va_list args); - -int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...); - -#ifdef ACPI_APPLICATION -int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args); - -int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...); -#endif - -/* * utuuid -- UUID support functions */ #if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_HELP_APP) diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c index 7cd07b27f758..147ce8894f76 100644 --- a/drivers/acpi/acpica/dbconvert.c +++ b/drivers/acpi/acpica/dbconvert.c @@ -277,9 +277,10 @@ acpi_db_convert_to_object(acpi_object_type type, default: object->type = ACPI_TYPE_INTEGER; - status = - acpi_ut_strtoul64(string, 16, acpi_gbl_integer_byte_width, - &object->integer.value); + status = acpi_ut_strtoul64(string, + (acpi_gbl_integer_byte_width | + ACPI_STRTOUL_BASE16), + &object->integer.value); break; } diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c index 12df2915ad74..fe3da7c31bb7 100644 --- a/drivers/acpi/acpica/dbexec.c +++ b/drivers/acpi/acpica/dbexec.c @@ -392,42 +392,48 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags) acpi_db_execution_walk, NULL, NULL, NULL); return; - } else { - name_string = ACPI_ALLOCATE(strlen(name) + 1); - if (!name_string) { - return; - } + } - memset(&acpi_gbl_db_method_info, 0, - sizeof(struct acpi_db_method_info)); + name_string = ACPI_ALLOCATE(strlen(name) + 1); + if (!name_string) { + return; + } - strcpy(name_string, name); - acpi_ut_strupr(name_string); - acpi_gbl_db_method_info.name = name_string; - acpi_gbl_db_method_info.args = args; - 
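With the new single-flags interface above, callers OR the integer width (4 or 8, tracking acpi_gbl_integer_byte_width) with an optional base flag; for example:

    u64 value;
    acpi_status status;

    /* force base 16 at the current integer width */
    status = acpi_ut_strtoul64("10",
                               acpi_gbl_integer_byte_width |
                               ACPI_STRTOUL_BASE16, &value);
    /* on success, value == 0x10 */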
acpi_gbl_db_method_info.types = types; - acpi_gbl_db_method_info.flags = flags; + memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info)); + strcpy(name_string, name); + acpi_ut_strupr(name_string); - return_obj.pointer = NULL; - return_obj.length = ACPI_ALLOCATE_BUFFER; + /* Subcommand to Execute all predefined names in the namespace */ - status = acpi_db_execute_setup(&acpi_gbl_db_method_info); - if (ACPI_FAILURE(status)) { - ACPI_FREE(name_string); - return; - } + if (!strncmp(name_string, "PREDEF", 6)) { + acpi_db_evaluate_predefined_names(); + ACPI_FREE(name_string); + return; + } - /* Get the NS node, determines existence also */ + acpi_gbl_db_method_info.name = name_string; + acpi_gbl_db_method_info.args = args; + acpi_gbl_db_method_info.types = types; + acpi_gbl_db_method_info.flags = flags; - status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname, - &acpi_gbl_db_method_info.method); - if (ACPI_SUCCESS(status)) { - status = - acpi_db_execute_method(&acpi_gbl_db_method_info, - &return_obj); - } + return_obj.pointer = NULL; + return_obj.length = ACPI_ALLOCATE_BUFFER; + + status = acpi_db_execute_setup(&acpi_gbl_db_method_info); + if (ACPI_FAILURE(status)) { ACPI_FREE(name_string); + return; + } + + /* Get the NS node, determines existence also */ + + status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname, + &acpi_gbl_db_method_info.method); + if (ACPI_SUCCESS(status)) { + status = acpi_db_execute_method(&acpi_gbl_db_method_info, + &return_obj); } + ACPI_FREE(name_string); /* * Allow any handlers in separate threads to complete. diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c index 483287942372..6f05b8c271a5 100644 --- a/drivers/acpi/acpica/dbfileio.c +++ b/drivers/acpi/acpica/dbfileio.c @@ -46,14 +46,12 @@ #include "accommon.h" #include "acdebug.h" #include "actables.h" -#include <stdio.h> -#ifdef ACPI_APPLICATION -#include "acapps.h" -#endif #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbfileio") +#ifdef ACPI_APPLICATION +#include "acapps.h" #ifdef ACPI_DEBUGGER /******************************************************************************* * @@ -69,8 +67,6 @@ ACPI_MODULE_NAME("dbfileio") void acpi_db_close_debug_file(void) { -#ifdef ACPI_APPLICATION - if (acpi_gbl_debug_file) { fclose(acpi_gbl_debug_file); acpi_gbl_debug_file = NULL; @@ -78,7 +74,6 @@ void acpi_db_close_debug_file(void) acpi_os_printf("Debug output file %s closed\n", acpi_gbl_db_debug_filename); } -#endif } /******************************************************************************* @@ -96,8 +91,6 @@ void acpi_db_close_debug_file(void) void acpi_db_open_debug_file(char *name) { -#ifdef ACPI_APPLICATION - acpi_db_close_debug_file(); acpi_gbl_debug_file = fopen(name, "w+"); if (!acpi_gbl_debug_file) { @@ -109,8 +102,6 @@ void acpi_db_open_debug_file(char *name) strncpy(acpi_gbl_db_debug_filename, name, sizeof(acpi_gbl_db_debug_filename)); acpi_gbl_db_output_to_file = TRUE; - -#endif } #endif @@ -152,12 +143,13 @@ acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head) return (status); } - fprintf(stderr, - "Acpi table [%4.4s] successfully installed and loaded\n", - table->signature); + acpi_os_printf + ("Acpi table [%4.4s] successfully installed and loaded\n", + table->signature); table_list_head = table_list_head->next; } return (AE_OK); } +#endif diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index 7cd5d2e022da..068214f9cc9d 100644 --- a/drivers/acpi/acpica/dbinput.c +++ 
b/drivers/acpi/acpica/dbinput.c @@ -286,6 +286,8 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = { {1, " \"Ascii String\"", "String method argument\n"}, {1, " (Hex Byte List)", "Buffer method argument\n"}, {1, " [Package Element List]", "Package method argument\n"}, + {5, " Execute predefined", + "Execute all predefined (public) methods\n"}, {1, " Go", "Allow method to run to completion\n"}, {1, " Information", "Display info about the current method\n"}, {1, " Into", "Step into (not over) a method call\n"}, diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c index f17a86f6b16b..314b94cf086a 100644 --- a/drivers/acpi/acpica/dbmethod.c +++ b/drivers/acpi/acpica/dbmethod.c @@ -52,6 +52,11 @@ #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbmethod") +/* Local prototypes */ +static acpi_status +acpi_db_walk_for_execute(acpi_handle obj_handle, + u32 nesting_level, void *context, void **return_value); + /******************************************************************************* * * FUNCTION: acpi_db_set_method_breakpoint @@ -66,6 +71,7 @@ ACPI_MODULE_NAME("dbmethod") * AML offset * ******************************************************************************/ + void acpi_db_set_method_breakpoint(char *location, struct acpi_walk_state *walk_state, @@ -367,3 +373,129 @@ acpi_status acpi_db_disassemble_method(char *name) acpi_ut_release_owner_id(&obj_desc->method.owner_id); return (AE_OK); } + +/******************************************************************************* + * + * FUNCTION: acpi_db_walk_for_execute + * + * PARAMETERS: Callback from walk_namespace + * + * RETURN: Status + * + * DESCRIPTION: Batch execution module. Currently only executes predefined + * ACPI names. + * + ******************************************************************************/ + +static acpi_status +acpi_db_walk_for_execute(acpi_handle obj_handle, + u32 nesting_level, void *context, void **return_value) +{ + struct acpi_namespace_node *node = + (struct acpi_namespace_node *)obj_handle; + struct acpi_db_execute_walk *info = + (struct acpi_db_execute_walk *)context; + struct acpi_buffer return_obj; + acpi_status status; + char *pathname; + u32 i; + struct acpi_device_info *obj_info; + struct acpi_object_list param_objects; + union acpi_object params[ACPI_METHOD_NUM_ARGS]; + const union acpi_predefined_info *predefined; + + predefined = acpi_ut_match_predefined_method(node->name.ascii); + if (!predefined) { + return (AE_OK); + } + + if (node->type == ACPI_TYPE_LOCAL_SCOPE) { + return (AE_OK); + } + + pathname = acpi_ns_get_external_pathname(node); + if (!pathname) { + return (AE_OK); + } + + /* Get the object info for number of method parameters */ + + status = acpi_get_object_info(obj_handle, &obj_info); + if (ACPI_FAILURE(status)) { + return (status); + } + + param_objects.pointer = NULL; + param_objects.count = 0; + + if (obj_info->type == ACPI_TYPE_METHOD) { + + /* Setup default parameters */ + + for (i = 0; i < obj_info->param_count; i++) { + params[i].type = ACPI_TYPE_INTEGER; + params[i].integer.value = 1; + } + + param_objects.pointer = params; + param_objects.count = obj_info->param_count; + } + + ACPI_FREE(obj_info); + return_obj.pointer = NULL; + return_obj.length = ACPI_ALLOCATE_BUFFER; + + /* Do the actual method execution */ + + acpi_gbl_method_executing = TRUE; + + status = acpi_evaluate_object(node, NULL, ¶m_objects, &return_obj); + + acpi_os_printf("%-32s returned %s\n", pathname, + acpi_format_exception(status)); + 
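From the acpiexec debugger prompt, the new "PREDEF" subcommand recognized above batch-evaluates every predefined name, supplying integer 1 for each method argument as the walk callback above shows. An illustrative session (the count is whatever the namespace walk reports):

    - execute PREDEF
    Evaluated <n> predefined names in the namespace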
acpi_gbl_method_executing = FALSE; + ACPI_FREE(pathname); + + /* Ignore status from method execution */ + + status = AE_OK; + + /* Update count, check if we have executed enough methods */ + + info->count++; + if (info->count >= info->max_count) { + status = AE_CTRL_TERMINATE; + } + + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_db_evaluate_predefined_names + * + * PARAMETERS: None + * + * RETURN: None + * + * DESCRIPTION: Namespace batch execution. Execute predefined names in the + * namespace, up to the max count, if specified. + * + ******************************************************************************/ + +void acpi_db_evaluate_predefined_names(void) +{ + struct acpi_db_execute_walk info; + + info.count = 0; + info.max_count = ACPI_UINT32_MAX; + + /* Search all nodes in namespace */ + + (void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, acpi_db_walk_for_execute, + NULL, (void *)&info, NULL); + + acpi_os_printf("Evaluated %u predefined names in the namespace\n", + info.count); +} diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c index 1d59e8b6f859..08eaaf350b24 100644 --- a/drivers/acpi/acpica/dbobject.c +++ b/drivers/acpi/acpica/dbobject.c @@ -142,11 +142,11 @@ void acpi_db_decode_internal_object(union acpi_operand_object *obj_desc) case ACPI_TYPE_STRING: - acpi_os_printf("(%u) \"%.24s", + acpi_os_printf("(%u) \"%.60s", obj_desc->string.length, obj_desc->string.pointer); - if (obj_desc->string.length > 24) { + if (obj_desc->string.length > 60) { acpi_os_printf("..."); } else { acpi_os_printf("\""); diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 47c7b52a519c..32e9ddc0cf2b 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -99,11 +99,14 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, "Method auto-serialization parse [%4.4s] %p\n", acpi_ut_get_node_name(node), node)); + acpi_ex_enter_interpreter(); + /* Create/Init a root op for the method parse tree */ op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start); if (!op) { - return_ACPI_STATUS(AE_NO_MEMORY); + status = AE_NO_MEMORY; + goto unlock; } acpi_ps_set_name(op, node->name.integer); @@ -115,7 +118,8 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL); if (!walk_state) { acpi_ps_free_op(op); - return_ACPI_STATUS(AE_NO_MEMORY); + status = AE_NO_MEMORY; + goto unlock; } status = acpi_ds_init_aml_walk(walk_state, op, node, @@ -134,6 +138,8 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, status = acpi_ps_parse_aml(walk_state); acpi_ps_delete_parse_tree(op); +unlock: + acpi_ex_exit_interpreter(); return_ACPI_STATUS(status); } @@ -757,8 +763,10 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, /* Delete any direct children of (created by) this method */ + (void)acpi_ex_exit_interpreter(); acpi_ns_delete_namespace_subtree(walk_state-> method_node); + (void)acpi_ex_enter_interpreter(); /* * Delete any objects that were created by this method @@ -769,9 +777,11 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, */ if (method_desc->method. info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) { + (void)acpi_ex_exit_interpreter(); acpi_ns_delete_namespace_by_owner(method_desc-> method. 
owner_id); + (void)acpi_ex_enter_interpreter(); method_desc->method.info_flags &= ~ACPI_METHOD_MODIFIED_NAMESPACE; } diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index f393de9f5887..7d8ef52fb88d 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -565,15 +565,14 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, status = AE_OK; } else if (parent_op->common.aml_opcode == AML_EXTERNAL_OP) { - - /* TBD: May only be temporary */ - - obj_desc = - acpi_ut_create_string_object((acpi_size)name_length); - - strncpy(obj_desc->string.pointer, - name_string, name_length); - status = AE_OK; + /* + * This opcode should never appear here. It is used only + * by AML disassemblers and is surrounded by an If(0) + * by the ASL compiler. + * + * Therefore, if we see it here, it is a serious error. + */ + status = AE_AML_BAD_OPCODE; } else { /* * We just plain didn't find it -- which is a diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 402ecc590c56..438597cf6cc5 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -133,7 +133,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, * Result of predicate evaluation must be an Integer * object. Implicitly convert the argument if necessary. */ - status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc, 16); + status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc, + ACPI_STRTOUL_BASE16); if (ACPI_FAILURE(status)) { goto cleanup; } diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 762db3fa70e0..028b22a3154e 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -605,16 +605,13 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } - - acpi_ex_exit_interpreter(); } + acpi_ex_exit_interpreter(); status = acpi_ev_initialize_region (acpi_ns_get_attached_object(node), FALSE); - if (walk_state->method_node) { - acpi_ex_enter_interpreter(); - } + acpi_ex_enter_interpreter(); if (ACPI_FAILURE(status)) { /* diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 4b4949ce05bc..bdb10bee13ce 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -130,6 +130,60 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) /******************************************************************************* * + * FUNCTION: acpi_ev_mask_gpe + * + * PARAMETERS: gpe_event_info - GPE to be blocked/unblocked + * is_masked - Whether the GPE is masked or not + * + * RETURN: Status + * + * DESCRIPTION: Unconditionally mask/unmask a GPE during runtime. 
+ * + ******************************************************************************/ + +acpi_status +acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked) +{ + struct acpi_gpe_register_info *gpe_register_info; + u32 register_bit; + + ACPI_FUNCTION_TRACE(ev_mask_gpe); + + gpe_register_info = gpe_event_info->register_info; + if (!gpe_register_info) { + return_ACPI_STATUS(AE_NOT_EXIST); + } + + register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); + + /* Perform the action */ + + if (is_masked) { + if (register_bit & gpe_register_info->mask_for_run) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); + ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit); + } else { + if (!(register_bit & gpe_register_info->mask_for_run)) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + ACPI_CLEAR_BIT(gpe_register_info->mask_for_run, + (u8)register_bit); + if (gpe_event_info->runtime_count + && !gpe_event_info->disable_for_dispatch) { + (void)acpi_hw_low_set_gpe(gpe_event_info, + ACPI_GPE_ENABLE); + } + } + + return_ACPI_STATUS(AE_OK); +} + +/******************************************************************************* + * * FUNCTION: acpi_ev_add_gpe_reference * * PARAMETERS: gpe_event_info - Add a reference to this GPE @@ -674,6 +728,7 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) * in the event_info. */ (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); + gpe_event_info->disable_for_dispatch = FALSE; return (AE_OK); } @@ -737,6 +792,8 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, } } + gpe_event_info->disable_for_dispatch = TRUE; + /* * Dispatch the GPE to either an installed handler or the control * method associated with this GPE (_Lxx or _Exx). If a handler diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 7dc75474c897..16ce4835e7d0 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -323,7 +323,9 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, struct acpi_gpe_walk_info *walk_info = ACPI_CAST_PTR(struct acpi_gpe_walk_info, context); struct acpi_gpe_event_info *gpe_event_info; + acpi_status status; u32 gpe_number; + u8 temp_gpe_number; char name[ACPI_NAME_SIZE + 1]; u8 type; @@ -377,8 +379,8 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, /* 4) The last two characters of the name are the hex GPE Number */ - gpe_number = strtoul(&name[2], NULL, 16); - if (gpe_number == ACPI_UINT32_MAX) { + status = acpi_ut_ascii_to_hex_byte(&name[2], &temp_gpe_number); + if (ACPI_FAILURE(status)) { /* Conversion failed; invalid method, just ignore it */ @@ -390,6 +392,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, /* Ensure that we have a valid GPE number for this GPE block */ + gpe_number = (u32)temp_gpe_number; gpe_event_info = acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block); if (!gpe_event_info) { diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index b6ea9c0d0d8c..3843f1fc5dbb 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -553,7 +553,8 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj, * * See acpi_ns_exec_module_code */ - if (obj_desc->method. + if (!acpi_gbl_parse_table_as_term_list && + obj_desc->method. 
info_flags & ACPI_METHOD_MODULE_LEVEL) { handler_obj = obj_desc->method.dispatch.handler; diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 17cfef721d00..d7a3b2775505 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -235,11 +235,13 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) case ACPI_GPE_ENABLE: status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); + gpe_event_info->disable_for_dispatch = FALSE; break; case ACPI_GPE_DISABLE: status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); + gpe_event_info->disable_for_dispatch = TRUE; break; default: @@ -257,6 +259,47 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe) /******************************************************************************* * + * FUNCTION: acpi_mask_gpe + * + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 + * gpe_number - GPE level within the GPE block + * is_masked - Whether the GPE is masked or not + * + * RETURN: Status + * + * DESCRIPTION: Unconditionally mask/unmask an individual GPE, e.g., to + * prevent GPE flooding. + * + ******************************************************************************/ +acpi_status acpi_mask_gpe(acpi_handle gpe_device, u32 gpe_number, u8 is_masked) +{ + struct acpi_gpe_event_info *gpe_event_info; + acpi_status status; + acpi_cpu_flags flags; + + ACPI_FUNCTION_TRACE(acpi_mask_gpe); + + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + + /* Ensure that we have a valid GPE number */ + + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); + if (!gpe_event_info) { + status = AE_BAD_PARAMETER; + goto unlock_and_exit; + } + + status = acpi_ev_mask_gpe(gpe_event_info, is_masked); + +unlock_and_exit: + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_mask_gpe) + +/******************************************************************************* + * * FUNCTION: acpi_mark_gpe_for_wake * * PARAMETERS: gpe_device - Parent GPE Device.
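A hedged usage sketch for the new acpi_mask_gpe() export above (the GPE number is illustrative): unlike the reference-counted enable/disable paths, masking is unconditional, which is what a host needs to quiesce a flooding GPE and later restore it:

    acpi_status status;

    status = acpi_mask_gpe(NULL, 0x4e, TRUE);    /* silence GPE 0x4e */
    ...
    status = acpi_mask_gpe(NULL, 0x4e, FALSE);   /* unmask; re-enabled only if
                                                    still referenced and not
                                                    disabled for dispatch */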
NULL for GPE0/GPE1 diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c index 2423fe03e879..5429c2a6bc41 100644 --- a/drivers/acpi/acpica/exconcat.c +++ b/drivers/acpi/acpica/exconcat.c @@ -156,7 +156,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, status = acpi_ex_convert_to_integer(local_operand1, &temp_operand1, - 16); + ACPI_STRTOUL_BASE16); break; case ACPI_TYPE_BUFFER: diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index a1d177d58254..718428ba0b89 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -55,9 +55,7 @@ ACPI_MODULE_NAME("exconfig") /* Local prototypes */ static acpi_status -acpi_ex_add_table(u32 table_index, - struct acpi_namespace_node *parent_node, - union acpi_operand_object **ddb_handle); +acpi_ex_add_table(u32 table_index, union acpi_operand_object **ddb_handle); static acpi_status acpi_ex_region_read(union acpi_operand_object *obj_desc, @@ -79,13 +77,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, ******************************************************************************/ static acpi_status -acpi_ex_add_table(u32 table_index, - struct acpi_namespace_node *parent_node, - union acpi_operand_object **ddb_handle) +acpi_ex_add_table(u32 table_index, union acpi_operand_object **ddb_handle) { union acpi_operand_object *obj_desc; - acpi_status status; - acpi_owner_id owner_id; ACPI_FUNCTION_TRACE(ex_add_table); @@ -100,39 +94,8 @@ acpi_ex_add_table(u32 table_index, obj_desc->common.flags |= AOPOBJ_DATA_VALID; obj_desc->reference.class = ACPI_REFCLASS_TABLE; - *ddb_handle = obj_desc; - - /* Install the new table into the local data structures */ - obj_desc->reference.value = table_index; - - /* Add the table to the namespace */ - - status = acpi_ns_load_table(table_index, parent_node); - if (ACPI_FAILURE(status)) { - acpi_ut_remove_reference(obj_desc); - *ddb_handle = NULL; - return_ACPI_STATUS(status); - } - - /* Execute any module-level code that was found in the table */ - - acpi_ex_exit_interpreter(); - if (acpi_gbl_group_module_level_code) { - acpi_ns_exec_module_code_list(); - } - acpi_ex_enter_interpreter(); - - /* - * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is - * responsible for discovering any new wake GPEs by running _PRW methods - * that may have been loaded by this table. - */ - status = acpi_tb_get_owner_id(table_index, &owner_id); - if (ACPI_SUCCESS(status)) { - acpi_ev_update_gpes(owner_id); - } - + *ddb_handle = obj_desc; return_ACPI_STATUS(AE_OK); } @@ -159,16 +122,17 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, struct acpi_namespace_node *start_node; struct acpi_namespace_node *parameter_node = NULL; union acpi_operand_object *ddb_handle; - struct acpi_table_header *table; u32 table_index; ACPI_FUNCTION_TRACE(ex_load_table_op); /* Find the ACPI table in the RSDT/XSDT */ + acpi_ex_exit_interpreter(); status = acpi_tb_find_table(operand[0]->string.pointer, operand[1]->string.pointer, operand[2]->string.pointer, &table_index); + acpi_ex_enter_interpreter(); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { return_ACPI_STATUS(status); @@ -197,9 +161,10 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, * Find the node referenced by the root_path_string. This is the * location within the namespace where the table will be loaded. 
*/ - status = - acpi_ns_get_node(start_node, operand[3]->string.pointer, - ACPI_NS_SEARCH_PARENT, &parent_node); + status = acpi_ns_get_node_unlocked(start_node, + operand[3]->string.pointer, + ACPI_NS_SEARCH_PARENT, + &parent_node); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -219,9 +184,10 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, /* Find the node referenced by the parameter_path_string */ - status = - acpi_ns_get_node(start_node, operand[4]->string.pointer, - ACPI_NS_SEARCH_PARENT, ¶meter_node); + status = acpi_ns_get_node_unlocked(start_node, + operand[4]->string.pointer, + ACPI_NS_SEARCH_PARENT, + ¶meter_node); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -229,7 +195,15 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, /* Load the table into the namespace */ - status = acpi_ex_add_table(table_index, parent_node, &ddb_handle); + ACPI_INFO(("Dynamic OEM Table Load:")); + acpi_ex_exit_interpreter(); + status = acpi_tb_load_table(table_index, parent_node); + acpi_ex_enter_interpreter(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_ex_add_table(table_index, &ddb_handle); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -252,19 +226,6 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, } } - status = acpi_get_table_by_index(table_index, &table); - if (ACPI_SUCCESS(status)) { - ACPI_INFO(("Dynamic OEM Table Load:")); - acpi_tb_print_table_header(0, table); - } - - /* Invoke table handler if present */ - - if (acpi_gbl_table_handler) { - (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table, - acpi_gbl_table_handler_context); - } - *return_desc = ddb_handle; return_ACPI_STATUS(status); } @@ -475,13 +436,12 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, /* Install the new table into the local data structures */ ACPI_INFO(("Dynamic OEM Table Load:")); - (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); - - status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table), - ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL, - TRUE, TRUE, &table_index); - - (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); + acpi_ex_exit_interpreter(); + status = + acpi_tb_install_and_load_table(table, ACPI_PTR_TO_PHYSADDR(table), + ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL, + TRUE, &table_index); + acpi_ex_enter_interpreter(); if (ACPI_FAILURE(status)) { /* Delete allocated table buffer */ @@ -491,25 +451,13 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, } /* - * Note: Now table is "INSTALLED", it must be validated before - * loading. - */ - status = - acpi_tb_validate_table(&acpi_gbl_root_table_list. - tables[table_index]); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - - /* * Add the table to the namespace. * * Note: Load the table objects relative to the root of the namespace. * This appears to go against the ACPI specification, but we do it for * compatibility with other ACPI implementations. 
*/ - status = - acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle); + status = acpi_ex_add_table(table_index, &ddb_handle); if (ACPI_FAILURE(status)) { /* On error, table_ptr was deallocated above */ @@ -532,14 +480,6 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, /* Remove the reference by added by acpi_ex_store above */ acpi_ut_remove_reference(ddb_handle); - - /* Invoke table handler if present */ - - if (acpi_gbl_table_handler) { - (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table, - acpi_gbl_table_handler_context); - } - return_ACPI_STATUS(status); } @@ -592,10 +532,17 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) table_index = table_desc->reference.value; + /* + * Release the interpreter lock so that the table lock won't have + * strict order requirement against it. + */ + acpi_ex_exit_interpreter(); + /* Ensure the table is still loaded */ if (!acpi_tb_is_table_loaded(table_index)) { - return_ACPI_STATUS(AE_NOT_EXIST); + status = AE_NOT_EXIST; + goto lock_and_exit; } /* Invoke table handler if present */ @@ -613,16 +560,24 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) status = acpi_tb_delete_namespace_by_owner(table_index); if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + goto lock_and_exit; } (void)acpi_tb_release_owner_id(table_index); acpi_tb_set_table_loaded_flag(table_index, FALSE); +lock_and_exit: + + /* Re-acquire the interpreter lock */ + + acpi_ex_enter_interpreter(); + /* * Invalidate the handle. We do this because the handle may be stored * in a named object and may not be actually deleted until much later. */ - ddb_handle->common.flags &= ~AOPOBJ_DATA_VALID; - return_ACPI_STATUS(AE_OK); + if (ACPI_SUCCESS(status)) { + ddb_handle->common.flags &= ~AOPOBJ_DATA_VALID; + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index b7e9b3d803e1..588ad1409dbe 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -124,9 +124,9 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc, * of ACPI 3.0) is that the to_integer() operator allows both decimal * and hexadecimal strings (hex prefixed with "0x"). 
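The acpi_ex_unload_table rework above shows a pattern repeated throughout this series: release the interpreter lock before an operation that must take the table lock, funnel every failure through a single lock_and_exit label that re-acquires it, and only invalidate the handle once the final status is known. A compressed sketch of that shape using POSIX mutexes; the names and stub helpers are illustrative, not ACPICA code:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t interpreter_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stub table-side helpers, illustrative only */
    static bool table_is_loaded(int table_index) { return table_index >= 0; }
    static int delete_namespace_by_owner(int table_index) { (void)table_index; return 0; }

    static int unload_table(int table_index, unsigned int *handle_flags)
    {
        int status = 0;

        /* Release the interpreter lock so the table lock taken inside the
         * helpers has no strict ordering requirement against it. */
        pthread_mutex_unlock(&interpreter_lock);

        if (!table_is_loaded(table_index)) {
            status = -1;                /* AE_NOT_EXIST analogue */
            goto lock_and_exit;
        }

        status = delete_namespace_by_owner(table_index);

    lock_and_exit:

        /* Re-acquire the interpreter lock before touching interpreter state */
        pthread_mutex_lock(&interpreter_lock);

        /* Invalidate the handle only on success, as the patch does */
        if (status == 0)
            *handle_flags &= ~0x1u;
        return status;
    }

    int main(void)
    {
        unsigned int flags = 0x1;
        int status;

        pthread_mutex_lock(&interpreter_lock);  /* caller holds it, as during AML execution */
        status = unload_table(0, &flags);
        pthread_mutex_unlock(&interpreter_lock);
        return status;
    }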
*/ - status = acpi_ut_strtoul64((char *)pointer, flags, - acpi_gbl_integer_byte_width, - &result); + status = acpi_ut_strtoul64(ACPI_CAST_PTR(char, pointer), + (acpi_gbl_integer_byte_width | + flags), &result); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -632,7 +632,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type, */ status = acpi_ex_convert_to_integer(source_desc, result_desc, - 16); + ACPI_STRTOUL_BASE16); break; case ACPI_TYPE_STRING: diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 4f7e667624b3..37c88b424a02 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -327,8 +327,8 @@ acpi_ex_do_logical_op(u16 opcode, switch (operand0->common.type) { case ACPI_TYPE_INTEGER: - status = - acpi_ex_convert_to_integer(operand1, &local_operand1, 16); + status = acpi_ex_convert_to_integer(operand1, &local_operand1, + ACPI_STRTOUL_BASE16); break; case ACPI_TYPE_STRING: diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 4e17506a7384..007300433cde 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -521,9 +521,10 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) case AML_TO_INTEGER_OP: /* to_integer (Data, Result) */ + /* Perform "explicit" conversion */ + status = - acpi_ex_convert_to_integer(operand[0], &return_desc, - ACPI_ANY_BASE); + acpi_ex_convert_to_integer(operand[0], &return_desc, 0); if (return_desc == operand[0]) { /* No conversion performed, add ref to handle return value */ @@ -890,14 +891,16 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) * Field, so we need to resolve the node to a value. */ status = - acpi_ns_get_node(walk_state->scope_info-> - scope.node, - operand[0]->string.pointer, - ACPI_NS_SEARCH_PARENT, - ACPI_CAST_INDIRECT_PTR - (struct - acpi_namespace_node, - &return_desc)); + acpi_ns_get_node_unlocked(walk_state-> + scope_info->scope. + node, + operand[0]-> + string.pointer, + ACPI_NS_SEARCH_PARENT, + ACPI_CAST_INDIRECT_PTR + (struct + acpi_namespace_node, + &return_desc)); if (ACPI_FAILURE(status)) { goto cleanup; } diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index 27b41fd7542d..f29eba1dc5e9 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -410,12 +410,13 @@ acpi_ex_resolve_operands(u16 opcode, case ARGI_INTEGER: /* - * Need an operand of type ACPI_TYPE_INTEGER, - * But we can implicitly convert from a STRING or BUFFER - * aka - "Implicit Source Operand Conversion" + * Need an operand of type ACPI_TYPE_INTEGER, but we can + * implicitly convert from a STRING or BUFFER. 
+ * + * Known as "Implicit Source Operand Conversion" */ - status = - acpi_ex_convert_to_integer(obj_desc, stack_ptr, 16); + status = acpi_ex_convert_to_integer(obj_desc, stack_ptr, + ACPI_STRTOUL_BASE16); if (ACPI_FAILURE(status)) { if (status == AE_TYPE) { ACPI_ERROR((AE_INFO, diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c index b52e84841c1a..c9ca82610d77 100644 --- a/drivers/acpi/acpica/extrace.c +++ b/drivers/acpi/acpica/extrace.c @@ -201,7 +201,6 @@ acpi_ex_start_trace_method(struct acpi_namespace_node *method_node, union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { - acpi_status status; char *pathname = NULL; u8 enabled = FALSE; @@ -211,11 +210,6 @@ acpi_ex_start_trace_method(struct acpi_namespace_node *method_node, pathname = acpi_ns_get_normalized_pathname(method_node, TRUE); } - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - goto exit; - } - enabled = acpi_ex_interpreter_trace_enabled(pathname); if (enabled && !acpi_gbl_trace_method_object) { acpi_gbl_trace_method_object = obj_desc; @@ -233,9 +227,6 @@ acpi_ex_start_trace_method(struct acpi_namespace_node *method_node, } } - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - -exit: if (enabled) { ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE, obj_desc ? obj_desc->method.aml_start : NULL, @@ -267,7 +258,6 @@ acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node, union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { - acpi_status status; char *pathname = NULL; u8 enabled; @@ -277,26 +267,14 @@ acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node, pathname = acpi_ns_get_normalized_pathname(method_node, TRUE); } - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - goto exit_path; - } - enabled = acpi_ex_interpreter_trace_enabled(NULL); - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - if (enabled) { ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE, obj_desc ? 
obj_desc->method.aml_start : NULL, pathname); } - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - goto exit_path; - } - /* Check whether the tracer should be stopped */ if (acpi_gbl_trace_method_object == obj_desc) { @@ -312,9 +290,6 @@ acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node, acpi_gbl_trace_method_object = NULL; } - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - -exit_path: if (pathname) { ACPI_FREE(pathname); } diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 425f13372e68..a8b857a7e9fb 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -94,6 +94,10 @@ void acpi_ex_enter_interpreter(void) ACPI_ERROR((AE_INFO, "Could not acquire AML Interpreter mutex")); } + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + ACPI_ERROR((AE_INFO, "Could not acquire AML Namespace mutex")); + } return_VOID; } @@ -127,6 +131,10 @@ void acpi_ex_exit_interpreter(void) ACPI_FUNCTION_TRACE(ex_exit_interpreter); + status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + ACPI_ERROR((AE_INFO, "Could not release AML Namespace mutex")); + } status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index bdecd5e76e87..76b0e350f5bb 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -98,7 +98,7 @@ acpi_status acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) { struct acpi_gpe_register_info *gpe_register_info; - acpi_status status; + acpi_status status = AE_OK; u32 enable_mask; u32 register_bit; @@ -148,9 +148,14 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) return (AE_BAD_PARAMETER); } - /* Write the updated enable mask */ + if (!(register_bit & gpe_register_info->mask_for_run)) { - status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); + /* Write the updated enable mask */ + + status = + acpi_hw_write(enable_mask, + &gpe_register_info->enable_address); + } return (status); } @@ -242,6 +247,12 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, local_event_status |= ACPI_EVENT_FLAG_ENABLED; } + /* GPE currently masked? (masked for runtime?) */ + + if (register_bit & gpe_register_info->mask_for_run) { + local_event_status |= ACPI_EVENT_FLAG_MASKED; + } + /* GPE enabled for wake? 
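The exutils.c hunk above is what makes the extrace.c deletions safe: acpi_ex_enter_interpreter() now takes the interpreter mutex and then the namespace mutex, and acpi_ex_exit_interpreter() releases them in exactly the reverse order, so code running under the interpreter already holds the namespace lock and no longer needs per-call-site acquisition. A small pthread sketch of that nested-ordering invariant, with illustrative names:

    #include <pthread.h>

    static pthread_mutex_t interp_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t ns_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Always acquire interpreter -> namespace ... */
    static void enter_interpreter(void)
    {
        pthread_mutex_lock(&interp_mutex);
        pthread_mutex_lock(&ns_mutex);
    }

    /* ... and release namespace -> interpreter, the exact reverse */
    static void exit_interpreter(void)
    {
        pthread_mutex_unlock(&ns_mutex);
        pthread_mutex_unlock(&interp_mutex);
    }

    int main(void)
    {
        enter_interpreter();
        /* interpreter code may now walk the namespace with no extra locking */
        exit_interpreter();
        return 0;
    }

Keeping one fixed acquisition order is what prevents the AB/BA deadlock that the old scattered namespace locking risked.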
*/ if (register_bit & gpe_register_info->enable_for_wake) { @@ -397,6 +408,7 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, u32 i; acpi_status status; struct acpi_gpe_register_info *gpe_register_info; + u8 enable_mask; /* NOTE: assumes that all GPEs are currently disabled */ @@ -410,9 +422,10 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, /* Enable all "runtime" GPEs in this register */ + enable_mask = gpe_register_info->enable_for_run & + ~gpe_register_info->mask_for_run; status = - acpi_hw_gpe_enable_write(gpe_register_info->enable_for_run, - gpe_register_info); + acpi_hw_gpe_enable_write(enable_mask, gpe_register_info); if (ACPI_FAILURE(status)) { return (status); } diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 426a6307eafa..73f98d3fed25 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -108,9 +108,9 @@ acpi_status acpi_ns_root_initialize(void) } status = - acpi_ns_lookup(NULL, (char *)init_val->name, init_val->type, - ACPI_IMODE_LOAD_PASS2, ACPI_NS_NO_UPSEARCH, - NULL, &new_node); + acpi_ns_lookup(NULL, ACPI_CAST_PTR(char, init_val->name), + init_val->type, ACPI_IMODE_LOAD_PASS2, + ACPI_NS_NO_UPSEARCH, NULL, &new_node); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not create predefined name %s", diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index c803bda7915c..2b85dee6d4c0 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -79,7 +79,6 @@ acpi_ns_convert_to_integer(union acpi_operand_object *original_object, /* String-to-Integer conversion */ status = acpi_ut_strtoul64(original_object->string.pointer, - ACPI_ANY_BASE, acpi_gbl_integer_byte_width, &value); if (ACPI_FAILURE(status)) { return (status); diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index ce1f8605d996..84f35dd27033 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -338,7 +338,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, case ACPI_TYPE_STRING: acpi_os_printf("Len %.2X ", obj_desc->string.length); - acpi_ut_print_string(obj_desc->string.pointer, 32); + acpi_ut_print_string(obj_desc->string.pointer, 80); acpi_os_printf("\n"); break; diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index b5e2b0ada0ab..334d3c5ba617 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -46,6 +46,7 @@ #include "acnamesp.h" #include "acdispat.h" #include "actables.h" +#include "acinterp.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsload") @@ -78,20 +79,6 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) ACPI_FUNCTION_TRACE(ns_load_table); - /* - * Parse the table and load the namespace with all named - * objects found within. Control methods are NOT parsed - * at this time. In fact, the control methods cannot be - * parsed until the entire namespace is loaded, because - * if a control method makes a forward reference (call) - * to another control method, we can't continue parsing - * because we don't know how many arguments to parse next! 
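The hwgpe.c changes above introduce mask_for_run: a masked GPE keeps its enable_for_run bookkeeping and is reported as ACPI_EVENT_FLAG_MASKED, but its bit is filtered out of every value actually written to the hardware enable register, exactly as in the enable_mask computation shown. A self-contained sketch of that filtering arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    struct gpe_register {
        uint8_t enable_for_run;     /* GPEs wanted enabled at runtime */
        uint8_t mask_for_run;       /* GPEs temporarily masked by the OS */
    };

    /* Only unmasked runtime bits may reach the hardware enable register */
    static uint8_t effective_enable(const struct gpe_register *reg)
    {
        return reg->enable_for_run & (uint8_t)~reg->mask_for_run;
    }

    int main(void)
    {
        struct gpe_register reg = { .enable_for_run = 0x0F, .mask_for_run = 0x04 };

        /* Bit 2 stays logically enabled but is suppressed in hardware: 0x0B */
        printf("enable write: 0x%02X\n", effective_enable(&reg));
        return 0;
    }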
- */ - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - /* If table already loaded into namespace, just return */ if (acpi_tb_is_table_loaded(table_index)) { @@ -107,6 +94,15 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) goto unlock; } + /* + * Parse the table and load the namespace with all named + * objects found within. Control methods are NOT parsed + * at this time. In fact, the control methods cannot be + * parsed until the entire namespace is loaded, because + * if a control method makes a forward reference (call) + * to another control method, we can't continue parsing + * because we don't know how many arguments to parse next! + */ status = acpi_ns_parse_table(table_index, node); if (ACPI_SUCCESS(status)) { acpi_tb_set_table_loaded_flag(table_index, TRUE); @@ -120,7 +116,6 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) * exist. This target of Scope must already exist in the * namespace, as per the ACPI specification. */ - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list. tables[table_index].owner_id); @@ -129,8 +124,6 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) } unlock: - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -162,7 +155,8 @@ unlock: * other ACPI implementations. Optionally, the execution can be deferred * until later, see acpi_initialize_objects. */ - if (!acpi_gbl_group_module_level_code) { + if (!acpi_gbl_parse_table_as_term_list + && !acpi_gbl_group_module_level_code) { acpi_ns_exec_module_code_list(); } diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index f631a47724f0..4f14e9205bff 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -47,12 +47,103 @@ #include "acparser.h" #include "acdispat.h" #include "actables.h" +#include "acinterp.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsparse") /******************************************************************************* * + * FUNCTION: ns_execute_table + * + * PARAMETERS: table_desc - An ACPI table descriptor for table to parse + * start_node - Where to enter the table into the namespace + * + * RETURN: Status + * + * DESCRIPTION: Load ACPI/AML table by executing the entire table as a + * term_list. 
+ * + ******************************************************************************/ +acpi_status +acpi_ns_execute_table(u32 table_index, struct acpi_namespace_node *start_node) +{ + acpi_status status; + struct acpi_table_header *table; + acpi_owner_id owner_id; + struct acpi_evaluate_info *info = NULL; + u32 aml_length; + u8 *aml_start; + union acpi_operand_object *method_obj = NULL; + + ACPI_FUNCTION_TRACE(ns_execute_table); + + status = acpi_get_table_by_index(table_index, &table); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Table must consist of at least a complete header */ + + if (table->length < sizeof(struct acpi_table_header)) { + return_ACPI_STATUS(AE_BAD_HEADER); + } + + aml_start = (u8 *)table + sizeof(struct acpi_table_header); + aml_length = table->length - sizeof(struct acpi_table_header); + + status = acpi_tb_get_owner_id(table_index, &owner_id); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Create, initialize, and link a new temporary method object */ + + method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); + if (!method_obj) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + /* Allocate the evaluation information block */ + + info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); + if (!info) { + status = AE_NO_MEMORY; + goto cleanup; + } + + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "Create table code block: %p\n", method_obj)); + + method_obj->method.aml_start = aml_start; + method_obj->method.aml_length = aml_length; + method_obj->method.owner_id = owner_id; + method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL; + + info->pass_number = ACPI_IMODE_EXECUTE; + info->node = start_node; + info->obj_desc = method_obj; + info->node_flags = info->node->flags; + info->full_pathname = acpi_ns_get_normalized_pathname(info->node, TRUE); + if (!info->full_pathname) { + status = AE_NO_MEMORY; + goto cleanup; + } + + status = acpi_ps_execute_table(info); + +cleanup: + if (info) { + ACPI_FREE(info->full_pathname); + info->full_pathname = NULL; + } + ACPI_FREE(info); + acpi_ut_remove_reference(method_obj); + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * * FUNCTION: ns_one_complete_parse * * PARAMETERS: pass_number - 1 or 2 @@ -63,6 +154,7 @@ ACPI_MODULE_NAME("nsparse") * DESCRIPTION: Perform one complete parse of an ACPI/AML table. * ******************************************************************************/ + acpi_status acpi_ns_one_complete_parse(u32 pass_number, u32 table_index, @@ -143,7 +235,9 @@ acpi_ns_one_complete_parse(u32 pass_number, ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %u parse\n", pass_number)); + acpi_ex_enter_interpreter(); status = acpi_ps_parse_aml(walk_state); + acpi_ex_exit_interpreter(); cleanup: acpi_ps_delete_parse_tree(parse_root); @@ -170,38 +264,47 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) ACPI_FUNCTION_TRACE(ns_parse_table); - /* - * AML Parse, pass 1 - * - * In this pass, we load most of the namespace. Control methods - * are not parsed until later. A parse tree is not created. Instead, - * each Parser Op subtree is deleted when it is finished. This saves - * a great deal of memory, and allows a small cache of parse objects - * to service the entire parse. The second pass of the parse then - * performs another complete parse of the AML. 
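acpi_ns_execute_table() above implements the new load path: instead of parsing a table twice, it wraps the entire AML byte stream after the header in a temporary method object flagged ACPI_METHOD_MODULE_LEVEL and executes it as one term_list via acpi_ps_execute_table(). A simplified sketch of just the wrapping step, with assumed stand-in types; this shows the shape of the idea, not the ACPICA code:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Assumed stand-in types; ACPICA's real method object is far richer */
    struct table_header { uint32_t length; };

    struct pseudo_method {
        uint8_t *aml_start;     /* first byte after the table header */
        uint32_t aml_length;    /* AML bytes in the table body */
        uint16_t owner_id;      /* owner id for later per-table unload */
    };

    /* Treat the entire table body as one parameterless method body */
    static int wrap_table_as_method(struct table_header *table,
                                    uint16_t owner_id, struct pseudo_method *out)
    {
        if (table->length < sizeof(*table))
            return -1;          /* AE_BAD_HEADER analogue */

        out->aml_start = (uint8_t *)table + sizeof(*table);
        out->aml_length = table->length - (uint32_t)sizeof(*table);
        out->owner_id = owner_id;
        return 0;
    }

    int main(void)
    {
        union { struct table_header hdr; uint8_t raw[64]; } buf;
        struct pseudo_method method;

        memset(buf.raw, 0, sizeof(buf.raw));
        buf.hdr.length = sizeof(buf.raw);
        if (wrap_table_as_method(&buf.hdr, 1, &method) == 0)
            printf("AML body: %u bytes\n", method.aml_length);
        return 0;
    }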
- */ - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); - - status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, - table_index, start_node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } + if (acpi_gbl_parse_table_as_term_list) { + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start load pass\n")); - /* - * AML Parse, pass 2 - * - * In this pass, we resolve forward references and other things - * that could not be completed during the first pass. - * Another complete parse of the AML is performed, but the - * overhead of this is compensated for by the fact that the - * parse objects are all cached. - */ - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n")); - status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, - table_index, start_node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + status = acpi_ns_execute_table(table_index, start_node); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } else { + /* + * AML Parse, pass 1 + * + * In this pass, we load most of the namespace. Control methods + * are not parsed until later. A parse tree is not created. + * Instead, each Parser Op subtree is deleted when it is finished. + * This saves a great deal of memory, and allows a small cache of + * parse objects to service the entire parse. The second pass of + * the parse then performs another complete parse of the AML. + */ + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); + + status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, + table_index, start_node); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* + * AML Parse, pass 2 + * + * In this pass, we resolve forward references and other things + * that could not be completed during the first pass. + * Another complete parse of the AML is performed, but the + * overhead of this is compensated for by the fact that the + * parse objects are all cached. + */ + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n")); + status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, + table_index, start_node); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } } return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index 784a30b76e0f..691814dfed31 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -662,7 +662,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) /******************************************************************************* * - * FUNCTION: acpi_ns_get_node + * FUNCTION: acpi_ns_get_node_unlocked * * PARAMETERS: *pathname - Name to be found, in external (ASL) format. The * \ (backslash) and ^ (carat) prefixes, and the @@ -678,20 +678,21 @@ u32 acpi_ns_opens_scope(acpi_object_type type) * DESCRIPTION: Look up a name relative to a given scope and return the * corresponding Node. NOTE: Scope can be null. 
* - * MUTEX: Locks namespace + * MUTEX: Does not lock namespace * ******************************************************************************/ acpi_status -acpi_ns_get_node(struct acpi_namespace_node *prefix_node, - const char *pathname, - u32 flags, struct acpi_namespace_node **return_node) +acpi_ns_get_node_unlocked(struct acpi_namespace_node *prefix_node, + const char *pathname, + u32 flags, struct acpi_namespace_node **return_node) { union acpi_generic_state scope_info; acpi_status status; char *internal_path; - ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname)); + ACPI_FUNCTION_TRACE_PTR(ns_get_node_unlocked, + ACPI_CAST_PTR(char, pathname)); /* Simplest case is a null pathname */ @@ -718,13 +719,6 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node, return_ACPI_STATUS(status); } - /* Must lock namespace during lookup */ - - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - goto cleanup; - } - /* Setup lookup scope (search starting point) */ scope_info.scope.node = prefix_node; @@ -740,9 +734,49 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node, pathname, acpi_format_exception(status))); } - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); - -cleanup: ACPI_FREE(internal_path); return_ACPI_STATUS(status); } + +/******************************************************************************* + * + * FUNCTION: acpi_ns_get_node + * + * PARAMETERS: *pathname - Name to be found, in external (ASL) format. The + * \ (backslash) and ^ (carat) prefixes, and the + * . (period) to separate segments are supported. + * prefix_node - Root of subtree to be searched, or NS_ALL for the + * root of the name space. If Name is fully + * qualified (first s8 is '\'), the passed value + * of Scope will not be accessed. + * flags - Used to indicate whether to perform upsearch or + * not. + * return_node - Where the Node is returned + * + * DESCRIPTION: Look up a name relative to a given scope and return the + * corresponding Node. NOTE: Scope can be null.
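The split made here is the standard locked-wrapper idiom: acpi_ns_get_node_unlocked() does the lookup and assumes the caller holds the namespace mutex, while acpi_ns_get_node() (continued below) keeps the old self-locking contract by taking the mutex around a call to the unlocked form; interpreter paths, which now hold the mutex via acpi_ex_enter_interpreter(), call the _unlocked variant directly. A generic pthread sketch of the idiom, with illustrative names:

    #include <pthread.h>
    #include <string.h>

    static pthread_mutex_t ns_lock = PTHREAD_MUTEX_INITIALIZER;
    static char current_scope[32];

    /* Core worker: caller must already hold ns_lock */
    static int set_scope_unlocked(const char *pathname)
    {
        if (strlen(pathname) >= sizeof(current_scope))
            return -1;
        strcpy(current_scope, pathname);
        return 0;
    }

    /* Public form keeps the old self-locking contract */
    static int set_scope(const char *pathname)
    {
        int status;

        pthread_mutex_lock(&ns_lock);
        status = set_scope_unlocked(pathname);
        pthread_mutex_unlock(&ns_lock);
        return status;
    }

    int main(void)
    {
        return set_scope("\\_SB.PCI0");
    }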
+ * + * MUTEX: Locks namespace + * + ******************************************************************************/ + +acpi_status +acpi_ns_get_node(struct acpi_namespace_node *prefix_node, + const char *pathname, + u32 flags, struct acpi_namespace_node **return_node) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname)); + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_ns_get_node_unlocked(prefix_node, pathname, + flags, return_node); + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +} diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 0a23897d8adf..1ce26d9f8ff6 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -537,9 +537,11 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) /* Either the method parse or actual execution failed */ + acpi_ex_exit_interpreter(); ACPI_ERROR_METHOD("Method parse/execution failed", walk_state->method_node, NULL, status); + acpi_ex_enter_interpreter(); /* Check for possible multi-thread reentrancy problem */ @@ -571,7 +573,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) * cleanup to do */ if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) == - ACPI_PARSE_EXECUTE) || (ACPI_FAILURE(status))) { + ACPI_PARSE_EXECUTE && + !(walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL)) || + (ACPI_FAILURE(status))) { acpi_ds_terminate_control_method(walk_state-> method_desc, walk_state); diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index cf30cd821f5e..f3c87264bd1b 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -252,6 +252,90 @@ cleanup: /******************************************************************************* * + * FUNCTION: acpi_ps_execute_table + * + * PARAMETERS: info - Method info block, contains: + * node - Node to where the table is entered into the + * namespace + * obj_desc - Pseudo method object describing the AML + * code of the entire table + * pass_number - Parse or execute pass + * + * RETURN: Status + * + * DESCRIPTION: Execute a table + * + ******************************************************************************/ + +acpi_status acpi_ps_execute_table(struct acpi_evaluate_info *info) +{ + acpi_status status; + union acpi_parse_object *op = NULL; + struct acpi_walk_state *walk_state = NULL; + + ACPI_FUNCTION_TRACE(ps_execute_table); + + /* Create and init a Root Node */ + + op = acpi_ps_create_scope_op(info->obj_desc->method.aml_start); + if (!op) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Create and initialize a new walk state */ + + walk_state = + acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL, + NULL, NULL); + if (!walk_state) { + status = AE_NO_MEMORY; + goto cleanup; + } + + status = acpi_ds_init_aml_walk(walk_state, op, info->node, + info->obj_desc->method.aml_start, + info->obj_desc->method.aml_length, info, + info->pass_number); + if (ACPI_FAILURE(status)) { + goto cleanup; + } + + if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) { + walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL; + } + + /* Info->Node is the default location to load the table */ + + if (info->node && info->node != acpi_gbl_root_node) { + status = + acpi_ds_scope_stack_push(info->node, ACPI_TYPE_METHOD, + walk_state); + if (ACPI_FAILURE(status)) { + goto cleanup; + } + } + + /* + * Parse the AML,
walk_state will be deleted by parse_aml */ + acpi_ex_enter_interpreter(); + status = acpi_ps_parse_aml(walk_state); + acpi_ex_exit_interpreter(); + walk_state = NULL; + +cleanup: + if (walk_state) { + acpi_ds_delete_walk_state(walk_state); + } + if (op) { + acpi_ps_delete_parse_tree(op); + } + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * * FUNCTION: acpi_ps_update_parameter_list * * PARAMETERS: info - See struct acpi_evaluate_info diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index 1388a19e5db8..d9ca8c2aa2d3 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -45,6 +45,7 @@ #include "accommon.h" #include "acnamesp.h" #include "actables.h" +#include "acevents.h" #define _COMPONENT ACPI_TABLES ACPI_MODULE_NAME("tbdata") @@ -613,17 +614,12 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index) * lock may block, and also since the execution of a namespace walk * must be allowed to use the interpreter. */ - (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); status = acpi_ut_acquire_write_lock(&acpi_gbl_namespace_rw_lock); - - acpi_ns_delete_namespace_by_owner(owner_id); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } - + acpi_ns_delete_namespace_by_owner(owner_id); acpi_ut_release_write_lock(&acpi_gbl_namespace_rw_lock); - - status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); return_ACPI_STATUS(status); } @@ -771,3 +767,142 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded) (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); } + +/******************************************************************************* + * + * FUNCTION: acpi_tb_load_table + * + * PARAMETERS: table_index - Table index + * parent_node - Parent node into which the table is loaded + * + * RETURN: Status + * + * DESCRIPTION: Load an ACPI table + * + ******************************************************************************/ + +acpi_status +acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node) +{ + struct acpi_table_header *table; + acpi_status status; + acpi_owner_id owner_id; + + ACPI_FUNCTION_TRACE(tb_load_table); + + /* + * Note: Now table is "INSTALLED", it must be validated before + * using. + */ + status = acpi_get_table_by_index(table_index, &table); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_ns_load_table(table_index, parent_node); + + /* Execute any module-level code that was found in the table */ + + if (!acpi_gbl_parse_table_as_term_list + && acpi_gbl_group_module_level_code) { + acpi_ns_exec_module_code_list(); + } + + /* + * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is + * responsible for discovering any new wake GPEs by running _PRW methods + * that may have been loaded by this table.
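With acpi_tb_load_table() above, the handler invocation moves out of the interpreter and fires once per successful load; the tbinstal.c hunk further below additionally fires ACPI_TABLE_EVENT_INSTALL at install time. From the host's side this is all visible through the global table handler. A minimal registration sketch against ACPICA's public API; it builds only with the ACPICA headers, and the handler body is illustrative:

    #include "acpi.h"           /* ACPICA public headers */

    /*
     * With this series, a dynamic load fires ACPI_TABLE_EVENT_LOAD from
     * acpi_tb_load_table(), and installation separately fires
     * ACPI_TABLE_EVENT_INSTALL.
     */
    static acpi_status my_table_handler(u32 event, void *table, void *context)
    {
        struct acpi_table_header *header = table;

        if (event == ACPI_TABLE_EVENT_LOAD) {
            ACPI_INFO(("table [%4.4s] loaded", header->signature));
        }
        return AE_OK;
    }

    acpi_status register_my_table_handler(void)
    {
        return acpi_install_table_handler(my_table_handler, NULL);
    }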
+ */ + status = acpi_tb_get_owner_id(table_index, &owner_id); + if (ACPI_SUCCESS(status)) { + acpi_ev_update_gpes(owner_id); + } + + /* Invoke table handler if present */ + + if (acpi_gbl_table_handler) { + (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table, + acpi_gbl_table_handler_context); + } + + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_tb_install_and_load_table + * + * PARAMETERS: table - Pointer to the table + * address - Physical address of the table + * flags - Allocation flags of the table + * table_index - Where table index is returned + * + * RETURN: Status + * + * DESCRIPTION: Install and load an ACPI table + * + ******************************************************************************/ + +acpi_status +acpi_tb_install_and_load_table(struct acpi_table_header *table, + acpi_physical_address address, + u8 flags, u8 override, u32 *table_index) +{ + acpi_status status; + u32 i; + acpi_owner_id owner_id; + + ACPI_FUNCTION_TRACE(acpi_load_table); + + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); + + /* Install the table and load it into the namespace */ + + status = acpi_tb_install_standard_table(address, flags, TRUE, + override, &i); + if (ACPI_FAILURE(status)) { + goto unlock_and_exit; + } + + /* + * Note: Now table is "INSTALLED", it must be validated before + * using. + */ + status = acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]); + if (ACPI_FAILURE(status)) { + goto unlock_and_exit; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); + status = acpi_ns_load_table(i, acpi_gbl_root_node); + + /* Execute any module-level code that was found in the table */ + + if (!acpi_gbl_parse_table_as_term_list + && acpi_gbl_group_module_level_code) { + acpi_ns_exec_module_code_list(); + } + + /* + * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is + * responsible for discovering any new wake GPEs by running _PRW methods + * that may have been loaded by this table. + */ + status = acpi_tb_get_owner_id(i, &owner_id); + if (ACPI_SUCCESS(status)) { + acpi_ev_update_gpes(owner_id); + } + + /* Invoke table handler if present */ + + if (acpi_gbl_table_handler) { + (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table, + acpi_gbl_table_handler_context); + } + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); + +unlock_and_exit: + *table_index = i; + (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); + return_ACPI_STATUS(status); +} diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 620806965243..046c4d0394ee 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -344,23 +344,27 @@ void acpi_tb_parse_fadt(void) /* Obtain the DSDT and FACS tables via their addresses within the FADT */ - acpi_tb_install_fixed_table((acpi_physical_address)acpi_gbl_FADT.Xdsdt, - ACPI_SIG_DSDT, &acpi_gbl_dsdt_index); + acpi_tb_install_standard_table((acpi_physical_address)acpi_gbl_FADT. 
+ Xdsdt, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + FALSE, TRUE, &acpi_gbl_dsdt_index); /* If Hardware Reduced flag is set, there is no FACS */ if (!acpi_gbl_reduced_hardware) { if (acpi_gbl_FADT.facs) { - acpi_tb_install_fixed_table((acpi_physical_address) - acpi_gbl_FADT.facs, - ACPI_SIG_FACS, - &acpi_gbl_facs_index); + acpi_tb_install_standard_table((acpi_physical_address) + acpi_gbl_FADT.facs, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + FALSE, TRUE, + &acpi_gbl_facs_index); } if (acpi_gbl_FADT.Xfacs) { - acpi_tb_install_fixed_table((acpi_physical_address) - acpi_gbl_FADT.Xfacs, - ACPI_SIG_FACS, - &acpi_gbl_xfacs_index); + acpi_tb_install_standard_table((acpi_physical_address) + acpi_gbl_FADT.Xfacs, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + FALSE, TRUE, + &acpi_gbl_xfacs_index); } } } @@ -476,17 +480,19 @@ static void acpi_tb_convert_fadt(void) u32 i; /* - * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which + * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which * should be zero are indeed zero. This will workaround BIOSs that * inadvertently place values in these fields. * * The ACPI 1.0 reserved fields that will be zeroed are the bytes located * at offset 45, 55, 95, and the word located at offset 109, 110. * - * Note: The FADT revision value is unreliable. Only the length can be - * trusted. + * Note: The FADT revision value is unreliable because of BIOS errors. + * The table length is instead used as the final word on the version. + * + * Note: FADT revision 3 is the ACPI 2.0 version of the FADT. */ - if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) { + if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) { acpi_gbl_FADT.preferred_profile = 0; acpi_gbl_FADT.pstate_control = 0; acpi_gbl_FADT.cst_control = 0; @@ -552,78 +558,74 @@ static void acpi_tb_convert_fadt(void) * * Address32 zero, Address64 [don't care] - Use Address64 * + * No override: if acpi_gbl_use32_bit_fadt_addresses is FALSE, and: * Address32 non-zero, Address64 zero - Copy/use Address32 * Address32 non-zero == Address64 non-zero - Use Address64 * Address32 non-zero != Address64 non-zero - Warning, use Address64 * * Override: if acpi_gbl_use32_bit_fadt_addresses is TRUE, and: + * Address32 non-zero, Address64 zero - Copy/use Address32 + * Address32 non-zero == Address64 non-zero - Copy/use Address32 * Address32 non-zero != Address64 non-zero - Warning, copy/use Address32 * * Note: space_id is always I/O for 32-bit legacy address fields */ if (address32) { - if (!address64->address) { + if (address64->address) { + if (address64->address != (u64)address32) { + + /* Address mismatch */ + + ACPI_BIOS_WARNING((AE_INFO, + "32/64X address mismatch in FADT/%s: " + "0x%8.8X/0x%8.8X%8.8X, using %u-bit address", + name, address32, + ACPI_FORMAT_UINT64 + (address64->address), + acpi_gbl_use32_bit_fadt_addresses + ? 32 : 64)); + } - /* 64-bit address is zero, use 32-bit address */ + /* + * For each extended field, check for length mismatch + * between the legacy length field and the corresponding + * 64-bit X length field. + * Note: If the legacy length field is > 0xFF bits, ignore + * this check. (GPE registers can be larger than the + * 64-bit GAS structure can accomodate, 0xFF bits). 
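The restructured block above reduces the 32/64-bit FADT address cases to one rule: warn on any mismatch, then re-initialize the 64-bit field from the 32-bit one whenever the 64-bit field is zero or acpi_gbl_use32_bit_fadt_addresses requests the override. A standalone sketch of that selection, with the warnings and bit-width fixups simplified away:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Decide which FADT address the hardware access code will use */
    static uint64_t select_fadt_address(uint32_t addr32, uint64_t addr64,
                                        bool use32_bit_override)
    {
        if (addr32 && addr64 && addr64 != (uint64_t)addr32)
            fprintf(stderr, "32/64X address mismatch\n");

        /* Copy/use Address32 when Address64 is zero or the override is set */
        if (addr32 && (!addr64 || use32_bit_override))
            return (uint64_t)addr32;

        return addr64;
    }

    int main(void)
    {
        printf("0x%llX\n", (unsigned long long)
               select_fadt_address(0x1000, 0x2000, true));      /* -> 0x1000 */
        return 0;
    }

Always funneling the result through the 64-bit field, rather than branching at every register access, is what lets the register access code stay oblivious to which BIOS field it came from.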
+ */ + if ((ACPI_MUL_8(length) <= ACPI_UINT8_MAX) && + (address64->bit_width != + ACPI_MUL_8(length))) { + ACPI_BIOS_WARNING((AE_INFO, + "32/64X length mismatch in FADT/%s: %u/%u", + name, + ACPI_MUL_8(length), + address64-> + bit_width)); + } + } + /* + * Hardware register access code always uses the 64-bit fields. + * So if the 64-bit field is zero or is to be overridden, + * initialize it with the 32-bit fields. + * Note that when the 32-bit address favor is specified, the + * 64-bit fields are always re-initialized so that + * access_size/bit_width/bit_offset fields can be correctly + * configured to the values to trigger a 32-bit compatible + * access mode in the hardware register access code. + */ + if (!address64->address + || acpi_gbl_use32_bit_fadt_addresses) { acpi_tb_init_generic_address(address64, ACPI_ADR_SPACE_SYSTEM_IO, - *ACPI_ADD_PTR(u8, - &acpi_gbl_FADT, - fadt_info_table - [i]. - length), + length, (u64)address32, name, flags); - } else if (address64->address != (u64)address32) { - - /* Address mismatch */ - - ACPI_BIOS_WARNING((AE_INFO, - "32/64X address mismatch in FADT/%s: " - "0x%8.8X/0x%8.8X%8.8X, using %u-bit address", - name, address32, - ACPI_FORMAT_UINT64 - (address64->address), - acpi_gbl_use32_bit_fadt_addresses - ? 32 : 64)); - - if (acpi_gbl_use32_bit_fadt_addresses) { - - /* 32-bit address override */ - - acpi_tb_init_generic_address(address64, - ACPI_ADR_SPACE_SYSTEM_IO, - *ACPI_ADD_PTR - (u8, - &acpi_gbl_FADT, - fadt_info_table - [i]. - length), - (u64) - address32, - name, - flags); - } } } - /* - * For each extended field, check for length mismatch between the - * legacy length field and the corresponding 64-bit X length field. - * Note: If the legacy length field is > 0xFF bits, ignore this - * check. (GPE registers can be larger than the 64-bit GAS structure - * can accomodate, 0xFF bits). - */ - if (address64->address && - (ACPI_MUL_8(length) <= ACPI_UINT8_MAX) && - (address64->bit_width != ACPI_MUL_8(length))) { - ACPI_BIOS_WARNING((AE_INFO, - "32/64X length mismatch in FADT/%s: %u/%u", - name, ACPI_MUL_8(length), - address64->bit_width)); - } - if (fadt_info_table[i].flags & ACPI_FADT_REQUIRED) { /* * Field is required (Pm1a_event, Pm1a_control). diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index e348d616e60f..f6b9b4e4298b 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -68,7 +68,7 @@ acpi_status acpi_tb_find_table(char *signature, char *oem_id, char *oem_table_id, u32 *table_index) { - acpi_status status; + acpi_status status = AE_OK; struct acpi_table_header header; u32 i; @@ -96,6 +96,7 @@ acpi_tb_find_table(char *signature, /* Search for the table */ + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { if (memcmp(&(acpi_gbl_root_table_list.tables[i].signature), header.signature, ACPI_NAME_SIZE)) { @@ -115,7 +116,7 @@ acpi_tb_find_table(char *signature, acpi_tb_validate_table(&acpi_gbl_root_table_list. 
tables[i]); if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + goto unlock_and_exit; } if (!acpi_gbl_root_table_list.tables[i].pointer) { @@ -144,9 +145,12 @@ acpi_tb_find_table(char *signature, ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Found table [%4.4s]\n", header.signature)); - return_ACPI_STATUS(AE_OK); + goto unlock_and_exit; } } + status = AE_NOT_FOUND; - return_ACPI_STATUS(AE_NOT_FOUND); +unlock_and_exit: + (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 8b13052128fc..5fdf251a9f97 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -157,68 +157,6 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc, /******************************************************************************* * - * FUNCTION: acpi_tb_install_fixed_table - * - * PARAMETERS: address - Physical address of DSDT or FACS - * signature - Table signature, NULL if no need to - * match - * table_index - Where the table index is returned - * - * RETURN: Status - * - * DESCRIPTION: Install a fixed ACPI table (DSDT/FACS) into the global data - * structure. - * - ******************************************************************************/ - -acpi_status -acpi_tb_install_fixed_table(acpi_physical_address address, - char *signature, u32 *table_index) -{ - struct acpi_table_desc new_table_desc; - acpi_status status; - - ACPI_FUNCTION_TRACE(tb_install_fixed_table); - - if (!address) { - ACPI_ERROR((AE_INFO, - "Null physical address for ACPI table [%s]", - signature)); - return (AE_NO_MEMORY); - } - - /* Fill a table descriptor for validation */ - - status = acpi_tb_acquire_temp_table(&new_table_desc, address, - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL); - if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, - "Could not acquire table length at %8.8X%8.8X", - ACPI_FORMAT_UINT64(address))); - return_ACPI_STATUS(status); - } - - /* Validate and verify a table before installation */ - - status = acpi_tb_verify_temp_table(&new_table_desc, signature); - if (ACPI_FAILURE(status)) { - goto release_and_exit; - } - - /* Add the table to the global root table list */ - - acpi_tb_install_table_with_override(&new_table_desc, TRUE, table_index); - -release_and_exit: - - /* Release the temporary table descriptor */ - - acpi_tb_release_temp_table(&new_table_desc); - return_ACPI_STATUS(status); -} - -/******************************************************************************* - * * FUNCTION: acpi_tb_install_standard_table * * PARAMETERS: address - Address of the table (might be a virtual @@ -230,8 +168,7 @@ release_and_exit: * * RETURN: Status * - * DESCRIPTION: This function is called to install an ACPI table that is - * neither DSDT nor FACS (a "standard" table.) + * DESCRIPTION: This function is called to verify and install an ACPI table. * When this function is called by "Load" or "LoadTable" opcodes, * or by acpi_load_table() API, the "Reload" parameter is set. 
* After successfully returning from this function, table is @@ -364,6 +301,14 @@ acpi_tb_install_standard_table(acpi_physical_address address, acpi_tb_install_table_with_override(&new_table_desc, override, table_index); + /* Invoke table handler if present */ + + if (acpi_gbl_table_handler) { + (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL, + new_table_desc.pointer, + acpi_gbl_table_handler_context); + } + release_and_exit: /* Release the temporary table descriptor */ diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index e28553914bf5..51eb07cf9898 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -252,7 +252,8 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size) * ******************************************************************************/ -acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) +acpi_status ACPI_INIT_FUNCTION +acpi_tb_parse_root_table(acpi_physical_address rsdp_address) { struct acpi_table_rsdp *rsdp; u32 table_entry_size; diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 3ecec937e8c9..4ab6b9cd0aec 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -98,7 +98,7 @@ acpi_status acpi_allocate_root_table(u32 initial_table_count) * ******************************************************************************/ -acpi_status __init +acpi_status ACPI_INIT_FUNCTION acpi_initialize_tables(struct acpi_table_desc *initial_table_array, u32 initial_table_count, u8 allow_resize) { @@ -164,7 +164,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables) * kernel. * ******************************************************************************/ -acpi_status __init acpi_reallocate_root_table(void) +acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void) { acpi_status status; diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index ac71abcd32bb..5569f637f669 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -63,7 +63,7 @@ ACPI_MODULE_NAME("tbxfload") * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT * ******************************************************************************/ -acpi_status __init acpi_load_tables(void) +acpi_status ACPI_INIT_FUNCTION acpi_load_tables(void) { acpi_status status; @@ -103,7 +103,8 @@ acpi_status __init acpi_load_tables(void) "While loading namespace from ACPI tables")); } - if (!acpi_gbl_group_module_level_code) { + if (acpi_gbl_parse_table_as_term_list + || !acpi_gbl_group_module_level_code) { /* * Initialize the objects that remain uninitialized. This * runs the executable AML that may be part of the @@ -188,11 +189,11 @@ acpi_status acpi_tb_load_namespace(void) memcpy(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT, sizeof(struct acpi_table_header)); - (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); - /* Load and parse tables */ + (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); status = acpi_ns_load_table(acpi_gbl_dsdt_index, acpi_gbl_root_node); + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "[DSDT] table load failed")); tables_failed++; @@ -202,7 +203,6 @@ acpi_status acpi_tb_load_namespace(void) /* Load any SSDT or PSDT tables.
Note: Loop leaves tables locked */ - (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { table = &acpi_gbl_root_table_list.tables[i]; @@ -220,6 +220,7 @@ acpi_status acpi_tb_load_namespace(void) (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); status = acpi_ns_load_table(i, acpi_gbl_root_node); + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "(%4.4s:%8.8s) while loading table", @@ -235,8 +236,6 @@ acpi_status acpi_tb_load_namespace(void) } else { tables_loaded++; } - - (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); } if (!tables_failed) { @@ -272,7 +271,7 @@ unlock_and_exit: * ******************************************************************************/ -acpi_status __init +acpi_status ACPI_INIT_FUNCTION acpi_install_table(acpi_physical_address address, u8 physical) { acpi_status status; @@ -324,49 +323,13 @@ acpi_status acpi_load_table(struct acpi_table_header *table) return_ACPI_STATUS(AE_BAD_PARAMETER); } - /* Must acquire the interpreter lock during this operation */ - - status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - /* Install the table and load it into the namespace */ ACPI_INFO(("Host-directed Dynamic ACPI Table Load:")); - (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); - - status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table), - ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, - TRUE, FALSE, &table_index); - - (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); - if (ACPI_FAILURE(status)) { - goto unlock_and_exit; - } - - /* - * Note: Now table is "INSTALLED", it must be validated before - * using. - */ status = - acpi_tb_validate_table(&acpi_gbl_root_table_list. 
- tables[table_index]); - if (ACPI_FAILURE(status)) { - goto unlock_and_exit; - } - - status = acpi_ns_load_table(table_index, acpi_gbl_root_node); - - /* Invoke table handler if present */ - - if (acpi_gbl_table_handler) { - (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table, - acpi_gbl_table_handler_context); - } - -unlock_and_exit: - (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); + acpi_tb_install_and_load_table(table, ACPI_PTR_TO_PHYSADDR(table), + ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL, + FALSE, &table_index); return_ACPI_STATUS(status); } @@ -415,9 +378,9 @@ acpi_status acpi_unload_parent_table(acpi_handle object) return_ACPI_STATUS(AE_TYPE); } - /* Must acquire the interpreter lock during this operation */ + /* Must acquire the table lock during this operation */ - status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); + status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -444,8 +407,10 @@ acpi_status acpi_unload_parent_table(acpi_handle object) /* Ensure the table is actually loaded */ + (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); if (!acpi_tb_is_table_loaded(i)) { status = AE_NOT_EXIST; + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); break; } @@ -471,10 +436,11 @@ acpi_status acpi_unload_parent_table(acpi_handle object) status = acpi_tb_release_owner_id(i); acpi_tb_set_table_loaded_flag(i, FALSE); + (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); break; } - (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); + (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index adb6cfc54661..0adb1c78d863 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -142,7 +142,8 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) * ******************************************************************************/ -acpi_status __init acpi_find_root_pointer(acpi_physical_address *table_address) +acpi_status ACPI_INIT_FUNCTION +acpi_find_root_pointer(acpi_physical_address *table_address) { u8 *table_ptr; u8 *mem_rover; @@ -244,6 +245,8 @@ acpi_status __init acpi_find_root_pointer(acpi_physical_address *table_address) return_ACPI_STATUS(AE_NOT_FOUND); } +ACPI_EXPORT_SYMBOL_INIT(acpi_find_root_pointer) + /******************************************************************************* * * FUNCTION: acpi_tb_scan_memory_for_rsdp diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index c986ec66a118..433d822798b6 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -77,7 +77,6 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id, u32 length, struct acpi_namespace_node *region_node) { struct acpi_address_range *range_info; - acpi_status status; ACPI_FUNCTION_TRACE(ut_add_address_range); @@ -97,12 +96,6 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id, range_info->end_address = (address + length - 1); range_info->region_node = region_node; - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - ACPI_FREE(range_info); - return_ACPI_STATUS(status); - } - range_info->next = acpi_gbl_address_range_list[space_id]; acpi_gbl_address_range_list[space_id] = range_info; @@ -112,7 +105,6 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id, ACPI_FORMAT_UINT64(address), ACPI_FORMAT_UINT64(range_info->end_address))); - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(AE_OK); } diff --git 
a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c index bd31faf5da7c..ff2981275b9a 100644 --- a/drivers/acpi/acpica/utbuffer.c +++ b/drivers/acpi/acpica/utbuffer.c @@ -239,8 +239,7 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file, u8 buf_char; if (!buffer) { - acpi_ut_file_printf(file, - "Null Buffer Pointer in DumpBuffer!\n"); + fprintf(file, "Null Buffer Pointer in DumpBuffer!\n"); return; } @@ -254,7 +253,7 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file, /* Print current offset */ - acpi_ut_file_printf(file, "%6.4X: ", (base_offset + i)); + fprintf(file, "%6.4X: ", (base_offset + i)); /* Print 16 hex chars */ @@ -263,8 +262,7 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file, /* Dump fill spaces */ - acpi_ut_file_printf(file, "%*s", - ((display * 2) + 1), " "); + fprintf(file, "%*s", ((display * 2) + 1), " "); j += display; continue; } @@ -273,34 +271,34 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file, case DB_BYTE_DISPLAY: default: /* Default is BYTE display */ - acpi_ut_file_printf(file, "%02X ", - buffer[(acpi_size)i + j]); + fprintf(file, "%02X ", + buffer[(acpi_size)i + j]); break; case DB_WORD_DISPLAY: ACPI_MOVE_16_TO_32(&temp32, &buffer[(acpi_size)i + j]); - acpi_ut_file_printf(file, "%04X ", temp32); + fprintf(file, "%04X ", temp32); break; case DB_DWORD_DISPLAY: ACPI_MOVE_32_TO_32(&temp32, &buffer[(acpi_size)i + j]); - acpi_ut_file_printf(file, "%08X ", temp32); + fprintf(file, "%08X ", temp32); break; case DB_QWORD_DISPLAY: ACPI_MOVE_32_TO_32(&temp32, &buffer[(acpi_size)i + j]); - acpi_ut_file_printf(file, "%08X", temp32); + fprintf(file, "%08X", temp32); ACPI_MOVE_32_TO_32(&temp32, &buffer[(acpi_size)i + j + 4]); - acpi_ut_file_printf(file, "%08X ", temp32); + fprintf(file, "%08X ", temp32); break; } @@ -311,24 +309,24 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file, * Print the ASCII equivalent characters but watch out for the bad * unprintable ones (printable chars are 0x20 through 0x7E) */ - acpi_ut_file_printf(file, " "); + fprintf(file, " "); for (j = 0; j < 16; j++) { if (i + j >= count) { - acpi_ut_file_printf(file, "\n"); + fprintf(file, "\n"); return; } buf_char = buffer[(acpi_size)i + j]; if (isprint(buf_char)) { - acpi_ut_file_printf(file, "%c", buf_char); + fprintf(file, "%c", buf_char); } else { - acpi_ut_file_printf(file, "."); + fprintf(file, "."); } } /* Done with that line. */ - acpi_ut_file_printf(file, "\n"); + fprintf(file, "\n"); i += 16; } diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index 574422205005..044df9b0356e 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -562,6 +562,43 @@ acpi_ut_ptr_exit(u32 line_number, /******************************************************************************* * + * FUNCTION: acpi_ut_str_exit + * + * PARAMETERS: line_number - Caller's line number + * function_name - Caller's procedure name + * module_name - Caller's module name + * component_id - Caller's component ID + * string - String to display + * + * RETURN: None + * + * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is + * set in debug_level. Prints exit value also. 
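The utbuffer.c conversion above drops the acpi_ut_file_printf wrapper in favor of plain stdio fprintf; the dump format itself is unchanged: offset, up to 16 data bytes per row, then a printable-ASCII column (printable being 0x20 through 0x7E). A condensed standalone version of that loop for reference:

    #include <stdio.h>
    #include <ctype.h>
    #include <stddef.h>

    static void dump_buffer(FILE *file, const unsigned char *buffer, size_t count)
    {
        for (size_t i = 0; i < count; i += 16) {
            fprintf(file, "%6.4X: ", (unsigned int)i);  /* current offset */

            /* Hex column: 16 bytes, space-filled past the end */
            for (size_t j = 0; j < 16; j++) {
                if (i + j < count)
                    fprintf(file, "%02X ", buffer[i + j]);
                else
                    fprintf(file, "   ");
            }

            /* ASCII column: replace unprintable bytes with '.' */
            fprintf(file, " ");
            for (size_t j = 0; j < 16 && i + j < count; j++) {
                unsigned char c = buffer[i + j];
                fputc(isprint(c) ? c : '.', file);
            }
            fprintf(file, "\n");
        }
    }

    int main(void)
    {
        dump_buffer(stdout, (const unsigned char *)"ACPI buffer dump demo", 21);
        return 0;
    }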
+ * + ******************************************************************************/ + +void +acpi_ut_str_exit(u32 line_number, + const char *function_name, + const char *module_name, u32 component_id, const char *string) +{ + + /* Check if enabled up-front for performance */ + + if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) { + acpi_debug_print(ACPI_LV_FUNCTIONS, + line_number, function_name, module_name, + component_id, "%s %s\n", + acpi_gbl_function_exit_prefix, string); + } + + if (acpi_gbl_nesting_level) { + acpi_gbl_nesting_level--; + } +} + +/******************************************************************************* + * * FUNCTION: acpi_trace_point * * PARAMETERS: type - Trace event type @@ -591,27 +628,3 @@ acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname) ACPI_EXPORT_SYMBOL(acpi_trace_point) #endif -#ifdef ACPI_APPLICATION -/******************************************************************************* - * - * FUNCTION: acpi_log_error - * - * PARAMETERS: format - Printf format field - * ... - Optional printf arguments - * - * RETURN: None - * - * DESCRIPTION: Print error message to the console, used by applications. - * - ******************************************************************************/ -void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...) -{ - va_list args; - - va_start(args, format); - (void)acpi_ut_file_vprintf(ACPI_FILE_ERR, format, args); - va_end(args); -} - -ACPI_EXPORT_SYMBOL(acpi_log_error) -#endif diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index efd7988e34cb..15728ad8356b 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -253,7 +253,7 @@ const char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc) return_PTR("Invalid object"); } - return_PTR(acpi_ut_get_type_name(obj_desc->common.type)); + return_STR(acpi_ut_get_type_name(obj_desc->common.type)); } /******************************************************************************* diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index 4354fb800fe4..36d2fc789088 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -75,9 +75,41 @@ char acpi_ut_hex_to_ascii_char(u64 integer, u32 position) /******************************************************************************* * + * FUNCTION: acpi_ut_ascii_to_hex_byte + * + * PARAMETERS: two_ascii_chars - Pointer to two ASCII characters + * return_byte - Where converted byte is returned + * + * RETURN: Status and converted hex byte + * + * DESCRIPTION: Perform ascii-to-hex translation, exactly two ASCII characters + * to a single converted byte value. + * + ******************************************************************************/ + +acpi_status acpi_ut_ascii_to_hex_byte(char *two_ascii_chars, u8 *return_byte) +{ + + /* Both ASCII characters must be valid hex digits */ + + if (!isxdigit((int)two_ascii_chars[0]) || + !isxdigit((int)two_ascii_chars[1])) { + return (AE_BAD_HEX_CONSTANT); + } + + *return_byte = + acpi_ut_ascii_char_to_hex(two_ascii_chars[1]) | + (acpi_ut_ascii_char_to_hex(two_ascii_chars[0]) << 4); + + return (AE_OK); +} + +/******************************************************************************* + * * FUNCTION: acpi_ut_ascii_char_to_hex * - * PARAMETERS: hex_char - Hex character in Ascii + * PARAMETERS: hex_char - Hex character in Ascii. 
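acpi_ut_ascii_to_hex_byte() above validates both characters with isxdigit() and then combines two nibbles, high nibble first, using the three-range conversion in acpi_ut_ascii_char_to_hex, also rewritten here with character constants. A standalone equivalent plus a short usage example; this mirrors the logic but is not the ACPICA code itself:

    #include <stdio.h>
    #include <ctype.h>

    /* Mirror of the three-range conversion: the caller has already
     * guaranteed the input is 0-9, A-F, or a-f. */
    static unsigned char nibble(int c)
    {
        if (c <= '9')
            return (unsigned char)(c - '0');
        if (c <= 'F')
            return (unsigned char)(c - 'A' + 10);
        return (unsigned char)(c - 'a' + 10);
    }

    /* Two validated ASCII characters -> one byte, high nibble first */
    static int ascii_to_hex_byte(const char *two, unsigned char *out)
    {
        if (!isxdigit((unsigned char)two[0]) || !isxdigit((unsigned char)two[1]))
            return -1;          /* AE_BAD_HEX_CONSTANT analogue */

        *out = (unsigned char)((nibble(two[0]) << 4) | nibble(two[1]));
        return 0;
    }

    int main(void)
    {
        const char *s = "DEADbeef";
        unsigned char b;

        for (int i = 0; s[i] && s[i + 1]; i += 2) {
            if (ascii_to_hex_byte(&s[i], &b) == 0)
                printf("%02X ", b);
        }
        printf("\n");           /* prints: DE AD BE EF */
        return 0;
    }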
Must be: + * 0-9 or A-F or a-f * * RETURN: The binary value of the ascii/hex character * @@ -88,13 +120,19 @@ char acpi_ut_hex_to_ascii_char(u64 integer, u32 position) u8 acpi_ut_ascii_char_to_hex(int hex_char) { - if (hex_char <= 0x39) { - return ((u8)(hex_char - 0x30)); + /* Values 0-9 */ + + if (hex_char <= '9') { + return ((u8)(hex_char - '0')); } - if (hex_char <= 0x46) { + /* Upper case A-F */ + + if (hex_char <= 'F') { return ((u8)(hex_char - 0x37)); } + /* Lower case a-f */ + return ((u8)(hex_char - 0x57)); } diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index f91f724c487c..1711fdf41709 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -206,7 +206,7 @@ acpi_status acpi_ut_init_globals(void) acpi_gbl_next_owner_id_offset = 0; acpi_gbl_debugger_configuration = DEBUGGER_THREADING; acpi_gbl_osi_mutex = NULL; - acpi_gbl_max_loop_iterations = 0xFFFF; + acpi_gbl_max_loop_iterations = ACPI_MAX_LOOP_COUNT; /* Hardware oriented */ diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c index 3465fe2c5a5c..2514239282c2 100644 --- a/drivers/acpi/acpica/utnonansi.c +++ b/drivers/acpi/acpica/utnonansi.c @@ -48,8 +48,8 @@ ACPI_MODULE_NAME("utnonansi") /* - * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit - * version of strtoul. + * Non-ANSI C library functions - strlwr, strupr, stricmp, and "safe" + * string functions. */ /******************************************************************************* * @@ -200,356 +200,3 @@ acpi_ut_safe_strncat(char *dest, return (FALSE); } #endif - -/******************************************************************************* - * - * FUNCTION: acpi_ut_strtoul64 - * - * PARAMETERS: string - Null terminated string - * base - Radix of the string: 16 or 10 or - * ACPI_ANY_BASE - * max_integer_byte_width - Maximum allowable integer,in bytes: - * 4 or 8 (32 or 64 bits) - * ret_integer - Where the converted integer is - * returned - * - * RETURN: Status and Converted value - * - * DESCRIPTION: Convert a string into an unsigned value. Performs either a - * 32-bit or 64-bit conversion, depending on the input integer - * size (often the current mode of the interpreter). - * - * NOTES: Negative numbers are not supported, as they are not supported - * by ACPI. - * - * acpi_gbl_integer_byte_width should be set to the proper width. - * For the core ACPICA code, this width depends on the DSDT - * version. For iASL, the default byte width is always 8 for the - * parser, but error checking is performed later to flag cases - * where a 64-bit constant is defined in a 32-bit DSDT/SSDT. - * - * Does not support Octal strings, not needed at this time. - * - ******************************************************************************/ - -acpi_status -acpi_ut_strtoul64(char *string, - u32 base, u32 max_integer_byte_width, u64 *ret_integer) -{ - u32 this_digit = 0; - u64 return_value = 0; - u64 quotient; - u64 dividend; - u8 valid_digits = 0; - u8 sign_of0x = 0; - u8 term = 0; - - ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string); - - switch (base) { - case ACPI_ANY_BASE: - case 10: - case 16: - - break; - - default: - - /* Invalid Base */ - - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - - if (!string) { - goto error_exit; - } - - /* Skip over any white space in the buffer */ - - while ((*string) && (isspace((int)*string) || *string == '\t')) { - string++; - } - - if (base == ACPI_ANY_BASE) { - /* - * Base equal to ACPI_ANY_BASE means 'Either decimal or hex'. 
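The constants 0x37 and 0x57 in acpi_ut_ascii_char_to_hex() are simply 'A' - 10 and 'a' - 10, so a single subtraction maps each character range onto 0-15. A tiny compile-and-run check of that arithmetic:

#include <assert.h>

int main(void)
{
	/* 0x37 == 'A' - 10 and 0x57 == 'a' - 10, so subtracting them
	 * maps 'A'..'F' and 'a'..'f' onto 10..15 */
	assert('A' - 0x37 == 10 && 'F' - 0x37 == 15);
	assert('a' - 0x57 == 10 && 'f' - 0x57 == 15);
	assert('0' - '0' == 0 && '9' - '0' == 9);
	return 0;
}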
- * We need to determine if it is decimal or hexadecimal. - */ - if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) { - sign_of0x = 1; - base = 16; - - /* Skip over the leading '0x' */ - string += 2; - } else { - base = 10; - } - } - - /* Any string left? Check that '0x' is not followed by white space. */ - - if (!(*string) || isspace((int)*string) || *string == '\t') { - if (base == ACPI_ANY_BASE) { - goto error_exit; - } else { - goto all_done; - } - } - - /* - * Perform a 32-bit or 64-bit conversion, depending upon the input - * byte width - */ - dividend = (max_integer_byte_width <= ACPI_MAX32_BYTE_WIDTH) ? - ACPI_UINT32_MAX : ACPI_UINT64_MAX; - - /* Main loop: convert the string to a 32- or 64-bit integer */ - - while (*string) { - if (isdigit((int)*string)) { - - /* Convert ASCII 0-9 to Decimal value */ - - this_digit = ((u8)*string) - '0'; - } else if (base == 10) { - - /* Digit is out of range; possible in to_integer case only */ - - term = 1; - } else { - this_digit = (u8)toupper((int)*string); - if (isxdigit((int)this_digit)) { - - /* Convert ASCII Hex char to value */ - - this_digit = this_digit - 'A' + 10; - } else { - term = 1; - } - } - - if (term) { - if (base == ACPI_ANY_BASE) { - goto error_exit; - } else { - break; - } - } else if ((valid_digits == 0) && (this_digit == 0) - && !sign_of0x) { - - /* Skip zeros */ - string++; - continue; - } - - valid_digits++; - - if (sign_of0x && ((valid_digits > 16) || - ((valid_digits > 8) - && (max_integer_byte_width <= - ACPI_MAX32_BYTE_WIDTH)))) { - /* - * This is to_integer operation case. - * No restrictions for string-to-integer conversion, - * see ACPI spec. - */ - goto error_exit; - } - - /* Divide the digit into the correct position */ - - (void)acpi_ut_short_divide((dividend - (u64)this_digit), base, - "ient, NULL); - - if (return_value > quotient) { - if (base == ACPI_ANY_BASE) { - goto error_exit; - } else { - break; - } - } - - return_value *= base; - return_value += this_digit; - string++; - } - - /* All done, normal exit */ - -all_done: - - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n", - ACPI_FORMAT_UINT64(return_value))); - - *ret_integer = return_value; - return_ACPI_STATUS(AE_OK); - -error_exit: - - /* Base was set/validated above (10 or 16) */ - - if (base == 10) { - return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT); - } else { - return_ACPI_STATUS(AE_BAD_HEX_CONSTANT); - } -} - -#ifdef _OBSOLETE_FUNCTIONS -/* Removed: 01/2016 */ - -/******************************************************************************* - * - * FUNCTION: strtoul64 - * - * PARAMETERS: string - Null terminated string - * terminater - Where a pointer to the terminating byte - * is returned - * base - Radix of the string - * - * RETURN: Converted value - * - * DESCRIPTION: Convert a string into an unsigned value. - * - ******************************************************************************/ - -acpi_status strtoul64(char *string, u32 base, u64 *ret_integer) -{ - u32 index; - u32 sign; - u64 return_value = 0; - acpi_status status = AE_OK; - - *ret_integer = 0; - - switch (base) { - case 0: - case 8: - case 10: - case 16: - - break; - - default: - /* - * The specified Base parameter is not in the domain of - * this function: - */ - return (AE_BAD_PARAMETER); - } - - /* Skip over any white space in the buffer: */ - - while (isspace((int)*string) || *string == '\t') { - ++string; - } - - /* - * The buffer may contain an optional plus or minus sign. 
- * If it does, then skip over it but remember what is was: - */ - if (*string == '-') { - sign = ACPI_SIGN_NEGATIVE; - ++string; - } else if (*string == '+') { - ++string; - sign = ACPI_SIGN_POSITIVE; - } else { - sign = ACPI_SIGN_POSITIVE; - } - - /* - * If the input parameter Base is zero, then we need to - * determine if it is octal, decimal, or hexadecimal: - */ - if (base == 0) { - if (*string == '0') { - if (tolower((int)*(++string)) == 'x') { - base = 16; - ++string; - } else { - base = 8; - } - } else { - base = 10; - } - } - - /* - * For octal and hexadecimal bases, skip over the leading - * 0 or 0x, if they are present. - */ - if (base == 8 && *string == '0') { - string++; - } - - if (base == 16 && *string == '0' && tolower((int)*(++string)) == 'x') { - string++; - } - - /* Main loop: convert the string to an unsigned long */ - - while (*string) { - if (isdigit((int)*string)) { - index = ((u8)*string) - '0'; - } else { - index = (u8)toupper((int)*string); - if (isupper((int)index)) { - index = index - 'A' + 10; - } else { - goto error_exit; - } - } - - if (index >= base) { - goto error_exit; - } - - /* Check to see if value is out of range: */ - - if (return_value > ((ACPI_UINT64_MAX - (u64)index) / (u64)base)) { - goto error_exit; - } else { - return_value *= base; - return_value += index; - } - - ++string; - } - - /* If a minus sign was present, then "the conversion is negated": */ - - if (sign == ACPI_SIGN_NEGATIVE) { - return_value = (ACPI_UINT32_MAX - return_value) + 1; - } - - *ret_integer = return_value; - return (status); - -error_exit: - switch (base) { - case 8: - - status = AE_BAD_OCTAL_CONSTANT; - break; - - case 10: - - status = AE_BAD_DECIMAL_CONSTANT; - break; - - case 16: - - status = AE_BAD_HEX_CONSTANT; - break; - - default: - - /* Base validated above */ - - break; - } - - return (status); -} -#endif diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 3f5fed670271..f0484b058c44 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -390,11 +390,22 @@ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name) * PARAMETERS: walk_state - Current walk state * * RETURN: Status + * Integer: TRUE (0) if input string is matched + * FALSE (-1) if string is not matched * * DESCRIPTION: Implementation of the _OSI predefined control method. When * an invocation of _OSI is encountered in the system AML, * control is transferred to this function. * + * (August 2016) + * Note: _OSI is now defined to return "Ones" to indicate a match, for + * compatibility with other ACPI implementations. On a 32-bit DSDT, Ones + * is 0xFFFFFFFF. On a 64-bit DSDT, Ones is 0xFFFFFFFFFFFFFFFF + * (ACPI_UINT64_MAX). + * + * This function always returns ACPI_UINT64_MAX for TRUE, and later code + * will truncate this to 32 bits if necessary. 
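To make the truncation note above concrete, here is a minimal sketch of the "Ones" semantics (assumed helper name; plain C integer types stand in for the ACPI integer):

#include <stdint.h>

/* Sketch: _OSI's result is all-ones at the interpreter's integer width */
static uint64_t osi_return_value(int matched, unsigned integer_byte_width)
{
	uint64_t ones = UINT64_MAX;     /* 64-bit "Ones" */

	if (!matched)
		return 0;
	if (integer_byte_width == 4)    /* 32-bit DSDT: truncate */
		ones &= UINT32_MAX;
	return ones;
}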
+ * ******************************************************************************/ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) @@ -404,7 +415,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) struct acpi_interface_info *interface_info; acpi_interface_handler interface_handler; acpi_status status; - u32 return_value; + u64 return_value; ACPI_FUNCTION_TRACE(ut_osi_implementation); @@ -444,7 +455,7 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) acpi_gbl_osi_data = interface_info->value; } - return_value = ACPI_UINT32_MAX; + return_value = ACPI_UINT64_MAX; } acpi_os_release_mutex(acpi_gbl_osi_mutex); @@ -456,9 +467,10 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state) */ interface_handler = acpi_gbl_interface_handler; if (interface_handler) { - return_value = - interface_handler(string_desc->string.pointer, - return_value); + if (interface_handler + (string_desc->string.pointer, (u32)return_value)) { + return_value = ACPI_UINT64_MAX; + } } ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO, diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index 770a1775b264..ce18346b6144 100644 --- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c @@ -176,8 +176,6 @@ void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes) ******************************************************************************/ #if (defined ACPI_ASL_COMPILER || defined ACPI_HELP_APP) -#include <stdio.h> -#include <string.h> /* Local prototypes */ diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index dd084cf52502..40eba804d49c 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -336,7 +336,7 @@ static char *acpi_ut_format_number(char *string, /******************************************************************************* * - * FUNCTION: acpi_ut_vsnprintf + * FUNCTION: vsnprintf * * PARAMETERS: string - String with boundary * size - Boundary of the string @@ -349,9 +349,7 @@ static char *acpi_ut_format_number(char *string, * ******************************************************************************/ -int -acpi_ut_vsnprintf(char *string, - acpi_size size, const char *format, va_list args) +int vsnprintf(char *string, acpi_size size, const char *format, va_list args) { u8 base; u8 type; @@ -586,7 +584,7 @@ acpi_ut_vsnprintf(char *string, /******************************************************************************* * - * FUNCTION: acpi_ut_snprintf + * FUNCTION: snprintf * * PARAMETERS: string - String with boundary * size - Boundary of the string @@ -598,13 +596,38 @@ acpi_ut_vsnprintf(char *string, * ******************************************************************************/ -int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...) +int snprintf(char *string, acpi_size size, const char *format, ...) { va_list args; int length; va_start(args, format); - length = acpi_ut_vsnprintf(string, size, format, args); + length = vsnprintf(string, size, format, args); + va_end(args); + + return (length); +} + +/******************************************************************************* + * + * FUNCTION: sprintf + * + * PARAMETERS: string - String with boundary + * Format, ... - Standard printf format + * + * RETURN: Number of bytes actually written. + * + * DESCRIPTION: Formatted output to a string. 
+ * + ******************************************************************************/ + +int sprintf(char *string, const char *format, ...) +{ + va_list args; + int length; + + va_start(args, format); + length = vsnprintf(string, ACPI_UINT32_MAX, format, args); va_end(args); return (length); @@ -613,7 +636,59 @@ int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...) #ifdef ACPI_APPLICATION /******************************************************************************* * - * FUNCTION: acpi_ut_file_vprintf + * FUNCTION: vprintf + * + * PARAMETERS: format - Standard printf format + * args - Argument list + * + * RETURN: Number of bytes actually written. + * + * DESCRIPTION: Formatted output to stdout using argument list pointer. + * + ******************************************************************************/ + +int vprintf(const char *format, va_list args) +{ + acpi_cpu_flags flags; + int length; + + flags = acpi_os_acquire_lock(acpi_gbl_print_lock); + length = vsnprintf(acpi_gbl_print_buffer, + sizeof(acpi_gbl_print_buffer), format, args); + + (void)fwrite(acpi_gbl_print_buffer, length, 1, ACPI_FILE_OUT); + acpi_os_release_lock(acpi_gbl_print_lock, flags); + + return (length); +} + +/******************************************************************************* + * + * FUNCTION: printf + * + * PARAMETERS: Format, ... - Standard printf format + * + * RETURN: Number of bytes actually written. + * + * DESCRIPTION: Formatted output to stdout. + * + ******************************************************************************/ + +int printf(const char *format, ...) +{ + va_list args; + int length; + + va_start(args, format); + length = vprintf(format, args); + va_end(args); + + return (length); +} + +/******************************************************************************* + * + * FUNCTION: vfprintf * * PARAMETERS: file - File descriptor * format - Standard printf format @@ -625,16 +700,16 @@ int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...) * ******************************************************************************/ -int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args) +int vfprintf(FILE * file, const char *format, va_list args) { acpi_cpu_flags flags; int length; flags = acpi_os_acquire_lock(acpi_gbl_print_lock); - length = acpi_ut_vsnprintf(acpi_gbl_print_buffer, - sizeof(acpi_gbl_print_buffer), format, args); + length = vsnprintf(acpi_gbl_print_buffer, + sizeof(acpi_gbl_print_buffer), format, args); - (void)acpi_os_write_file(file, acpi_gbl_print_buffer, length, 1); + (void)fwrite(acpi_gbl_print_buffer, length, 1, file); acpi_os_release_lock(acpi_gbl_print_lock, flags); return (length); @@ -642,7 +717,7 @@ int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args) /******************************************************************************* * - * FUNCTION: acpi_ut_file_printf + * FUNCTION: fprintf * * PARAMETERS: file - File descriptor * Format, ... - Standard printf format @@ -653,13 +728,13 @@ int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args) * ******************************************************************************/ -int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...) +int fprintf(FILE * file, const char *format, ...) 
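The printf()/fprintf() family added in this file all funnel through one bounded vsnprintf() into a shared buffer under a lock. A portable userspace analogue of that layering (a pthread mutex stands in for acpi_gbl_print_lock; the locked_* names are hypothetical):

#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>

static char print_buffer[1024];
static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;

/* Format into one shared buffer under the lock, then write it out */
static int locked_vfprintf(FILE *file, const char *format, va_list args)
{
	int length;

	pthread_mutex_lock(&print_lock);
	length = vsnprintf(print_buffer, sizeof(print_buffer), format, args);
	if (length > (int)sizeof(print_buffer) - 1)
		length = (int)sizeof(print_buffer) - 1; /* truncated */
	if (length > 0)
		(void)fwrite(print_buffer, (size_t)length, 1, file);
	pthread_mutex_unlock(&print_lock);

	return length;
}

static int locked_fprintf(FILE *file, const char *format, ...)
{
	va_list args;
	int length;

	va_start(args, format);
	length = locked_vfprintf(file, format, args);
	va_end(args);
	return length;
}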
{ va_list args; int length; va_start(args, format); - length = acpi_ut_file_vprintf(file, format, args); + length = vfprintf(file, format, args); va_end(args); return (length); diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c new file mode 100644 index 000000000000..b4f341c98a95 --- /dev/null +++ b/drivers/acpi/acpica/utstrtoul64.c @@ -0,0 +1,348 @@ +/******************************************************************************* + * + * Module Name: utstrtoul64 - string to 64-bit integer support + * + ******************************************************************************/ + +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include "accommon.h" + +/******************************************************************************* + * + * The functions in this module satisfy the need for 64-bit string-to-integer + * conversions on both 32-bit and 64-bit platforms. + * + ******************************************************************************/ + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utstrtoul64") + +/* Local prototypes */ +static u64 acpi_ut_strtoul_base10(char *string, u32 flags); + +static u64 acpi_ut_strtoul_base16(char *string, u32 flags); + +/******************************************************************************* + * + * String conversion rules as written in the ACPI specification. The error + * conditions and behavior are different depending on the type of conversion. + * + * + * Implicit data type conversion: string-to-integer + * -------------------------------------------------- + * + * Base is always 16. This is the ACPI_STRTOUL_BASE16 case. 
+ * + * Example: + * Add ("BA98", Arg0, Local0) + * + * The integer is initialized to the value zero. + * The ASCII string is interpreted as a hexadecimal constant. + * + * 1) A "0x" prefix is not allowed. However, ACPICA allows this for + * compatibility with previous ACPICA. (NO ERROR) + * + * 2) Terminates when the size of an integer is reached (32 or 64 bits). + * (NO ERROR) + * + * 3) The first non-hex character terminates the conversion without error. + * (NO ERROR) + * + * 4) Conversion of a null (zero-length) string to an integer is not + * allowed. However, ACPICA allows this for compatibility with previous + * ACPICA. This conversion returns the value 0. (NO ERROR) + * + * + * Explicit data type conversion: to_integer() with string operand + * --------------------------------------------------------------- + * + * Base is either 10 (default) or 16 (with 0x prefix) + * + * Examples: + * to_integer ("1000") + * to_integer ("0xABCD") + * + * 1) Can be (must be) either a decimal or hexadecimal numeric string. + * A hex value must be prefixed by "0x" or it is interpreted as a decimal. + * + * 2) The value must not exceed the maximum of an integer value. ACPI spec + * states the behavior is "unpredictable", so ACPICA matches the behavior + * of the implicit conversion case.(NO ERROR) + * + * 3) Behavior on the first non-hex character is not specified by the ACPI + * spec, so ACPICA matches the behavior of the implicit conversion case + * and terminates. (NO ERROR) + * + * 4) A null (zero-length) string is illegal. + * However, ACPICA allows this for compatibility with previous ACPICA. + * This conversion returns the value 0. (NO ERROR) + * + ******************************************************************************/ + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul64 + * + * PARAMETERS: string - Null terminated input string + * flags - Conversion info, see below + * return_value - Where the converted integer is + * returned + * + * RETURN: Status and Converted value + * + * DESCRIPTION: Convert a string into an unsigned value. Performs either a + * 32-bit or 64-bit conversion, depending on the input integer + * size in Flags (often the current mode of the interpreter). + * + * Values for Flags: + * ACPI_STRTOUL_32BIT - Max integer value is 32 bits + * ACPI_STRTOUL_64BIT - Max integer value is 64 bits + * ACPI_STRTOUL_BASE16 - Input string is hexadecimal. Default + * is 10/16 based on string prefix (0x). + * + * NOTES: + * Negative numbers are not supported, as they are not supported by ACPI. + * + * Supports only base 16 or base 10 strings/values. Does not + * support Octal strings, as these are not supported by ACPI. + * + * Current users of this support: + * + * interpreter - Implicit and explicit conversions, GPE method names + * debugger - Command line input string conversion + * iASL - Main parser, conversion of constants to integers + * iASL - Data Table Compiler parser (constant math expressions) + * iASL - Preprocessor (constant math expressions) + * acpi_dump - Input table addresses + * acpi_exec - Testing of the acpi_ut_strtoul64 function + * + * Note concerning callers: + * acpi_gbl_integer_byte_width can be used to set the 32/64 limit. If used, + * this global should be set to the proper width. For the core ACPICA code, + * this width depends on the DSDT version. 
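The two rule sets above differ only in how the radix is chosen before conversion begins. A small sketch of that selection step (hypothetical select_base() helper):

#include <ctype.h>

/* implicit != 0: AML implicit conversion; otherwise explicit to_integer() */
static unsigned select_base(const char *s, int implicit)
{
	if (implicit)
		return 16;      /* implicit string-to-integer is always hex */
	if (s[0] == '0' && tolower((unsigned char)s[1]) == 'x')
		return 16;      /* explicit conversion with 0x prefix */
	return 10;              /* explicit conversion default */
}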
For iASL, the default byte + * width is always 8 for the parser, but error checking is performed later + * to flag cases where a 64-bit constant is defined in a 32-bit DSDT/SSDT. + * + ******************************************************************************/ + +acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *return_value) +{ + acpi_status status = AE_OK; + u32 base; + + ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string); + + /* Parameter validation */ + + if (!string || !return_value) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + *return_value = 0; + + /* Check for zero-length string, returns 0 */ + + if (*string == 0) { + return_ACPI_STATUS(AE_OK); + } + + /* Skip over any white space at start of string */ + + while (isspace((int)*string)) { + string++; + } + + /* End of string? return 0 */ + + if (*string == 0) { + return_ACPI_STATUS(AE_OK); + } + + /* + * 1) The "0x" prefix indicates base 16. Per the ACPI specification, + * the "0x" prefix is only allowed for implicit (non-strict) conversions. + * However, we always allow it for compatibility with older ACPICA. + */ + if ((*string == ACPI_ASCII_ZERO) && + (tolower((int)*(string + 1)) == 'x')) { + string += 2; /* Go past the 0x */ + if (*string == 0) { + return_ACPI_STATUS(AE_OK); /* Return value 0 */ + } + + base = 16; + } + + /* 2) Force to base 16 (implicit conversion case) */ + + else if (flags & ACPI_STRTOUL_BASE16) { + base = 16; + } + + /* 3) Default fallback is to Base 10 */ + + else { + base = 10; + } + + /* Skip all leading zeros */ + + while (*string == ACPI_ASCII_ZERO) { + string++; + if (*string == 0) { + return_ACPI_STATUS(AE_OK); /* Return value 0 */ + } + } + + /* Perform the base 16 or 10 conversion */ + + if (base == 16) { + *return_value = acpi_ut_strtoul_base16(string, flags); + } else { + *return_value = acpi_ut_strtoul_base10(string, flags); + } + + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul_base10 + * + * PARAMETERS: string - Null terminated input string + * flags - Conversion info + * + * RETURN: 64-bit converted integer + * + * DESCRIPTION: Performs a base 10 conversion of the input string to an + * integer value, either 32 or 64 bits. + * Note: String must be valid and non-null. + * + ******************************************************************************/ + +static u64 acpi_ut_strtoul_base10(char *string, u32 flags) +{ + int ascii_digit; + u64 next_value; + u64 return_value = 0; + + /* Main loop: convert each ASCII byte in the input string */ + + while (*string) { + ascii_digit = *string; + if (!isdigit(ascii_digit)) { + + /* Not ASCII 0-9, terminate */ + + goto exit; + } + + /* Convert and insert (add) the decimal digit */ + + next_value = + (return_value * 10) + (ascii_digit - ACPI_ASCII_ZERO); + + /* Check for overflow (32 or 64 bit) - return current converted value */ + + if (((flags & ACPI_STRTOUL_32BIT) && (next_value > ACPI_UINT32_MAX)) || (next_value < return_value)) { /* 64-bit overflow case */ + goto exit; + } + + return_value = next_value; + string++; + } + +exit: + return (return_value); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul_base16 + * + * PARAMETERS: string - Null terminated input string + * flags - conversion info + * + * RETURN: 64-bit converted integer + * + * DESCRIPTION: Performs a base 16 conversion of the input string to an + * integer value, either 32 or 64 bits. 
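The decimal helper's overflow handling can be read in isolation: compute the next value, then stop if it exceeds the 32-bit limit or wraps past the current value. A standalone sketch mirroring that check (stdint types in place of u64/ACPI_UINT32_MAX):

#include <ctype.h>
#include <stdint.h>

static uint64_t strtou_base10(const char *s, int width32)
{
	uint64_t value = 0, next;

	while (isdigit((unsigned char)*s)) {
		next = value * 10 + (uint64_t)(*s - '0');

		/* 32-bit cap exceeded, or 64-bit wraparound detected */
		if ((width32 && next > UINT32_MAX) || next < value)
			break;  /* keep the last in-range value */
		value = next;
		s++;
	}
	return value;
}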
+ * Note: String must be valid and non-null. + * + ******************************************************************************/ + +static u64 acpi_ut_strtoul_base16(char *string, u32 flags) +{ + int ascii_digit; + u32 valid_digits = 1; + u64 return_value = 0; + + /* Main loop: convert each ASCII byte in the input string */ + + while (*string) { + + /* Check for overflow (32 or 64 bit) - return current converted value */ + + if ((valid_digits > 16) || + ((valid_digits > 8) && (flags & ACPI_STRTOUL_32BIT))) { + goto exit; + } + + ascii_digit = *string; + if (!isxdigit(ascii_digit)) { + + /* Not Hex ASCII A-F, a-f, or 0-9, terminate */ + + goto exit; + } + + /* Convert and insert the hex digit */ + + return_value = + (return_value << 4) | + acpi_ut_ascii_char_to_hex(ascii_digit); + + string++; + valid_digits++; + } + +exit: + return (return_value); +} diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 0df07dfa53b6..df31d71ce596 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -95,13 +95,11 @@ acpi_ut_create_list(const char *list_name, { struct acpi_memory_list *cache; - cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); + cache = acpi_os_allocate_zeroed(sizeof(struct acpi_memory_list)); if (!cache) { return (AE_NO_MEMORY); } - memset(cache, 0, sizeof(struct acpi_memory_list)); - cache->list_name = list_name; cache->object_size = object_size; diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index d9e6aac7dc83..ec503c862961 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -61,7 +61,7 @@ ACPI_MODULE_NAME("utxface") * DESCRIPTION: Shutdown the ACPICA subsystem and release all resources. * ******************************************************************************/ -acpi_status __init acpi_terminate(void) +acpi_status ACPI_INIT_FUNCTION acpi_terminate(void) { acpi_status status; diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index 75b5f27da267..a5ca0f57cd08 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -69,7 +69,7 @@ void ae_do_object_overrides(void); * ******************************************************************************/ -acpi_status __init acpi_initialize_subsystem(void) +acpi_status ACPI_INIT_FUNCTION acpi_initialize_subsystem(void) { acpi_status status; @@ -141,7 +141,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem) * Puts system into ACPI mode if it isn't already. * ******************************************************************************/ -acpi_status __init acpi_enable_subsystem(u32 flags) +acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags) { acpi_status status = AE_OK; @@ -239,7 +239,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_enable_subsystem) * objects and executing AML code for Regions, buffers, etc. * ******************************************************************************/ -acpi_status __init acpi_initialize_objects(u32 flags) +acpi_status ACPI_INIT_FUNCTION acpi_initialize_objects(u32 flags) { acpi_status status = AE_OK; @@ -265,7 +265,8 @@ acpi_status __init acpi_initialize_objects(u32 flags) * all of the tables have been loaded. It is a legacy option and is * not compatible with other ACPI implementations. See acpi_ns_load_table. 
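The hex helper above avoids arithmetic overflow checks entirely by capping the digit count: at most 8 hex digits fit in a 32-bit integer, 16 in a 64-bit one. An equivalent standalone sketch:

#include <ctype.h>
#include <stdint.h>

static uint64_t strtou_base16(const char *s, int width32)
{
	uint64_t value = 0;
	unsigned digits = 0, max_digits = width32 ? 8 : 16;

	while (isxdigit((unsigned char)*s) && digits < max_digits) {
		int c = toupper((unsigned char)*s);

		/* shift in one nibble per hex digit */
		value = (value << 4) |
			(uint64_t)(c <= '9' ? c - '0' : c - 'A' + 10);
		s++;
		digits++;
	}
	return value;   /* stops at first non-hex char or at the cap */
}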
*/ - if (acpi_gbl_group_module_level_code) { + if (!acpi_gbl_parse_table_as_term_list + && acpi_gbl_group_module_level_code) { acpi_ns_exec_module_code_list(); /* diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index ab234791a0ba..93ecae55fe6a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -733,15 +733,17 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume) return result; acpi_battery_init_alarm(battery); } + + result = acpi_battery_get_state(battery); + if (result) + return result; + acpi_battery_quirks(battery); + if (!battery->bat) { result = sysfs_add_battery(battery); if (result) return result; } - result = acpi_battery_get_state(battery); - if (result) - return result; - acpi_battery_quirks(battery); /* * Wakeup the system if battery is critical low diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 85b7d07fe5c8..658b4c4f169f 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -985,7 +985,8 @@ void __init acpi_early_init(void) goto error0; } - if (acpi_gbl_group_module_level_code) { + if (!acpi_gbl_parse_table_as_term_list && + acpi_gbl_group_module_level_code) { status = acpi_load_tables(); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX @@ -1074,7 +1075,8 @@ static int __init acpi_bus_init(void) status = acpi_ec_ecdt_probe(); /* Ignore result. Not having an ECDT is not fatal. */ - if (!acpi_gbl_group_module_level_code) { + if (acpi_gbl_parse_table_as_term_list || + !acpi_gbl_group_module_level_code) { status = acpi_load_tables(); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 31abb0bdd4f2..e19f530f1083 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -19,6 +19,8 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ +#define pr_fmt(fmt) "ACPI : button: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -104,6 +106,8 @@ struct acpi_button { struct input_dev *input; char phys[32]; /* for input device */ unsigned long pushed; + int last_state; + ktime_t last_time; bool suspended; }; @@ -111,6 +115,10 @@ static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); static struct acpi_device *lid_device; static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; +static unsigned long lid_report_interval __read_mostly = 500; +module_param(lid_report_interval, ulong, 0644); +MODULE_PARM_DESC(lid_report_interval, "Interval (ms) between lid key events"); + /* -------------------------------------------------------------------------- FS Interface (/proc) -------------------------------------------------------------------------- */ @@ -134,10 +142,79 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state) { struct acpi_button *button = acpi_driver_data(device); int ret; + ktime_t next_report; + bool do_update; + + /* + * In lid_init_state=ignore mode, if user opens/closes lid + * frequently with "open" missing, and "last_time" is also updated + * frequently, "close" cannot be delivered to the userspace. + * So "last_time" is only updated after a timeout or an actual + * switch. 
+ */ + if (lid_init_state != ACPI_BUTTON_LID_INIT_IGNORE || + button->last_state != !!state) + do_update = true; + else + do_update = false; + + next_report = ktime_add(button->last_time, + ms_to_ktime(lid_report_interval)); + if (button->last_state == !!state && + ktime_after(ktime_get(), next_report)) { + /* Complain about the buggy firmware */ + pr_warn_once("The lid device is not compliant with SW_LID.\n"); - /* input layer checks if event is redundant */ - input_report_switch(button->input, SW_LID, !state); - input_sync(button->input); + /* + * Send the unreliable complement switch event: + * + * On most platforms, the lid device is reliable. However + * there are exceptions: + * 1. Platforms returning initial lid state as "close" by + * default after booting/resuming: + * https://bugzilla.kernel.org/show_bug.cgi?id=89211 + * https://bugzilla.kernel.org/show_bug.cgi?id=106151 + * 2. Platforms never reporting "open" events: + * https://bugzilla.kernel.org/show_bug.cgi?id=106941 + * On these buggy platforms, the usage model of the ACPI + * lid device actually is: + * 1. The initial returning value of _LID may not be + * reliable. + * 2. The open event may not be reliable. + * 3. The close event is reliable. + * + * But SW_LID is typed as an input switch event, and the input + * layer checks if the event is redundant. Hence if the + * state is not switched, the userspace cannot see this + * platform-triggered reliable event. By inserting a + * complement switch event, it is then guaranteed that the + * platform-triggered reliable one can always be seen by + * the userspace. + */ + if (lid_init_state == ACPI_BUTTON_LID_INIT_IGNORE) { + do_update = true; + /* + * Do generate a complement switch event for "close" + * as "close" is reliable and a wrong "open" won't + * trigger unexpected behaviors. + * Do not generate a complement switch event for + * "open" as "open" is not reliable and a wrong + * "close" will trigger unexpected behaviors. + */ + if (!state) { + input_report_switch(button->input, + SW_LID, state); + input_sync(button->input); + } + } + } + /* Send the platform-triggered reliable event */ + if (do_update) { + input_report_switch(button->input, SW_LID, !state); + input_sync(button->input); + button->last_state = !!state; + button->last_time = ktime_get(); + } if (state) pm_wakeup_event(&device->dev, 0); @@ -411,6 +488,8 @@ static int acpi_button_add(struct acpi_device *device) strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID); sprintf(class, "%s/%s", ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID); + button->last_state = !!acpi_lid_evaluate_state(device); + button->last_time = ktime_get(); } else { printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid); error = -ENODEV; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 2e981732805b..d0d0504b7c89 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -40,15 +40,48 @@ #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/ktime.h> +#include <linux/rwsem.h> +#include <linux/wait.h> #include <acpi/cppc_acpi.h> -/* - * Lock to provide mutually exclusive access to the PCC - * channel. e.g. When the remote updates the shared region - * with new data, the reader needs to be protected from - * other CPUs activity on the same channel.
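Condensed, the update decision above reads: always report when the mode is not "ignore" or the state actually switched; in ignore mode with an unchanged state, only a timeout (lid_report_interval) makes the event worth resending. A simplified sketch of just that predicate (plain integer milliseconds instead of ktime_t, hypothetical names):

#include <stdbool.h>
#include <stdint.h>

struct lid_state {
	bool last_state;
	uint64_t last_time_ms;
};

/* Sketch: should this lid event be delivered to the input layer? */
static bool lid_should_update(const struct lid_state *lid, bool state,
			      bool ignore_mode, uint64_t now_ms,
			      uint64_t interval_ms)
{
	if (!ignore_mode || lid->last_state != state)
		return true;    /* real switch, or non-ignore mode */

	/* same state repeated: resend only after the report interval */
	return now_ms > lid->last_time_ms + interval_ms;
}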
- */ -static DEFINE_SPINLOCK(pcc_lock); + +struct cppc_pcc_data { + struct mbox_chan *pcc_channel; + void __iomem *pcc_comm_addr; + int pcc_subspace_idx; + bool pcc_channel_acquired; + ktime_t deadline; + unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; + + bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */ + bool platform_owns_pcc; /* Ownership of PCC subspace */ + unsigned int pcc_write_cnt; /* Running count of PCC write commands */ + + /* + * Lock to provide controlled access to the PCC channel. + * + * For performance-critical use cases (currently cppc_set_perf): + * We need to take read_lock and check if channel belongs to OSPM + * before reading or writing to PCC subspace. + * We need to take write_lock before transferring the channel + * ownership to the platform via a Doorbell. + * This allows us to batch a number of CPPC requests if they happen + * to originate at about the same time. + * + * For non-performance-critical use cases (init): + * Take write_lock for all purposes, which gives exclusive access. + */ + struct rw_semaphore pcc_lock; + + /* Wait queue for CPUs whose requests were batched */ + wait_queue_head_t pcc_write_wait_q; +}; + +/* Structure to represent the single PCC channel */ +static struct cppc_pcc_data pcc_data = { + .pcc_subspace_idx = -1, + .platform_owns_pcc = true, +}; /* * The cpc_desc structure contains the ACPI register details @@ -59,18 +92,25 @@ static DEFINE_SPINLOCK(pcc_lock); */ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); -/* This layer handles all the PCC specifics for CPPC. */ -static struct mbox_chan *pcc_channel; -static void __iomem *pcc_comm_addr; -static u64 comm_base_addr; -static int pcc_subspace_idx = -1; -static bool pcc_channel_acquired; -static ktime_t deadline; -static unsigned int pcc_mpar, pcc_mrtt; - /* pcc mapped address + header size + offset within PCC subspace */ -#define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs)) - +#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs)) + +/* Check if a CPC register is in PCC */ +#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ + (cpc)->cpc_entry.reg.space_id == \ + ACPI_ADR_SPACE_PLATFORM_COMM) + +/* Evaluates to True if reg is a NULL register descriptor */ +#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \ + (reg)->address == 0 && \ + (reg)->bit_width == 0 && \ + (reg)->bit_offset == 0 && \ + (reg)->access_width == 0) + +/* Evaluates to True if an optional cpc field is supported */ +#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \ + !!(cpc)->cpc_entry.int_value : \ + !IS_NULL_REG(&(cpc)->cpc_entry.reg)) /* * Arbitrary Retries in case the remote processor is slow to respond * to PCC commands.
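IS_NULL_REG() above encodes the CPPC convention that an all-zero system-memory GAS descriptor means "optional register not implemented". A standalone sketch of the same test (simplified struct and hypothetical names, not the kernel types):

#include <stdbool.h>
#include <stdint.h>

/* Simplified register descriptor, mirroring the fields the macro tests */
struct reg {
	unsigned space_id;
	uint64_t address;
	unsigned bit_width, bit_offset, access_width;
};

#define SPACE_SYSTEM_MEMORY 0

/* A "NULL register" is an all-zero system-memory descriptor */
static bool is_null_reg(const struct reg *r)
{
	return r->space_id == SPACE_SYSTEM_MEMORY && !r->address &&
	       !r->bit_width && !r->bit_offset && !r->access_width;
}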
Keeping it high enough to cover emulators where @@ -78,11 +118,79 @@ static unsigned int pcc_mpar, pcc_mrtt; */ #define NUM_RETRIES 500 -static int check_pcc_chan(void) +struct cppc_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *c, ssize_t count); +}; + +#define define_one_cppc_ro(_name) \ +static struct cppc_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) + +static ssize_t show_feedback_ctrs(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); + struct cppc_perf_fb_ctrs fb_ctrs = {0}; + + cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); + + return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n", + fb_ctrs.reference, fb_ctrs.delivered); +} +define_one_cppc_ro(feedback_ctrs); + +static ssize_t show_reference_perf(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); + struct cppc_perf_fb_ctrs fb_ctrs = {0}; + + cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", + fb_ctrs.reference_perf); +} +define_one_cppc_ro(reference_perf); + +static ssize_t show_wraparound_time(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); + struct cppc_perf_fb_ctrs fb_ctrs = {0}; + + cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time); + +} +define_one_cppc_ro(wraparound_time); + +static struct attribute *cppc_attrs[] = { + &feedback_ctrs.attr, + &reference_perf.attr, + &wraparound_time.attr, + NULL +}; + +static struct kobj_type cppc_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = cppc_attrs, +}; + +static int check_pcc_chan(bool chk_err_bit) { - int ret = -EIO; - struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr; - ktime_t next_deadline = ktime_add(ktime_get(), deadline); + int ret = -EIO, status = 0; + struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr; + ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline); + + if (!pcc_data.platform_owns_pcc) + return 0; /* Retry in case the remote processor was too slow to catch up. */ while (!ktime_after(ktime_get(), next_deadline)) { @@ -91,8 +199,11 @@ static int check_pcc_chan(void) * platform and should have set the command completion bit when * PCC can be used by OSPM */ - if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) { + status = readw_relaxed(&generic_comm_base->status); + if (status & PCC_CMD_COMPLETE_MASK) { ret = 0; + if (chk_err_bit && (status & PCC_ERROR_MASK)) + ret = -EIO; break; } /* @@ -102,14 +213,23 @@ static int check_pcc_chan(void) udelay(3); } + if (likely(!ret)) + pcc_data.platform_owns_pcc = false; + else + pr_err("PCC check channel failed. 
Status=%x\n", status); + return ret; } +/* + * This function transfers the ownership of the PCC to the platform + * So it must be called while holding write_lock(pcc_lock) + */ static int send_pcc_cmd(u16 cmd) { - int ret = -EIO; + int ret = -EIO, i; struct acpi_pcct_shared_memory *generic_comm_base = - (struct acpi_pcct_shared_memory *) pcc_comm_addr; + (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr; static ktime_t last_cmd_cmpl_time, last_mpar_reset; static int mpar_count; unsigned int time_delta; @@ -119,20 +239,29 @@ static int send_pcc_cmd(u16 cmd) * the channel before writing to PCC space */ if (cmd == CMD_READ) { - ret = check_pcc_chan(); + /* + * If there are pending cpc_writes, then we stole the channel + * before write completion, so first send a WRITE command to + * platform + */ + if (pcc_data.pending_pcc_write_cmd) + send_pcc_cmd(CMD_WRITE); + + ret = check_pcc_chan(false); if (ret) - return ret; - } + goto end; + } else /* CMD_WRITE */ + pcc_data.pending_pcc_write_cmd = FALSE; /* * Handle the Minimum Request Turnaround Time(MRTT) * "The minimum amount of time that OSPM must wait after the completion * of a command before issuing the next command, in microseconds" */ - if (pcc_mrtt) { + if (pcc_data.pcc_mrtt) { time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time); - if (pcc_mrtt > time_delta) - udelay(pcc_mrtt - time_delta); + if (pcc_data.pcc_mrtt > time_delta) + udelay(pcc_data.pcc_mrtt - time_delta); } /* @@ -146,15 +275,16 @@ static int send_pcc_cmd(u16 cmd) * not send the request to the platform after hitting the MPAR limit in * any 60s window */ - if (pcc_mpar) { + if (pcc_data.pcc_mpar) { if (mpar_count == 0) { time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset); if (time_delta < 60 * MSEC_PER_SEC) { pr_debug("PCC cmd not sent due to MPAR limit"); - return -EIO; + ret = -EIO; + goto end; } last_mpar_reset = ktime_get(); - mpar_count = pcc_mpar; + mpar_count = pcc_data.pcc_mpar; } mpar_count--; } @@ -165,33 +295,43 @@ static int send_pcc_cmd(u16 cmd) /* Flip CMD COMPLETE bit */ writew_relaxed(0, &generic_comm_base->status); + pcc_data.platform_owns_pcc = true; + /* Ring doorbell */ - ret = mbox_send_message(pcc_channel, &cmd); + ret = mbox_send_message(pcc_data.pcc_channel, &cmd); if (ret < 0) { pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n", cmd, ret); - return ret; + goto end; } - /* - * For READs we need to ensure the cmd completed to ensure - * the ensuing read()s can proceed. For WRITEs we dont care - * because the actual write()s are done before coming here - * and the next READ or WRITE will check if the channel - * is busy/free at the entry of this call. 
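The MRTT and MPAR handling above implements two distinct limits: a minimum gap after the previous command's completion, and a command budget per rolling 60-second window. A simplified userspace sketch of the same policy (microsecond timestamps supplied by the caller; the helper and its return convention are hypothetical):

#include <stdint.h>

/* Returns 1 = send now, 0 = wait (MRTT not elapsed), -1 = refuse (MPAR) */
static int pcc_rate_limit(uint64_t now_us, uint64_t last_cmpl_us,
			  unsigned mrtt_us, unsigned mpar,
			  unsigned *budget, uint64_t *window_us)
{
	/* MRTT: minimum gap after the previous command completed */
	if (mrtt_us && now_us - last_cmpl_us < mrtt_us)
		return 0;

	/* MPAR: at most 'mpar' commands per rolling 60 second window */
	if (mpar && *budget == 0) {
		if (now_us - *window_us < 60ULL * 1000000)
			return -1;
		*window_us = now_us;
		*budget = mpar;
	}
	if (mpar)
		(*budget)--;
	return 1;
}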
- * - * If Minimum Request Turnaround Time is non-zero, we need - * to record the completion time of both READ and WRITE - * command for proper handling of MRTT, so we need to check - * for pcc_mrtt in addition to CMD_READ - */ - if (cmd == CMD_READ || pcc_mrtt) { - ret = check_pcc_chan(); - if (pcc_mrtt) - last_cmd_cmpl_time = ktime_get(); + /* Wait for completion and check for the PCC error bit */ + ret = check_pcc_chan(true); + + if (pcc_data.pcc_mrtt) + last_cmd_cmpl_time = ktime_get(); + + if (pcc_data.pcc_channel->mbox->txdone_irq) + mbox_chan_txdone(pcc_data.pcc_channel, ret); + else + mbox_client_txdone(pcc_data.pcc_channel, ret); + +end: + if (cmd == CMD_WRITE) { + if (unlikely(ret)) { + for_each_possible_cpu(i) { + struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i); + if (!desc) + continue; + + if (desc->write_cmd_id == pcc_data.pcc_write_cnt) + desc->write_cmd_status = ret; + } + } + pcc_data.pcc_write_cnt++; + wake_up_all(&pcc_data.pcc_write_wait_q); } - mbox_client_txdone(pcc_channel, ret); return ret; } @@ -272,13 +412,13 @@ end: * * Return: 0 for success or negative value for err. */ -int acpi_get_psd_map(struct cpudata **all_cpu_data) +int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) { int count_target; int retval = 0; unsigned int i, j; cpumask_var_t covered_cpus; - struct cpudata *pr, *match_pr; + struct cppc_cpudata *pr, *match_pr; struct acpi_psd_package *pdomain; struct acpi_psd_package *match_pdomain; struct cpc_desc *cpc_ptr, *match_cpc_ptr; @@ -394,14 +534,13 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map); static int register_pcc_channel(int pcc_subspace_idx) { struct acpi_pcct_hw_reduced *cppc_ss; - unsigned int len; u64 usecs_lat; if (pcc_subspace_idx >= 0) { - pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl, + pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_subspace_idx); - if (IS_ERR(pcc_channel)) { + if (IS_ERR(pcc_data.pcc_channel)) { pr_err("Failed to find PCC communication channel\n"); return -ENODEV; } @@ -412,7 +551,7 @@ static int register_pcc_channel(int pcc_subspace_idx) * PCC channels) and stored pointers to the * subspace communication region in con_priv. */ - cppc_ss = pcc_channel->con_priv; + cppc_ss = (pcc_data.pcc_channel)->con_priv; if (!cppc_ss) { pr_err("No PCC subspace found for CPPC\n"); @@ -420,35 +559,42 @@ } /* - * This is the shared communication region - * for the OS and Platform to communicate over. - */ - comm_base_addr = cppc_ss->base_address; - len = cppc_ss->length; - - /* * cppc_ss->latency is just a Nominal value. In reality * the remote processor could be much slower to reply. * So add an arbitrary amount of wait on top of Nominal. */ usecs_lat = NUM_RETRIES * cppc_ss->latency; - deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); - pcc_mrtt = cppc_ss->min_turnaround_time; - pcc_mpar = cppc_ss->max_access_rate; + pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); + pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time; + pcc_data.pcc_mpar = cppc_ss->max_access_rate; + pcc_data.pcc_nominal = cppc_ss->latency; - pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len); - if (!pcc_comm_addr) { + pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); + if (!pcc_data.pcc_comm_addr) { pr_err("Failed to ioremap PCC comm region mem\n"); return -ENOMEM; } /* Set flag so that we dont come here for each CPU.
*/ - pcc_channel_acquired = true; + pcc_data.pcc_channel_acquired = true; } return 0; } +/** + * cpc_ffh_supported() - check if FFH reading supported + * + * Check if the architecture has support for functional fixed hardware + * read/write capability. + * + * Return: true for supported, false for not supported + */ +bool __weak cpc_ffh_supported(void) +{ + return false; +} + /* * An example CPC table looks like the following. * @@ -507,6 +653,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) union acpi_object *out_obj, *cpc_obj; struct cpc_desc *cpc_ptr; struct cpc_reg *gas_t; + struct device *cpu_dev; acpi_handle handle = pr->handle; unsigned int num_ent, i, cpc_rev; acpi_status status; @@ -545,6 +692,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; } + cpc_ptr->num_entries = num_ent; + /* Second entry should be revision. */ cpc_obj = &out_obj->package.elements[1]; if (cpc_obj->type == ACPI_TYPE_INTEGER) { @@ -579,16 +728,27 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) * so extract it only once. */ if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - if (pcc_subspace_idx < 0) - pcc_subspace_idx = gas_t->access_width; - else if (pcc_subspace_idx != gas_t->access_width) { + if (pcc_data.pcc_subspace_idx < 0) + pcc_data.pcc_subspace_idx = gas_t->access_width; + else if (pcc_data.pcc_subspace_idx != gas_t->access_width) { pr_debug("Mismatched PCC ids.\n"); goto out_free; } - } else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { - /* Support only PCC and SYS MEM type regs */ - pr_debug("Unsupported register type: %d\n", gas_t->space_id); - goto out_free; + } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + if (gas_t->address) { + void __iomem *addr; + + addr = ioremap(gas_t->address, gas_t->bit_width/8); + if (!addr) + goto out_free; + cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; + } + } else { + if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { + /* Support only PCC ,SYS MEM and FFH type regs */ + pr_debug("Unsupported register type: %d\n", gas_t->space_id); + goto out_free; + } } cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER; @@ -607,10 +767,13 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; /* Register PCC channel once for all CPUs. */ - if (!pcc_channel_acquired) { - ret = register_pcc_channel(pcc_subspace_idx); + if (!pcc_data.pcc_channel_acquired) { + ret = register_pcc_channel(pcc_data.pcc_subspace_idx); if (ret) goto out_free; + + init_rwsem(&pcc_data.pcc_lock); + init_waitqueue_head(&pcc_data.pcc_write_wait_q); } /* Plug PSD data into this CPUs CPC descriptor. */ @@ -619,10 +782,27 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) /* Everything looks okay */ pr_debug("Parsed CPC struct for CPU: %d\n", pr->id); + /* Add per logical CPU nodes for reading its feedback counters. 
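cpc_ffh_supported() above is defined __weak so that a generic default can be overridden per architecture at link time. A minimal demonstration of the pattern (GCC/Clang attribute syntax standing in for the kernel's __weak macro):

#include <stdbool.h>

/* Generic default: picked only if no strong definition exists */
bool __attribute__((weak)) cpc_ffh_supported(void)
{
	return false;
}

/* An architecture that implements FFH just defines the strong symbol:
 *
 *     bool cpc_ffh_supported(void) { return true; }
 *
 * and the linker prefers it over the weak default above. */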
*/ + cpu_dev = get_cpu_device(pr->id); + if (!cpu_dev) + goto out_free; + + ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, + "acpi_cppc"); + if (ret) + goto out_free; + kfree(output.pointer); return 0; out_free: + /* Free all the mapped sys mem areas for this CPU */ + for (i = 2; i < cpc_ptr->num_entries; i++) { + void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; + + if (addr) + iounmap(addr); + } kfree(cpc_ptr); out_buf_free: @@ -640,26 +820,82 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe); void acpi_cppc_processor_exit(struct acpi_processor *pr) { struct cpc_desc *cpc_ptr; + unsigned int i; + void __iomem *addr; + cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); + + /* Free all the mapped sys mem areas for this CPU */ + for (i = 2; i < cpc_ptr->num_entries; i++) { + addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; + if (addr) + iounmap(addr); + } + + kobject_put(&cpc_ptr->kobj); kfree(cpc_ptr); } EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit); +/** + * cpc_read_ffh() - Read FFH register + * @cpunum: cpu number to read + * @reg: cppc register information + * @val: place holder for return value + * + * Read bit_width bits from a specified address and bit_offset + * + * Return: 0 for success and error code + */ +int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) +{ + return -ENOTSUPP; +} + +/** + * cpc_write_ffh() - Write FFH register + * @cpunum: cpu number to write + * @reg: cppc register information + * @val: value to write + * + * Write value of bit_width bits to a specified address and bit_offset + * + * Return: 0 for success and error code + */ +int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) +{ + return -ENOTSUPP; +} + /* * Since cpc_read and cpc_write are called while holding pcc_lock, it should be * as fast as possible. We have already mapped the PCC subspace during init, so * we can directly write to it. 
*/ -static int cpc_read(struct cpc_reg *reg, u64 *val) +static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) { int ret_val = 0; + void __iomem *vaddr = 0; + struct cpc_reg *reg = ®_res->cpc_entry.reg; + + if (reg_res->type == ACPI_TYPE_INTEGER) { + *val = reg_res->cpc_entry.int_value; + return ret_val; + } *val = 0; - if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - void __iomem *vaddr = GET_PCC_VADDR(reg->address); + if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) + vaddr = GET_PCC_VADDR(reg->address); + else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + vaddr = reg_res->sys_mem_vaddr; + else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) + return cpc_read_ffh(cpu, reg, val); + else + return acpi_os_read_memory((acpi_physical_address)reg->address, + val, reg->bit_width); - switch (reg->bit_width) { + switch (reg->bit_width) { case 8: *val = readb_relaxed(vaddr); break; @@ -674,23 +910,30 @@ static int cpc_read(struct cpc_reg *reg, u64 *val) break; default: pr_debug("Error: Cannot read %u bit width from PCC\n", - reg->bit_width); + reg->bit_width); ret_val = -EFAULT; - } - } else - ret_val = acpi_os_read_memory((acpi_physical_address)reg->address, - val, reg->bit_width); + } + return ret_val; } -static int cpc_write(struct cpc_reg *reg, u64 val) +static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) { int ret_val = 0; + void __iomem *vaddr = 0; + struct cpc_reg *reg = ®_res->cpc_entry.reg; + + if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) + vaddr = GET_PCC_VADDR(reg->address); + else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + vaddr = reg_res->sys_mem_vaddr; + else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) + return cpc_write_ffh(cpu, reg, val); + else + return acpi_os_write_memory((acpi_physical_address)reg->address, + val, reg->bit_width); - if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - void __iomem *vaddr = GET_PCC_VADDR(reg->address); - - switch (reg->bit_width) { + switch (reg->bit_width) { case 8: writeb_relaxed(val, vaddr); break; @@ -705,13 +948,11 @@ static int cpc_write(struct cpc_reg *reg, u64 val) break; default: pr_debug("Error: Cannot write %u bit width to PCC\n", - reg->bit_width); + reg->bit_width); ret_val = -EFAULT; break; - } - } else - ret_val = acpi_os_write_memory((acpi_physical_address)reg->address, - val, reg->bit_width); + } + return ret_val; } @@ -727,8 +968,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf, *nom_perf; - u64 high, low, ref, nom; - int ret = 0; + u64 high, low, nom; + int ret = 0, regs_in_pcc = 0; if (!cpc_desc) { pr_debug("No CPC descriptor for CPU:%d\n", cpunum); @@ -740,13 +981,11 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF]; nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF]; - spin_lock(&pcc_lock); - /* Are any of the regs PCC ?*/ - if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || - (lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || - (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || - (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) { + if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || + CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) { + regs_in_pcc = 1; + down_write(&pcc_data.pcc_lock); /* Ring doorbell once to update PCC subspace */ if 
(send_pcc_cmd(CMD_READ) < 0) { ret = -EIO; @@ -754,26 +993,21 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) } } - cpc_read(&highest_reg->cpc_entry.reg, &high); + cpc_read(cpunum, highest_reg, &high); perf_caps->highest_perf = high; - cpc_read(&lowest_reg->cpc_entry.reg, &low); + cpc_read(cpunum, lowest_reg, &low); perf_caps->lowest_perf = low; - cpc_read(&ref_perf->cpc_entry.reg, &ref); - perf_caps->reference_perf = ref; - - cpc_read(&nom_perf->cpc_entry.reg, &nom); + cpc_read(cpunum, nom_perf, &nom); perf_caps->nominal_perf = nom; - if (!ref) - perf_caps->reference_perf = perf_caps->nominal_perf; - if (!high || !low || !nom) ret = -EFAULT; out_err: - spin_unlock(&pcc_lock); + if (regs_in_pcc) + up_write(&pcc_data.pcc_lock); return ret; } EXPORT_SYMBOL_GPL(cppc_get_perf_caps); @@ -788,9 +1022,10 @@ EXPORT_SYMBOL_GPL(cppc_get_perf_caps); int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) { struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); - struct cpc_register_resource *delivered_reg, *reference_reg; - u64 delivered, reference; - int ret = 0; + struct cpc_register_resource *delivered_reg, *reference_reg, + *ref_perf_reg, *ctr_wrap_reg; + u64 delivered, reference, ref_perf, ctr_wrap_time; + int ret = 0, regs_in_pcc = 0; if (!cpc_desc) { pr_debug("No CPC descriptor for CPU:%d\n", cpunum); @@ -799,12 +1034,21 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; + ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; + ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; - spin_lock(&pcc_lock); + /* + * If the reference perf register is not supported then we should + * use the nominal perf value + */ + if (!CPC_SUPPORTED(ref_perf_reg)) + ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; /* Are any of the regs PCC ?*/ - if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || - (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) { + if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || + CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { + down_write(&pcc_data.pcc_lock); + regs_in_pcc = 1; /* Ring doorbell once to update PCC subspace */ if (send_pcc_cmd(CMD_READ) < 0) { ret = -EIO; @@ -812,25 +1056,31 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) } } - cpc_read(&delivered_reg->cpc_entry.reg, &delivered); - cpc_read(&reference_reg->cpc_entry.reg, &reference); + cpc_read(cpunum, delivered_reg, &delivered); + cpc_read(cpunum, reference_reg, &reference); + cpc_read(cpunum, ref_perf_reg, &ref_perf); - if (!delivered || !reference) { + /* + * Per spec, if the optional ctr_wrap_time register is unsupported, then the + * performance counters are assumed to never wrap during the lifetime of + * the platform + */ + ctr_wrap_time = (u64)(~((u64)0)); + if (CPC_SUPPORTED(ctr_wrap_reg)) + cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time); + + if (!delivered || !reference || !ref_perf) { ret = -EFAULT; goto out_err; } perf_fb_ctrs->delivered = delivered; perf_fb_ctrs->reference = reference; - - perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered; - perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference; - - perf_fb_ctrs->prev_delivered = delivered; - perf_fb_ctrs->prev_reference = reference; - + perf_fb_ctrs->reference_perf = ref_perf; + perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time; out_err: - spin_unlock(&pcc_lock); + if (regs_in_pcc) + 
up_write(&pcc_data.pcc_lock); return ret; } EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); @@ -855,30 +1105,142 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; - spin_lock(&pcc_lock); - - /* If this is PCC reg, check if channel is free before writing */ - if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - ret = check_pcc_chan(); - if (ret) - goto busy_channel; + /* + * This is Phase-I where we want to write to CPC registers + * -> We want all CPUs to be able to execute this phase in parallel + * + * Since read_lock can be acquired by multiple CPUs simultaneously we + * achieve that goal here + */ + if (CPC_IN_PCC(desired_reg)) { + down_read(&pcc_data.pcc_lock); /* BEGIN Phase-I */ + if (pcc_data.platform_owns_pcc) { + ret = check_pcc_chan(false); + if (ret) { + up_read(&pcc_data.pcc_lock); + return ret; + } + } + /* + * Update the pending_write to make sure a PCC CMD_READ will not + * arrive and steal the channel during the switch to write lock + */ + pcc_data.pending_pcc_write_cmd = true; + cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt; + cpc_desc->write_cmd_status = 0; } /* * Skip writing MIN/MAX until Linux knows how to come up with * useful values. */ - cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf); + cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); - /* Is this a PCC reg ?*/ - if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - /* Ring doorbell so Remote can get our perf request. */ - if (send_pcc_cmd(CMD_WRITE) < 0) - ret = -EIO; + if (CPC_IN_PCC(desired_reg)) + up_read(&pcc_data.pcc_lock); /* END Phase-I */ + /* + * This is Phase-II where we transfer the ownership of PCC to Platform + * + * Short summary: think of a group of cppc_set_perf requests that + * happen in a short overlapping interval; the last CPU to come out of + * Phase-I will enter Phase-II and ring the doorbell. + * + * We have the following requirements for Phase-II: + * 1. We want to execute Phase-II only when there are no CPUs + * currently executing in Phase-I + * 2. Once we start Phase-II we want to avoid all other CPUs from + * entering Phase-I. + * 3. We want only one CPU among all those who went through Phase-I + * to run Phase-II + * + * If write_trylock fails to get the lock and doesn't transfer the + * PCC ownership to the platform, then one of the following will be TRUE: + * 1. There is at least one CPU in Phase-I which will later execute + * write_trylock, so the CPUs in Phase-I will be responsible for + * executing the Phase-II. + * 2. Some other CPU has beaten this CPU to successfully execute the + * write_trylock and has already acquired the write_lock. We know for a + * fact that it (the other CPU acquiring the write_lock) couldn't have + * happened before this CPU's Phase-I as we held the read_lock. + * 3. Some other CPU executing a pcc CMD_READ has stolen the + * down_write, in which case send_pcc_cmd will check for pending + * CMD_WRITE commands by checking the pending_pcc_write_cmd. + * So this CPU can be certain that its request will be delivered. + * So in all cases, this CPU knows that its request will be delivered + * by another CPU and can return + * + * After getting the down_write we still need to check for + * pending_pcc_write_cmd to take care of the following scenario: + * The thread running this code could be scheduled out between + * Phase-I and Phase-II. Before it is scheduled back on, another CPU + * could have delivered the request to the platform by triggering the + * doorbell and transferred the ownership of PCC to the platform. So this + * avoids triggering an unnecessary doorbell and, more importantly, before + * triggering the doorbell it makes sure that the PCC channel ownership + * is still with OSPM. + * pending_pcc_write_cmd can also be cleared by a different CPU, if + * there was a pcc CMD_READ waiting on down_write and it steals the lock + * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this + * case during a CMD_READ and if there are pending writes it delivers + * the write command before servicing the read command. + */
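
The two-phase hand-off described in the comment above is easier to see outside the kernel. Below is a minimal user-space sketch of the same control flow, using a pthread rwlock in place of the kernel's rw_semaphore and a condition variable in place of the wait queue; all names (chan_lock, ring_doorbell, set_perf, ...) are illustrative, not the patch's, and pthread rwlocks do not guarantee the same reader/writer fairness as an rwsem.

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_rwlock_t chan_lock = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t cnt_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cnt_cond = PTHREAD_COND_INITIALIZER;
	static bool pending_write;	/* mirrors pcc_data.pending_pcc_write_cmd */
	static unsigned long write_cnt;	/* mirrors pcc_data.pcc_write_cnt */

	/* Stand-in for send_pcc_cmd(CMD_WRITE): delivers all batched requests. */
	static void ring_doorbell(void)
	{
		pthread_mutex_lock(&cnt_lock);
		if (pending_write) {
			/* ... the actual platform notification would go here ... */
			pending_write = false;
			write_cnt++;	/* tells waiters their write went out */
			pthread_cond_broadcast(&cnt_cond);
		}
		pthread_mutex_unlock(&cnt_lock);
	}

	void set_perf(unsigned int desired)
	{
		unsigned long my_id;

		/* Phase-I: any number of callers write their slot in parallel. */
		pthread_rwlock_rdlock(&chan_lock);
		pthread_mutex_lock(&cnt_lock);
		pending_write = true;
		my_id = write_cnt;	/* mirrors cpc_desc->write_cmd_id */
		pthread_mutex_unlock(&cnt_lock);
		(void)desired;		/* ... write "desired" into the shared channel here ... */
		pthread_rwlock_unlock(&chan_lock);

		/* Phase-II: exactly one of the racing callers rings the doorbell. */
		if (pthread_rwlock_trywrlock(&chan_lock) == 0) {
			ring_doorbell();
			pthread_rwlock_unlock(&chan_lock);
		} else {
			/* Somebody else owns the channel and will deliver for us. */
			pthread_mutex_lock(&cnt_lock);
			while (write_cnt == my_id)
				pthread_cond_wait(&cnt_cond, &cnt_lock);
			pthread_mutex_unlock(&cnt_lock);
		}
	}

The trywrlock can only succeed once no reader is left in Phase-I, which is exactly requirement 1 above; a caller that loses the race simply waits until the winner bumps the write counter.
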
+ if (CPC_IN_PCC(desired_reg)) { + if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */ + /* Update only if there are pending write commands */ + if (pcc_data.pending_pcc_write_cmd) + send_pcc_cmd(CMD_WRITE); + up_write(&pcc_data.pcc_lock); /* END Phase-II */ + } else + /* Wait until pcc_write_cnt is updated by send_pcc_cmd */ + wait_event(pcc_data.pcc_write_wait_q, + cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt); + + /* send_pcc_cmd updates the status in case of failure */ + ret = cpc_desc->write_cmd_status; } -busy_channel: - spin_unlock(&pcc_lock); - return ret; } EXPORT_SYMBOL_GPL(cppc_set_perf); + +/** + * cppc_get_transition_latency - returns frequency transition latency in ns + * + * ACPI CPPC does not explicitly specify how a platform can convey the + * transition latency for performance change requests. The closest we have + * is the timing information from the PCCT tables which provides the info + * on the number and frequency of PCC commands the platform can handle. + */ +unsigned int cppc_get_transition_latency(int cpu_num) +{ + /* + * Expected transition latency is based on the PCCT timing values + * Below are definitions from the ACPI spec: + * pcc_nominal - Expected latency to process a command, in microseconds + * pcc_mpar - The maximum number of periodic requests that the subspace + * channel can support, reported in commands per minute. 0 + * indicates no limitation. + * pcc_mrtt - The minimum amount of time that OSPM must wait after the + * completion of a command before issuing the next command, + * in microseconds. 
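
A worked example may help; the numbers below are made up rather than read from any real PCCT, but they follow the computation in the function body just below. With pcc_nominal = 100 us, pcc_mpar = 6000 commands per minute and pcc_mrtt = 60 us:

	latency_ns = 60 * (1000 * 1000 * 1000 / 6000);	/* MPAR term: 9999960 ns */
	latency_ns = max(latency_ns, 100 * 1000);	/* pcc_nominal * 1000 = 100000 ns */
	latency_ns = max(latency_ns, 60 * 1000);	/* pcc_mrtt * 1000 = 60000 ns */

The 60 * (10^9 / pcc_mpar) term converts a per-minute command budget into nanoseconds available per command, so here the MPAR rate limit dominates and roughly 10 ms is reported.
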
+ */ + unsigned int latency_ns = 0; + struct cpc_desc *cpc_desc; + struct cpc_register_resource *desired_reg; + + cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); + if (!cpc_desc) + return CPUFREQ_ETERNAL; + + desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; + if (!CPC_IN_PCC(desired_reg)) + return CPUFREQ_ETERNAL; + + if (pcc_data.pcc_mpar) + latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar); + + latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000); + latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000); + + return latency_ns; +} +EXPORT_SYMBOL_GPL(cppc_get_transition_latency); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index e7bd57cc550a..680531062160 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -104,10 +104,12 @@ enum ec_command { #define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */ enum { + EC_FLAGS_QUERY_ENABLED, /* Query is enabled */ EC_FLAGS_QUERY_PENDING, /* Query is pending */ EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */ EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */ EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */ + EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */ EC_FLAGS_STARTED, /* Driver is started */ EC_FLAGS_STOPPED, /* Driver is stopped */ EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the @@ -145,6 +147,10 @@ static unsigned int ec_storm_threshold __read_mostly = 8; module_param(ec_storm_threshold, uint, 0644); MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm"); +static bool ec_freeze_events __read_mostly = true; +module_param(ec_freeze_events, bool, 0644); +MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume"); + struct acpi_ec_query_handler { struct list_head node; acpi_ec_query_func func; @@ -179,6 +185,7 @@ static void acpi_ec_event_processor(struct work_struct *work); struct acpi_ec *boot_ec, *first_ec; EXPORT_SYMBOL(first_ec); +static bool boot_ec_is_ecdt = false; static struct workqueue_struct *ec_query_wq; static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ @@ -239,6 +246,30 @@ static bool acpi_ec_started(struct acpi_ec *ec) !test_bit(EC_FLAGS_STOPPED, &ec->flags); } +static bool acpi_ec_event_enabled(struct acpi_ec *ec) +{ + /* + * There is an OSPM early stage logic. During the early stages + * (boot/resume), OSPMs shouldn't enable the event handling, only + * the EC transactions are allowed to be performed. + */ + if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) + return false; + /* + * However, disabling the event handling is experimental for late + * stage (suspend), and is controlled by the boot parameter of + * "ec_freeze_events": + * 1. true: The EC event handling is disabled before entering + * the noirq stage. + * 2. false: The EC event handling is automatically disabled as + * soon as the EC driver is stopped. 
+ */ + if (ec_freeze_events) + return acpi_ec_started(ec); + else + return test_bit(EC_FLAGS_STARTED, &ec->flags); +} + static bool acpi_ec_flushed(struct acpi_ec *ec) { return ec->reference_count == 1; @@ -429,7 +460,8 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) static void acpi_ec_submit_query(struct acpi_ec *ec) { - if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { + if (acpi_ec_event_enabled(ec) && + !test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { ec_dbg_evt("Command(%s) submitted/blocked", acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); ec->nr_pending_queries++; @@ -446,6 +478,86 @@ static void acpi_ec_complete_query(struct acpi_ec *ec) } } +static inline void __acpi_ec_enable_event(struct acpi_ec *ec) +{ + if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) + ec_log_drv("event unblocked"); + if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) + advance_transaction(ec); +} + +static inline void __acpi_ec_disable_event(struct acpi_ec *ec) +{ + if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) + ec_log_drv("event blocked"); +} + +/* + * Process _Q events that might have accumulated in the EC. + * Run with locked ec mutex. + */ +static void acpi_ec_clear(struct acpi_ec *ec) +{ + int i, status; + u8 value = 0; + + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { + status = acpi_ec_query(ec, &value); + if (status || !value) + break; + } + if (unlikely(i == ACPI_EC_CLEAR_MAX)) + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); + else + pr_info("%d stale EC events cleared\n", i); +} + +static void acpi_ec_enable_event(struct acpi_ec *ec) +{ + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); + if (acpi_ec_started(ec)) + __acpi_ec_enable_event(ec); + spin_unlock_irqrestore(&ec->lock, flags); + + /* Drain additional events if hardware requires that */ + if (EC_FLAGS_CLEAR_ON_RESUME) + acpi_ec_clear(ec); +} + +static bool acpi_ec_query_flushed(struct acpi_ec *ec) +{ + bool flushed; + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); + flushed = !ec->nr_pending_queries; + spin_unlock_irqrestore(&ec->lock, flags); + return flushed; +} + +static void __acpi_ec_flush_event(struct acpi_ec *ec) +{ + /* + * When ec_freeze_events is true, we need to flush events in + * the proper position before entering the noirq stage. + */ + wait_event(ec->wait, acpi_ec_query_flushed(ec)); + if (ec_query_wq) + flush_workqueue(ec_query_wq); +} + +static void acpi_ec_disable_event(struct acpi_ec *ec) +{ + unsigned long flags; + + spin_lock_irqsave(&ec->lock, flags); + __acpi_ec_disable_event(ec); + spin_unlock_irqrestore(&ec->lock, flags); + __acpi_ec_flush_event(ec); +} + static bool acpi_ec_guard_event(struct acpi_ec *ec) { bool guarded = true; @@ -832,27 +944,6 @@ acpi_handle ec_get_handle(void) } EXPORT_SYMBOL(ec_get_handle); -/* - * Process _Q events that might have accumulated in the EC. - * Run with locked ec mutex. 
- */ -static void acpi_ec_clear(struct acpi_ec *ec) -{ - int i, status; - u8 value = 0; - - for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { - status = acpi_ec_query(ec, &value); - if (status || !value) - break; - } - - if (unlikely(i == ACPI_EC_CLEAR_MAX)) - pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); - else - pr_info("%d stale EC events cleared\n", i); -} - static void acpi_ec_start(struct acpi_ec *ec, bool resuming) { unsigned long flags; @@ -896,7 +987,8 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) if (!suspending) { acpi_ec_complete_request(ec); ec_dbg_ref(ec, "Decrease driver"); - } + } else if (!ec_freeze_events) + __acpi_ec_disable_event(ec); clear_bit(EC_FLAGS_STARTED, &ec->flags); clear_bit(EC_FLAGS_STOPPED, &ec->flags); ec_log_drv("EC stopped"); @@ -919,20 +1011,6 @@ void acpi_ec_block_transactions(void) void acpi_ec_unblock_transactions(void) { - struct acpi_ec *ec = first_ec; - - if (!ec) - return; - - /* Allow transactions to be carried out again */ - acpi_ec_start(ec, true); - - if (EC_FLAGS_CLEAR_ON_RESUME) - acpi_ec_clear(ec); -} - -void acpi_ec_unblock_transactions_early(void) -{ /* * Allow transactions to happen again (this function is called from * atomic context during wakeup, so we don't need to acquire the mutex). @@ -1228,13 +1306,21 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, static acpi_status ec_parse_io_ports(struct acpi_resource *resource, void *context); -static struct acpi_ec *make_acpi_ec(void) +static void acpi_ec_free(struct acpi_ec *ec) +{ + if (first_ec == ec) + first_ec = NULL; + if (boot_ec == ec) + boot_ec = NULL; + kfree(ec); +} + +static struct acpi_ec *acpi_ec_alloc(void) { struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL); if (!ec) return NULL; - ec->flags = 1 << EC_FLAGS_QUERY_PENDING; mutex_init(&ec->mutex); init_waitqueue_head(&ec->wait); INIT_LIST_HEAD(&ec->list); @@ -1290,7 +1376,12 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) return AE_CTRL_TERMINATE; } -static int ec_install_handlers(struct acpi_ec *ec) +/* + * Note: This function returns an error code only when the address space + * handler is not installed, which means "not able to handle + * transactions". 
+ */ +static int ec_install_handlers(struct acpi_ec *ec, bool handle_events) { acpi_status status; @@ -1319,6 +1410,16 @@ static int ec_install_handlers(struct acpi_ec *ec) set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); } + if (!handle_events) + return 0; + + if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) { + /* Find and register all query methods */ + acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, + acpi_ec_register_query_methods, + NULL, ec, NULL); + set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags); + } if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) { status = acpi_install_gpe_raw_handler(NULL, ec->gpe, ACPI_GPE_EDGE_TRIGGERED, @@ -1329,6 +1430,9 @@ static int ec_install_handlers(struct acpi_ec *ec) if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) acpi_ec_enable_gpe(ec, true); + + /* EC is fully operational, allow queries */ + acpi_ec_enable_event(ec); } } @@ -1363,23 +1467,104 @@ static void ec_remove_handlers(struct acpi_ec *ec) pr_err("failed to remove gpe handler\n"); clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags); } + if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) { + acpi_ec_remove_query_handlers(ec, true, 0); + clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags); + } } -static struct acpi_ec *acpi_ec_alloc(void) +static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events) { - struct acpi_ec *ec; + int ret; - /* Check for boot EC */ - if (boot_ec) { - ec = boot_ec; - boot_ec = NULL; - ec_remove_handlers(ec); - if (first_ec == ec) - first_ec = NULL; - } else { - ec = make_acpi_ec(); + ret = ec_install_handlers(ec, handle_events); + if (ret) + return ret; + + /* First EC capable of handling transactions */ + if (!first_ec) { + first_ec = ec; + acpi_handle_info(first_ec->handle, "Used as first EC\n"); } - return ec; + + acpi_handle_info(ec->handle, + "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", + ec->gpe, ec->command_addr, ec->data_addr); + return ret; +} + +static int acpi_config_boot_ec(struct acpi_ec *ec, acpi_handle handle, + bool handle_events, bool is_ecdt) +{ + int ret; + + /* + * Changing the ACPI handle results in a re-configuration of the + * boot EC. And if it happens after the namespace initialization, + * it causes _REG evaluations. + */ + if (boot_ec && boot_ec->handle != handle) + ec_remove_handlers(boot_ec); + + /* Unset old boot EC */ + if (boot_ec != ec) + acpi_ec_free(boot_ec); + + /* + * ECDT device creation is split into acpi_ec_ecdt_probe() and + * acpi_ec_ecdt_start(). This function takes care of completing the + * ECDT parsing logic as the handle update should be performed + * between the installation/uninstallation of the handlers. + */ + if (ec->handle != handle) + ec->handle = handle; + + ret = acpi_ec_setup(ec, handle_events); + if (ret) + return ret; + + /* Set new boot EC */ + if (!boot_ec) { + boot_ec = ec; + boot_ec_is_ecdt = is_ecdt; + } + + acpi_handle_info(boot_ec->handle, + "Used as boot %s EC to handle transactions%s\n", + is_ecdt ? "ECDT" : "DSDT", + handle_events ? 
" and events" : ""); + return ret; +} + +static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle) +{ + struct acpi_table_ecdt *ecdt_ptr; + acpi_status status; + acpi_handle handle; + + status = acpi_get_table(ACPI_SIG_ECDT, 1, + (struct acpi_table_header **)&ecdt_ptr); + if (ACPI_FAILURE(status)) + return false; + + status = acpi_get_handle(NULL, ecdt_ptr->id, &handle); + if (ACPI_FAILURE(status)) + return false; + + *phandle = handle; + return true; +} + +static bool acpi_is_boot_ec(struct acpi_ec *ec) +{ + if (!boot_ec) + return false; + if (ec->handle == boot_ec->handle && + ec->gpe == boot_ec->gpe && + ec->command_addr == boot_ec->command_addr && + ec->data_addr == boot_ec->data_addr) + return true; + return false; } static int acpi_ec_add(struct acpi_device *device) @@ -1395,16 +1580,21 @@ static int acpi_ec_add(struct acpi_device *device) return -ENOMEM; if (ec_parse_device(device->handle, 0, ec, NULL) != AE_CTRL_TERMINATE) { - kfree(ec); - return -EINVAL; + ret = -EINVAL; + goto err_alloc; } - /* Find and register all query methods */ - acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, - acpi_ec_register_query_methods, NULL, ec, NULL); + if (acpi_is_boot_ec(ec)) { + boot_ec_is_ecdt = false; + acpi_handle_debug(ec->handle, "duplicated.\n"); + acpi_ec_free(ec); + ec = boot_ec; + ret = acpi_config_boot_ec(ec, ec->handle, true, false); + } else + ret = acpi_ec_setup(ec, true); + if (ret) + goto err_query; - if (!first_ec) - first_ec = ec; device->driver_data = ec; ret = !!request_region(ec->data_addr, 1, "EC data"); @@ -1412,20 +1602,17 @@ static int acpi_ec_add(struct acpi_device *device) ret = !!request_region(ec->command_addr, 1, "EC cmd"); WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); - pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n", - ec->gpe, ec->command_addr, ec->data_addr); - - ret = ec_install_handlers(ec); - /* Reprobe devices depending on the EC */ acpi_walk_dep_device_list(ec->handle); + acpi_handle_debug(ec->handle, "enumerated.\n"); + return 0; - /* EC is fully operational, allow queries */ - clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - - /* Clear stale _Q events if hardware might require that */ - if (EC_FLAGS_CLEAR_ON_RESUME) - acpi_ec_clear(ec); +err_query: + if (ec != boot_ec) + acpi_ec_remove_query_handlers(ec, true, 0); +err_alloc: + if (ec != boot_ec) + acpi_ec_free(ec); return ret; } @@ -1437,14 +1624,13 @@ static int acpi_ec_remove(struct acpi_device *device) return -EINVAL; ec = acpi_driver_data(device); - ec_remove_handlers(ec); - acpi_ec_remove_query_handlers(ec, true, 0); release_region(ec->data_addr, 1); release_region(ec->command_addr, 1); device->driver_data = NULL; - if (ec == first_ec) - first_ec = NULL; - kfree(ec); + if (ec != boot_ec) { + ec_remove_handlers(ec); + acpi_ec_free(ec); + } return 0; } @@ -1486,9 +1672,8 @@ int __init acpi_ec_dsdt_probe(void) if (!ec) return -ENOMEM; /* - * Finding EC from DSDT if there is no ECDT EC available. When this - * function is invoked, ACPI tables have been fully loaded, we can - * walk namespace now. + * At this point, the namespace is initialized, so start to find + * the namespace objects. */ status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL); @@ -1496,16 +1681,47 @@ int __init acpi_ec_dsdt_probe(void) ret = -ENODEV; goto error; } - ret = ec_install_handlers(ec); - + /* + * When the DSDT EC is available, always re-configure boot EC to + * have _REG evaluated. _REG can only be evaluated after the + * namespace initialization. 
+ * At this point, the GPE is not fully initialized, so do not + * handle the events. + */ + ret = acpi_config_boot_ec(ec, ec->handle, false, false); error: if (ret) - kfree(ec); - else - first_ec = boot_ec = ec; + acpi_ec_free(ec); return ret; } +/* + * If the DSDT EC is not functioning, we still need to prepare a fully + * functioning ECDT EC first in order to handle the events. + * https://bugzilla.kernel.org/show_bug.cgi?id=115021 + */ +int __init acpi_ec_ecdt_start(void) +{ + acpi_handle handle; + + if (!boot_ec) + return -ENODEV; + /* + * The DSDT EC should have already been started in + * acpi_ec_add(). + */ + if (!boot_ec_is_ecdt) + return -ENODEV; + + /* + * At this point, the namespace and the GPE are initialized, so + * start to find the namespace objects and handle the events. + */ + if (!acpi_ec_ecdt_get_handle(&handle)) + return -ENODEV; + return acpi_config_boot_ec(boot_ec, handle, true, true); +} + #if 0 /* * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not @@ -1600,7 +1816,6 @@ int __init acpi_ec_ecdt_probe(void) goto error; } - pr_info("EC description table is found, configuring boot EC\n"); if (EC_FLAGS_CORRECT_ECDT) { ec->command_addr = ecdt_ptr->data.address; ec->data_addr = ecdt_ptr->control.address; @@ -1609,16 +1824,90 @@ } ec->data_addr = ecdt_ptr->data.address; } ec->gpe = ecdt_ptr->gpe; - ec->handle = ACPI_ROOT_OBJECT; - ret = ec_install_handlers(ec); + + /* + * At this point, the namespace is not initialized, so do not find + * the namespace objects, or handle the events. + */ + ret = acpi_config_boot_ec(ec, ACPI_ROOT_OBJECT, false, true); error: if (ret) - kfree(ec); - else - first_ec = boot_ec = ec; + acpi_ec_free(ec); return ret; } +#ifdef CONFIG_PM_SLEEP +static void acpi_ec_enter_noirq(struct acpi_ec *ec) +{ + unsigned long flags; + + if (ec == first_ec) { + spin_lock_irqsave(&ec->lock, flags); + ec->saved_busy_polling = ec_busy_polling; + ec->saved_polling_guard = ec_polling_guard; + ec_busy_polling = true; + ec_polling_guard = 0; + ec_log_drv("interrupt blocked"); + spin_unlock_irqrestore(&ec->lock, flags); + } +} + +static void acpi_ec_leave_noirq(struct acpi_ec *ec) +{ + unsigned long flags; + + if (ec == first_ec) { + spin_lock_irqsave(&ec->lock, flags); + ec_busy_polling = ec->saved_busy_polling; + ec_polling_guard = ec->saved_polling_guard; + ec_log_drv("interrupt unblocked"); + spin_unlock_irqrestore(&ec->lock, flags); + } +} + +static int acpi_ec_suspend_noirq(struct device *dev) +{ + struct acpi_ec *ec = + acpi_driver_data(to_acpi_device(dev)); + + acpi_ec_enter_noirq(ec); + return 0; +} + +static int acpi_ec_resume_noirq(struct device *dev) +{ + struct acpi_ec *ec = + acpi_driver_data(to_acpi_device(dev)); + + acpi_ec_leave_noirq(ec); + return 0; +} + +static int acpi_ec_suspend(struct device *dev) +{ + struct acpi_ec *ec = + acpi_driver_data(to_acpi_device(dev)); + + if (ec_freeze_events) + acpi_ec_disable_event(ec); + return 0; +} + +static int acpi_ec_resume(struct device *dev) +{ + struct acpi_ec *ec = + acpi_driver_data(to_acpi_device(dev)); + + acpi_ec_enable_event(ec); + return 0; +} +#endif + +static const struct dev_pm_ops acpi_ec_pm = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq) + SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) +}; + static int param_set_event_clearing(const char *val, struct kernel_param *kp) { int result = 0; @@ -1664,6 +1953,7 @@ static struct acpi_driver acpi_ec_driver = { .add = acpi_ec_add, .remove = acpi_ec_remove, }, + .drv.pm = &acpi_ec_pm, }; static inline int acpi_ec_query_init(void)
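
Taken together, the three boot-time entry points above stage the boot EC in a fixed order. A hedged summary of the acpi_config_boot_ec() calls visible in the hunks above (call sites simplified, not compilable in isolation):

	/* 1. acpi_ec_ecdt_probe(): ACPI tables only, no namespace yet;
	 *    transactions work, but no _REG evaluation and no events. */
	ret = acpi_config_boot_ec(ec, ACPI_ROOT_OBJECT, false, true);

	/* 2. acpi_ec_dsdt_probe(): namespace initialized, so _REG can be
	 *    evaluated, but the GPE is not ready, so still no events. */
	ret = acpi_config_boot_ec(ec, ec->handle, false, false);

	/* 3. acpi_ec_ecdt_start(), called from acpi_scan_init(): namespace
	 *    and GPE both ready, enable events for ECDT-only systems. */
	ret = acpi_config_boot_ec(boot_ec, handle, true, true);

The third and fourth arguments are handle_events and is_ecdt, matching the acpi_config_boot_ec() signature introduced earlier in this patch.
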
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 940218ff0193..4bf8bf16d096 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -116,7 +116,6 @@ bool acpi_device_is_present(struct acpi_device *adev); bool acpi_device_is_battery(struct acpi_device *adev); bool acpi_device_is_first_physical_node(struct acpi_device *adev, const struct device *dev); -struct device *acpi_get_first_physical_node(struct acpi_device *adev); /* -------------------------------------------------------------------------- Device Matching and Notification @@ -174,6 +173,8 @@ struct acpi_ec { struct work_struct work; unsigned long timestamp; unsigned long nr_pending_queries; + bool saved_busy_polling; + unsigned int saved_polling_guard; }; extern struct acpi_ec *first_ec; @@ -185,9 +186,9 @@ typedef int (*acpi_ec_query_func) (void *data); int acpi_ec_init(void); int acpi_ec_ecdt_probe(void); int acpi_ec_dsdt_probe(void); +int acpi_ec_ecdt_start(void); void acpi_ec_block_transactions(void); void acpi_ec_unblock_transactions(void); -void acpi_ec_unblock_transactions_early(void); int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, void *data); @@ -225,4 +226,14 @@ static inline void suspend_nvs_restore(void) {} void acpi_init_properties(struct acpi_device *adev); void acpi_free_properties(struct acpi_device *adev); +/*-------------------------------------------------------------------------- + Watchdog + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_WATCHDOG +void acpi_watchdog_init(void); +#else +static inline void acpi_watchdog_init(void) {} +#endif + #endif /* _ACPI_INTERNAL_H_ */ diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 2c45dd3acc17..c576a6fe4ebb 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -411,7 +411,15 @@ int acpi_pci_irq_enable(struct pci_dev *dev) int gsi; u8 pin; int triggering = ACPI_LEVEL_SENSITIVE; - int polarity = ACPI_ACTIVE_LOW; + /* + * On ARM systems with the GIC interrupt model, level interrupts + * are always polarity high by specification; PCI legacy + * IRQ lines are inverted before reaching the interrupt + * controller and must therefore be considered active high + * as default. + */ + int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ? 
+ ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW; char *link = NULL; char link_desc[16]; int rc; diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 0553aeebb228..8f8552a19e63 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -245,8 +245,8 @@ static int __acpi_processor_start(struct acpi_device *device) return 0; result = acpi_cppc_processor_probe(pr); - if (result) - return -ENODEV; + if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS)) + dev_warn(&device->dev, "CPPC data invalid or not present\n"); if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) acpi_processor_power_init(pr); diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index f2fd3fee588a..03f5ec11ab31 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -468,10 +468,11 @@ static int acpi_data_get_property_array(struct acpi_device_data *data, } /** - * acpi_data_get_property_reference - returns handle to the referenced object - * @data: ACPI device data object containing the property + * __acpi_node_get_property_reference - returns handle to the referenced object + * @fwnode: Firmware node to get the property from * @propname: Name of the property * @index: Index of the reference to return + * @num_args: Maximum number of arguments after each reference * @args: Location to store the returned reference with optional arguments * * Find property with @name, verify that it is a package containing at least @@ -482,17 +483,40 @@ static int acpi_data_get_property_array(struct acpi_device_data *data, * If there's more than one reference in the property value package, @index is * used to select the one to return. * + * It is possible to leave holes in the property value set like in the + * example below: + * + * Package () { + * "cs-gpios", + * Package () { + * ^GPIO, 19, 0, 0, + * ^GPIO, 20, 0, 0, + * 0, + * ^GPIO, 21, 0, 0, + * } + * } + * + * Calling this function with index %2 returns %-ENOENT and with index %3 + * returns the last entry. If the property does not contain any more values + * %-ENODATA is returned. The NULL entry must be a single integer and + * preferably contain the value %0. + * * Return: %0 on success, negative error code on failure. 
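
A hedged usage sketch against the ASL example above; fwnode is assumed to be the consumer device's firmware node, and acpi_node_get_property_reference() is assumed to be the convenience wrapper that supplies a default num_args:

	struct acpi_reference_args args;
	int ret;

	ret = acpi_node_get_property_reference(fwnode, "cs-gpios", 2, &args);
	/* ret == -ENOENT: index 2 is the integer 0 "hole" */

	ret = acpi_node_get_property_reference(fwnode, "cs-gpios", 3, &args);
	/* ret == 0: args.adev is the ^GPIO device, args.nargs == 3,
	 * args.args[] == { 21, 0, 0 } */
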
*/ -static int acpi_data_get_property_reference(struct acpi_device_data *data, - const char *propname, size_t index, - struct acpi_reference_args *args) +int __acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *propname, size_t index, size_t num_args, + struct acpi_reference_args *args) { const union acpi_object *element, *end; const union acpi_object *obj; + struct acpi_device_data *data; struct acpi_device *device; int ret, idx = 0; + data = acpi_device_data_of_node(fwnode); + if (!data) + return -EINVAL; + ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); if (ret) return ret; @@ -532,59 +556,54 @@ static int acpi_data_get_property_reference(struct acpi_device_data *data, while (element < end) { u32 nargs, i; - if (element->type != ACPI_TYPE_LOCAL_REFERENCE) - return -EPROTO; - - ret = acpi_bus_get_device(element->reference.handle, &device); - if (ret) - return -ENODEV; - - element++; - nargs = 0; - - /* assume following integer elements are all args */ - for (i = 0; element + i < end; i++) { - int type = element[i].type; + if (element->type == ACPI_TYPE_LOCAL_REFERENCE) { + ret = acpi_bus_get_device(element->reference.handle, + &device); + if (ret) + return -ENODEV; + + nargs = 0; + element++; + + /* assume following integer elements are all args */ + for (i = 0; element + i < end && i < num_args; i++) { + int type = element[i].type; + + if (type == ACPI_TYPE_INTEGER) + nargs++; + else if (type == ACPI_TYPE_LOCAL_REFERENCE) + break; + else + return -EPROTO; + } - if (type == ACPI_TYPE_INTEGER) - nargs++; - else if (type == ACPI_TYPE_LOCAL_REFERENCE) - break; - else + if (nargs > MAX_ACPI_REFERENCE_ARGS) return -EPROTO; - } - if (idx++ == index) { - args->adev = device; - args->nargs = nargs; - for (i = 0; i < nargs; i++) - args->args[i] = element[i].integer.value; + if (idx == index) { + args->adev = device; + args->nargs = nargs; + for (i = 0; i < nargs; i++) + args->args[i] = element[i].integer.value; - return 0; + return 0; + } + + element += nargs; + } else if (element->type == ACPI_TYPE_INTEGER) { + if (idx == index) + return -ENOENT; + element++; + } else { + return -EPROTO; } - element += nargs; + idx++; } - return -EPROTO; -} - -/** - * acpi_node_get_property_reference - get a handle to the referenced object. - * @fwnode: Firmware node to get the property from. - * @propname: Name of the property. - * @index: Index of the reference to return. - * @args: Location to store the returned reference with optional arguments. - */ -int acpi_node_get_property_reference(struct fwnode_handle *fwnode, - const char *name, size_t index, - struct acpi_reference_args *args) -{ - struct acpi_device_data *data = acpi_device_data_of_node(fwnode); - - return data ? 
acpi_data_get_property_reference(data, name, index, args) : -EINVAL; + return -ENODATA; } -EXPORT_SYMBOL_GPL(acpi_node_get_property_reference); +EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); static int acpi_data_prop_read_single(struct acpi_device_data *data, const char *propname, diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index e878fc799af7..035ac646d8db 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2002,6 +2002,7 @@ int __init acpi_scan_init(void) acpi_pnp_init(); acpi_int340x_thermal_init(); acpi_amba_init(); + acpi_watchdog_init(); acpi_scan_add_handler(&generic_device_handler); @@ -2044,6 +2045,7 @@ int __init acpi_scan_init(void) } acpi_update_all_gpes(); + acpi_ec_ecdt_start(); acpi_scan_initialized = true; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 2b38c1bb0446..deb0ff78eba8 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -572,7 +572,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); - if (pwr_btn_status & ACPI_EVENT_FLAG_SET) { + if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) { acpi_clear_event(ACPI_EVENT_POWER_BUTTON); /* Flag for later */ pwr_btn_event_pending = true; @@ -586,7 +586,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) */ acpi_disable_all_gpes(); /* Allow EC transactions to happen. */ - acpi_ec_unblock_transactions_early(); + acpi_ec_unblock_transactions(); suspend_nvs_restore(); @@ -784,7 +784,7 @@ static void acpi_hibernation_leave(void) /* Restore the NVS memory area */ suspend_nvs_restore(); /* Allow EC transactions to happen. */ - acpi_ec_unblock_transactions_early(); + acpi_ec_unblock_transactions(); } static void acpi_pm_thaw(void) diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 358165e9f5b8..703c26e7022c 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -314,10 +314,14 @@ static struct kobject *tables_kobj; static struct kobject *dynamic_tables_kobj; static struct kobject *hotplug_kobj; +#define ACPI_MAX_TABLE_INSTANCES 999 +#define ACPI_INST_SIZE 4 /* including trailing 0 */ + struct acpi_table_attr { struct bin_attribute attr; - char name[8]; + char name[ACPI_NAME_SIZE]; int instance; + char filename[ACPI_NAME_SIZE+ACPI_INST_SIZE]; struct list_head node; }; @@ -329,14 +333,9 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj, container_of(bin_attr, struct acpi_table_attr, attr); struct acpi_table_header *table_header = NULL; acpi_status status; - char name[ACPI_NAME_SIZE]; - - if (strncmp(table_attr->name, "NULL", 4)) - memcpy(name, table_attr->name, ACPI_NAME_SIZE); - else - memcpy(name, "\0\0\0\0", 4); - status = acpi_get_table(name, table_attr->instance, &table_header); + status = acpi_get_table(table_attr->name, table_attr->instance, + &table_header); if (ACPI_FAILURE(status)) return -ENODEV; @@ -344,38 +343,45 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj, table_header, table_header->length); } -static void acpi_table_attr_init(struct acpi_table_attr *table_attr, - struct acpi_table_header *table_header) +static int acpi_table_attr_init(struct kobject *tables_obj, + struct acpi_table_attr *table_attr, + struct acpi_table_header *table_header) { struct acpi_table_header *header = NULL; struct acpi_table_attr *attr = NULL; + char instance_str[ACPI_INST_SIZE]; sysfs_attr_init(&table_attr->attr.attr); - if (table_header->signature[0] != '\0') - memcpy(table_attr->name, table_header->signature, - ACPI_NAME_SIZE); - 
else - memcpy(table_attr->name, "NULL", 4); + ACPI_MOVE_NAME(table_attr->name, table_header->signature); list_for_each_entry(attr, &acpi_table_attr_list, node) { - if (!memcmp(table_attr->name, attr->name, ACPI_NAME_SIZE)) + if (ACPI_COMPARE_NAME(table_attr->name, attr->name)) if (table_attr->instance < attr->instance) table_attr->instance = attr->instance; } table_attr->instance++; + if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) { + pr_warn("%4.4s: too many table instances\n", + table_attr->name); + return -ERANGE; + } + ACPI_MOVE_NAME(table_attr->filename, table_header->signature); + table_attr->filename[ACPI_NAME_SIZE] = '\0'; if (table_attr->instance > 1 || (table_attr->instance == 1 && !acpi_get_table - (table_header->signature, 2, &header))) - sprintf(table_attr->name + ACPI_NAME_SIZE, "%d", - table_attr->instance); + (table_header->signature, 2, &header))) { + snprintf(instance_str, sizeof(instance_str), "%u", + table_attr->instance); + strcat(table_attr->filename, instance_str); + } table_attr->attr.size = table_header->length; table_attr->attr.read = acpi_table_show; - table_attr->attr.attr.name = table_attr->name; + table_attr->attr.attr.name = table_attr->filename; table_attr->attr.attr.mode = 0400; - return; + return sysfs_create_bin_file(tables_obj, &table_attr->attr); } acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context) @@ -383,21 +389,22 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context) struct acpi_table_attr *table_attr; switch (event) { - case ACPI_TABLE_EVENT_LOAD: + case ACPI_TABLE_EVENT_INSTALL: table_attr = kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL); if (!table_attr) return AE_NO_MEMORY; - acpi_table_attr_init(table_attr, table); - if (sysfs_create_bin_file(dynamic_tables_kobj, - &table_attr->attr)) { + if (acpi_table_attr_init(dynamic_tables_kobj, + table_attr, table)) { kfree(table_attr); return AE_ERROR; - } else - list_add_tail(&table_attr->node, &acpi_table_attr_list); + } + list_add_tail(&table_attr->node, &acpi_table_attr_list); break; + case ACPI_TABLE_EVENT_LOAD: case ACPI_TABLE_EVENT_UNLOAD: + case ACPI_TABLE_EVENT_UNINSTALL: /* * we do not need to do anything right now * because the table is not deleted from the @@ -435,13 +442,12 @@ static int acpi_tables_sysfs_init(void) if (ACPI_FAILURE(status)) continue; - table_attr = NULL; table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL); if (!table_attr) return -ENOMEM; - acpi_table_attr_init(table_attr, table_header); - ret = sysfs_create_bin_file(tables_kobj, &table_attr->attr); + ret = acpi_table_attr_init(tables_kobj, + table_attr, table_header); if (ret) { kfree(table_attr); return ret; @@ -597,14 +603,27 @@ static ssize_t counter_show(struct kobject *kobj, if (result) goto end; + if (status & ACPI_EVENT_FLAG_ENABLE_SET) + size += sprintf(buf + size, " EN"); + else + size += sprintf(buf + size, " "); + if (status & ACPI_EVENT_FLAG_STATUS_SET) + size += sprintf(buf + size, " STS"); + else + size += sprintf(buf + size, " "); + if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) - size += sprintf(buf + size, " invalid"); + size += sprintf(buf + size, " invalid "); else if (status & ACPI_EVENT_FLAG_ENABLED) - size += sprintf(buf + size, " enabled"); + size += sprintf(buf + size, " enabled "); else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED) - size += sprintf(buf + size, " wake_enabled"); + size += sprintf(buf + size, " wake_enabled"); + else + size += sprintf(buf + size, " disabled "); + if (status & ACPI_EVENT_FLAG_MASKED) + size += sprintf(buf + 
size, " masked "); else - size += sprintf(buf + size, " disabled"); + size += sprintf(buf + size, " unmasked"); end: size += sprintf(buf + size, "\n"); @@ -655,8 +674,12 @@ static ssize_t counter_set(struct kobject *kobj, !(status & ACPI_EVENT_FLAG_ENABLED)) result = acpi_enable_gpe(handle, index); else if (!strcmp(buf, "clear\n") && - (status & ACPI_EVENT_FLAG_SET)) + (status & ACPI_EVENT_FLAG_STATUS_SET)) result = acpi_clear_gpe(handle, index); + else if (!strcmp(buf, "mask\n")) + result = acpi_mask_gpe(handle, index, TRUE); + else if (!strcmp(buf, "unmask\n")) + result = acpi_mask_gpe(handle, index, FALSE); else if (!kstrtoul(buf, 0, &tmp)) all_counters[index].count = tmp; else @@ -664,13 +687,13 @@ static ssize_t counter_set(struct kobject *kobj, } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) { int event = index - num_gpes; if (!strcmp(buf, "disable\n") && - (status & ACPI_EVENT_FLAG_ENABLED)) + (status & ACPI_EVENT_FLAG_ENABLE_SET)) result = acpi_disable_event(event, ACPI_NOT_ISR); else if (!strcmp(buf, "enable\n") && - !(status & ACPI_EVENT_FLAG_ENABLED)) + !(status & ACPI_EVENT_FLAG_ENABLE_SET)) result = acpi_enable_event(event, ACPI_NOT_ISR); else if (!strcmp(buf, "clear\n") && - (status & ACPI_EVENT_FLAG_SET)) + (status & ACPI_EVENT_FLAG_STATUS_SET)) result = acpi_clear_event(event); else if (!kstrtoul(buf, 0, &tmp)) all_counters[index].count = tmp; diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 9f0ad6ebb368..cdd56c4657e0 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -35,7 +35,6 @@ #include <linux/earlycpio.h> #include <linux/memblock.h> #include <linux/initrd.h> -#include <linux/acpi.h> #include "internal.h" #ifdef CONFIG_ACPI_CUSTOM_DSDT @@ -246,6 +245,7 @@ acpi_parse_entries_array(char *id, unsigned long table_size, struct acpi_subtable_header *entry; unsigned long table_end; int count = 0; + int errs = 0; int i; if (acpi_disabled) @@ -278,10 +278,12 @@ acpi_parse_entries_array(char *id, unsigned long table_size, if (entry->type != proc[i].id) continue; if (!proc[i].handler || - proc[i].handler(entry, table_end)) - return -EINVAL; + (!errs && proc[i].handler(entry, table_end))) { + errs++; + continue; + } - proc->count++; + proc[i].count++; break; } if (i != proc_num) @@ -301,11 +303,11 @@ acpi_parse_entries_array(char *id, unsigned long table_size, } if (max_entries && count > max_entries) { - pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n", - id, proc->id, count - max_entries, count); + pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n", + id, proc->id, count); } - return count; + return errs ? -EINVAL : count; } int __init diff --git a/drivers/base/core.c b/drivers/base/core.c index 0a8bdade53f2..70c5be5b03a7 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1266,6 +1266,7 @@ void device_del(struct device *dev) bus_remove_device(dev); device_pm_remove(dev); driver_deferred_probe_del(dev); + device_remove_properties(dev); /* Notify the platform of the removal, in case they * need to do anything... diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index a1f2aff33997..e023066e4215 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -45,7 +45,7 @@ static DEFINE_MUTEX(gpd_list_lock); * and checks that the PM domain pointer is a real generic PM domain. * Any failure results in NULL being returned. 
*/ -struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev) +static struct generic_pm_domain *genpd_lookup_dev(struct device *dev) { struct generic_pm_domain *genpd = NULL, *gpd; @@ -586,7 +586,7 @@ static int __init genpd_poweroff_unused(void) } late_initcall(genpd_poweroff_unused); -#ifdef CONFIG_PM_SLEEP +#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF) /** * pm_genpd_present - Check if the given PM domain has been initialized. @@ -606,6 +606,10 @@ static bool pm_genpd_present(const struct generic_pm_domain *genpd) return false; } +#endif + +#ifdef CONFIG_PM_SLEEP + static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, struct device *dev) { @@ -613,9 +617,8 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, } /** - * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. + * genpd_sync_poweroff - Synchronously power off a PM domain and its masters. * @genpd: PM domain to power off, if possible. - * @timed: True if latency measurements are allowed. * * Check if the given PM domain can be powered off (during system suspend or * hibernation) and do that if so. Also, in that case propagate to its masters. @@ -625,8 +628,7 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, * executed sequentially, so it is guaranteed that it will never run twice in * parallel). */ -static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd, - bool timed) +static void genpd_sync_poweroff(struct generic_pm_domain *genpd) { struct gpd_link *link; @@ -639,28 +641,26 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd, /* Choose the deepest state when suspending */ genpd->state_idx = genpd->state_count - 1; - genpd_power_off(genpd, timed); + genpd_power_off(genpd, false); genpd->status = GPD_STATE_POWER_OFF; list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); - pm_genpd_sync_poweroff(link->master, timed); + genpd_sync_poweroff(link->master); } } /** - * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. + * genpd_sync_poweron - Synchronously power on a PM domain and its masters. * @genpd: PM domain to power on. - * @timed: True if latency measurements are allowed. * * This function is only called in "noirq" and "syscore" stages of system power * transitions, so it need not acquire locks (all of the "noirq" callbacks are * executed sequentially, so it is guaranteed that it will never run twice in * parallel). */ -static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, - bool timed) +static void genpd_sync_poweron(struct generic_pm_domain *genpd) { struct gpd_link *link; @@ -668,11 +668,11 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, return; list_for_each_entry(link, &genpd->slave_links, slave_node) { - pm_genpd_sync_poweron(link->master, timed); + genpd_sync_poweron(link->master); genpd_sd_counter_inc(link->master); } - genpd_power_on(genpd, timed); + genpd_power_on(genpd, false); genpd->status = GPD_STATE_ACTIVE; } @@ -784,7 +784,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) * the same PM domain, so it is not necessary to use locking here. */ genpd->suspended_count++; - pm_genpd_sync_poweroff(genpd, true); + genpd_sync_poweroff(genpd); return 0; } @@ -814,7 +814,7 @@ static int pm_genpd_resume_noirq(struct device *dev) * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. 
*/ - pm_genpd_sync_poweron(genpd, true); + genpd_sync_poweron(genpd); genpd->suspended_count--; if (genpd->dev_ops.stop && genpd->dev_ops.start) @@ -902,12 +902,12 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspended_count++ == 0) /* * The boot kernel might put the domain into arbitrary state, - * so make it appear as powered off to pm_genpd_sync_poweron(), + * so make it appear as powered off to genpd_sync_poweron(), * so that it tries to power it on in case it was really off. */ genpd->status = GPD_STATE_POWER_OFF; - pm_genpd_sync_poweron(genpd, true); + genpd_sync_poweron(genpd); if (genpd->dev_ops.stop && genpd->dev_ops.start) ret = pm_runtime_force_resume(dev); @@ -962,9 +962,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend) if (suspend) { genpd->suspended_count++; - pm_genpd_sync_poweroff(genpd, false); + genpd_sync_poweroff(genpd); } else { - pm_genpd_sync_poweron(genpd, false); + genpd_sync_poweron(genpd); genpd->suspended_count--; } } @@ -1056,14 +1056,8 @@ static void genpd_free_dev_data(struct device *dev, dev_pm_put_subsys_data(dev); } -/** - * __pm_genpd_add_device - Add a device to an I/O PM domain. - * @genpd: PM domain to add the device to. - * @dev: Device to be added. - * @td: Set of PM QoS timing parameters to attach to the device. - */ -int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, - struct gpd_timing_data *td) +static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, + struct gpd_timing_data *td) { struct generic_pm_domain_data *gpd_data; int ret = 0; @@ -1103,15 +1097,28 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, return ret; } -EXPORT_SYMBOL_GPL(__pm_genpd_add_device); /** - * pm_genpd_remove_device - Remove a device from an I/O PM domain. - * @genpd: PM domain to remove the device from. - * @dev: Device to be removed. + * __pm_genpd_add_device - Add a device to an I/O PM domain. + * @genpd: PM domain to add the device to. + * @dev: Device to be added. + * @td: Set of PM QoS timing parameters to attach to the device. */ -int pm_genpd_remove_device(struct generic_pm_domain *genpd, - struct device *dev) +int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, + struct gpd_timing_data *td) +{ + int ret; + + mutex_lock(&gpd_list_lock); + ret = genpd_add_device(genpd, dev, td); + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(__pm_genpd_add_device); + +static int genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev) { struct generic_pm_domain_data *gpd_data; struct pm_domain_data *pdd; @@ -1119,10 +1126,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, dev_dbg(dev, "%s()\n", __func__); - if (!genpd || genpd != pm_genpd_lookup_dev(dev)) - return -EINVAL; - - /* The above validation also means we have existing domain_data. */ pdd = dev->power.subsys_data->domain_data; gpd_data = to_gpd_data(pdd); dev_pm_qos_remove_notifier(dev, &gpd_data->nb); @@ -1154,15 +1157,24 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, return ret; } -EXPORT_SYMBOL_GPL(pm_genpd_remove_device); /** - * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. - * @genpd: Master PM domain to add the subdomain to. - * @subdomain: Subdomain to be added. + * pm_genpd_remove_device - Remove a device from an I/O PM domain. + * @genpd: PM domain to remove the device from. + * @dev: Device to be removed. 
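
A pattern worth noting in this hunk: the exported pm_genpd_*/of_genpd_* entry points now take gpd_list_lock themselves, while the static genpd_* helpers assume the lock is already held, so composite callers can batch a look-up and an update under a single acquisition. A minimal standalone sketch of that layering (all names illustrative, pthreads standing in for the kernel mutex):

	#include <pthread.h>

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Internal helper: caller must hold list_lock. */
	static int helper_add(int domain_id, int dev_id)
	{
		/* ... mutate the shared domain list ... */
		(void)domain_id;
		(void)dev_id;
		return 0;
	}

	/* Exported wrapper: locks around a single operation. */
	int api_add(int domain_id, int dev_id)
	{
		int ret;

		pthread_mutex_lock(&list_lock);
		ret = helper_add(domain_id, dev_id);
		pthread_mutex_unlock(&list_lock);
		return ret;
	}

	/* Composite caller: look-up plus update under one acquisition, the
	 * way of_genpd_add_device() later pairs the provider look-up with
	 * genpd_add_device(). */
	int api_lookup_and_add(int provider_id, int dev_id)
	{
		int ret, domain_id = provider_id;	/* stand-in look-up */

		pthread_mutex_lock(&list_lock);
		ret = helper_add(domain_id, dev_id);
		pthread_mutex_unlock(&list_lock);
		return ret;
	}
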
*/ -int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *subdomain) +int pm_genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + if (!genpd || genpd != genpd_lookup_dev(dev)) + return -EINVAL; + + return genpd_remove_device(genpd, dev); +} +EXPORT_SYMBOL_GPL(pm_genpd_remove_device); + +static int genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *subdomain) { struct gpd_link *link, *itr; int ret = 0; @@ -1205,6 +1217,23 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, kfree(link); return ret; } + +/** + * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. + * @genpd: Master PM domain to add the subdomain to. + * @subdomain: Subdomain to be added. + */ +int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *subdomain) +{ + int ret; + + mutex_lock(&gpd_list_lock); + ret = genpd_add_subdomain(genpd, subdomain); + mutex_unlock(&gpd_list_lock); + + return ret; +} EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); /** @@ -1278,27 +1307,17 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = true; + genpd->provider = NULL; + genpd->has_provider = false; genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; genpd->domain.ops.runtime_resume = genpd_runtime_resume; genpd->domain.ops.prepare = pm_genpd_prepare; - genpd->domain.ops.suspend = pm_generic_suspend; - genpd->domain.ops.suspend_late = pm_generic_suspend_late; genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; - genpd->domain.ops.resume_early = pm_generic_resume_early; - genpd->domain.ops.resume = pm_generic_resume; - genpd->domain.ops.freeze = pm_generic_freeze; - genpd->domain.ops.freeze_late = pm_generic_freeze_late; genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; - genpd->domain.ops.thaw_early = pm_generic_thaw_early; - genpd->domain.ops.thaw = pm_generic_thaw; - genpd->domain.ops.poweroff = pm_generic_poweroff; - genpd->domain.ops.poweroff_late = pm_generic_poweroff_late; genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; - genpd->domain.ops.restore_early = pm_generic_restore_early; - genpd->domain.ops.restore = pm_generic_restore; genpd->domain.ops.complete = pm_genpd_complete; if (genpd->flags & GENPD_FLAG_PM_CLK) { @@ -1328,7 +1347,71 @@ int pm_genpd_init(struct generic_pm_domain *genpd, } EXPORT_SYMBOL_GPL(pm_genpd_init); +static int genpd_remove(struct generic_pm_domain *genpd) +{ + struct gpd_link *l, *link; + + if (IS_ERR_OR_NULL(genpd)) + return -EINVAL; + + mutex_lock(&genpd->lock); + + if (genpd->has_provider) { + mutex_unlock(&genpd->lock); + pr_err("Provider present, unable to remove %s\n", genpd->name); + return -EBUSY; + } + + if (!list_empty(&genpd->master_links) || genpd->device_count) { + mutex_unlock(&genpd->lock); + pr_err("%s: unable to remove %s\n", __func__, genpd->name); + return -EBUSY; + } + + list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) { + list_del(&link->master_node); + list_del(&link->slave_node); + kfree(link); + } + + list_del(&genpd->gpd_list_node); + mutex_unlock(&genpd->lock); + cancel_work_sync(&genpd->power_off_work); + pr_debug("%s: removed %s\n", __func__, genpd->name); + + return 0; +} + +/** + * pm_genpd_remove - Remove a generic I/O PM domain + * @genpd: Pointer 
to PM domain that is to be removed. + * + * To remove the PM domain, this function: + * - Removes the PM domain as a subdomain to any parent domains, + * if it was added. + * - Removes the PM domain from the list of registered PM domains. + * + * The PM domain will only be removed, if the associated provider has + * been removed, it is not a parent to any other PM domain and has no + * devices associated with it. + */ +int pm_genpd_remove(struct generic_pm_domain *genpd) +{ + int ret; + + mutex_lock(&gpd_list_lock); + ret = genpd_remove(genpd); + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(pm_genpd_remove); + #ifdef CONFIG_PM_GENERIC_DOMAINS_OF + +typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, + void *data); + /* * Device Tree based PM domain providers. * @@ -1340,8 +1423,8 @@ EXPORT_SYMBOL_GPL(pm_genpd_init); * maps a PM domain specifier retrieved from the device tree to a PM domain. * * Two simple mapping functions have been provided for convenience: - * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. - * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by + * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. + * - genpd_xlate_onecell() for mapping of multiple PM domains per node by * index. */ @@ -1366,7 +1449,7 @@ static LIST_HEAD(of_genpd_providers); static DEFINE_MUTEX(of_genpd_mutex); /** - * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping + * genpd_xlate_simple() - Xlate function for direct node-domain mapping * @genpdspec: OF phandle args to map into a PM domain * @data: xlate function private data - pointer to struct generic_pm_domain * @@ -1374,7 +1457,7 @@ static DEFINE_MUTEX(of_genpd_mutex); * have their own device tree nodes. The private data of xlate function needs * to be a valid pointer to struct generic_pm_domain. */ -struct generic_pm_domain *__of_genpd_xlate_simple( +static struct generic_pm_domain *genpd_xlate_simple( struct of_phandle_args *genpdspec, void *data) { @@ -1382,10 +1465,9 @@ struct generic_pm_domain *__of_genpd_xlate_simple( return ERR_PTR(-EINVAL); return data; } -EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple); /** - * __of_genpd_xlate_onecell() - Xlate function using a single index. + * genpd_xlate_onecell() - Xlate function using a single index. * @genpdspec: OF phandle args to map into a PM domain * @data: xlate function private data - pointer to struct genpd_onecell_data * @@ -1394,7 +1476,7 @@ EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple); * A single cell is used as an index into an array of PM domains specified in * the genpd_onecell_data struct when registering the provider. */ -struct generic_pm_domain *__of_genpd_xlate_onecell( +static struct generic_pm_domain *genpd_xlate_onecell( struct of_phandle_args *genpdspec, void *data) { @@ -1414,16 +1496,15 @@ struct generic_pm_domain *__of_genpd_xlate_onecell( return genpd_data->domains[idx]; } -EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell); /** - * __of_genpd_add_provider() - Register a PM domain provider for a node + * genpd_add_provider() - Register a PM domain provider for a node * @np: Device node pointer associated with the PM domain provider. * @xlate: Callback for decoding PM domain from phandle arguments. * @data: Context pointer for @xlate callback. 
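
From a driver author's point of view, the net effect of the provider API below is easiest to see in a probe path. A hedged sketch (my_pd and my_probe are hypothetical and error unwinding is omitted; pm_genpd_init() returning an error code matches the hunk above):

	static struct generic_pm_domain my_pd = {
		.name = "my-power-domain",
	};

	static int my_probe(struct platform_device *pdev)
	{
		int ret;

		/* Register the domain; true means it starts powered off. */
		ret = pm_genpd_init(&my_pd, NULL, true);
		if (ret)
			return ret;

		/* Tie the domain to its DT node so of_genpd_* consumers can
		 * find it; this also sets has_provider, which genpd_remove()
		 * checks before allowing removal. */
		return of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
	}
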
*/ -int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, - void *data) +static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, + void *data) { struct of_genpd_provider *cp; @@ -1442,7 +1523,83 @@ int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, return 0; } -EXPORT_SYMBOL_GPL(__of_genpd_add_provider); + +/** + * of_genpd_add_provider_simple() - Register a simple PM domain provider + * @np: Device node pointer associated with the PM domain provider. + * @genpd: Pointer to PM domain associated with the PM domain provider. + */ +int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd) +{ + int ret = -EINVAL; + + if (!np || !genpd) + return -EINVAL; + + mutex_lock(&gpd_list_lock); + + if (pm_genpd_present(genpd)) + ret = genpd_add_provider(np, genpd_xlate_simple, genpd); + + if (!ret) { + genpd->provider = &np->fwnode; + genpd->has_provider = true; + } + + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); + +/** + * of_genpd_add_provider_onecell() - Register a onecell PM domain provider + * @np: Device node pointer associated with the PM domain provider. + * @data: Pointer to the data associated with the PM domain provider. + */ +int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data) +{ + unsigned int i; + int ret = -EINVAL; + + if (!np || !data) + return -EINVAL; + + mutex_lock(&gpd_list_lock); + + for (i = 0; i < data->num_domains; i++) { + if (!data->domains[i]) + continue; + if (!pm_genpd_present(data->domains[i])) + goto error; + + data->domains[i]->provider = &np->fwnode; + data->domains[i]->has_provider = true; + } + + ret = genpd_add_provider(np, genpd_xlate_onecell, data); + if (ret < 0) + goto error; + + mutex_unlock(&gpd_list_lock); + + return 0; + +error: + while (i--) { + if (!data->domains[i]) + continue; + data->domains[i]->provider = NULL; + data->domains[i]->has_provider = false; + } + + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); /** * of_genpd_del_provider() - Remove a previously registered PM domain provider @@ -1451,10 +1608,21 @@ EXPORT_SYMBOL_GPL(__of_genpd_add_provider); void of_genpd_del_provider(struct device_node *np) { struct of_genpd_provider *cp; + struct generic_pm_domain *gpd; + mutex_lock(&gpd_list_lock); mutex_lock(&of_genpd_mutex); list_for_each_entry(cp, &of_genpd_providers, link) { if (cp->node == np) { + /* + * For each PM domain associated with the + * provider, set the 'has_provider' to false + * so that the PM domain can be safely removed. + */ + list_for_each_entry(gpd, &gpd_list, gpd_list_node) + if (gpd->provider == &np->fwnode) + gpd->has_provider = false; + list_del(&cp->link); of_node_put(cp->node); kfree(cp); @@ -1462,11 +1630,12 @@ void of_genpd_del_provider(struct device_node *np) } } mutex_unlock(&of_genpd_mutex); + mutex_unlock(&gpd_list_lock); } EXPORT_SYMBOL_GPL(of_genpd_del_provider); /** - * of_genpd_get_from_provider() - Look-up PM domain + * genpd_get_from_provider() - Look-up PM domain * @genpdspec: OF phandle args to use for look-up * * Looks for a PM domain provider under the node specified by @genpdspec and if @@ -1476,7 +1645,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider); * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() * on failure. 
 */ -struct generic_pm_domain *of_genpd_get_from_provider( +static struct generic_pm_domain *genpd_get_from_provider( struct of_phandle_args *genpdspec) { struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); @@ -1499,7 +1668,109 @@ struct generic_pm_domain *of_genpd_get_from_provider( return genpd; } -EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); + +/** + * of_genpd_add_device() - Add a device to an I/O PM domain + * @genpdspec: OF phandle args to use for PM domain look-up + * @dev: Device to be added. + * + * Looks up an I/O PM domain based upon the phandle args provided and adds + * the device to the PM domain. Returns a negative error code on failure. + */ +int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + mutex_lock(&gpd_list_lock); + + genpd = genpd_get_from_provider(genpdspec); + if (IS_ERR(genpd)) { + ret = PTR_ERR(genpd); + goto out; + } + + ret = genpd_add_device(genpd, dev, NULL); + +out: + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_add_device); + +/** + * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain. + * @parent_spec: OF phandle args to use for parent PM domain look-up + * @subdomain_spec: OF phandle args to use for subdomain look-up + * + * Looks up a parent PM domain and a subdomain based upon the phandle args + * provided and adds the subdomain to the parent PM domain. Returns a + * negative error code on failure. + */ +int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, + struct of_phandle_args *subdomain_spec) +{ + struct generic_pm_domain *parent, *subdomain; + int ret; + + mutex_lock(&gpd_list_lock); + + parent = genpd_get_from_provider(parent_spec); + if (IS_ERR(parent)) { + ret = PTR_ERR(parent); + goto out; + } + + subdomain = genpd_get_from_provider(subdomain_spec); + if (IS_ERR(subdomain)) { + ret = PTR_ERR(subdomain); + goto out; + } + + ret = genpd_add_subdomain(parent, subdomain); + +out: + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); + +/** + * of_genpd_remove_last - Remove the last PM domain registered for a provider + * @np: Pointer to device node associated with the provider + * + * Find the last PM domain that was added by a particular provider and + * remove this PM domain from the list of PM domains. The provider is + * identified by the device node that is passed in. The PM domain will + * only be removed if the provider associated with the domain has been + * removed. + * + * Returns a valid pointer to struct generic_pm_domain on success or + * ERR_PTR() on failure. + */ +struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) +{ + struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT); + int ret; + + if (IS_ERR_OR_NULL(np)) + return ERR_PTR(-EINVAL); + + mutex_lock(&gpd_list_lock); + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (gpd->provider == &np->fwnode) { + ret = genpd_remove(gpd); + genpd = ret ? ERR_PTR(ret) : gpd; + break; + } + } + mutex_unlock(&gpd_list_lock); + + return genpd; +} +EXPORT_SYMBOL_GPL(of_genpd_remove_last); /** * genpd_dev_pm_detach - Detach a device from its PM domain.
@@ -1515,14 +1786,14 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off) unsigned int i; int ret = 0; - pd = pm_genpd_lookup_dev(dev); - if (!pd) + pd = dev_to_genpd(dev); + if (IS_ERR(pd)) return; dev_dbg(dev, "removing from PM domain %s\n", pd->name); for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { - ret = pm_genpd_remove_device(pd, dev); + ret = genpd_remove_device(pd, dev); if (ret != -EAGAIN) break; @@ -1596,9 +1867,11 @@ int genpd_dev_pm_attach(struct device *dev) return -ENOENT; } - pd = of_genpd_get_from_provider(&pd_args); + mutex_lock(&gpd_list_lock); + pd = genpd_get_from_provider(&pd_args); of_node_put(pd_args.np); if (IS_ERR(pd)) { + mutex_unlock(&gpd_list_lock); dev_dbg(dev, "%s() failed to find PM domain: %ld\n", __func__, PTR_ERR(pd)); return -EPROBE_DEFER; @@ -1607,13 +1880,14 @@ int genpd_dev_pm_attach(struct device *dev) dev_dbg(dev, "adding to PM domain %s\n", pd->name); for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { - ret = pm_genpd_add_device(pd, dev); + ret = genpd_add_device(pd, dev, NULL); if (ret != -EAGAIN) break; mdelay(i); cond_resched(); } + mutex_unlock(&gpd_list_lock); if (ret < 0) { dev_err(dev, "failed to add to PM domain %s: %d", @@ -1636,7 +1910,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); /*** debugfs support ***/ -#ifdef CONFIG_PM_ADVANCED_DEBUG +#ifdef CONFIG_DEBUG_FS #include <linux/pm.h> #include <linux/device.h> #include <linux/debugfs.h> @@ -1784,4 +2058,4 @@ static void __exit pm_genpd_debug_exit(void) debugfs_remove_recursive(pm_genpd_debugfs_dir); } __exitcall(pm_genpd_debug_exit); -#endif /* CONFIG_PM_ADVANCED_DEBUG */ +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index df0c70963d9e..4c7c6da7a989 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -584,7 +584,6 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) struct clk *clk; unsigned long freq, old_freq; unsigned long u_volt, u_volt_min, u_volt_max; - unsigned long ou_volt, ou_volt_min, ou_volt_max; int ret; if (unlikely(!target_freq)) { @@ -620,11 +619,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) } old_opp = _find_freq_ceil(opp_table, &old_freq); - if (!IS_ERR(old_opp)) { - ou_volt = old_opp->u_volt; - ou_volt_min = old_opp->u_volt_min; - ou_volt_max = old_opp->u_volt_max; - } else { + if (IS_ERR(old_opp)) { dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n", __func__, old_freq, PTR_ERR(old_opp)); } @@ -683,7 +678,8 @@ restore_freq: restore_voltage: /* This shouldn't harm even if the voltages weren't updated earlier */ if (!IS_ERR(old_opp)) - _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max); + _set_opp_voltage(dev, reg, old_opp->u_volt, + old_opp->u_volt_min, old_opp->u_volt_max); return ret; } diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 1dfd3dd92624..5552211e6fcd 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c @@ -71,8 +71,18 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, u32 version; int ret; - if (!opp_table->supported_hw) - return true; + if (!opp_table->supported_hw) { + /* + * In the case that no supported_hw has been set by the + * platform but there is an opp-supported-hw value set for + * an OPP then the OPP should not be enabled as there is + * no way to see if the hardware supports it. 
+ */ + if (of_find_property(np, "opp-supported-hw", NULL)) + return false; + else + return true; + } while (count--) { ret = of_property_read_u32_index(np, "opp-supported-hw", count, diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 567788664723..8a753fd5b79d 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -305,6 +305,16 @@ config ARM_ARCH_TIMER_EVTSTREAM This must be disabled for hardware validation purposes to detect any hardware anomalies of missing events. +config FSL_ERRATUM_A008585 + bool "Workaround for Freescale/NXP Erratum A-008585" + default y + depends on ARM_ARCH_TIMER && ARM64 + help + This option enables a workaround for Freescale/NXP Erratum + A-008585 ("ARM generic timer may contain an erroneous + value"). The workaround will only be active if the + fsl,erratum-a008585 property is found in the timer node. + config ARM_GLOBAL_TIMER bool "Support for the ARM global timer" if COMPILE_TEST select CLKSRC_OF if OF diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 57700541f951..73c487da6d2a 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -94,6 +94,43 @@ early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg); * Architected system timer support. */ +#ifdef CONFIG_FSL_ERRATUM_A008585 +DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled); +EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled); + +static int fsl_a008585_enable = -1; + +static int __init early_fsl_a008585_cfg(char *buf) +{ + int ret; + bool val; + + ret = strtobool(buf, &val); + if (ret) + return ret; + + fsl_a008585_enable = val; + return 0; +} +early_param("clocksource.arm_arch_timer.fsl-a008585", early_fsl_a008585_cfg); + +u32 __fsl_a008585_read_cntp_tval_el0(void) +{ + return __fsl_a008585_read_reg(cntp_tval_el0); +} + +u32 __fsl_a008585_read_cntv_tval_el0(void) +{ + return __fsl_a008585_read_reg(cntv_tval_el0); +} + +u64 __fsl_a008585_read_cntvct_el0(void) +{ + return __fsl_a008585_read_reg(cntvct_el0); +} +EXPORT_SYMBOL(__fsl_a008585_read_cntvct_el0); +#endif /* CONFIG_FSL_ERRATUM_A008585 */ + static __always_inline void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, struct clock_event_device *clk) @@ -243,6 +280,40 @@ static __always_inline void set_next_event(const int access, unsigned long evt, arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); } +#ifdef CONFIG_FSL_ERRATUM_A008585 +static __always_inline void fsl_a008585_set_next_event(const int access, + unsigned long evt, struct clock_event_device *clk) +{ + unsigned long ctrl; + u64 cval = evt + arch_counter_get_cntvct(); + + ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); + ctrl |= ARCH_TIMER_CTRL_ENABLE; + ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; + + if (access == ARCH_TIMER_PHYS_ACCESS) + write_sysreg(cval, cntp_cval_el0); + else if (access == ARCH_TIMER_VIRT_ACCESS) + write_sysreg(cval, cntv_cval_el0); + + arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); +} + +static int fsl_a008585_set_next_event_virt(unsigned long evt, + struct clock_event_device *clk) +{ + fsl_a008585_set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); + return 0; +} + +static int fsl_a008585_set_next_event_phys(unsigned long evt, + struct clock_event_device *clk) +{ + fsl_a008585_set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); + return 0; +} +#endif /* CONFIG_FSL_ERRATUM_A008585 */ + static int arch_timer_set_next_event_virt(unsigned long evt, struct clock_event_device *clk) { @@ 
-271,6 +342,19 @@ static int arch_timer_set_next_event_phys_mem(unsigned long evt, return 0; } +static void fsl_a008585_set_sne(struct clock_event_device *clk) +{ +#ifdef CONFIG_FSL_ERRATUM_A008585 + if (!static_branch_unlikely(&arch_timer_read_ool_enabled)) + return; + + if (arch_timer_uses_ppi == VIRT_PPI) + clk->set_next_event = fsl_a008585_set_next_event_virt; + else + clk->set_next_event = fsl_a008585_set_next_event_phys; +#endif +} + static void __arch_timer_setup(unsigned type, struct clock_event_device *clk) { @@ -299,6 +383,8 @@ static void __arch_timer_setup(unsigned type, default: BUG(); } + + fsl_a008585_set_sne(clk); } else { clk->features |= CLOCK_EVT_FEAT_DYNIRQ; clk->name = "arch_mem_timer"; @@ -515,15 +601,19 @@ static void __init arch_counter_register(unsigned type) arch_timer_read_counter = arch_counter_get_cntvct; else arch_timer_read_counter = arch_counter_get_cntpct; - } else { - arch_timer_read_counter = arch_counter_get_cntvct_mem; - /* If the clocksource name is "arch_sys_counter" the - * VDSO will attempt to read the CP15-based counter. - * Ensure this does not happen when CP15-based - * counter is not available. + clocksource_counter.archdata.vdso_direct = true; + +#ifdef CONFIG_FSL_ERRATUM_A008585 + /* + * Don't use the vdso fastpath if errata require using + * the out-of-line counter accessor. */ - clocksource_counter.name = "arch_mem_counter"; + if (static_branch_unlikely(&arch_timer_read_ool_enabled)) + clocksource_counter.archdata.vdso_direct = false; +#endif + } else { + arch_timer_read_counter = arch_counter_get_cntvct_mem; } start_count = arch_timer_read_counter(); @@ -800,6 +890,15 @@ static int __init arch_timer_of_init(struct device_node *np) arch_timer_c3stop = !of_property_read_bool(np, "always-on"); +#ifdef CONFIG_FSL_ERRATUM_A008585 + if (fsl_a008585_enable < 0) + fsl_a008585_enable = of_property_read_bool(np, "fsl,erratum-a008585"); + if (fsl_a008585_enable) { + static_branch_enable(&arch_timer_read_ool_enabled); + pr_info("Enabling workaround for FSL erratum A-008585\n"); + } +#endif + /* * If we cannot rely on firmware initializing the timer registers then * we should use the physical timers instead. diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 74919aa81dcb..d8b164a7c4e5 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -194,7 +194,7 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. config CPU_FREQ_GOV_SCHEDUTIL - tristate "'schedutil' cpufreq policy governor" + bool "'schedutil' cpufreq policy governor" depends on CPU_FREQ && SMP select CPU_FREQ_GOV_ATTR_SET select IRQ_WORK @@ -208,9 +208,6 @@ config CPU_FREQ_GOV_SCHEDUTIL frequency tipping point is at utilization/capacity equal to 80% in both cases. - To compile this driver as a module, choose M here: the module will - be called cpufreq_schedutil. - If in doubt, say N. comment "CPU frequency scaling drivers" @@ -225,7 +222,7 @@ config CPUFREQ_DT help This adds a generic DT based cpufreq driver for frequency management. It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) - systems which share clock and voltage across all CPUs. + systems. If in doubt, say N. 
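The __fsl_a008585_read_*() helpers in the arm_arch_timer.c hunk above all funnel through __fsl_a008585_read_reg(), which lives in the arm64 arch_timer.h header and is not part of this excerpt. Since the erratum can corrupt a single read of the timer registers near a roll-over of the low bits, the accessor simply re-reads until two back-to-back reads agree. A minimal sketch of that retry loop, reconstructed from the arm64 header; the retry bound of 200 and the WARN are illustrative rather than normative:

#define __fsl_a008585_read_reg(reg) ({					\
	u64 _old, _new;							\
	int _retries = 200;						\
									\
	do {								\
		/* Back-to-back reads; a corrupted value will not repeat. */ \
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely(_old != _new) && _retries);			\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})

The same reasoning explains fsl_a008585_set_next_event() above: programming CVAL directly from a workaround-protected counter read avoids the TVAL path, where the hardware itself would add the delta to a counter value the erratum may have corrupted.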
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8882b8e2ecd0..1b2f28f69a81 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -19,10 +19,19 @@ #include <linux/delay.h> #include <linux/cpu.h> #include <linux/cpufreq.h> +#include <linux/dmi.h> #include <linux/vmalloc.h> +#include <asm/unaligned.h> + #include <acpi/cppc_acpi.h> +/* Minimum struct length needed for the DMI processor entry we want */ +#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 + +/* Offest in the DMI processor structure for the max frequency */ +#define DMI_PROCESSOR_MAX_SPEED 0x14 + /* * These structs contain information parsed from per CPU * ACPI _CPC structures. @@ -30,19 +39,52 @@ * performance capabilities, desired performance level * requested etc. */ -static struct cpudata **all_cpu_data; +static struct cppc_cpudata **all_cpu_data; + +/* Capture the max KHz from DMI */ +static u64 cppc_dmi_max_khz; + +/* Callback function used to retrieve the max frequency from DMI */ +static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) +{ + const u8 *dmi_data = (const u8 *)dm; + u16 *mhz = (u16 *)private; + + if (dm->type == DMI_ENTRY_PROCESSOR && + dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { + u16 val = (u16)get_unaligned((const u16 *) + (dmi_data + DMI_PROCESSOR_MAX_SPEED)); + *mhz = val > *mhz ? val : *mhz; + } +} + +/* Look up the max frequency in DMI */ +static u64 cppc_get_dmi_max_khz(void) +{ + u16 mhz = 0; + + dmi_walk(cppc_find_dmi_mhz, &mhz); + + /* + * Real stupid fallback value, just in case there is no + * actual value set. + */ + mhz = mhz ? mhz : 1; + + return (1000 * mhz); +} static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - struct cpudata *cpu; + struct cppc_cpudata *cpu; struct cpufreq_freqs freqs; int ret = 0; cpu = all_cpu_data[policy->cpu]; - cpu->perf_ctrls.desired_perf = target_freq; + cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz; freqs.old = policy->cur; freqs.new = target_freq; @@ -66,7 +108,7 @@ static int cppc_verify_policy(struct cpufreq_policy *policy) static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) { int cpu_num = policy->cpu; - struct cpudata *cpu = all_cpu_data[cpu_num]; + struct cppc_cpudata *cpu = all_cpu_data[cpu_num]; int ret; cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf; @@ -79,7 +121,7 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) { - struct cpudata *cpu; + struct cppc_cpudata *cpu; unsigned int cpu_num = policy->cpu; int ret = 0; @@ -94,10 +136,13 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) return ret; } - policy->min = cpu->perf_caps.lowest_perf; - policy->max = cpu->perf_caps.highest_perf; + cppc_dmi_max_khz = cppc_get_dmi_max_khz(); + + policy->min = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz / cpu->perf_caps.highest_perf; + policy->max = cppc_dmi_max_khz; policy->cpuinfo.min_freq = policy->min; policy->cpuinfo.max_freq = policy->max; + policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num); policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) @@ -112,7 +157,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) cpu->cur_policy = policy; /* Set policy->cur to max now. The governors will adjust later. 
*/ - policy->cur = cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf; + policy->cur = cppc_dmi_max_khz; + cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf; ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls); if (ret) @@ -134,7 +180,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = { static int __init cppc_cpufreq_init(void) { int i, ret = 0; - struct cpudata *cpu; + struct cppc_cpudata *cpu; if (acpi_disabled) return -ENODEV; @@ -144,7 +190,7 @@ static int __init cppc_cpufreq_init(void) return -ENOMEM; for_each_possible_cpu(i) { - all_cpu_data[i] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); + all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL); if (!all_cpu_data[i]) goto out; @@ -175,7 +221,7 @@ out: static void __exit cppc_cpufreq_exit(void) { - struct cpudata *cpu; + struct cppc_cpudata *cpu; int i; cpufreq_unregister_driver(&cppc_cpufreq_driver); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 2ee40fd360ca..71267626456b 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -11,6 +11,8 @@ #include <linux/of.h> #include <linux/platform_device.h> +#include "cpufreq-dt.h" + static const struct of_device_id machines[] __initconst = { { .compatible = "allwinner,sun4i-a10", }, { .compatible = "allwinner,sun5i-a10s", }, @@ -40,6 +42,7 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "samsung,exynos5250", }, #ifndef CONFIG_BL_SWITCHER { .compatible = "samsung,exynos5420", }, + { .compatible = "samsung,exynos5433", }, { .compatible = "samsung,exynos5800", }, #endif @@ -51,6 +54,7 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "renesas,r8a7779", }, { .compatible = "renesas,r8a7790", }, { .compatible = "renesas,r8a7791", }, + { .compatible = "renesas,r8a7792", }, { .compatible = "renesas,r8a7793", }, { .compatible = "renesas,r8a7794", }, { .compatible = "renesas,sh73a0", }, @@ -68,6 +72,8 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "sigma,tango4" }, + { .compatible = "ti,am33xx", }, + { .compatible = "ti,dra7", }, { .compatible = "ti,omap2", }, { .compatible = "ti,omap3", }, { .compatible = "ti,omap4", }, @@ -91,7 +97,8 @@ static int __init cpufreq_dt_platdev_init(void) if (!match) return -ENODEV; - return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1, - NULL, 0)); + return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", + -1, match->data, + sizeof(struct cpufreq_dt_platform_data))); } device_initcall(cpufreq_dt_platdev_init); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 3957de801ae8..5c07ae05d69a 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -25,6 +25,8 @@ #include <linux/slab.h> #include <linux/thermal.h> +#include "cpufreq-dt.h" + struct private_data { struct device *cpu_dev; struct thermal_cooling_device *cdev; @@ -353,6 +355,7 @@ static struct cpufreq_driver dt_cpufreq_driver = { static int dt_cpufreq_probe(struct platform_device *pdev) { + struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev); int ret; /* @@ -366,7 +369,8 @@ static int dt_cpufreq_probe(struct platform_device *pdev) if (ret) return ret; - dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev); + if (data && data->have_governor_per_policy) + dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY; ret = cpufreq_register_driver(&dt_cpufreq_driver); if (ret) diff --git 
a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h new file mode 100644 index 000000000000..54d774e46c43 --- /dev/null +++ b/drivers/cpufreq/cpufreq-dt.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2016 Linaro + * Viresh Kumar <viresh.kumar@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CPUFREQ_DT_H__ +#define __CPUFREQ_DT_H__ + +#include <linux/types.h> + +struct cpufreq_dt_platform_data { + bool have_governor_per_policy; +}; + +#endif /* __CPUFREQ_DT_H__ */ diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 3dd4884c6f9e..3a64136bf21b 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -916,58 +916,18 @@ static struct kobj_type ktype_cpufreq = { .release = cpufreq_sysfs_release, }; -static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) +static int add_cpu_dev_symlink(struct cpufreq_policy *policy, + struct device *dev) { - struct device *cpu_dev; - - pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu); - - if (!policy) - return 0; - - cpu_dev = get_cpu_device(cpu); - if (WARN_ON(!cpu_dev)) - return 0; - - return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); -} - -static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) -{ - struct device *cpu_dev; - - pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu); - - cpu_dev = get_cpu_device(cpu); - if (WARN_ON(!cpu_dev)) - return; - - sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); + dev_dbg(dev, "%s: Adding symlink\n", __func__); + return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); } -/* Add/remove symlinks for all related CPUs */ -static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) +static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, + struct device *dev) { - unsigned int j; - int ret = 0; - - /* Some related CPUs might not be present (physically hotplugged) */ - for_each_cpu(j, policy->real_cpus) { - ret = add_cpu_dev_symlink(policy, j); - if (ret) - break; - } - - return ret; -} - -static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy) -{ - unsigned int j; - - /* Some related CPUs might not be present (physically hotplugged) */ - for_each_cpu(j, policy->real_cpus) - remove_cpu_dev_symlink(policy, j); + dev_dbg(dev, "%s: Removing symlink\n", __func__); + sysfs_remove_link(&dev->kobj, "cpufreq"); } static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) @@ -999,7 +959,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) return ret; } - return cpufreq_add_dev_symlink(policy); + return 0; } __weak struct cpufreq_governor *cpufreq_default_governor(void) @@ -1073,13 +1033,9 @@ static void handle_update(struct work_struct *work) static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) { - struct device *dev = get_cpu_device(cpu); struct cpufreq_policy *policy; int ret; - if (WARN_ON(!dev)) - return NULL; - policy = kzalloc(sizeof(*policy), GFP_KERNEL); if (!policy) return NULL; @@ -1133,7 +1089,6 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify) down_write(&policy->rwsem); cpufreq_stats_free_table(policy); - cpufreq_remove_dev_symlink(policy); kobj = &policy->kobj; cmp = &policy->kobj_unregister; up_write(&policy->rwsem); @@ -1215,8 +1170,8 @@ static int cpufreq_online(unsigned int cpu) if (new_policy) { /* related_cpus should at 
least include policy->cpus. */ cpumask_copy(policy->related_cpus, policy->cpus); - /* Remember CPUs present at the policy creation time. */ - cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); + /* Clear mask of registered CPUs */ + cpumask_clear(policy->real_cpus); } /* @@ -1331,6 +1286,8 @@ out_free_policy: return ret; } +static void cpufreq_offline(unsigned int cpu); + /** * cpufreq_add_dev - the cpufreq interface for a CPU device. * @dev: CPU device. @@ -1340,22 +1297,28 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) { struct cpufreq_policy *policy; unsigned cpu = dev->id; + int ret; dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); - if (cpu_online(cpu)) - return cpufreq_online(cpu); + if (cpu_online(cpu)) { + ret = cpufreq_online(cpu); + if (ret) + return ret; + } - /* - * A hotplug notifier will follow and we will handle it as CPU online - * then. For now, just create the sysfs link, unless there is no policy - * or the link is already present. - */ + /* Create sysfs link on CPU registration */ policy = per_cpu(cpufreq_cpu_data, cpu); if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) return 0; - return add_cpu_dev_symlink(policy, cpu); + ret = add_cpu_dev_symlink(policy, dev); + if (ret) { + cpumask_clear_cpu(cpu, policy->real_cpus); + cpufreq_offline(cpu); + } + + return ret; } static void cpufreq_offline(unsigned int cpu) @@ -1436,7 +1399,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) cpufreq_offline(cpu); cpumask_clear_cpu(cpu, policy->real_cpus); - remove_cpu_dev_symlink(policy, cpu); + remove_cpu_dev_symlink(policy, dev); if (cpumask_empty(policy->real_cpus)) cpufreq_policy_free(policy, true); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index e415349ab31b..642dd0f183a8 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -260,7 +260,7 @@ static void dbs_irq_work(struct irq_work *irq_work) } static void dbs_update_util_handler(struct update_util_data *data, u64 time, - unsigned long util, unsigned long max) + unsigned int flags) { struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util); struct policy_dbs_info *policy_dbs = cdbs->policy_dbs; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index be9eade147f2..806f2039571e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -181,6 +181,8 @@ struct _pid { * @cpu: CPU number for this instance data * @update_util: CPUFreq utility callback information * @update_util_set: CPUFreq utility callback is set + * @iowait_boost: iowait-related boost fraction + * @last_update: Time of the last update. * @pstate: Stores P state limits for this CPU * @vid: Stores VID limits for this CPU * @pid: Stores PID parameters for this CPU @@ -206,6 +208,7 @@ struct cpudata { struct vid_data vid; struct _pid pid; + u64 last_update; u64 last_sample_time; u64 prev_aperf; u64 prev_mperf; @@ -216,6 +219,7 @@ struct cpudata { struct acpi_processor_performance acpi_perf_data; bool valid_pss_table; #endif + unsigned int iowait_boost; }; static struct cpudata **all_cpu_data; @@ -229,6 +233,7 @@ static struct cpudata **all_cpu_data; * @p_gain_pct: PID proportional gain * @i_gain_pct: PID integral gain * @d_gain_pct: PID derivative gain + * @boost_iowait: Whether or not to use iowait boosting. * * Stores per CPU model static PID configuration data. 
*/ @@ -240,6 +245,7 @@ struct pstate_adjust_policy { int p_gain_pct; int d_gain_pct; int i_gain_pct; + bool boost_iowait; }; /** @@ -1029,7 +1035,7 @@ static struct cpu_defaults core_params = { }, }; -static struct cpu_defaults silvermont_params = { +static const struct cpu_defaults silvermont_params = { .pid_policy = { .sample_rate_ms = 10, .deadband = 0, @@ -1037,6 +1043,7 @@ static struct cpu_defaults silvermont_params = { .p_gain_pct = 14, .d_gain_pct = 0, .i_gain_pct = 4, + .boost_iowait = true, }, .funcs = { .get_max = atom_get_max_pstate, @@ -1050,7 +1057,7 @@ static struct cpu_defaults silvermont_params = { }, }; -static struct cpu_defaults airmont_params = { +static const struct cpu_defaults airmont_params = { .pid_policy = { .sample_rate_ms = 10, .deadband = 0, @@ -1058,6 +1065,7 @@ static struct cpu_defaults airmont_params = { .p_gain_pct = 14, .d_gain_pct = 0, .i_gain_pct = 4, + .boost_iowait = true, }, .funcs = { .get_max = atom_get_max_pstate, @@ -1071,7 +1079,7 @@ static struct cpu_defaults airmont_params = { }, }; -static struct cpu_defaults knl_params = { +static const struct cpu_defaults knl_params = { .pid_policy = { .sample_rate_ms = 10, .deadband = 0, @@ -1091,7 +1099,7 @@ static struct cpu_defaults knl_params = { }, }; -static struct cpu_defaults bxt_params = { +static const struct cpu_defaults bxt_params = { .pid_policy = { .sample_rate_ms = 10, .deadband = 0, @@ -1099,6 +1107,7 @@ static struct cpu_defaults bxt_params = { .p_gain_pct = 14, .d_gain_pct = 0, .i_gain_pct = 4, + .boost_iowait = true, }, .funcs = { .get_max = core_get_max_pstate, @@ -1222,36 +1231,18 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu) static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) { struct sample *sample = &cpu->sample; - u64 cummulative_iowait, delta_iowait_us; - u64 delta_iowait_mperf; - u64 mperf, now; - int32_t cpu_load; + int32_t busy_frac, boost; - cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now); + busy_frac = div_fp(sample->mperf, sample->tsc); - /* - * Convert iowait time into number of IO cycles spent at max_freq. - * IO is considered as busy only for the cpu_load algorithm. For - * performance this is not needed since we always try to reach the - * maximum P-State, so we are already boosting the IOs. - */ - delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait; - delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling * - cpu->pstate.max_pstate, MSEC_PER_SEC); + boost = cpu->iowait_boost; + cpu->iowait_boost >>= 1; - mperf = cpu->sample.mperf + delta_iowait_mperf; - cpu->prev_cummulative_iowait = cummulative_iowait; + if (busy_frac < boost) + busy_frac = boost; - /* - * The load can be estimated as the ratio of the mperf counter - * running at a constant frequency during active periods - * (C0) and the time stamp counter running at the same frequency - * also during C-states. 
- */ - cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc); - cpu->sample.busy_scaled = cpu_load; - - return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load); + sample->busy_scaled = busy_frac * 100; + return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled); } static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) @@ -1325,15 +1316,29 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) sample->mperf, sample->aperf, sample->tsc, - get_avg_frequency(cpu)); + get_avg_frequency(cpu), + fp_toint(cpu->iowait_boost * 100)); } static void intel_pstate_update_util(struct update_util_data *data, u64 time, - unsigned long util, unsigned long max) + unsigned int flags) { struct cpudata *cpu = container_of(data, struct cpudata, update_util); - u64 delta_ns = time - cpu->sample.time; + u64 delta_ns; + + if (pid_params.boost_iowait) { + if (flags & SCHED_CPUFREQ_IOWAIT) { + cpu->iowait_boost = int_tofp(1); + } else if (cpu->iowait_boost) { + /* Clear iowait_boost if the CPU may have been idle. */ + delta_ns = time - cpu->last_update; + if (delta_ns > TICK_NSEC) + cpu->iowait_boost = 0; + } + cpu->last_update = time; + } + delta_ns = time - cpu->sample.time; if ((s64)delta_ns >= pid_params.sample_rate_ns) { bool sample_taken = intel_pstate_sample(cpu, time); diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index be42f103db60..1b9bcd76c60e 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c @@ -123,7 +123,7 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk"); if (IS_ERR(priv.cpu_clk)) { - dev_err(priv.dev, "Unable to get cpuclk"); + dev_err(priv.dev, "Unable to get cpuclk\n"); return PTR_ERR(priv.cpu_clk); } @@ -132,7 +132,7 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) priv.ddr_clk = of_clk_get_by_name(np, "ddrclk"); if (IS_ERR(priv.ddr_clk)) { - dev_err(priv.dev, "Unable to get ddrclk"); + dev_err(priv.dev, "Unable to get ddrclk\n"); err = PTR_ERR(priv.ddr_clk); goto out_cpu; } @@ -142,7 +142,7 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) priv.powersave_clk = of_clk_get_by_name(np, "powersave"); if (IS_ERR(priv.powersave_clk)) { - dev_err(priv.dev, "Unable to get powersave"); + dev_err(priv.dev, "Unable to get powersave\n"); err = PTR_ERR(priv.powersave_clk); goto out_ddr; } @@ -155,7 +155,7 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) if (!err) return 0; - dev_err(priv.dev, "Failed to register cpufreq driver"); + dev_err(priv.dev, "Failed to register cpufreq driver\n"); clk_disable_unprepare(priv.powersave_clk); out_ddr: diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index e8a7bf57b31b..ea7a4e1b68c2 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -105,7 +105,6 @@ static int scpi_cpufreq_remove(struct platform_device *pdev) static struct platform_driver scpi_cpufreq_platdrv = { .driver = { .name = "scpi-cpufreq", - .owner = THIS_MODULE, }, .probe = scpi_cpufreq_probe, .remove = scpi_cpufreq_remove, diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index 04042038ec4b..b366e6d830ea 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c @@ -163,7 +163,7 @@ static int sti_cpufreq_set_opp_info(void) reg_fields = sti_cpufreq_match(); if (!reg_fields) { - dev_err(dev, "This SoC doesn't support voltage scaling"); + dev_err(dev, "This 
SoC doesn't support voltage scaling\n"); return -ENODEV; } diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 4ba3d3fe142f..f440d385ed34 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -121,6 +121,7 @@ static int __init arm_idle_init(void) dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { pr_err("Failed to allocate cpuidle device\n"); + ret = -ENOMEM; goto out_fail; } dev->cpu = cpu; diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index a5be56ec57f2..41254e702f1e 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -76,7 +76,7 @@ comment "DEVFREQ Drivers" config ARM_EXYNOS_BUS_DEVFREQ tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver" - depends on ARCH_EXYNOS + depends on ARCH_EXYNOS || COMPILE_TEST select DEVFREQ_GOV_SIMPLE_ONDEMAND select DEVFREQ_GOV_PASSIVE select DEVFREQ_EVENT_EXYNOS_PPMU @@ -91,14 +91,26 @@ config ARM_EXYNOS_BUS_DEVFREQ This does not yet operate with optimal voltages. config ARM_TEGRA_DEVFREQ - tristate "Tegra DEVFREQ Driver" - depends on ARCH_TEGRA_124_SOC - select DEVFREQ_GOV_SIMPLE_ONDEMAND - select PM_OPP - help - This adds the DEVFREQ driver for the Tegra family of SoCs. - It reads ACTMON counters of memory controllers and adjusts the - operating frequencies and voltages with OPP support. + tristate "Tegra DEVFREQ Driver" + depends on ARCH_TEGRA_124_SOC + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select PM_OPP + help + This adds the DEVFREQ driver for the Tegra family of SoCs. + It reads ACTMON counters of memory controllers and adjusts the + operating frequencies and voltages with OPP support. + +config ARM_RK3399_DMC_DEVFREQ + tristate "ARM RK3399 DMC DEVFREQ Driver" + depends on ARCH_ROCKCHIP + select DEVFREQ_EVENT_ROCKCHIP_DFI + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select PM_DEVFREQ_EVENT + select PM_OPP + help + This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller). + It sets the frequency for the memory controller and reads the usage counts + from hardware. source "drivers/devfreq/event/Kconfig" diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 09f11d9d40d5..fbff40a508a4 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o # DEVFREQ Drivers obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o +obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o # DEVFREQ Event Drivers diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig index eb6f74a2b6b9..0fdae8608961 100644 --- a/drivers/devfreq/event/Kconfig +++ b/drivers/devfreq/event/Kconfig @@ -15,7 +15,7 @@ if PM_DEVFREQ_EVENT config DEVFREQ_EVENT_EXYNOS_NOCP tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver" - depends on ARCH_EXYNOS + depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP help This add the devfreq-event driver for Exynos SoC. It provides NoC @@ -23,11 +23,18 @@ config DEVFREQ_EVENT_EXYNOS_NOCP config DEVFREQ_EVENT_EXYNOS_PPMU tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver" - depends on ARCH_EXYNOS + depends on ARCH_EXYNOS || COMPILE_TEST select PM_OPP help This add the devfreq-event driver for Exynos SoC. It provides PPMU (Platform Performance Monitoring Unit) counters to estimate the utilization of each module. 
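The DEVFREQ_EVENT_ROCKCHIP_DFI entry added below exposes the RK3399 DDR monitor as a devfreq-event device, which the ARM_RK3399_DMC_DEVFREQ driver above consumes for its load statistics. For orientation, here is a minimal consumer-side sketch of the devfreq-event calls these entries enable; sample_ddr_load() is a hypothetical helper, while the API calls themselves are exactly the ones used by rk3399_dmc.c later in this series:

#include <linux/devfreq-event.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Hypothetical consumer: look up the event device behind the first
 * "devfreq-events" phandle, enable its counters, and pull one sample.
 * The load/total counts are what a devfreq governor turns into
 * busy_time/total_time.
 */
static int sample_ddr_load(struct device *dev)
{
	struct devfreq_event_dev *edev;
	struct devfreq_event_data edata;
	int ret;

	edev = devfreq_event_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devfreq_event_enable_edev(edev);
	if (ret < 0)
		return ret;

	ret = devfreq_event_get_event(edev, &edata);
	if (ret < 0)
		return ret;

	dev_info(dev, "DDR load: %lu / %lu\n",
		 edata.load_count, edata.total_count);

	return 0;
}

Note that the rk3399_dmc.c probe below turns a failed look-up into -EPROBE_DEFER, since the DFI event driver may simply not have been bound yet.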
+config DEVFREQ_EVENT_ROCKCHIP_DFI + tristate "ROCKCHIP DFI DEVFREQ event Driver" + depends on ARCH_ROCKCHIP + help + This add the devfreq-event driver for Rockchip SoC. It provides DFI + (DDR Monitor Module) driver to count ddr load. + endif # PM_DEVFREQ_EVENT diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile index 3d6afd352253..dda7090a47c6 100644 --- a/drivers/devfreq/event/Makefile +++ b/drivers/devfreq/event/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_NOCP) += exynos-nocp.o obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o +obj-$(CONFIG_DEVFREQ_EVENT_ROCKCHIP_DFI) += rockchip-dfi.o diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 845bf25fb9fb..f55cf0eb2a66 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -406,8 +406,6 @@ static int of_get_devfreq_events(struct device_node *np, of_property_read_string(node, "event-name", &desc[j].name); j++; - - of_node_put(node); } info->desc = desc; diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c new file mode 100644 index 000000000000..43fcc5a7f515 --- /dev/null +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd + * Author: Lin Huang <hl@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/clk.h> +#include <linux/devfreq-event.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/of.h> + +#define RK3399_DMC_NUM_CH 2 + +/* DDRMON_CTRL */ +#define DDRMON_CTRL 0x04 +#define CLR_DDRMON_CTRL (0x1f0000 << 0) +#define LPDDR4_EN (0x10001 << 4) +#define HARDWARE_EN (0x10001 << 3) +#define LPDDR3_EN (0x10001 << 2) +#define SOFTWARE_EN (0x10001 << 1) +#define SOFTWARE_DIS (0x10000 << 1) +#define TIME_CNT_EN (0x10001 << 0) + +#define DDRMON_CH0_COUNT_NUM 0x28 +#define DDRMON_CH0_DFI_ACCESS_NUM 0x2c +#define DDRMON_CH1_COUNT_NUM 0x3c +#define DDRMON_CH1_DFI_ACCESS_NUM 0x40 + +/* pmu grf */ +#define PMUGRF_OS_REG2 0x308 +#define DDRTYPE_SHIFT 13 +#define DDRTYPE_MASK 7 + +enum { + DDR3 = 3, + LPDDR3 = 6, + LPDDR4 = 7, + UNUSED = 0xFF +}; + +struct dmc_usage { + u32 access; + u32 total; +}; + +/* + * The dfi controller can monitor DDR load. It has an upper and lower threshold + * for the operating points. Whenever the usage leaves these bounds an event is + * generated to indicate the DDR frequency should be changed. 
+ */ +struct rockchip_dfi { + struct devfreq_event_dev *edev; + struct devfreq_event_desc *desc; + struct dmc_usage ch_usage[RK3399_DMC_NUM_CH]; + struct device *dev; + void __iomem *regs; + struct regmap *regmap_pmu; + struct clk *clk; +}; + +static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + void __iomem *dfi_regs = info->regs; + u32 val; + u32 ddr_type; + + /* get ddr type */ + regmap_read(info->regmap_pmu, PMUGRF_OS_REG2, &val); + ddr_type = (val >> DDRTYPE_SHIFT) & DDRTYPE_MASK; + + /* clear DDRMON_CTRL setting */ + writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL); + + /* set ddr type to dfi */ + if (ddr_type == LPDDR3) + writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL); + else if (ddr_type == LPDDR4) + writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL); + + /* enable count, use software mode */ + writel_relaxed(SOFTWARE_EN, dfi_regs + DDRMON_CTRL); +} + +static void rockchip_dfi_stop_hardware_counter(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + void __iomem *dfi_regs = info->regs; + + writel_relaxed(SOFTWARE_DIS, dfi_regs + DDRMON_CTRL); +} + +static int rockchip_dfi_get_busier_ch(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + u32 tmp, max = 0; + u32 i, busier_ch = 0; + void __iomem *dfi_regs = info->regs; + + rockchip_dfi_stop_hardware_counter(edev); + + /* Find out which channel is busier */ + for (i = 0; i < RK3399_DMC_NUM_CH; i++) { + info->ch_usage[i].access = readl_relaxed(dfi_regs + + DDRMON_CH0_DFI_ACCESS_NUM + i * 20) * 4; + info->ch_usage[i].total = readl_relaxed(dfi_regs + + DDRMON_CH0_COUNT_NUM + i * 20); + tmp = info->ch_usage[i].access; + if (tmp > max) { + busier_ch = i; + max = tmp; + } + } + rockchip_dfi_start_hardware_counter(edev); + + return busier_ch; +} + +static int rockchip_dfi_disable(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + + rockchip_dfi_stop_hardware_counter(edev); + clk_disable_unprepare(info->clk); + + return 0; +} + +static int rockchip_dfi_enable(struct devfreq_event_dev *edev) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + int ret; + + ret = clk_prepare_enable(info->clk); + if (ret) { + dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret); + return ret; + } + + rockchip_dfi_start_hardware_counter(edev); + return 0; +} + +static int rockchip_dfi_set_event(struct devfreq_event_dev *edev) +{ + return 0; +} + +static int rockchip_dfi_get_event(struct devfreq_event_dev *edev, + struct devfreq_event_data *edata) +{ + struct rockchip_dfi *info = devfreq_event_get_drvdata(edev); + int busier_ch; + + busier_ch = rockchip_dfi_get_busier_ch(edev); + + edata->load_count = info->ch_usage[busier_ch].access; + edata->total_count = info->ch_usage[busier_ch].total; + + return 0; +} + +static const struct devfreq_event_ops rockchip_dfi_ops = { + .disable = rockchip_dfi_disable, + .enable = rockchip_dfi_enable, + .get_event = rockchip_dfi_get_event, + .set_event = rockchip_dfi_set_event, +}; + +static const struct of_device_id rockchip_dfi_id_match[] = { + { .compatible = "rockchip,rk3399-dfi" }, + { }, +}; + +static int rockchip_dfi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rockchip_dfi *data; + struct resource *res; + struct devfreq_event_desc *desc; + struct device_node *np = pdev->dev.of_node, *node; + + data = devm_kzalloc(dev, 
sizeof(struct rockchip_dfi), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + + data->clk = devm_clk_get(dev, "pclk_ddr_mon"); + if (IS_ERR(data->clk)) { + dev_err(dev, "Cannot get the clk dmc_clk\n"); + return PTR_ERR(data->clk); + }; + + /* try to find the optional reference to the pmu syscon */ + node = of_parse_phandle(np, "rockchip,pmu", 0); + if (node) { + data->regmap_pmu = syscon_node_to_regmap(node); + if (IS_ERR(data->regmap_pmu)) + return PTR_ERR(data->regmap_pmu); + } + data->dev = dev; + + desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + desc->ops = &rockchip_dfi_ops; + desc->driver_data = data; + desc->name = np->name; + data->desc = desc; + + data->edev = devm_devfreq_event_add_edev(&pdev->dev, desc); + if (IS_ERR(data->edev)) { + dev_err(&pdev->dev, + "failed to add devfreq-event device\n"); + return PTR_ERR(data->edev); + } + + platform_set_drvdata(pdev, data); + + return 0; +} + +static struct platform_driver rockchip_dfi_driver = { + .probe = rockchip_dfi_probe, + .driver = { + .name = "rockchip-dfi", + .of_match_table = rockchip_dfi_id_match, + }, +}; +module_platform_driver(rockchip_dfi_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>"); +MODULE_DESCRIPTION("Rockchip DFI driver"); diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c new file mode 100644 index 000000000000..e24b73d66659 --- /dev/null +++ b/drivers/devfreq/rk3399_dmc.c @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd. + * Author: Lin Huang <hl@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/arm-smccc.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/devfreq.h> +#include <linux/devfreq-event.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/regulator/consumer.h> +#include <linux/rwsem.h> +#include <linux/suspend.h> + +#include <soc/rockchip/rockchip_sip.h> + +struct dram_timing { + unsigned int ddr3_speed_bin; + unsigned int pd_idle; + unsigned int sr_idle; + unsigned int sr_mc_gate_idle; + unsigned int srpd_lite_idle; + unsigned int standby_idle; + unsigned int auto_pd_dis_freq; + unsigned int dram_dll_dis_freq; + unsigned int phy_dll_dis_freq; + unsigned int ddr3_odt_dis_freq; + unsigned int ddr3_drv; + unsigned int ddr3_odt; + unsigned int phy_ddr3_ca_drv; + unsigned int phy_ddr3_dq_drv; + unsigned int phy_ddr3_odt; + unsigned int lpddr3_odt_dis_freq; + unsigned int lpddr3_drv; + unsigned int lpddr3_odt; + unsigned int phy_lpddr3_ca_drv; + unsigned int phy_lpddr3_dq_drv; + unsigned int phy_lpddr3_odt; + unsigned int lpddr4_odt_dis_freq; + unsigned int lpddr4_drv; + unsigned int lpddr4_dq_odt; + unsigned int lpddr4_ca_odt; + unsigned int phy_lpddr4_ca_drv; + unsigned int phy_lpddr4_ck_cs_drv; + unsigned int phy_lpddr4_dq_drv; + unsigned int phy_lpddr4_odt; +}; + +struct rk3399_dmcfreq { + struct device *dev; + struct devfreq *devfreq; + struct devfreq_simple_ondemand_data ondemand_data; + struct clk *dmc_clk; + struct devfreq_event_dev *edev; + struct mutex lock; + struct dram_timing timing; + + /* + * The DDR Converter of Frequency (DCF) is used to implement DDR + * frequency conversion without the participation of the CPU; it is + * implemented and controlled in ARM Trusted Firmware. + */ + wait_queue_head_t wait_dcf_queue; + int irq; + int wait_dcf_flag; + struct regulator *vdd_center; + unsigned long rate, target_rate; + unsigned long volt, target_volt; + struct dev_pm_opp *curr_opp; +}; + +static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq, + u32 flags) +{ + struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev); + struct dev_pm_opp *opp; + unsigned long old_clk_rate = dmcfreq->rate; + unsigned long target_volt, target_rate; + int err; + + rcu_read_lock(); + opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(opp)) { + rcu_read_unlock(); + return PTR_ERR(opp); + } + + target_rate = dev_pm_opp_get_freq(opp); + target_volt = dev_pm_opp_get_voltage(opp); + + dmcfreq->rate = dev_pm_opp_get_freq(dmcfreq->curr_opp); + dmcfreq->volt = dev_pm_opp_get_voltage(dmcfreq->curr_opp); + + rcu_read_unlock(); + + if (dmcfreq->rate == target_rate) + return 0; + + mutex_lock(&dmcfreq->lock); + + /* + * When scaling the frequency up, adjust the voltage first. + * When scaling the frequency down, adjust the frequency first. + */ + if (old_clk_rate < target_rate) { + err = regulator_set_voltage(dmcfreq->vdd_center, target_volt, + target_volt); + if (err) { + dev_err(dev, "Cannot set voltage %lu uV\n", + target_volt); + goto out; + } + } + dmcfreq->wait_dcf_flag = 1; + + err = clk_set_rate(dmcfreq->dmc_clk, target_rate); + if (err) { + dev_err(dev, "Cannot set frequency %lu (%d)\n", + target_rate, err); + regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt, + dmcfreq->volt); + goto out; + } + + /* + * Wait until the DCF irq fires, which means the frequency scaling has + * finished in ARM Trusted Firmware; use 100ms as the timeout.
+ */ + if (!wait_event_timeout(dmcfreq->wait_dcf_queue, + !dmcfreq->wait_dcf_flag, HZ / 10)) + dev_warn(dev, "Timeout waiting for dcf interrupt\n"); + + /* + * Check the dpll rate. There are only two possible results: + * 1. DDR frequency scaling failed: we still get the old rate. + * 2. DDR frequency scaling succeeded: we get the rate we set. + */ + dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk); + + /* If we got the incorrect rate, restore the voltage to the old value. */ + if (dmcfreq->rate != target_rate) { + dev_err(dev, "Got wrong ddr frequency: requested %lu, current %lu\n", + target_rate, dmcfreq->rate); + regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt, + dmcfreq->volt); + goto out; + } else if (old_clk_rate > target_rate) + err = regulator_set_voltage(dmcfreq->vdd_center, target_volt, + target_volt); + if (err) + dev_err(dev, "Cannot set voltage %lu uV\n", target_volt); + + dmcfreq->curr_opp = opp; +out: + mutex_unlock(&dmcfreq->lock); + return err; +} + +static int rk3399_dmcfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev); + struct devfreq_event_data edata; + int ret = 0; + + ret = devfreq_event_get_event(dmcfreq->edev, &edata); + if (ret < 0) + return ret; + + stat->current_frequency = dmcfreq->rate; + stat->busy_time = edata.load_count; + stat->total_time = edata.total_count; + + return ret; +} + +static int rk3399_dmcfreq_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev); + + *freq = dmcfreq->rate; + + return 0; +} + +static struct devfreq_dev_profile rk3399_devfreq_dmc_profile = { + .polling_ms = 200, + .target = rk3399_dmcfreq_target, + .get_dev_status = rk3399_dmcfreq_get_dev_status, + .get_cur_freq = rk3399_dmcfreq_get_cur_freq, +}; + +static __maybe_unused int rk3399_dmcfreq_suspend(struct device *dev) +{ + struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev); + int ret = 0; + + ret = devfreq_event_disable_edev(dmcfreq->edev); + if (ret < 0) { + dev_err(dev, "failed to disable the devfreq-event devices\n"); + return ret; + } + + ret = devfreq_suspend_device(dmcfreq->devfreq); + if (ret < 0) { + dev_err(dev, "failed to suspend the devfreq devices\n"); + return ret; + } + + return 0; +} + +static __maybe_unused int rk3399_dmcfreq_resume(struct device *dev) +{ + struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(dev); + int ret = 0; + + ret = devfreq_event_enable_edev(dmcfreq->edev); + if (ret < 0) { + dev_err(dev, "failed to enable the devfreq-event devices\n"); + return ret; + } + + ret = devfreq_resume_device(dmcfreq->devfreq); + if (ret < 0) { + dev_err(dev, "failed to resume the devfreq devices\n"); + return ret; + } + return ret; +} + +static SIMPLE_DEV_PM_OPS(rk3399_dmcfreq_pm, rk3399_dmcfreq_suspend, + rk3399_dmcfreq_resume); + +static irqreturn_t rk3399_dmc_irq(int irq, void *dev_id) +{ + struct rk3399_dmcfreq *dmcfreq = dev_id; + struct arm_smccc_res res; + + dmcfreq->wait_dcf_flag = 0; + wake_up(&dmcfreq->wait_dcf_queue); + + /* Clear the DCF interrupt */ + arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, + ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ, + 0, 0, 0, 0, &res); + + return IRQ_HANDLED; +} + +static int of_get_ddr_timings(struct dram_timing *timing, + struct device_node *np) +{ + int ret = 0; + + ret = of_property_read_u32(np, "rockchip,ddr3_speed_bin", + &timing->ddr3_speed_bin); + ret |= of_property_read_u32(np, "rockchip,pd_idle", + &timing->pd_idle); + ret |= of_property_read_u32(np, "rockchip,sr_idle", + 
&timing->sr_idle); + ret |= of_property_read_u32(np, "rockchip,sr_mc_gate_idle", + &timing->sr_mc_gate_idle); + ret |= of_property_read_u32(np, "rockchip,srpd_lite_idle", + &timing->srpd_lite_idle); + ret |= of_property_read_u32(np, "rockchip,standby_idle", + &timing->standby_idle); + ret |= of_property_read_u32(np, "rockchip,auto_pd_dis_freq", + &timing->auto_pd_dis_freq); + ret |= of_property_read_u32(np, "rockchip,dram_dll_dis_freq", + &timing->dram_dll_dis_freq); + ret |= of_property_read_u32(np, "rockchip,phy_dll_dis_freq", + &timing->phy_dll_dis_freq); + ret |= of_property_read_u32(np, "rockchip,ddr3_odt_dis_freq", + &timing->ddr3_odt_dis_freq); + ret |= of_property_read_u32(np, "rockchip,ddr3_drv", + &timing->ddr3_drv); + ret |= of_property_read_u32(np, "rockchip,ddr3_odt", + &timing->ddr3_odt); + ret |= of_property_read_u32(np, "rockchip,phy_ddr3_ca_drv", + &timing->phy_ddr3_ca_drv); + ret |= of_property_read_u32(np, "rockchip,phy_ddr3_dq_drv", + &timing->phy_ddr3_dq_drv); + ret |= of_property_read_u32(np, "rockchip,phy_ddr3_odt", + &timing->phy_ddr3_odt); + ret |= of_property_read_u32(np, "rockchip,lpddr3_odt_dis_freq", + &timing->lpddr3_odt_dis_freq); + ret |= of_property_read_u32(np, "rockchip,lpddr3_drv", + &timing->lpddr3_drv); + ret |= of_property_read_u32(np, "rockchip,lpddr3_odt", + &timing->lpddr3_odt); + ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_ca_drv", + &timing->phy_lpddr3_ca_drv); + ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_dq_drv", + &timing->phy_lpddr3_dq_drv); + ret |= of_property_read_u32(np, "rockchip,phy_lpddr3_odt", + &timing->phy_lpddr3_odt); + ret |= of_property_read_u32(np, "rockchip,lpddr4_odt_dis_freq", + &timing->lpddr4_odt_dis_freq); + ret |= of_property_read_u32(np, "rockchip,lpddr4_drv", + &timing->lpddr4_drv); + ret |= of_property_read_u32(np, "rockchip,lpddr4_dq_odt", + &timing->lpddr4_dq_odt); + ret |= of_property_read_u32(np, "rockchip,lpddr4_ca_odt", + &timing->lpddr4_ca_odt); + ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_ca_drv", + &timing->phy_lpddr4_ca_drv); + ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_ck_cs_drv", + &timing->phy_lpddr4_ck_cs_drv); + ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_dq_drv", + &timing->phy_lpddr4_dq_drv); + ret |= of_property_read_u32(np, "rockchip,phy_lpddr4_odt", + &timing->phy_lpddr4_odt); + + return ret; +} + +static int rk3399_dmcfreq_probe(struct platform_device *pdev) +{ + struct arm_smccc_res res; + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + struct rk3399_dmcfreq *data; + int ret, irq, index, size; + uint32_t *timing; + struct dev_pm_opp *opp; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Cannot get the dmc interrupt resource\n"); + return -EINVAL; + } + data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_init(&data->lock); + + data->vdd_center = devm_regulator_get(dev, "center"); + if (IS_ERR(data->vdd_center)) { + dev_err(dev, "Cannot get the regulator \"center\"\n"); + return PTR_ERR(data->vdd_center); + } + + data->dmc_clk = devm_clk_get(dev, "dmc_clk"); + if (IS_ERR(data->dmc_clk)) { + dev_err(dev, "Cannot get the clk dmc_clk\n"); + return PTR_ERR(data->dmc_clk); + }; + + data->irq = irq; + ret = devm_request_irq(dev, irq, rk3399_dmc_irq, 0, + dev_name(dev), data); + if (ret) { + dev_err(dev, "Failed to request dmc irq: %d\n", ret); + return ret; + } + + init_waitqueue_head(&data->wait_dcf_queue); + data->wait_dcf_flag = 0; + + 
data->edev = devfreq_event_get_edev_by_phandle(dev, 0); + if (IS_ERR(data->edev)) + return -EPROBE_DEFER; + + ret = devfreq_event_enable_edev(data->edev); + if (ret < 0) { + dev_err(dev, "failed to enable devfreq-event devices\n"); + return ret; + } + + /* + * Get the dram timings and pass them to ARM Trusted Firmware; + * the dram driver in ARM Trusted Firmware will use these timings + * to do the dram initialization. + */ + if (!of_get_ddr_timings(&data->timing, np)) { + timing = &data->timing.ddr3_speed_bin; + size = sizeof(struct dram_timing) / 4; + for (index = 0; index < size; index++) { + arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, *timing++, index, + ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM, + 0, 0, 0, 0, &res); + if (res.a0) { + dev_err(dev, "Failed to set dram param: %ld\n", + res.a0); + return -EINVAL; + } + } + } + + arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, + ROCKCHIP_SIP_CONFIG_DRAM_INIT, + 0, 0, 0, 0, &res); + + /* + * We add a devfreq driver to our parent since it has a device tree node + * with operating points. + */ + if (dev_pm_opp_of_add_table(dev)) { + dev_err(dev, "Invalid operating-points in device tree.\n"); + return -EINVAL; + } + + of_property_read_u32(np, "upthreshold", + &data->ondemand_data.upthreshold); + of_property_read_u32(np, "downdifferential", + &data->ondemand_data.downdifferential); + + data->rate = clk_get_rate(data->dmc_clk); + + rcu_read_lock(); + opp = devfreq_recommended_opp(dev, &data->rate, 0); + if (IS_ERR(opp)) { + rcu_read_unlock(); + return PTR_ERR(opp); + } + rcu_read_unlock(); + data->curr_opp = opp; + + rk3399_devfreq_dmc_profile.initial_freq = data->rate; + + data->devfreq = devfreq_add_device(dev, + &rk3399_devfreq_dmc_profile, + "simple_ondemand", + &data->ondemand_data); + if (IS_ERR(data->devfreq)) + return PTR_ERR(data->devfreq); + devm_devfreq_register_opp_notifier(dev, data->devfreq); + + data->dev = dev; + platform_set_drvdata(pdev, data); + + return 0; +} + +static const struct of_device_id rk3399dmc_devfreq_of_match[] = { + { .compatible = "rockchip,rk3399-dmc" }, + { }, +}; + +static struct platform_driver rk3399_dmcfreq_driver = { + .probe = rk3399_dmcfreq_probe, + .driver = { + .name = "rk3399-dmc-freq", + .pm = &rk3399_dmcfreq_pm, + .of_match_table = rk3399dmc_devfreq_of_match, + }, +}; +module_platform_driver(rk3399_dmcfreq_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>"); +MODULE_DESCRIPTION("RK3399 dmcfreq driver with devfreq framework"); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 5ef9b733d153..26298af73232 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1486,7 +1486,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) priv->features |= FEATURE_IRQ; priv->features |= FEATURE_SMBUS_PEC; priv->features |= FEATURE_BLOCK_BUFFER; - priv->features |= FEATURE_TCO; + /* If we have ACPI based watchdog use that instead */ + if (!acpi_has_watchdog()) + priv->features |= FEATURE_TCO; priv->features |= FEATURE_HOST_NOTIFY; break; diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 043828d541f7..08c87fadca8c 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -59,6 +59,7 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/mailbox_controller.h> @@ -68,11 +69,16 @@ #include "mailbox.h" #define MAX_PCC_SUBSPACES 256 +#define MBOX_IRQ_NAME "pcc-mbox" static
struct mbox_chan *pcc_mbox_channels; /* Array of cached virtual address for doorbell registers */ static void __iomem **pcc_doorbell_vaddr; +/* Array of cached virtual addresses for doorbell ack registers */ +static void __iomem **pcc_doorbell_ack_vaddr; +/* Array of doorbell interrupts */ +static int *pcc_doorbell_irq; static struct mbox_controller pcc_mbox_ctrl = {}; /** @@ -91,6 +97,132 @@ static struct mbox_chan *get_pcc_channel(int id) return &pcc_mbox_channels[id]; } +/* + * PCC can be used with performance-critical drivers such as CPPC, + * so it makes sense to locally cache the virtual address and + * use it to read/write to PCC registers such as the doorbell register. + * + * The read_register and write_register functions below are used to read + * from and write to performance-critical registers such as the PCC + * doorbell register. + */ +static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width) +{ + int ret_val = 0; + + switch (bit_width) { + case 8: + *val = readb(vaddr); + break; + case 16: + *val = readw(vaddr); + break; + case 32: + *val = readl(vaddr); + break; + case 64: + *val = readq(vaddr); + break; + default: + pr_debug("Error: Cannot read register of %u bit width", + bit_width); + ret_val = -EFAULT; + break; + } + return ret_val; +} + +static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width) +{ + int ret_val = 0; + + switch (bit_width) { + case 8: + writeb(val, vaddr); + break; + case 16: + writew(val, vaddr); + break; + case 32: + writel(val, vaddr); + break; + case 64: + writeq(val, vaddr); + break; + default: + pr_debug("Error: Cannot write register of %u bit width", + bit_width); + ret_val = -EFAULT; + break; + } + return ret_val; +} + +/** + * pcc_map_interrupt - Map a PCC subspace GSI to a linux IRQ number + * @interrupt: GSI number. + * @flags: interrupt flags + * + * Returns: a valid linux IRQ number on success + * 0 or -EINVAL on failure + */ +static int pcc_map_interrupt(u32 interrupt, u32 flags) +{ + int trigger, polarity; + + if (!interrupt) + return 0; + + trigger = (flags & ACPI_PCCT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE + : ACPI_LEVEL_SENSITIVE; + + polarity = (flags & ACPI_PCCT_INTERRUPT_POLARITY) ?
ACPI_ACTIVE_LOW + : ACPI_ACTIVE_HIGH; + + return acpi_register_gsi(NULL, interrupt, trigger, polarity); +} + +/** + * pcc_mbox_irq - PCC mailbox interrupt handler + */ +static irqreturn_t pcc_mbox_irq(int irq, void *p) +{ + struct acpi_generic_address *doorbell_ack; + struct acpi_pcct_hw_reduced *pcct_ss; + struct mbox_chan *chan = p; + u64 doorbell_ack_preserve; + u64 doorbell_ack_write; + u64 doorbell_ack_val; + int ret; + + pcct_ss = chan->con_priv; + + mbox_chan_received_data(chan, NULL); + + if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) { + struct acpi_pcct_hw_reduced_type2 *pcct2_ss = chan->con_priv; + u32 id = chan - pcc_mbox_channels; + + doorbell_ack = &pcct2_ss->doorbell_ack_register; + doorbell_ack_preserve = pcct2_ss->ack_preserve_mask; + doorbell_ack_write = pcct2_ss->ack_write_mask; + + ret = read_register(pcc_doorbell_ack_vaddr[id], + &doorbell_ack_val, + doorbell_ack->bit_width); + if (ret) + return IRQ_NONE; + + ret = write_register(pcc_doorbell_ack_vaddr[id], + (doorbell_ack_val & doorbell_ack_preserve) + | doorbell_ack_write, + doorbell_ack->bit_width); + if (ret) + return IRQ_NONE; + } + + return IRQ_HANDLED; +} + /** * pcc_mbox_request_channel - PCC clients call this function to * request a pointer to their PCC subspace, from which they @@ -135,6 +267,18 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) chan->txdone_method |= TXDONE_BY_ACK; + if (pcc_doorbell_irq[subspace_id] > 0) { + int rc; + + rc = devm_request_irq(dev, pcc_doorbell_irq[subspace_id], + pcc_mbox_irq, 0, MBOX_IRQ_NAME, chan); + if (unlikely(rc)) { + dev_err(dev, "failed to register PCC interrupt %d\n", + pcc_doorbell_irq[subspace_id]); + chan = ERR_PTR(rc); + } + } + spin_unlock_irqrestore(&chan->lock, flags); return chan; @@ -149,80 +293,30 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel); */ void pcc_mbox_free_channel(struct mbox_chan *chan) { + u32 id = chan - pcc_mbox_channels; unsigned long flags; if (!chan || !chan->cl) return; + if (id >= pcc_mbox_ctrl.num_chans) { + pr_debug("pcc_mbox_free_channel: Invalid mbox_chan passed\n"); + return; + } + spin_lock_irqsave(&chan->lock, flags); chan->cl = NULL; chan->active_req = NULL; if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) chan->txdone_method = TXDONE_BY_POLL; + if (pcc_doorbell_irq[id] > 0) + devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); + spin_unlock_irqrestore(&chan->lock, flags); } EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); -/* - * PCC can be used with perf critical drivers such as CPPC - * So it makes sense to locally cache the virtual address and - * use it to read/write to PCC registers such as doorbell register - * - * The below read_register and write_registers are used to read and - * write from perf critical registers such as PCC doorbell register - */ -static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width) -{ - int ret_val = 0; - - switch (bit_width) { - case 8: - *val = readb(vaddr); - break; - case 16: - *val = readw(vaddr); - break; - case 32: - *val = readl(vaddr); - break; - case 64: - *val = readq(vaddr); - break; - default: - pr_debug("Error: Cannot read register of %u bit width", - bit_width); - ret_val = -EFAULT; - break; - } - return ret_val; -} - -static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width) -{ - int ret_val = 0; - - switch (bit_width) { - case 8: - writeb(val, vaddr); - break; - case 16: - writew(val, vaddr); - break; - case 32: - 
writel(val, vaddr); - break; - case 64: - writeq(val, vaddr); - break; - default: - pr_debug("Error: Cannot write register of %u bit width", - bit_width); - ret_val = -EFAULT; - break; - } - return ret_val; -} /** * pcc_send_data - Called from Mailbox Controller code. Used @@ -296,8 +390,10 @@ static int parse_pcc_subspace(struct acpi_subtable_header *header, if (pcc_mbox_ctrl.num_chans <= MAX_PCC_SUBSPACES) { pcct_ss = (struct acpi_pcct_hw_reduced *) header; - if (pcct_ss->header.type != - ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) { + if ((pcct_ss->header.type != + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) + && (pcct_ss->header.type != + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2)) { pr_err("Incorrect PCC Subspace type detected\n"); return -EINVAL; } @@ -307,6 +403,43 @@ static int parse_pcc_subspace(struct acpi_subtable_header *header, } /** + * pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register + * There should be one entry per PCC client. + * @id: PCC subspace index. + * @pcct_ss: Pointer to the ACPI subtable header under the PCCT. + * + * Return: 0 for Success, else errno. + * + * This gets called for each entry in the PCC table. + */ +static int pcc_parse_subspace_irq(int id, + struct acpi_pcct_hw_reduced *pcct_ss) +{ + pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->doorbell_interrupt, + (u32)pcct_ss->flags); + if (pcc_doorbell_irq[id] <= 0) { + pr_err("PCC GSI %d not registered\n", + pcct_ss->doorbell_interrupt); + return -EINVAL; + } + + if (pcct_ss->header.type + == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) { + struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss; + + pcc_doorbell_ack_vaddr[id] = acpi_os_ioremap( + pcct2_ss->doorbell_ack_register.address, + pcct2_ss->doorbell_ack_register.bit_width / 8); + if (!pcc_doorbell_ack_vaddr[id]) { + pr_err("Failed to ioremap PCC ACK register\n"); + return -ENOMEM; + } + } + + return 0; +} + +/** * acpi_pcc_probe - Parse the ACPI tree for the PCCT. * * Return: 0 for Success, else errno. @@ -316,7 +449,9 @@ static int __init acpi_pcc_probe(void) acpi_size pcct_tbl_header_size; struct acpi_table_header *pcct_tbl; struct acpi_subtable_header *pcct_entry; - int count, i; + struct acpi_table_pcct *acpi_pcct_tbl; + int count, i, rc; + int sum = 0; acpi_status status = AE_OK; /* Search for PCCT */ @@ -333,37 +468,66 @@ static int __init acpi_pcc_probe(void) sizeof(struct acpi_table_pcct), ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE, parse_pcc_subspace, MAX_PCC_SUBSPACES); + sum += (count > 0) ? count : 0; + + count = acpi_table_parse_entries(ACPI_SIG_PCCT, + sizeof(struct acpi_table_pcct), + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2, + parse_pcc_subspace, MAX_PCC_SUBSPACES); + sum += (count > 0) ? 
count : 0; - if (count <= 0) { + if (sum == 0 || sum >= MAX_PCC_SUBSPACES) { pr_err("Error parsing PCC subspaces from PCCT\n"); return -EINVAL; } pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) * - count, GFP_KERNEL); - + sum, GFP_KERNEL); if (!pcc_mbox_channels) { pr_err("Could not allocate space for PCC mbox channels\n"); return -ENOMEM; } - pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL); + pcc_doorbell_vaddr = kcalloc(sum, sizeof(void *), GFP_KERNEL); if (!pcc_doorbell_vaddr) { - kfree(pcc_mbox_channels); - return -ENOMEM; + rc = -ENOMEM; + goto err_free_mbox; + } + + pcc_doorbell_ack_vaddr = kcalloc(sum, sizeof(void *), GFP_KERNEL); + if (!pcc_doorbell_ack_vaddr) { + rc = -ENOMEM; + goto err_free_db_vaddr; + } + + pcc_doorbell_irq = kcalloc(sum, sizeof(int), GFP_KERNEL); + if (!pcc_doorbell_irq) { + rc = -ENOMEM; + goto err_free_db_ack_vaddr; } /* Point to the first PCC subspace entry */ pcct_entry = (struct acpi_subtable_header *) ( (unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct)); - for (i = 0; i < count; i++) { + acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl; + if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL) + pcc_mbox_ctrl.txdone_irq = true; + + for (i = 0; i < sum; i++) { struct acpi_generic_address *db_reg; struct acpi_pcct_hw_reduced *pcct_ss; pcc_mbox_channels[i].con_priv = pcct_entry; + pcct_ss = (struct acpi_pcct_hw_reduced *) pcct_entry; + + if (pcc_mbox_ctrl.txdone_irq) { + rc = pcc_parse_subspace_irq(i, pcct_ss); + if (rc < 0) + goto err; + } + /* If doorbell is in system memory cache the virt address */ - pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry; db_reg = &pcct_ss->doorbell_register; if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address, @@ -372,11 +536,21 @@ static int __init acpi_pcc_probe(void) ((unsigned long) pcct_entry + pcct_entry->length); } - pcc_mbox_ctrl.num_chans = count; + pcc_mbox_ctrl.num_chans = sum; pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans); return 0; + +err: + kfree(pcc_doorbell_irq); +err_free_db_ack_vaddr: + kfree(pcc_doorbell_ack_vaddr); +err_free_db_vaddr: + kfree(pcc_doorbell_vaddr); +err_free_mbox: + kfree(pcc_mbox_channels); + return rc; } /** diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index bd3aa4578346..c8dee47b45d9 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -984,6 +984,10 @@ static int lpc_ich_init_wdt(struct pci_dev *dev) int ret; struct resource *res; + /* If we have ACPI based watchdog use that instead */ + if (acpi_has_watchdog()) + return -ENODEV; + /* Setup power management base register */ pci_read_config_dword(dev, priv->abase, &base_addr_cfg); base_addr = base_addr_cfg & 0x0000ff80; diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c index ed5a097f0801..f63d4b0deff0 100644 --- a/drivers/of/of_numa.c +++ b/drivers/of/of_numa.c @@ -16,6 +16,8 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#define pr_fmt(fmt) "OF: NUMA: " fmt + #include <linux/of.h> #include <linux/of_address.h> #include <linux/nodemask.h> @@ -49,10 +51,9 @@ static void __init of_numa_parse_cpu_nodes(void) if (r) continue; - pr_debug("NUMA: CPU on %u\n", nid); + pr_debug("CPU on %u\n", nid); if (nid >= MAX_NUMNODES) - pr_warn("NUMA: Node id %u exceeds maximum value\n", - nid); + pr_warn("Node id %u exceeds maximum value\n", nid); else node_set(nid, numa_nodes_parsed); } @@ -63,13 +64,9 @@ static int __init of_numa_parse_memory_nodes(void) struct device_node *np = NULL; struct resource rsrc; u32 nid; - int r = 0; - - for (;;) { - np = of_find_node_by_type(np, "memory"); - if (!np) - break; + int i, r; + for_each_node_by_type(np, "memory") { r = of_property_read_u32(np, "numa-node-id", &nid); if (r == -EINVAL) /* @@ -78,27 +75,23 @@ static int __init of_numa_parse_memory_nodes(void) * "numa-node-id" property */ continue; - else if (r) - /* some other error */ - break; - r = of_address_to_resource(np, 0, &rsrc); - if (r) { - pr_err("NUMA: bad reg property in memory node\n"); - break; + if (nid >= MAX_NUMNODES) { + pr_warn("Node id %u exceeds maximum value\n", nid); + r = -EINVAL; } - pr_debug("NUMA: base = %llx len = %llx, node = %u\n", - rsrc.start, rsrc.end - rsrc.start + 1, nid); - + for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++) + r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1); - r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1); - if (r) - break; + if (!i || r) { + of_node_put(np); + pr_err("bad property in memory node\n"); + return r ? : -EINVAL; + } } - of_node_put(np); - return r; + return 0; } static int __init of_numa_parse_distance_map_v1(struct device_node *map) @@ -107,17 +100,17 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map) int entry_count; int i; - pr_info("NUMA: parsing numa-distance-map-v1\n"); + pr_info("parsing numa-distance-map-v1\n"); matrix = of_get_property(map, "distance-matrix", NULL); if (!matrix) { - pr_err("NUMA: No distance-matrix property in distance-map\n"); + pr_err("No distance-matrix property in distance-map\n"); return -EINVAL; } entry_count = of_property_count_u32_elems(map, "distance-matrix"); if (entry_count <= 0) { - pr_err("NUMA: Invalid distance-matrix\n"); + pr_err("Invalid distance-matrix\n"); return -EINVAL; } @@ -132,7 +125,7 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map) matrix++; numa_set_distance(nodea, nodeb, distance); - pr_debug("NUMA: distance[node%d -> node%d] = %d\n", + pr_debug("distance[node%d -> node%d] = %d\n", nodea, nodeb, distance); /* Set default distance of node B->A same as A->B */ @@ -166,8 +159,6 @@ int of_node_to_nid(struct device_node *device) np = of_node_get(device); while (np) { - struct device_node *parent; - r = of_property_read_u32(np, "numa-node-id", &nid); /* * -EINVAL indicates the property was not found, and @@ -178,22 +169,15 @@ int of_node_to_nid(struct device_node *device) if (r != -EINVAL) break; - parent = of_get_parent(np); - of_node_put(np); - np = parent; + np = of_get_next_parent(np); } if (np && r) - pr_warn("NUMA: Invalid \"numa-node-id\" property in node %s\n", + pr_warn("Invalid \"numa-node-id\" property in node %s\n", np->name); of_node_put(np); - if (!r) { - if (nid >= MAX_NUMNODES) - pr_warn("NUMA: Node id %u exceeds maximum value\n", - nid); - else - return nid; - } + if (!r) + return nid; return NUMA_NO_NODE; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index aab9d5115a5f..415956c5c593 100644 --- a/drivers/pci/pci.c +++ 
b/drivers/pci/pci.c @@ -480,6 +480,30 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev, EXPORT_SYMBOL(pci_find_parent_resource); /** + * pci_find_resource - Return matching PCI device resource + * @dev: PCI device to query + * @res: Resource to look for + * + * Goes over standard PCI resources (BARs) and checks if the given resource + * is partially or fully contained in any of them. In that case the + * matching resource is returned, %NULL otherwise. + */ +struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) +{ + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + struct resource *r = &dev->resource[i]; + + if (r->start && resource_contains(r, res)) + return r; + } + + return NULL; +} +EXPORT_SYMBOL(pci_find_resource); + +/** * pci_find_pcie_root_port - return PCIe Root Port * @dev: PCI device to query * diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index f5e1008a223d..30370817bf13 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -534,6 +534,24 @@ static int armpmu_filter_match(struct perf_event *event) return cpumask_test_cpu(cpu, &armpmu->supported_cpus); } +static ssize_t armpmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev)); + return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus); +} + +static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL); + +static struct attribute *armpmu_common_attrs[] = { + &dev_attr_cpus.attr, + NULL, +}; + +static struct attribute_group armpmu_common_attr_group = { + .attrs = armpmu_common_attrs, +}; + static void armpmu_init(struct arm_pmu *armpmu) { atomic_set(&armpmu->active_events, 0); @@ -549,7 +567,10 @@ static void armpmu_init(struct arm_pmu *armpmu) .stop = armpmu_stop, .read = armpmu_read, .filter_match = armpmu_filter_match, + .attr_groups = armpmu->attr_groups, }; + armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] = + &armpmu_common_attr_group; } /* Set at runtime when we know what CPU type we are. 
*/ @@ -602,7 +623,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) irqs = min(pmu_device->num_resources, num_possible_cpus()); irq = platform_get_irq(pmu_device, 0); - if (irq >= 0 && irq_is_percpu(irq)) { + if (irq > 0 && irq_is_percpu(irq)) { on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu_disable_percpu_irq, &irq, 1); free_percpu_irq(irq, &hw_events->percpu_pmu); @@ -616,7 +637,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) continue; irq = platform_get_irq(pmu_device, i); - if (irq >= 0) + if (irq > 0) free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); } } @@ -638,7 +659,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) } irq = platform_get_irq(pmu_device, 0); - if (irq >= 0 && irq_is_percpu(irq)) { + if (irq > 0 && irq_is_percpu(irq)) { err = request_percpu_irq(irq, handler, "arm-pmu", &hw_events->percpu_pmu); if (err) { @@ -919,7 +940,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) /* Check the IRQ type and prohibit a mix of PPIs and SPIs */ irq = platform_get_irq(pdev, i); - if (irq >= 0) { + if (irq > 0) { bool spi = !irq_is_percpu(irq); if (i > 0 && spi != using_spi) { @@ -970,7 +991,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) if (cpumask_weight(&pmu->supported_cpus) == 0) { int irq = platform_get_irq(pdev, 0); - if (irq >= 0 && irq_is_percpu(irq)) { + if (irq > 0 && irq_is_percpu(irq)) { /* If using PPIs, check the affinity of the partition */ int ret; @@ -1029,7 +1050,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, ret = of_pmu_irq_cfg(pmu); if (!ret) ret = init_fn(pmu); - } else { + } else if (probe_table) { cpumask_setall(&pmu->supported_cpus); ret = probe_current_pmu(pmu, probe_table); } @@ -1039,6 +1060,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, goto out_free; } + ret = cpu_pmu_init(pmu); if (ret) goto out_free; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index b86e1bcaa055..a511d518206b 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -651,11 +651,15 @@ static int ipc_create_pmc_devices(void) { int ret; - ret = ipc_create_tco_device(); - if (ret) { - dev_err(ipcdev.dev, "Failed to add tco platform device\n"); - return ret; + /* If we have ACPI based watchdog use that instead */ + if (!acpi_has_watchdog()) { + ret = ipc_create_tco_device(); + if (ret) { + dev_err(ipcdev.dev, "Failed to add tco platform device\n"); + return ret; + } } + ret = ipc_create_punit_device(); if (ret) { dev_err(ipcdev.dev, "Failed to add punit platform device\n"); diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c index db9973bb53f1..fa0f19b975a6 100644 --- a/drivers/power/avs/smartreflex.c +++ b/drivers/power/avs/smartreflex.c @@ -136,7 +136,7 @@ static void sr_set_clk_length(struct omap_sr *sr) if (IS_ERR(fck)) { dev_err(&sr->pdev->dev, "%s: unable to get fck for device %s\n", - __func__, dev_name(&sr->pdev->dev)); + __func__, dev_name(&sr->pdev->dev)); return; } @@ -170,8 +170,8 @@ static void sr_start_vddautocomp(struct omap_sr *sr) { if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) { dev_warn(&sr->pdev->dev, - "%s: smartreflex class driver not registered\n", - __func__); + "%s: smartreflex class driver not registered\n", + __func__); return; } @@ -183,8 +183,8 @@ static void sr_stop_vddautocomp(struct omap_sr *sr) { if (!sr_class || !(sr_class->disable)) { dev_warn(&sr->pdev->dev, - "%s: smartreflex 
class driver not registered\n", - __func__); + "%s: smartreflex class driver not registered\n", + __func__); return; } @@ -225,9 +225,8 @@ static int sr_late_init(struct omap_sr *sr_info) error: list_del(&sr_info->node); - dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" - "interrupt handler. Smartreflex will" - "not function as desired\n", __func__); + dev_err(&sr_info->pdev->dev, "%s: ERROR in registering interrupt handler. Smartreflex will not function as desired\n", + __func__); return ret; } @@ -263,7 +262,7 @@ static void sr_v1_disable(struct omap_sr *sr) if (timeout >= SR_DISABLE_TIMEOUT) dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n", - __func__); + __func__); /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN, @@ -308,7 +307,7 @@ static void sr_v2_disable(struct omap_sr *sr) if (timeout >= SR_DISABLE_TIMEOUT) dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n", - __func__); + __func__); /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT); @@ -322,7 +321,7 @@ static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row( if (!sr->nvalue_table) { dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n", - __func__); + __func__); return NULL; } @@ -356,8 +355,8 @@ int sr_configure_errgen(struct omap_sr *sr) u8 senp_shift, senn_shift; if (!sr) { - pr_warn("%s: NULL omap_sr from %pF\n", __func__, - (void *)_RET_IP_); + pr_warn("%s: NULL omap_sr from %pF\n", + __func__, (void *)_RET_IP_); return -EINVAL; } @@ -387,8 +386,8 @@ int sr_configure_errgen(struct omap_sr *sr) vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2; break; default: - dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" - "module without specifying the ip\n", __func__); + dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n", + __func__); return -EINVAL; } @@ -423,8 +422,8 @@ int sr_disable_errgen(struct omap_sr *sr) u32 vpboundint_en, vpboundint_st; if (!sr) { - pr_warn("%s: NULL omap_sr from %pF\n", __func__, - (void *)_RET_IP_); + pr_warn("%s: NULL omap_sr from %pF\n", + __func__, (void *)_RET_IP_); return -EINVAL; } @@ -440,8 +439,8 @@ int sr_disable_errgen(struct omap_sr *sr) vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2; break; default: - dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" - "module without specifying the ip\n", __func__); + dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n", + __func__); return -EINVAL; } @@ -478,8 +477,8 @@ int sr_configure_minmax(struct omap_sr *sr) u8 senp_shift, senn_shift; if (!sr) { - pr_warn("%s: NULL omap_sr from %pF\n", __func__, - (void *)_RET_IP_); + pr_warn("%s: NULL omap_sr from %pF\n", + __func__, (void *)_RET_IP_); return -EINVAL; } @@ -504,8 +503,8 @@ int sr_configure_minmax(struct omap_sr *sr) senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT; break; default: - dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" - "module without specifying the ip\n", __func__); + dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n", + __func__); return -EINVAL; } @@ -537,8 +536,8 @@ int sr_configure_minmax(struct omap_sr *sr) IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT); break; default: - dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" - "module without specifying the ip\n", __func__); + dev_err(&sr->pdev->dev, "%s: Trying to 
Configure smartreflex module without specifying the ip\n", + __func__); return -EINVAL; } @@ -563,16 +562,16 @@ int sr_enable(struct omap_sr *sr, unsigned long volt) int ret; if (!sr) { - pr_warn("%s: NULL omap_sr from %pF\n", __func__, - (void *)_RET_IP_); + pr_warn("%s: NULL omap_sr from %pF\n", + __func__, (void *)_RET_IP_); return -EINVAL; } volt_data = omap_voltage_get_voltdata(sr->voltdm, volt); if (IS_ERR(volt_data)) { - dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table" - "for nominal voltage %ld\n", __func__, volt); + dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table for nominal voltage %ld\n", + __func__, volt); return PTR_ERR(volt_data); } @@ -615,8 +614,8 @@ int sr_enable(struct omap_sr *sr, unsigned long volt) void sr_disable(struct omap_sr *sr) { if (!sr) { - pr_warn("%s: NULL omap_sr from %pF\n", __func__, - (void *)_RET_IP_); + pr_warn("%s: NULL omap_sr from %pF\n", + __func__, (void *)_RET_IP_); return; } @@ -658,13 +657,13 @@ int sr_register_class(struct omap_sr_class_data *class_data) struct omap_sr *sr_info; if (!class_data) { - pr_warning("%s:, Smartreflex class data passed is NULL\n", + pr_warn("%s:, Smartreflex class data passed is NULL\n", __func__); return -EINVAL; } if (sr_class) { - pr_warning("%s: Smartreflex class driver already registered\n", + pr_warn("%s: Smartreflex class driver already registered\n", __func__); return -EBUSY; } @@ -696,7 +695,7 @@ void omap_sr_enable(struct voltagedomain *voltdm) struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { - pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); + pr_warn("%s: omap_sr struct for voltdm not found\n", __func__); return; } @@ -704,8 +703,8 @@ void omap_sr_enable(struct voltagedomain *voltdm) return; if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) { - dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" - "registered\n", __func__); + dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n", + __func__); return; } @@ -728,7 +727,7 @@ void omap_sr_disable(struct voltagedomain *voltdm) struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { - pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); + pr_warn("%s: omap_sr struct for voltdm not found\n", __func__); return; } @@ -736,8 +735,8 @@ void omap_sr_disable(struct voltagedomain *voltdm) return; if (!sr_class || !(sr_class->disable)) { - dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" - "registered\n", __func__); + dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n", + __func__); return; } @@ -760,7 +759,7 @@ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm) struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { - pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); + pr_warn("%s: omap_sr struct for voltdm not found\n", __func__); return; } @@ -768,8 +767,8 @@ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm) return; if (!sr_class || !(sr_class->disable)) { - dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" - "registered\n", __func__); + dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n", + __func__); return; } @@ -787,8 +786,8 @@ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm) void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data) { if (!pmic_data) { - pr_warning("%s: Trying to register NULL PMIC data structure" - "with smartreflex\n", __func__); + pr_warn("%s: Trying to register NULL PMIC data structure with smartreflex\n", + 
__func__); return; } @@ -801,7 +800,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val) struct omap_sr *sr_info = data; if (!sr_info) { - pr_warning("%s: omap_sr struct not found\n", __func__); + pr_warn("%s: omap_sr struct not found\n", __func__); return -EINVAL; } @@ -815,13 +814,13 @@ static int omap_sr_autocomp_store(void *data, u64 val) struct omap_sr *sr_info = data; if (!sr_info) { - pr_warning("%s: omap_sr struct not found\n", __func__); + pr_warn("%s: omap_sr struct not found\n", __func__); return -EINVAL; } /* Sanity check */ if (val > 1) { - pr_warning("%s: Invalid argument %lld\n", __func__, val); + pr_warn("%s: Invalid argument %lld\n", __func__, val); return -EINVAL; } @@ -848,19 +847,13 @@ static int __init omap_sr_probe(struct platform_device *pdev) int i, ret = 0; sr_info = devm_kzalloc(&pdev->dev, sizeof(struct omap_sr), GFP_KERNEL); - if (!sr_info) { - dev_err(&pdev->dev, "%s: unable to allocate sr_info\n", - __func__); + if (!sr_info) return -ENOMEM; - } sr_info->name = devm_kzalloc(&pdev->dev, SMARTREFLEX_NAME_LEN, GFP_KERNEL); - if (!sr_info->name) { - dev_err(&pdev->dev, "%s: unable to allocate SR instance name\n", - __func__); + if (!sr_info->name) return -ENOMEM; - } platform_set_drvdata(pdev, sr_info); @@ -912,7 +905,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) if (sr_class) { ret = sr_late_init(sr_info); if (ret) { - pr_warning("%s: Error in SR late init\n", __func__); + pr_warn("%s: Error in SR late init\n", __func__); goto err_list_del; } } @@ -923,7 +916,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) if (IS_ERR_OR_NULL(sr_dbg_dir)) { ret = PTR_ERR(sr_dbg_dir); pr_err("%s:sr debugfs dir creation failed(%d)\n", - __func__, ret); + __func__, ret); goto err_list_del; } } @@ -945,8 +938,8 @@ static int __init omap_sr_probe(struct platform_device *pdev) nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir); if (IS_ERR_OR_NULL(nvalue_dir)) { - dev_err(&pdev->dev, "%s: Unable to create debugfs directory" - "for n-values\n", __func__); + dev_err(&pdev->dev, "%s: Unable to create debugfs directory for n-values\n", + __func__); ret = PTR_ERR(nvalue_dir); goto err_debugfs; } @@ -1053,12 +1046,12 @@ static int __init sr_init(void) if (sr_pmic_data && sr_pmic_data->sr_pmic_init) sr_pmic_data->sr_pmic_init(); else - pr_warning("%s: No PMIC hook to init smartreflex\n", __func__); + pr_warn("%s: No PMIC hook to init smartreflex\n", __func__); ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe); if (ret) { pr_err("%s: platform driver register failed for SR\n", - __func__); + __func__); return ret; } diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c index 4822346aadc6..7112004b8032 100644 --- a/drivers/soc/samsung/pm_domains.c +++ b/drivers/soc/samsung/pm_domains.c @@ -215,29 +215,22 @@ no_clk: /* Assign the child power domains to their parents */ for_each_matching_node(np, exynos_pm_domain_of_match) { - struct generic_pm_domain *child_domain, *parent_domain; - struct of_phandle_args args; + struct of_phandle_args child, parent; - args.np = np; - args.args_count = 0; - child_domain = of_genpd_get_from_provider(&args); - if (IS_ERR(child_domain)) - continue; + child.np = np; + child.args_count = 0; if (of_parse_phandle_with_args(np, "power-domains", - "#power-domain-cells", 0, &args) != 0) - continue; - - parent_domain = of_genpd_get_from_provider(&args); - if (IS_ERR(parent_domain)) + "#power-domain-cells", 0, + &parent) != 0) continue; - if (pm_genpd_add_subdomain(parent_domain, 
child_domain)) + if (of_genpd_add_subdomain(&parent, &child)) pr_warn("%s failed to add subdomain: %s\n", - parent_domain->name, child_domain->name); + parent.np->name, child.np->name); else pr_info("%s has as child subdomain: %s.\n", - parent_domain->name, child_domain->name); + parent.np->name, child.np->name); } return 0; diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c index 45807d8287d1..86dc41101610 100644 --- a/drivers/staging/board/board.c +++ b/drivers/staging/board/board.c @@ -140,7 +140,6 @@ static int board_staging_add_dev_domain(struct platform_device *pdev, const char *domain) { struct of_phandle_args pd_args; - struct generic_pm_domain *pd; struct device_node *np; np = of_find_node_by_path(domain); @@ -151,14 +150,8 @@ static int board_staging_add_dev_domain(struct platform_device *pdev, pd_args.np = np; pd_args.args_count = 0; - pd = of_genpd_get_from_provider(&pd_args); - if (IS_ERR(pd)) { - pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd)); - return PTR_ERR(pd); - } - pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name); - return pm_genpd_add_device(pd, &pdev->dev); + return of_genpd_add_device(&pd_args, &pdev->dev); } #else static inline int board_staging_add_dev_domain(struct platform_device *pdev, diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index e19969614203..5c0c123565ad 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -298,12 +298,17 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) p->serial_out = dw8250_serial_out32be; } } else if (has_acpi_companion(p->dev)) { - p->iotype = UPIO_MEM32; - p->regshift = 2; - p->serial_in = dw8250_serial_in32; + const struct acpi_device_id *id; + + id = acpi_match_device(p->dev->driver->acpi_match_table, + p->dev); + if (id && !strcmp(id->id, "APMC0D08")) { + p->iotype = UPIO_MEM32; + p->regshift = 2; + p->serial_in = dw8250_serial_in32; + data->uart_16550_compatible = true; + } p->set_termios = dw8250_set_termios; - /* So far none of there implement the Busy Functionality */ - data->uart_16550_compatible = true; } /* Platforms with iDMA */ diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 1bffe006ca9a..50dbaa805658 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -152,6 +152,19 @@ config TANGOX_WATCHDOG This driver can be built as a module. The module name is tangox_wdt. +config WDAT_WDT + tristate "ACPI Watchdog Action Table (WDAT)" + depends on ACPI + select ACPI_WATCHDOG + help + This driver adds support for systems with an ACPI Watchdog Action + Table (WDAT). Servers typically have this but it can be + found on some desktop machines as well. This driver will take + over the native iTCO watchdog driver found on many Intel chipsets. + + To compile this driver as a module, choose M here: the module will + be called wdat_wdt.
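The WDAT_WDT entry above is the other half of a pattern that recurs in this series: the i2c-i801, lpc_ich and intel_pmc_ipc hunks all consult acpi_has_watchdog() and leave their native TCO watchdog unregistered when firmware provides a WDAT. The gating reduces to the sketch below; example_init_native_wdt() is a hypothetical stand-in for a native driver's watchdog setup path, while acpi_has_watchdog() is the real helper this series introduces:

#include <linux/acpi.h>
#include <linux/errno.h>

static int example_init_native_wdt(void)
{
	/* WDAT present: wdat_wdt will bind to it, so step aside. */
	if (acpi_has_watchdog())
		return -ENODEV;

	/* ... otherwise set up the native/legacy TCO timer here ... */
	return 0;
}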
+ config WM831X_WATCHDOG tristate "WM831x watchdog" depends on MFD_WM831X diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index c22ad3ea3539..cba00430151b 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -202,6 +202,7 @@ obj-$(CONFIG_DA9062_WATCHDOG) += da9062_wdt.o obj-$(CONFIG_DA9063_WATCHDOG) += da9063_wdt.o obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o obj-$(CONFIG_TANGOX_WATCHDOG) += tangox_wdt.o +obj-$(CONFIG_WDAT_WDT) += wdat_wdt.o obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c new file mode 100644 index 000000000000..e473e3b23720 --- /dev/null +++ b/drivers/watchdog/wdat_wdt.c @@ -0,0 +1,526 @@ +/* + * ACPI Hardware Watchdog (WDAT) driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/acpi.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/watchdog.h> + +#define MAX_WDAT_ACTIONS ACPI_WDAT_ACTION_RESERVED + +/** + * struct wdat_instruction - Single ACPI WDAT instruction + * @entry: Copy of the ACPI table instruction + * @reg: Register the instruction is accessing + * @node: Next instruction in action sequence + */ +struct wdat_instruction { + struct acpi_wdat_entry entry; + void __iomem *reg; + struct list_head node; +}; + +/** + * struct wdat_wdt - ACPI WDAT watchdog device + * @pdev: Parent platform device + * @wdd: Watchdog core device + * @period: How long one watchdog period is in ms + * @stopped_in_sleep: Is this watchdog stopped by the firmware in S1-S5 + * @stopped: Was the watchdog stopped by the driver in suspend + * @instructions: An array of instruction lists indexed by an action number from + * the WDAT table. There can be %NULL entries for actions that are not + * implemented.
+ */ +struct wdat_wdt { + struct platform_device *pdev; + struct watchdog_device wdd; + unsigned int period; + bool stopped_in_sleep; + bool stopped; + struct list_head *instructions[MAX_WDAT_ACTIONS]; +}; + +#define to_wdat_wdt(wdd) container_of(wdd, struct wdat_wdt, wdd) + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static int wdat_wdt_read(struct wdat_wdt *wdat, + const struct wdat_instruction *instr, u32 *value) +{ + const struct acpi_generic_address *gas = &instr->entry.register_region; + + switch (gas->access_width) { + case 1: + *value = ioread8(instr->reg); + break; + case 2: + *value = ioread16(instr->reg); + break; + case 3: + *value = ioread32(instr->reg); + break; + default: + return -EINVAL; + } + + dev_dbg(&wdat->pdev->dev, "Read %#x from 0x%08llx\n", *value, + gas->address); + + return 0; +} + +static int wdat_wdt_write(struct wdat_wdt *wdat, + const struct wdat_instruction *instr, u32 value) +{ + const struct acpi_generic_address *gas = &instr->entry.register_region; + + switch (gas->access_width) { + case 1: + iowrite8((u8)value, instr->reg); + break; + case 2: + iowrite16((u16)value, instr->reg); + break; + case 3: + iowrite32(value, instr->reg); + break; + default: + return -EINVAL; + } + + dev_dbg(&wdat->pdev->dev, "Wrote %#x to 0x%08llx\n", value, + gas->address); + + return 0; +} + +static int wdat_wdt_run_action(struct wdat_wdt *wdat, unsigned int action, + u32 param, u32 *retval) +{ + struct wdat_instruction *instr; + + if (action >= ARRAY_SIZE(wdat->instructions)) + return -EINVAL; + + if (!wdat->instructions[action]) + return -EOPNOTSUPP; + + dev_dbg(&wdat->pdev->dev, "Running action %#x\n", action); + + /* Run each instruction sequentially */ + list_for_each_entry(instr, wdat->instructions[action], node) { + const struct acpi_wdat_entry *entry = &instr->entry; + const struct acpi_generic_address *gas; + u32 flags, value, mask, x, y; + bool preserve; + int ret; + + gas = &entry->register_region; + + preserve = entry->instruction & ACPI_WDAT_PRESERVE_REGISTER; + flags = entry->instruction & ~ACPI_WDAT_PRESERVE_REGISTER; + value = entry->value; + mask = entry->mask; + + switch (flags) { + case ACPI_WDAT_READ_VALUE: + ret = wdat_wdt_read(wdat, instr, &x); + if (ret) + return ret; + x >>= gas->bit_offset; + x &= mask; + if (retval) + *retval = x == value; + break; + + case ACPI_WDAT_READ_COUNTDOWN: + ret = wdat_wdt_read(wdat, instr, &x); + if (ret) + return ret; + x >>= gas->bit_offset; + x &= mask; + if (retval) + *retval = x; + break; + + case ACPI_WDAT_WRITE_VALUE: + x = value & mask; + x <<= gas->bit_offset; + if (preserve) { + ret = wdat_wdt_read(wdat, instr, &y); + if (ret) + return ret; + y = y & ~(mask << gas->bit_offset); + x |= y; + } + ret = wdat_wdt_write(wdat, instr, x); + if (ret) + return ret; + break; + + case ACPI_WDAT_WRITE_COUNTDOWN: + x = param; + x &= mask; + x <<= gas->bit_offset; + if (preserve) { + ret = wdat_wdt_read(wdat, instr, &y); + if (ret) + return ret; + y = y & ~(mask << gas->bit_offset); + x |= y; + } + ret = wdat_wdt_write(wdat, instr, x); + if (ret) + return ret; + break; + + default: + dev_err(&wdat->pdev->dev, "Unknown instruction: %u\n", + flags); + return -EINVAL; + } + } + + return 0; +} + +static int wdat_wdt_enable_reboot(struct wdat_wdt *wdat) +{ + int ret; + + /* + * WDAT specification says that the watchdog is required to reboot + * the system when it fires. 
However, it also states that it is + * recommended to make it configurable through a hardware register. We + * enable reboot now if it is configurable, just in case. + */ + ret = wdat_wdt_run_action(wdat, ACPI_WDAT_SET_REBOOT, 0, NULL); + if (ret && ret != -EOPNOTSUPP) { + dev_err(&wdat->pdev->dev, + "Failed to enable reboot when watchdog triggers\n"); + return ret; + } + + return 0; +} + +static void wdat_wdt_boot_status(struct wdat_wdt *wdat) +{ + u32 boot_status = 0; + int ret; + + ret = wdat_wdt_run_action(wdat, ACPI_WDAT_GET_STATUS, 0, &boot_status); + if (ret && ret != -EOPNOTSUPP) { + dev_err(&wdat->pdev->dev, "Failed to read boot status\n"); + return; + } + + if (boot_status) + wdat->wdd.bootstatus = WDIOF_CARDRESET; + + /* Clear the boot status in case BIOS did not do it */ + ret = wdat_wdt_run_action(wdat, ACPI_WDAT_SET_STATUS, 0, NULL); + if (ret && ret != -EOPNOTSUPP) + dev_err(&wdat->pdev->dev, "Failed to clear boot status\n"); +} + +static void wdat_wdt_set_running(struct wdat_wdt *wdat) +{ + u32 running = 0; + int ret; + + ret = wdat_wdt_run_action(wdat, ACPI_WDAT_GET_RUNNING_STATE, 0, + &running); + if (ret && ret != -EOPNOTSUPP) + dev_err(&wdat->pdev->dev, "Failed to read running state\n"); + + if (running) + set_bit(WDOG_HW_RUNNING, &wdat->wdd.status); +} + +static int wdat_wdt_start(struct watchdog_device *wdd) +{ + return wdat_wdt_run_action(to_wdat_wdt(wdd), + ACPI_WDAT_SET_RUNNING_STATE, 0, NULL); +} + +static int wdat_wdt_stop(struct watchdog_device *wdd) +{ + return wdat_wdt_run_action(to_wdat_wdt(wdd), + ACPI_WDAT_SET_STOPPED_STATE, 0, NULL); +} + +static int wdat_wdt_ping(struct watchdog_device *wdd) +{ + return wdat_wdt_run_action(to_wdat_wdt(wdd), ACPI_WDAT_RESET, 0, NULL); +} + +static int wdat_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + struct wdat_wdt *wdat = to_wdat_wdt(wdd); + unsigned int periods; + int ret; + + periods = timeout * 1000 / wdat->period; + ret = wdat_wdt_run_action(wdat, ACPI_WDAT_SET_COUNTDOWN, periods, NULL); + if (!ret) + wdd->timeout = timeout; + return ret; +} + +static unsigned int wdat_wdt_get_timeleft(struct watchdog_device *wdd) +{ + struct wdat_wdt *wdat = to_wdat_wdt(wdd); + u32 periods = 0; + + wdat_wdt_run_action(wdat, ACPI_WDAT_GET_COUNTDOWN, 0, &periods); + return periods * wdat->period / 1000; +} + +static const struct watchdog_info wdat_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .firmware_version = 0, + .identity = "wdat_wdt", +}; + +static const struct watchdog_ops wdat_wdt_ops = { + .owner = THIS_MODULE, + .start = wdat_wdt_start, + .stop = wdat_wdt_stop, + .ping = wdat_wdt_ping, + .set_timeout = wdat_wdt_set_timeout, + .get_timeleft = wdat_wdt_get_timeleft, +}; + +static int wdat_wdt_probe(struct platform_device *pdev) +{ + const struct acpi_wdat_entry *entries; + const struct acpi_table_wdat *tbl; + struct wdat_wdt *wdat; + struct resource *res; + void __iomem **regs; + acpi_status status; + int i, ret; + + status = acpi_get_table(ACPI_SIG_WDAT, 0, + (struct acpi_table_header **)&tbl); + if (ACPI_FAILURE(status)) + return -ENODEV; + + wdat = devm_kzalloc(&pdev->dev, sizeof(*wdat), GFP_KERNEL); + if (!wdat) + return -ENOMEM; + + regs = devm_kcalloc(&pdev->dev, pdev->num_resources, sizeof(*regs), + GFP_KERNEL); + if (!regs) + return -ENOMEM; + + /* The WDAT specification requires a timer period of >= 1 ms */ + if (tbl->timer_period < 1) + return -EINVAL; + if (tbl->min_count > tbl->max_count) + return -EINVAL; + + wdat->period = tbl->timer_period; +
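wdat_wdt_set_timeout() and wdat_wdt_get_timeleft() above convert between seconds and hardware countdown periods with plain integer arithmetic, so truncation can shorten the effective timeout by up to one period. A small sketch of that conversion (the helper names are illustrative, not part of the driver; period_ms corresponds to tbl->timer_period):

/* e.g. a 10 s timeout with a 600 ms period programs 16 periods = 9.6 s */
static unsigned int seconds_to_periods(unsigned int timeout_s,
				       unsigned int period_ms)
{
	return timeout_s * 1000 / period_ms;	/* integer division truncates */
}

static unsigned int periods_to_seconds(unsigned int periods,
				       unsigned int period_ms)
{
	return periods * period_ms / 1000;
}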
wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count; + wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count; + wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED; + wdat->wdd.info = &wdat_wdt_info; + wdat->wdd.ops = &wdat_wdt_ops; + wdat->pdev = pdev; + + /* Request and map all resources */ + for (i = 0; i < pdev->num_resources; i++) { + void __iomem *reg; + + res = &pdev->resource[i]; + if (resource_type(res) == IORESOURCE_MEM) { + reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg)) + return PTR_ERR(reg); + } else if (resource_type(res) == IORESOURCE_IO) { + reg = devm_ioport_map(&pdev->dev, res->start, 1); + if (!reg) + return -ENOMEM; + } else { + dev_err(&pdev->dev, "Unsupported resource\n"); + return -EINVAL; + } + + regs[i] = reg; + } + + entries = (struct acpi_wdat_entry *)(tbl + 1); + for (i = 0; i < tbl->entries; i++) { + const struct acpi_generic_address *gas; + struct wdat_instruction *instr; + struct list_head *instructions; + unsigned int action; + struct resource r; + int j; + + action = entries[i].action; + if (action >= MAX_WDAT_ACTIONS) { + dev_dbg(&pdev->dev, "Skipping unknown action: %u\n", + action); + continue; + } + + instr = devm_kzalloc(&pdev->dev, sizeof(*instr), GFP_KERNEL); + if (!instr) + return -ENOMEM; + + INIT_LIST_HEAD(&instr->node); + instr->entry = entries[i]; + + gas = &entries[i].register_region; + + memset(&r, 0, sizeof(r)); + r.start = gas->address; + r.end = r.start + gas->access_width; + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + r.flags = IORESOURCE_MEM; + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + r.flags = IORESOURCE_IO; + } else { + dev_dbg(&pdev->dev, "Unsupported address space: %d\n", + gas->space_id); + continue; + } + + /* Find the matching resource */ + for (j = 0; j < pdev->num_resources; j++) { + res = &pdev->resource[j]; + if (resource_contains(res, &r)) { + instr->reg = regs[j] + r.start - res->start; + break; + } + } + + if (!instr->reg) { + dev_err(&pdev->dev, "I/O resource not found\n"); + return -EINVAL; + } + + instructions = wdat->instructions[action]; + if (!instructions) { + instructions = devm_kzalloc(&pdev->dev, + sizeof(*instructions), GFP_KERNEL); + if (!instructions) + return -ENOMEM; + + INIT_LIST_HEAD(instructions); + wdat->instructions[action] = instructions; + } + + list_add_tail(&instr->node, instructions); + } + + wdat_wdt_boot_status(wdat); + wdat_wdt_set_running(wdat); + + ret = wdat_wdt_enable_reboot(wdat); + if (ret) + return ret; + + platform_set_drvdata(pdev, wdat); + + watchdog_set_nowayout(&wdat->wdd, nowayout); + return devm_watchdog_register_device(&pdev->dev, &wdat->wdd); +} + +#ifdef CONFIG_PM_SLEEP +static int wdat_wdt_suspend_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct wdat_wdt *wdat = platform_get_drvdata(pdev); + int ret; + + if (!watchdog_active(&wdat->wdd)) + return 0; + + /* + * We need to stop the watchdog if firmware is not doing it or if we + * are going to suspend-to-idle (where firmware is not involved). If + * firmware is stopping the watchdog, we kick it here one more time + * to give it some time.
+ */ + wdat->stopped = false; + if (acpi_target_system_state() == ACPI_STATE_S0 || + !wdat->stopped_in_sleep) { + ret = wdat_wdt_stop(&wdat->wdd); + if (!ret) + wdat->stopped = true; + } else { + ret = wdat_wdt_ping(&wdat->wdd); + } + + return ret; +} + +static int wdat_wdt_resume_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct wdat_wdt *wdat = platform_get_drvdata(pdev); + int ret; + + if (!watchdog_active(&wdat->wdd)) + return 0; + + if (!wdat->stopped) { + /* + * Looks like the boot firmware reinitializes the watchdog + * before it hands off to the OS on resume from sleep so we + * stop and reprogram the watchdog here. + */ + ret = wdat_wdt_stop(&wdat->wdd); + if (ret) + return ret; + + ret = wdat_wdt_set_timeout(&wdat->wdd, wdat->wdd.timeout); + if (ret) + return ret; + + ret = wdat_wdt_enable_reboot(wdat); + if (ret) + return ret; + } + + return wdat_wdt_start(&wdat->wdd); +} +#endif + +static const struct dev_pm_ops wdat_wdt_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(wdat_wdt_suspend_noirq, + wdat_wdt_resume_noirq) +}; + +static struct platform_driver wdat_wdt_driver = { + .probe = wdat_wdt_probe, + .driver = { + .name = "wdat_wdt", + .pm = &wdat_wdt_pm_ops, + }, +}; + +module_platform_driver(wdat_wdt_driver); + +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_DESCRIPTION("ACPI Hardware Watchdog (WDAT) driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:wdat_wdt"); diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index fe2e3ac7b5e3..12c2882bf647 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -144,6 +144,10 @@ #define ACPI_ADDRESS_RANGE_MAX 2 +/* Maximum number of While() loops before abort */ + +#define ACPI_MAX_LOOP_COUNT 0xFFFF + /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification changes) diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index 34f601e7b88d..48eb4dd99bb1 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -366,7 +366,7 @@ ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, u32, value) #define ACPI_FUNCTION_TRACE_STR(name, string) \ - ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, char *, string) + ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, const char *, string) #define ACPI_FUNCTION_ENTRY() \ acpi_ut_track_stack_ptr() @@ -425,6 +425,9 @@ #define return_PTR(pointer) \ ACPI_TRACE_EXIT (acpi_ut_ptr_exit, void *, pointer) +#define return_STR(string) \ + ACPI_TRACE_EXIT (acpi_ut_str_exit, const char *, string) + #define return_VALUE(value) \ ACPI_TRACE_EXIT (acpi_ut_value_exit, u64, value) @@ -478,6 +481,7 @@ #define return_VOID return #define return_ACPI_STATUS(s) return(s) #define return_PTR(s) return(s) +#define return_STR(s) return(s) #define return_VALUE(s) return(s) #define return_UINT8(s) return(s) #define return_UINT32(s) return(s) diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 562603d7aabe..f3414c83abb1 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -371,6 +371,12 @@ acpi_status acpi_os_wait_command_ready(void); acpi_status acpi_os_notify_command_complete(void); #endif +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point +void +acpi_os_trace_point(acpi_trace_event_type type, + u8 begin, u8 *aml, char *pathname); +#endif + /* * Obtain ACPI table(s) */ @@ -416,41 +422,4 @@ char *acpi_os_get_next_filename(void *dir_handle); void acpi_os_close_directory(void *dir_handle); #endif 
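The wdat_wdt suspend path shown earlier hinges on a single check: suspend-to-idle keeps the platform in S0, where firmware never runs and so cannot stop the watchdog for us. A condensed sketch of that decision; the helper is hypothetical, while acpi_target_system_state() and ACPI_STATE_S0 are the kernel symbols the driver actually uses:

#include <linux/acpi.h>
#include <linux/types.h>

/* Returns true when the driver must stop the watchdog itself. */
static bool example_driver_must_stop_wdt(bool stopped_in_sleep)
{
	/* Either the target state is S0 (suspend-to-idle, firmware
	 * uninvolved), or firmware does not stop this watchdog in S1-S5.
	 */
	return acpi_target_system_state() == ACPI_STATE_S0 ||
	       !stopped_in_sleep;
}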
-/* - * File I/O and related support - */ -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_file -ACPI_FILE acpi_os_open_file(const char *path, u8 modes); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_file -void acpi_os_close_file(ACPI_FILE file); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_file -int -acpi_os_read_file(ACPI_FILE file, - void *buffer, acpi_size size, acpi_size count); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_file -int -acpi_os_write_file(ACPI_FILE file, - void *buffer, acpi_size size, acpi_size count); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_file_offset -long acpi_os_get_file_offset(ACPI_FILE file); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_set_file_offset -acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point -void -acpi_os_trace_point(acpi_trace_event_type type, - u8 begin, u8 *aml, char *pathname); -#endif - #endif /* __ACPIOSXF_H__ */ diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 1ff3a76c265d..c7b3a132dbe7 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20160422 +#define ACPI_CA_VERSION 0x20160831 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -195,6 +195,13 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE); /* + * Optionally support module level code by parsing the entire table as + * a term_list. Default is FALSE, do not execute entire table until some + * lock order issues are fixed. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_parse_table_as_term_list, FALSE); + +/* * Optionally use 32-bit FADT addresses if and when there is a conflict * (address mismatch) between the 32-bit and 64-bit versions of the * address. 
Although ACPICA adheres to the ACPI specification which @@ -416,18 +423,19 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running); /* * Initialization */ -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION acpi_initialize_tables(struct acpi_table_desc *initial_storage, u32 initial_table_count, u8 allow_resize)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_initialize_subsystem(void)) - -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_enable_subsystem(u32 flags)) - -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init - acpi_initialize_objects(u32 flags)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_terminate(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_subsystem(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_enable_subsystem(u32 flags)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_objects(u32 flags)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_terminate(void)) /* * Miscellaneous global interfaces @@ -467,7 +475,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status /* * ACPI table load/unload interfaces */ -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION acpi_install_table(acpi_physical_address address, u8 physical)) @@ -476,14 +484,17 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_parent_table(acpi_handle object)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_load_tables(void)) /* * ACPI table manipulation interfaces */ -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_reallocate_root_table(void)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION acpi_find_root_pointer(acpi_physical_address *rsdp_address)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status @@ -732,6 +743,10 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status u32 gpe_number)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_mask_gpe(acpi_handle gpe_device, + u32 gpe_number, u8 is_masked)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_mark_gpe_for_wake(acpi_handle gpe_device, u32 gpe_number)) @@ -935,9 +950,6 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(void acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname)) -ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1) - void ACPI_INTERNAL_VAR_XFACE - acpi_log_error(const char *format, ...)) acpi_status acpi_initialize_debugger(void); diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index c19700e2a2fe..1b949e08015c 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -230,62 +230,72 @@ struct acpi_table_facs { /* Fields common to all versions of the FADT */ struct acpi_table_fadt { - struct acpi_table_header header; /* Common ACPI table header */ - u32 facs; /* 32-bit physical address of FACS */ - u32 dsdt; /* 32-bit physical address of DSDT */ - u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ - u8 preferred_profile; /* Conveys preferred power management profile to OSPM. 
*/ - u16 sci_interrupt; /* System vector of SCI interrupt */ - u32 smi_command; /* 32-bit Port address of SMI command port */ - u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */ - u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */ - u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */ - u8 pstate_control; /* Processor performance state control */ - u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */ - u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */ - u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */ - u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */ - u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */ - u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ - u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */ - u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */ - u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */ - u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */ - u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */ - u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */ - u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */ - u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */ - u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */ - u8 cst_control; /* Support for the _CST object and C-States change notification */ - u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */ - u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */ - u16 flush_size; /* Processor memory cache line width, in bytes */ - u16 flush_stride; /* Number of flush strides that need to be read */ - u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */ - u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ - u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ - u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ - u8 century; /* Index to century in RTC CMOS RAM */ - u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ - u8 reserved; /* Reserved, must be zero */ - u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ - struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ - u8 reset_value; /* Value to write to the reset_register port to reset the system */ - u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ - u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */ - u64 Xfacs; /* 64-bit physical address of FACS */ - u64 Xdsdt; /* 64-bit physical address of DSDT */ - struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */ - struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */ - struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */ - struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */ - struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */ - struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ - struct acpi_generic_address 
xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ - struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ - struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */ - struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */ - u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */ + struct acpi_table_header header; /* [V1] Common ACPI table header */ + u32 facs; /* [V1] 32-bit physical address of FACS */ + u32 dsdt; /* [V1] 32-bit physical address of DSDT */ + u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ + u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. */ + u16 sci_interrupt; /* [V1] System vector of SCI interrupt */ + u32 smi_command; /* [V1] 32-bit Port address of SMI command port */ + u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */ + u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */ + u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */ + u8 pstate_control; /* [V1] Processor performance state control */ + u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */ + u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */ + u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */ + u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */ + u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */ + u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ + u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */ + u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */ + u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */ + u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */ + u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */ + u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */ + u8 gpe0_block_length; /* [V1] Byte Length of ports at gpe0_block */ + u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */ + u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */ + u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */ + u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */ + u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */ + u16 flush_size; /* [V1] Processor memory cache line width, in bytes */ + u16 flush_stride; /* [V1] Number of flush strides that need to be read */ + u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */ + u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */ + u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */ + u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */ + u8 century; /* [V1] Index to century in RTC CMOS RAM */ + u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */ + u8 reserved; /* [V1] Reserved, must be zero */ + u32 flags; /* [V1] Miscellaneous flag bits (see below for individual flags) */ + /* End of Version 1 FADT fields (ACPI 1.0) */ + + struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */ + u8 reset_value; /* [V3] Value 
to write to the reset_register port to reset the system */ + u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ + u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */ + u64 Xfacs; /* [V3] 64-bit physical address of FACS */ + u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */ + struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */ + struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */ + struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */ + struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */ + struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */ + struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ + struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */ + struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */ + /* End of Version 3 FADT fields (ACPI 2.0) */ + + struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */ + /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */ + + struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */ + /* End of Version 5 FADT fields (ACPI 5.0) */ + + u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */ + /* End of Version 6 FADT fields (ACPI 6.0) */ + }; /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ @@ -301,8 +311,8 @@ struct acpi_table_fadt { /* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ -#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */ -#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */ +#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */ +#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */ /* Masks for FADT flags */ @@ -399,20 +409,34 @@ struct acpi_table_desc { * match the expected length. In other words, the length of the * FADT is the bottom line as to what the version really is. * - * For reference, the values below are as follows: - * FADT V1 size: 0x074 - * FADT V2 size: 0x084 - * FADT V3 size: 0x0F4 - * FADT V4 size: 0x0F4 - * FADT V5 size: 0x10C - * FADT V6 size: 0x114 + * NOTE: There is no officially released V2 of the FADT. This + * version was used only for prototyping and testing during the + * 32-bit to 64-bit transition. V3 was the first official 64-bit + * version of the FADT. + * + * Update this list of defines when a new version of the FADT is + * added to the ACPI specification. Note that the FADT version is + * only incremented when new fields are appended to the existing + * version. Therefore, the FADT version is completely independent + * from the version of the ACPI specification where it is + * defined. 
+ * + * For reference, the various FADT lengths are as follows: + * FADT V1 size: 0x074 ACPI 1.0 + * FADT V3 size: 0x0F4 ACPI 2.0 + * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0 + * FADT V5 size: 0x10C ACPI 5.0 + * FADT V6 size: 0x114 ACPI 6.0 */ -#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) -#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1) -#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) -#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) -#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) +#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */ +#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */ +#define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */ +#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */ +#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */ + +/* Update these when new FADT versions are added */ +#define ACPI_FADT_MAX_VERSION 6 #define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" #endif /* __ACTBL_H__ */
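Per the note above, the header Length field, not the Revision field, is what reliably identifies an FADT version. A hedged sketch of the resulting classification logic (the helper name is hypothetical, not part of the patch):

	/* Hypothetical helper: classify an FADT by its header Length */
	static u8 fadt_version_from_length(struct acpi_table_fadt *fadt)
	{
		u32 length = fadt->header.length;

		if (length >= ACPI_FADT_V6_SIZE)
			return 6;
		if (length >= ACPI_FADT_V5_SIZE)
			return 5;
		if (length >= ACPI_FADT_V4_SIZE)
			return 4;
		if (length >= ACPI_FADT_V3_SIZE)
			return 3;
		return 1;	/* no officially released V2 exists */
	}
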
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index cb389efd321c..1d798abae710 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -732,16 +732,17 @@ typedef u32 acpi_event_type; * The encoding of acpi_event_status is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the event is enabled). - * +-------------+-+-+-+-+-+ - * | Bits 31:5 |4|3|2|1|0| - * +-------------+-+-+-+-+-+ - * | | | | | | - * | | | | | +- Enabled? - * | | | | +--- Enabled for wake? - * | | | +----- Status bit set? - * | | +------- Enable bit set? - * | +--------- Has a handler? - * +--------------- <Reserved> + * +-------------+-+-+-+-+-+-+ + * | Bits 31:6 |5|4|3|2|1|0| + * +-------------+-+-+-+-+-+-+ + * | | | | | | | + * | | | | | | +- Enabled? + * | | | | | +--- Enabled for wake? + * | | | | +----- Status bit set? + * | | | +------- Enable bit set? + * | | +--------- Has a handler? + * | +----------- Masked? + * +----------------- <Reserved> */ typedef u32 acpi_event_status; @@ -751,6 +752,7 @@ typedef u32 acpi_event_status; #define ACPI_EVENT_FLAG_STATUS_SET (acpi_event_status) 0x04 #define ACPI_EVENT_FLAG_ENABLE_SET (acpi_event_status) 0x08 #define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x10 +#define ACPI_EVENT_FLAG_MASKED (acpi_event_status) 0x20 #define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET /* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */ @@ -761,14 +763,15 @@ typedef u32 acpi_event_status; /* * GPE info flags - Per GPE - * +-------+-+-+---+ - * | 7:5 |4|3|2:0| - * +-------+-+-+---+ - * | | | | - * | | | +-- Type of dispatch:to method, handler, notify, or none - * | | +----- Interrupt type: edge or level triggered - * | +------- Is a Wake GPE - * +------------ <Reserved> + * +---+-+-+-+---+ + * |7:6|5|4|3|2:0| + * +---+-+-+-+---+ + * | | | | | + * | | | | +-- Type of dispatch: to method, handler, notify, or none + * | | | +----- Interrupt type: edge or level triggered + * | | +------- Is a Wake GPE + * | +--------- Is GPE masked by the software GPE masking mechanism + * +------------ <Reserved> */ #define ACPI_GPE_DISPATCH_NONE (u8) 0x00 #define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 @@ -1031,12 +1034,6 @@ struct acpi_statistics { u32 method_count; }; -/* Table Event Types */ - -#define ACPI_TABLE_EVENT_LOAD 0x0 -#define ACPI_TABLE_EVENT_UNLOAD 0x1 -#define ACPI_NUM_TABLE_EVENTS 2 - /* * Types specific to the OS service interfaces */ @@ -1088,9 +1085,13 @@ acpi_status (*acpi_exception_handler) (acpi_status aml_status, typedef acpi_status (*acpi_table_handler) (u32 event, void *table, void *context); -#define ACPI_TABLE_LOAD 0x0 -#define ACPI_TABLE_UNLOAD 0x1 -#define ACPI_NUM_TABLE_EVENTS 2 +/* Table Event Types */ + +#define ACPI_TABLE_EVENT_LOAD 0x0 +#define ACPI_TABLE_EVENT_UNLOAD 0x1 +#define ACPI_TABLE_EVENT_INSTALL 0x2 +#define ACPI_TABLE_EVENT_UNINSTALL 0x3 +#define ACPI_NUM_TABLE_EVENTS 4 /* Address Spaces (For Operation Regions) */ @@ -1285,15 +1286,6 @@ typedef enum { #define ACPI_OSI_WIN_8 0x0C #define ACPI_OSI_WIN_10 0x0D -/* Definitions of file IO */ - -#define ACPI_FILE_READING 0x01 -#define ACPI_FILE_WRITING 0x02 -#define ACPI_FILE_BINARY 0x04 - -#define ACPI_FILE_BEGIN 0x01 -#define ACPI_FILE_END 0x02 - /* Definitions of getopt */ #define ACPI_OPT_END -1 diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 284965cbc9af..427a7c3e6c75 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -24,7 +24,9 @@ #define CPPC_NUM_ENT 21 #define CPPC_REV 2 -#define PCC_CMD_COMPLETE 1 +#define PCC_CMD_COMPLETE_MASK (1 << 0) +#define PCC_ERROR_MASK (1 << 2) + #define MAX_CPC_REG_ENT 19 /* CPPC specific PCC commands. */ @@ -49,6 +51,7 @@ struct cpc_reg { */ struct cpc_register_resource { acpi_object_type type; + u64 __iomem *sys_mem_vaddr; union { struct cpc_reg reg; u64 int_value; @@ -60,8 +63,11 @@ struct cpc_desc { int num_entries; int version; int cpu_id; + int write_cmd_status; + int write_cmd_id; struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT]; struct acpi_psd_package domain_info; + struct kobject kobj; };
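For context on the cppc_perf_fb_ctrs rework just below: the feedback counters are meant to be sampled twice and differenced. A hedged sketch under the new field layout (variable names are illustrative; per the ACPI specification, delivered performance scales as reference_perf * delta_delivered / delta_reference):

	struct cppc_perf_fb_ctrs fb0, fb1;
	u64 delta_delivered, delta_reference;

	if (cppc_get_perf_ctrs(cpu, &fb0))
		return;
	/* ... workload runs for some interval ... */
	if (cppc_get_perf_ctrs(cpu, &fb1))
		return;

	delta_delivered = fb1.delivered - fb0.delivered;
	delta_reference = fb1.reference - fb0.reference;
	/* delivered perf ~= fb1.reference_perf * delta_delivered / delta_reference */
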
/* These are indexes into the per-cpu cpc_regs[]. Order is important. */ @@ -96,7 +102,6 @@ enum cppc_regs { struct cppc_perf_caps { u32 highest_perf; u32 nominal_perf; - u32 reference_perf; u32 lowest_perf; }; @@ -108,13 +113,13 @@ struct cppc_perf_ctrls { struct cppc_perf_fb_ctrs { u64 reference; - u64 prev_reference; u64 delivered; - u64 prev_delivered; + u64 reference_perf; + u64 ctr_wrap_time; }; /* Per CPU container for runtime CPPC management. */ -struct cpudata { +struct cppc_cpudata { int cpu; struct cppc_perf_caps perf_caps; struct cppc_perf_ctrls perf_ctrls; @@ -127,6 +132,7 @@ struct cpudata { extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); -extern int acpi_get_psd_map(struct cpudata **); +extern int acpi_get_psd_map(struct cppc_cpudata **); +extern unsigned int cppc_get_transition_latency(int cpu); #endif /* _CPPC_ACPI_H*/ diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h index 86b5a8447606..34cce729109c 100644 --- a/include/acpi/platform/acenv.h +++ b/include/acpi/platform/acenv.h @@ -78,6 +78,7 @@ (defined ACPI_EXAMPLE_APP) #define ACPI_APPLICATION #define ACPI_SINGLE_THREADED +#define USE_NATIVE_ALLOCATE_ZEROED #endif /* iASL configuration */ @@ -124,7 +125,6 @@ #ifdef ACPI_DUMP_APP #define ACPI_USE_NATIVE_MEMORY_MAPPING -#define USE_NATIVE_ALLOCATE_ZEROED #endif /* acpi_names/Example configuration. Hardware disabled */ @@ -149,7 +149,6 @@ /* Common for all ACPICA applications */ #ifdef ACPI_APPLICATION -#define ACPI_USE_SYSTEM_CLIBRARY #define ACPI_USE_LOCAL_CACHE #endif @@ -167,10 +166,21 @@ /****************************************************************************** * * Host configuration files. The compiler configuration files are included - * by the host files. + * first. * *****************************************************************************/ +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) +#include <acpi/platform/acgcc.h> + +#elif defined(_MSC_VER) +#include "acmsvc.h" + +#elif defined(__INTEL_COMPILER) +#include "acintel.h" + +#endif + #if defined(_LINUX) || defined(__linux__) #include <acpi/platform/aclinux.h> @@ -210,18 +220,20 @@ #elif defined(__OS2__) #include "acos2.h" -#elif defined(_AED_EFI) -#include "acefi.h" - -#elif defined(_GNU_EFI) -#include "acefi.h" - #elif defined(__HAIKU__) #include "achaiku.h" #elif defined(__QNX__) #include "acqnx.h" +/* + * EFI applications can be built with -nostdlib; in this case, it must be + * included after including all other host environmental definitions, in + * order to override the definitions. + */ +#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI) +#include "acefi.h" + #else /* Unknown environment */ @@ -326,7 +338,8 @@ * ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library. * Otherwise, local versions of string/memory functions will be used. * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and - * the standard header files may be used. + * the standard header files may be used. Defining this implies that + * ACPI_USE_SYSTEM_CLIBRARY has been defined. * * The ACPICA subsystem only uses low level C library functions that do not * call operating system services and may therefore be inlined in the code. @@ -334,7 +347,6 @@ * It may be necessary to tailor these include files to the target * generation environment. */ -#ifdef ACPI_USE_SYSTEM_CLIBRARY /* Use the standard C library headers. We want to keep these to a minimum. */
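Given the relationship spelled out above (defining ACPI_USE_STANDARD_HEADERS implies ACPI_USE_SYSTEM_CLIBRARY), a minimal sketch of a hypothetical host configuration header that links against a real C library:

	/* Hypothetical host configuration: use the native C library */
	#define ACPI_USE_SYSTEM_CLIBRARY
	#define ACPI_USE_STANDARD_HEADERS	/* implies ACPI_USE_SYSTEM_CLIBRARY */
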
@@ -342,57 +354,20 @@ /* Use the standard headers from the standard locations */ -#include <stdarg.h> #include <stdlib.h> #include <string.h> #include <ctype.h> +#ifdef ACPI_APPLICATION +#include <stdio.h> +#include <fcntl.h> +#include <errno.h> +#include <time.h> +#include <signal.h> +#endif #endif /* ACPI_USE_STANDARD_HEADERS */ -/* We will be linking to the standard Clib functions */ - -#else - -/****************************************************************************** - * - * Not using native C library, use local implementations - * - *****************************************************************************/ - -/* - * Use local definitions of C library macros and functions. These function - * implementations may not be as efficient as an inline or assembly code - * implementation provided by a native C library, but they are functionally - * equivalent. - */ -#ifndef va_arg - -#ifndef _VALIST -#define _VALIST -typedef char *va_list; -#endif /* _VALIST */ - -/* Storage alignment properties */ - -#define _AUPBND (sizeof (acpi_native_int) - 1) -#define _ADNBND (sizeof (acpi_native_int) - 1) - -/* Variable argument list macro definitions */ - -#define _bnd(X, bnd) (((sizeof (X)) + (bnd)) & (~(bnd))) -#define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND)))) -#define va_end(ap) (ap = (va_list) NULL) -#define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND)))) - -#endif /* va_arg */ - -/* Use the local (ACPICA) definitions of the clib functions */ - -#endif /* ACPI_USE_SYSTEM_CLIBRARY */ - -#ifndef ACPI_FILE #ifdef ACPI_APPLICATION -#include <stdio.h> #define ACPI_FILE FILE * #define ACPI_FILE_OUT stdout #define ACPI_FILE_ERR stderr @@ -401,6 +376,9 @@ typedef char *va_list; #define ACPI_FILE_OUT NULL #define ACPI_FILE_ERR NULL #endif /* ACPI_APPLICATION */ -#endif /* ACPI_FILE */ + +#ifndef ACPI_INIT_FUNCTION +#define ACPI_INIT_FUNCTION +#endif #endif /* __ACENV_H__ */ diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h index 4f15c1d10b61..b3171b9d6974 100644 --- a/include/acpi/platform/acenvex.h +++ b/include/acpi/platform/acenvex.h @@ -56,17 +56,24 @@ #if defined(_LINUX) || defined(__linux__) #include <acpi/platform/aclinuxex.h> -#elif defined(WIN32) -#include "acwinex.h" +#elif defined(__DragonFly__) +#include "acdragonflyex.h" -#elif defined(_AED_EFI) +/* + * EFI applications can be built with -nostdlib; in this case, it must be + * included after including all other host environmental definitions, in + * order to override the definitions. + */ +#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI) #include "acefiex.h" -#elif defined(_GNU_EFI) -#include "acefiex.h" +#endif -#elif defined(__DragonFly__) -#include "acdragonflyex.h" +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) +#include "acgccex.h" + +#elif defined(_MSC_VER) +#include "acmsvcex.h" #endif diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index c5a216c976fa..8f66aaabadf7 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -44,6 +44,12 @@ #ifndef __ACGCC_H__ #define __ACGCC_H__ +/* + * Using the compiler specific <stdarg.h> is good practice even when + * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS is undefined). + */ +#include <stdarg.h> + #define ACPI_INLINE __inline__ /* Function name is used for debug output. 
Non-ANSI, compiler-dependent */ @@ -64,17 +70,6 @@ */ #define ACPI_UNUSED_VAR __attribute__ ((unused)) -/* - * Some versions of gcc implement strchr() with a buggy macro. So, - * undef it here. Prevents error messages of this form (usually from the - * file getopt.c): - * - * error: logical '&&' with non-zero constant will always evaluate as true - */ -#ifdef strchr -#undef strchr -#endif - /* GCC supports __VA_ARGS__ in macros */ #define COMPILER_VA_MACRO 1 diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h new file mode 100644 index 000000000000..46ead2caada4 --- /dev/null +++ b/include/acpi/platform/acgccex.h @@ -0,0 +1,58 @@ +/****************************************************************************** + * + * Name: acgccex.h - Extra GCC specific defines, etc. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACGCCEX_H__ +#define __ACGCCEX_H__ + +/* + * Some versions of gcc implement strchr() with a buggy macro. So, + * undef it here. 
Prevents error messages of this form (usually from the + * file getopt.c): + * + * error: logical '&&' with non-zero constant will always evaluate as true + */ +#ifdef strchr +#undef strchr +#endif + +#endif /* __ACGCCEX_H__ */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 93b61b1f2beb..a5d98d171866 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -92,6 +92,8 @@ #include <asm/acenv.h> #endif +#define ACPI_INIT_FUNCTION __init + #ifndef CONFIG_ACPI /* External globals for __KERNEL__, stubs is needed */ @@ -168,13 +170,21 @@ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory +#define ACPI_MSG_ERROR KERN_ERR "ACPI Error: " +#define ACPI_MSG_EXCEPTION KERN_ERR "ACPI Exception: " +#define ACPI_MSG_WARNING KERN_WARNING "ACPI Warning: " +#define ACPI_MSG_INFO KERN_INFO "ACPI: " + +#define ACPI_MSG_BIOS_ERROR KERN_ERR "ACPI BIOS Error (bug): " +#define ACPI_MSG_BIOS_WARNING KERN_WARNING "ACPI BIOS Warning (bug): " + #else /* !__KERNEL__ */ -#include <stdarg.h> -#include <string.h> -#include <stdlib.h> -#include <ctype.h> +#define ACPI_USE_STANDARD_HEADERS + +#ifdef ACPI_USE_STANDARD_HEADERS #include <unistd.h> +#endif /* Define/disable kernel-specific declarators */ @@ -205,8 +215,4 @@ #endif /* __KERNEL__ */ -/* Linux uses GCC */ - -#include <acpi/platform/acgcc.h> - #endif /* __ACLINUX_H__ */ diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h index f8bb0d857bff..a5509d87230a 100644 --- a/include/acpi/platform/aclinuxex.h +++ b/include/acpi/platform/aclinuxex.h @@ -71,7 +71,7 @@ /* * Overrides for in-kernel ACPICA */ -acpi_status __init acpi_os_initialize(void); +acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void); acpi_status acpi_os_terminate(void); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index c5eaf2f80a4c..2d42fb7d56b7 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -85,6 +85,8 @@ static inline const char *acpi_dev_name(struct acpi_device *adev) return dev_name(&adev->dev); } +struct device *acpi_get_first_physical_node(struct acpi_device *adev); + enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, ACPI_IRQ_MODEL_IOAPIC, @@ -634,6 +636,11 @@ static inline const char *acpi_dev_name(struct acpi_device *adev) return NULL; } +static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev) +{ + return NULL; +} + static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { } @@ -927,9 +934,17 @@ struct acpi_reference_args { #ifdef CONFIG_ACPI int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj); -int acpi_node_get_property_reference(struct fwnode_handle *fwnode, - const char *name, size_t index, - struct acpi_reference_args *args); +int __acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct acpi_reference_args *args); + +static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, + struct acpi_reference_args *args) +{ + return __acpi_node_get_property_reference(fwnode, name, index, + MAX_ACPI_REFERENCE_ARGS, args); +} int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, void **valptr); @@ -1005,6 +1020,14 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, return -ENXIO; } +static inline int 
+__acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, size_t num_args, + struct acpi_reference_args *args) +{ + return -ENXIO; +} + static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, const char *name, size_t index, struct acpi_reference_args *args) @@ -1074,4 +1097,10 @@ void acpi_table_upgrade(void); static inline void acpi_table_upgrade(void) { } #endif +#if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG) +extern bool acpi_has_watchdog(void); +#else +static inline bool acpi_has_watchdog(void) { return false; } +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 797d9c8e9a1b..ad4f1f33a74e 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -228,7 +228,11 @@ static inline void cpu_hotplug_done(void) {} #endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_PM_SLEEP_SMP -extern int disable_nonboot_cpus(void); +extern int freeze_secondary_cpus(int primary); +static inline int disable_nonboot_cpus(void) +{ + return freeze_secondary_cpus(0); +} extern void enable_nonboot_cpus(void); #else /* !CONFIG_PM_SLEEP_SMP */ static inline int disable_nonboot_cpus(void) { return 0; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 34bd80512a0c..eb445a4e2a83 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -47,6 +47,8 @@ enum cpuhp_state { CPUHP_AP_PERF_METAG_STARTING, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, CPUHP_AP_ARM_VFP_STARTING, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h index 0a83a1e648b0..4db00b02ca3f 100644 --- a/include/linux/devfreq-event.h +++ b/include/linux/devfreq-event.h @@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev) return -EINVAL; } -static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev) -{ - return ERR_PTR(-EINVAL); -} - static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( struct device *dev, int index) { diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 661af564fae8..a534c7f15a61 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -21,6 +21,8 @@ * * DEFINE_STATIC_KEY_TRUE(key); * DEFINE_STATIC_KEY_FALSE(key); + * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); + * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); * static_branch_likely() * static_branch_unlikely() * @@ -270,6 +272,16 @@ struct static_key_false { #define DEFINE_STATIC_KEY_FALSE(name) \ struct static_key_false name = STATIC_KEY_FALSE_INIT +#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \ + struct static_key_true name[count] = { \ + [0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \ + } + +#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count) \ + struct static_key_false name[count] = { \ + [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \ + } +
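The array initializers added above pair with the existing static_branch_likely()/static_branch_unlikely() tests; a hedged usage sketch (the key name and count are illustrative only):

	/* Hypothetical example: one static key per tracked feature */
	DEFINE_STATIC_KEY_ARRAY_FALSE(feature_keys, 8);

	static bool feature_enabled(int i)
	{
		return static_branch_unlikely(&feature_keys[i]);
	}
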
extern bool ____wrong_branch_error(void); #define static_key_enabled(x) \ diff --git a/include/linux/pci.h b/include/linux/pci.h index 0ab835965669..a917d4b20554 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1126,6 +1126,7 @@ void pdev_enable_device(struct pci_dev *); int pci_enable_resources(struct pci_dev *, int mask); void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), int (*)(const struct pci_dev *, u8, u8)); +struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res); #define HAVE_PCI_REQ_REGIONS 2 int __must_check pci_request_regions(struct pci_dev *, const char *); int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); @@ -1542,6 +1543,9 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; } +static inline struct resource *pci_find_resource(struct pci_dev *dev, + struct resource *res) +{ return NULL; } static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index e18843809eec..9ff07d3fc8de 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -14,7 +14,7 @@ #include <linux/interrupt.h> #include <linux/perf_event.h> - +#include <linux/sysfs.h> #include <asm/cputype.h> /* @@ -77,6 +77,13 @@ struct pmu_hw_events { struct arm_pmu *percpu_pmu; }; +enum armpmu_attr_groups { + ARMPMU_ATTR_GROUP_COMMON, + ARMPMU_ATTR_GROUP_EVENTS, + ARMPMU_ATTR_GROUP_FORMATS, + ARMPMU_NR_ATTR_GROUPS +}; + struct arm_pmu { struct pmu pmu; cpumask_t active_irqs; @@ -111,6 +118,8 @@ struct arm_pmu { struct pmu_hw_events __percpu *hw_events; struct list_head entry; struct notifier_block cpu_pm_nb; + /* the attr_groups array must be NULL-terminated */ + const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1]; }; #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) @@ -151,6 +160,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, const struct of_device_id *of_table, const struct pmu_probe_info *probe_table); +#define ARMV8_PMU_PDEV_NAME "armv8-pmu" + #endif /* CONFIG_ARM_PMU */ #endif /* __ARM_PMU_H__ */ diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 31fec858088c..a09fe5c009c8 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -51,6 +51,8 @@ struct generic_pm_domain { struct mutex lock; struct dev_power_governor *gov; struct work_struct power_off_work; + struct fwnode_handle *provider; /* Identity of the domain provider */ + bool has_provider; const char *name; atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ @@ -116,7 +118,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) return to_gpd_data(dev->power.subsys_data->domain_data); } -extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev); extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td); @@ -129,6 +130,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); extern int pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); +extern int pm_genpd_remove(struct generic_pm_domain *genpd); extern struct dev_power_governor simple_qos_governor; extern struct 
dev_power_governor pm_domain_always_on_gov; @@ -138,10 +140,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) { return ERR_PTR(-ENOSYS); } -static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev) -{ - return NULL; -} static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td) @@ -168,6 +166,10 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd, { return -ENOSYS; } +static inline int pm_genpd_remove(struct generic_pm_domain *genpd) +{ + return -ENOTSUPP; +} #endif static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, @@ -192,57 +194,57 @@ struct genpd_onecell_data { unsigned int num_domains; }; -typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, - void *data); - #ifdef CONFIG_PM_GENERIC_DOMAINS_OF -int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, - void *data); +int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd); +int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data); void of_genpd_del_provider(struct device_node *np); -struct generic_pm_domain *of_genpd_get_from_provider( - struct of_phandle_args *genpdspec); - -struct generic_pm_domain *__of_genpd_xlate_simple( - struct of_phandle_args *genpdspec, - void *data); -struct generic_pm_domain *__of_genpd_xlate_onecell( - struct of_phandle_args *genpdspec, - void *data); +extern int of_genpd_add_device(struct of_phandle_args *args, + struct device *dev); +extern int of_genpd_add_subdomain(struct of_phandle_args *parent, + struct of_phandle_args *new_subdomain); +extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int genpd_dev_pm_attach(struct device *dev); #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ -static inline int __of_genpd_add_provider(struct device_node *np, - genpd_xlate_t xlate, void *data) +static inline int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd) { - return 0; + return -ENOTSUPP; } -static inline void of_genpd_del_provider(struct device_node *np) {} -static inline struct generic_pm_domain *of_genpd_get_from_provider( - struct of_phandle_args *genpdspec) +static inline int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data) { - return NULL; + return -ENOTSUPP; } -#define __of_genpd_xlate_simple NULL -#define __of_genpd_xlate_onecell NULL +static inline void of_genpd_del_provider(struct device_node *np) {} -static inline int genpd_dev_pm_attach(struct device *dev) +static inline int of_genpd_add_device(struct of_phandle_args *args, + struct device *dev) { return -ENODEV; } -#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ -static inline int of_genpd_add_provider_simple(struct device_node *np, - struct generic_pm_domain *genpd) +static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, + struct of_phandle_args *new_subdomain) { - return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd); + return -ENODEV; } -static inline int of_genpd_add_provider_onecell(struct device_node *np, - struct genpd_onecell_data *data) + +static inline int genpd_dev_pm_attach(struct device *dev) +{ + return -ENODEV; +} + +static inline +struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) { - return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data); + return ERR_PTR(-ENOTSUPP); } +#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ 
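With the xlate-callback registration removed above, an OF power-domain driver now calls the typed helpers directly. A hedged sketch of the single-domain case (names are hypothetical and error handling is trimmed):

	static struct generic_pm_domain my_pd = {
		.name = "my_pd",
	};

	static int my_pd_probe(struct platform_device *pdev)
	{
		int ret;

		ret = pm_genpd_init(&my_pd, NULL, false);	/* domain starts powered on */
		if (ret)
			return ret;

		/* replaces __of_genpd_add_provider(np, __of_genpd_xlate_simple, &my_pd) */
		return of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
	}
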
#ifdef CONFIG_PM extern int dev_pm_domain_attach(struct device *dev, bool power_on); diff --git a/include/linux/sched.h b/include/linux/sched.h index 62c68e513e39..98fe95fea30c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -3469,15 +3469,20 @@ static inline unsigned long rlimit_max(unsigned int limit) return task_rlimit_max(current, limit); } +#define SCHED_CPUFREQ_RT (1U << 0) +#define SCHED_CPUFREQ_DL (1U << 1) +#define SCHED_CPUFREQ_IOWAIT (1U << 2) + +#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) + #ifdef CONFIG_CPU_FREQ struct update_util_data { - void (*func)(struct update_util_data *data, - u64 time, unsigned long util, unsigned long max); + void (*func)(struct update_util_data *data, u64 time, unsigned int flags); }; void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, - void (*func)(struct update_util_data *data, u64 time, - unsigned long util, unsigned long max)); + void (*func)(struct update_util_data *data, u64 time, + unsigned int flags)); void cpufreq_remove_update_util_hook(int cpu); #endif /* CONFIG_CPU_FREQ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 7693e39b14fe..d9718378a8be 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -245,6 +245,7 @@ static inline bool idle_should_freeze(void) return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER); } +extern void __init pm_states_init(void); extern void freeze_set_ops(const struct platform_freeze_ops *ops); extern void freeze_wake(void); @@ -279,6 +280,7 @@ static inline bool pm_resume_via_firmware(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } static inline bool idle_should_freeze(void) { return false; } +static inline void __init pm_states_init(void) {} static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} static inline void freeze_wake(void) {} #endif /* !CONFIG_SUSPEND */ diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 19e50300ce7d..54e3aad32806 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -69,7 +69,8 @@ TRACE_EVENT(pstate_sample, u64 mperf, u64 aperf, u64 tsc, - u32 freq + u32 freq, + u32 io_boost ), TP_ARGS(core_busy, @@ -79,7 +80,8 @@ TRACE_EVENT(pstate_sample, mperf, aperf, tsc, - freq + freq, + io_boost ), TP_STRUCT__entry( @@ -91,6 +93,7 @@ TRACE_EVENT(pstate_sample, __field(u64, aperf) __field(u64, tsc) __field(u32, freq) + __field(u32, io_boost) ), TP_fast_assign( @@ -102,9 +105,10 @@ TRACE_EVENT(pstate_sample, __entry->aperf = aperf; __entry->tsc = tsc; __entry->freq = freq; + __entry->io_boost = io_boost; ), - TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ", + TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu io_boost=%lu", (unsigned long)__entry->core_busy, (unsigned long)__entry->scaled_busy, (unsigned long)__entry->from, @@ -112,7 +116,8 @@ TRACE_EVENT(pstate_sample, (unsigned long long)__entry->mperf, (unsigned long long)__entry->aperf, (unsigned long long)__entry->tsc, - (unsigned long)__entry->freq + (unsigned long)__entry->freq, + (unsigned long)__entry->io_boost ) ); diff --git a/kernel/cpu.c b/kernel/cpu.c index 341bf80f80bd..ebbf027dd4a1 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1024,12 +1024,13 @@ EXPORT_SYMBOL_GPL(cpu_up); #ifdef CONFIG_PM_SLEEP_SMP static cpumask_var_t frozen_cpus; -int 
disable_nonboot_cpus(void) +int freeze_secondary_cpus(int primary) { - int cpu, first_cpu, error = 0; + int cpu, error = 0; cpu_maps_update_begin(); - first_cpu = cpumask_first(cpu_online_mask); + if (!cpu_online(primary)) + primary = cpumask_first(cpu_online_mask); /* * We take down all of the non-boot CPUs in one shot to avoid races * with the userspace trying to use the CPU hotplug at the same time @@ -1038,7 +1039,7 @@ int disable_nonboot_cpus(void) pr_info("Disabling non-boot CPUs ...\n"); for_each_online_cpu(cpu) { - if (cpu == first_cpu) + if (cpu == primary) continue; trace_suspend_resume(TPS("CPU_OFF"), cpu, true); error = _cpu_down(cpu, 1, CPUHP_OFFLINE); diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 68d3ebc12601..e8517b63eb37 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -186,7 +186,7 @@ config PM_SLEEP_DEBUG config DPM_WATCHDOG bool "Device suspend/resume watchdog" - depends on PM_DEBUG && PSTORE + depends on PM_DEBUG && PSTORE && EXPERT ---help--- Sets up a watchdog timer to capture drivers that are locked up attempting to suspend/resume a device. @@ -197,7 +197,7 @@ config DPM_WATCHDOG config DPM_WATCHDOG_TIMEOUT int "Watchdog timeout in seconds" range 1 120 - default 60 + default 120 depends on DPM_WATCHDOG config PM_TRACE diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 33c79b6105c5..b26dbc48c75b 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -306,8 +306,10 @@ static int create_image(int platform_mode) if (error) printk(KERN_ERR "PM: Error %d creating hibernation image\n", error); - if (!in_suspend) + if (!in_suspend) { events_check_enabled = false; + clear_free_pages(); + } platform_leave(platform_mode); @@ -1189,22 +1191,6 @@ static int __init nohibernate_setup(char *str) return 1; } -static int __init page_poison_nohibernate_setup(char *str) -{ -#ifdef CONFIG_PAGE_POISONING_ZERO - /* - * The zeroing option for page poison skips the checks on alloc. - * since hibernation doesn't save free pages there's no way to - * guarantee the pages will still be zeroed. 
- */ - if (!strcmp(str, "on")) { - pr_info("Disabling hibernation due to page poisoning\n"); - return nohibernate_setup(str); - } -#endif - return 1; -} - __setup("noresume", noresume_setup); __setup("resume_offset=", resume_offset_setup); __setup("resume=", resume_setup); @@ -1212,4 +1198,3 @@ __setup("hibernate=", hibernate_setup); __setup("resumewait", resumewait_setup); __setup("resumedelay=", resumedelay_setup); __setup("nohibernate", nohibernate_setup); -__setup("page_poison=", page_poison_nohibernate_setup); diff --git a/kernel/power/main.c b/kernel/power/main.c index 5ea50b1b7595..281a697fd458 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -644,6 +644,7 @@ static int __init pm_init(void) return error; hibernate_image_size_init(); hibernate_reserved_size_init(); + pm_states_init(); power_kobj = kobject_create_and_add("power", NULL); if (!power_kobj) return -ENOMEM; diff --git a/kernel/power/power.h b/kernel/power/power.h index 242d8b827dd5..56d1d0dedf76 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -110,6 +110,8 @@ extern int create_basic_memory_bitmaps(void); extern void free_basic_memory_bitmaps(void); extern int hibernate_preallocate_memory(void); +extern void clear_free_pages(void); + /** * Auxiliary structure used for reading the snapshot image data and * metadata from and writing them to the list of page backup entries diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index b02228411d57..4f0f0604f1c4 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1132,6 +1132,28 @@ void free_basic_memory_bitmaps(void) pr_debug("PM: Basic memory bitmaps freed\n"); } +void clear_free_pages(void) +{ +#ifdef CONFIG_PAGE_POISONING_ZERO + struct memory_bitmap *bm = free_pages_map; + unsigned long pfn; + + if (WARN_ON(!(free_pages_map))) + return; + + memory_bm_position_reset(bm); + pfn = memory_bm_next_pfn(bm); + while (pfn != BM_END_OF_MAP) { + if (pfn_valid(pfn)) + clear_highpage(pfn_to_page(pfn)); + + pfn = memory_bm_next_pfn(bm); + } + memory_bm_position_reset(bm); + pr_info("PM: free pages cleared after restore\n"); +#endif /* PAGE_POISONING_ZERO */ +} + /** * snapshot_additional_pages - Estimate the number of extra pages needed. * @zone: Memory zone to carry out the computation for. diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 0acab9d7f96f..1e7f5da648d9 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -118,10 +118,18 @@ static bool valid_state(suspend_state_t state) */ static bool relative_states; +void __init pm_states_init(void) +{ + /* + * freeze state should be supported even without any suspend_ops, + * initialize pm_states accordingly here + */ + pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2]; +} + static int __init sleep_states_setup(char *str) { relative_states = !strncmp(str, "1", 1); - pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 
0 : 2]; return 1; } @@ -211,7 +219,7 @@ static int platform_suspend_begin(suspend_state_t state) { if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) return freeze_ops->begin(); - else if (suspend_ops->begin) + else if (suspend_ops && suspend_ops->begin) return suspend_ops->begin(state); else return 0; @@ -221,7 +229,7 @@ static void platform_resume_end(suspend_state_t state) { if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end) freeze_ops->end(); - else if (suspend_ops->end) + else if (suspend_ops && suspend_ops->end) suspend_ops->end(); } diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c index 1141954e73b4..dbc51442ecbc 100644 --- a/kernel/sched/cpufreq.c +++ b/kernel/sched/cpufreq.c @@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); */ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, void (*func)(struct update_util_data *data, u64 time, - unsigned long util, unsigned long max)) + unsigned int flags)) { if (WARN_ON(!data || !func)) return; diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index a84641b222c1..69e06898997d 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -12,7 +12,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cpufreq.h> -#include <linux/module.h> #include <linux/slab.h> #include <trace/events/power.h> @@ -48,11 +47,14 @@ struct sugov_cpu { struct sugov_policy *sg_policy; unsigned int cached_raw_freq; + unsigned long iowait_boost; + unsigned long iowait_boost_max; + u64 last_update; /* The fields below are only needed when sharing a policy. */ unsigned long util; unsigned long max; - u64 last_update; + unsigned int flags; }; static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu); @@ -144,24 +146,75 @@ static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util, return cpufreq_driver_resolve_freq(policy, freq); } +static void sugov_get_util(unsigned long *util, unsigned long *max) +{ + struct rq *rq = this_rq(); + unsigned long cfs_max; + + cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id()); + + *util = min(rq->cfs.avg.util_avg, cfs_max); + *max = cfs_max; +} + +static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, + unsigned int flags) +{ + if (flags & SCHED_CPUFREQ_IOWAIT) { + sg_cpu->iowait_boost = sg_cpu->iowait_boost_max; + } else if (sg_cpu->iowait_boost) { + s64 delta_ns = time - sg_cpu->last_update; + + /* Clear iowait_boost if the CPU appears to have been idle. */ + if (delta_ns > TICK_NSEC) + sg_cpu->iowait_boost = 0; + } +} + +static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util, + unsigned long *max) +{ + unsigned long boost_util = sg_cpu->iowait_boost; + unsigned long boost_max = sg_cpu->iowait_boost_max; + + if (!boost_util) + return; + + if (*util * boost_max < *max * boost_util) { + *util = boost_util; + *max = boost_max; + } + sg_cpu->iowait_boost >>= 1; +} + static void sugov_update_single(struct update_util_data *hook, u64 time, - unsigned long util, unsigned long max) + unsigned int flags) { struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); struct sugov_policy *sg_policy = sg_cpu->sg_policy; struct cpufreq_policy *policy = sg_policy->policy; + unsigned long util, max; unsigned int next_f; + sugov_set_iowait_boost(sg_cpu, time, flags); + sg_cpu->last_update = time; + if (!sugov_should_update_freq(sg_policy, time)) return; - next_f = util == ULONG_MAX ? 
policy->cpuinfo.max_freq : - get_next_freq(sg_cpu, util, max); + if (flags & SCHED_CPUFREQ_RT_DL) { + next_f = policy->cpuinfo.max_freq; + } else { + sugov_get_util(&util, &max); + sugov_iowait_boost(sg_cpu, &util, &max); + next_f = get_next_freq(sg_cpu, util, max); + } sugov_update_commit(sg_policy, time, next_f); } static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, - unsigned long util, unsigned long max) + unsigned long util, unsigned long max, + unsigned int flags) { struct sugov_policy *sg_policy = sg_cpu->sg_policy; struct cpufreq_policy *policy = sg_policy->policy; @@ -169,9 +222,11 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 last_freq_update_time = sg_policy->last_freq_update_time; unsigned int j; - if (util == ULONG_MAX) + if (flags & SCHED_CPUFREQ_RT_DL) return max_f; + sugov_iowait_boost(sg_cpu, &util, &max); + for_each_cpu(j, policy->cpus) { struct sugov_cpu *j_sg_cpu; unsigned long j_util, j_max; @@ -186,41 +241,50 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, * frequency update and the time elapsed between the last update * of the CPU utilization and the last frequency update is long * enough, don't take the CPU into account as it probably is - * idle now. + * idle now (and clear iowait_boost for it). */ delta_ns = last_freq_update_time - j_sg_cpu->last_update; - if (delta_ns > TICK_NSEC) + if (delta_ns > TICK_NSEC) { + j_sg_cpu->iowait_boost = 0; continue; - - j_util = j_sg_cpu->util; - if (j_util == ULONG_MAX) + } + if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL) return max_f; + j_util = j_sg_cpu->util; j_max = j_sg_cpu->max; if (j_util * max > j_max * util) { util = j_util; max = j_max; } + + sugov_iowait_boost(j_sg_cpu, &util, &max); } return get_next_freq(sg_cpu, util, max); } static void sugov_update_shared(struct update_util_data *hook, u64 time, - unsigned long util, unsigned long max) + unsigned int flags) { struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util); struct sugov_policy *sg_policy = sg_cpu->sg_policy; + unsigned long util, max; unsigned int next_f; + sugov_get_util(&util, &max); + raw_spin_lock(&sg_policy->update_lock); sg_cpu->util = util; sg_cpu->max = max; + sg_cpu->flags = flags; + + sugov_set_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; if (sugov_should_update_freq(sg_policy, time)) { - next_f = sugov_next_freq_shared(sg_cpu, util, max); + next_f = sugov_next_freq_shared(sg_cpu, util, max, flags); sugov_update_commit(sg_policy, time, next_f); } @@ -444,10 +508,13 @@ static int sugov_start(struct cpufreq_policy *policy) sg_cpu->sg_policy = sg_policy; if (policy_is_shared(policy)) { - sg_cpu->util = ULONG_MAX; + sg_cpu->util = 0; sg_cpu->max = 0; + sg_cpu->flags = SCHED_CPUFREQ_RT; sg_cpu->last_update = 0; sg_cpu->cached_raw_freq = 0; + sg_cpu->iowait_boost = 0; + sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, sugov_update_shared); } else { @@ -495,28 +562,15 @@ static struct cpufreq_governor schedutil_gov = { .limits = sugov_limits, }; -static int __init sugov_module_init(void) -{ - return cpufreq_register_governor(&schedutil_gov); -} - -static void __exit sugov_module_exit(void) -{ - cpufreq_unregister_governor(&schedutil_gov); -} - -MODULE_AUTHOR("Rafael J. 
Wysocki <rafael.j.wysocki@intel.com>"); -MODULE_DESCRIPTION("Utilization-based CPU frequency selection"); -MODULE_LICENSE("GPL"); - #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL struct cpufreq_governor *cpufreq_default_governor(void) { return &schedutil_gov; } - -fs_initcall(sugov_module_init); -#else -module_init(sugov_module_init); #endif -module_exit(sugov_module_exit); + +static int __init sugov_register(void) +{ + return cpufreq_register_governor(&schedutil_gov); +} +fs_initcall(sugov_register); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 1ce8867283dc..974779656999 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -735,9 +735,8 @@ static void update_curr_dl(struct rq *rq) return; } - /* kick cpufreq (see the comment in linux/cpufreq.h). */ - if (cpu_of(rq) == smp_processor_id()) - cpufreq_trigger_update(rq_clock(rq)); + /* kick cpufreq (see the comment in kernel/sched/sched.h). */ + cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL); schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec)); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 039de34f1521..a5cd07b25aa1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2875,12 +2875,7 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) { - struct rq *rq = rq_of(cfs_rq); - int cpu = cpu_of(rq); - - if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) { - unsigned long max = rq->cpu_capacity_orig; - + if (&this_rq()->cfs == cfs_rq) { /* * There are a few boundary cases this might miss but it should * get called often enough that that should (hopefully) not be @@ -2897,8 +2892,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) * * See cpu_util(). */ - cpufreq_update_util(rq_clock(rq), - min(cfs_rq->avg.util_avg, max), max); + cpufreq_update_util(rq_of(cfs_rq), 0); } } @@ -3159,10 +3153,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) static inline void update_load_avg(struct sched_entity *se, int not_used) { - struct cfs_rq *cfs_rq = cfs_rq_of(se); - struct rq *rq = rq_of(cfs_rq); - - cpufreq_trigger_update(rq_clock(rq)); + cpufreq_update_util(rq_of(cfs_rq_of(se)), 0); } static inline void @@ -4509,6 +4500,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; + /* + * If in_iowait is set, the code below may not trigger any cpufreq + * utilization updates, so do it here explicitly with the IOWAIT flag + * passed. + */ + if (p->in_iowait) + cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT); + for_each_sched_entity(se) { if (se->on_rq) break; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index d5690b722691..2516b8df6dbb 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -957,9 +957,8 @@ static void update_curr_rt(struct rq *rq) if (unlikely((s64)delta_exec <= 0)) return; - /* Kick cpufreq (see the comment in linux/cpufreq.h). */ - if (cpu_of(rq) == smp_processor_id()) - cpufreq_trigger_update(rq_clock(rq)); + /* Kick cpufreq (see the comment in kernel/sched/sched.h). 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d5690b722691..2516b8df6dbb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -957,9 +957,8 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely((s64)delta_exec <= 0))
 		return;

-	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_trigger_update(rq_clock(rq));
+	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);

 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c64fc5114004..b7fc1ced4380 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1763,27 +1763,13 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

 /**
  * cpufreq_update_util - Take a note about CPU utilization changes.
- * @time: Current time.
- * @util: Current utilization.
- * @max: Utilization ceiling.
+ * @rq: Runqueue to carry out the update for.
+ * @flags: Update reason flags.
  *
- * This function is called by the scheduler on every invocation of
- * update_load_avg() on the CPU whose utilization is being updated.
+ * This function is called by the scheduler on the CPU whose utilization is
+ * being updated.
  *
  * It can only be called from RCU-sched read-side critical sections.
- */
-static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max)
-{
-	struct update_util_data *data;
-
-	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
-	if (data)
-		data->func(data, time, util, max);
-}
-
-/**
- * cpufreq_trigger_update - Trigger CPU performance state evaluation if needed.
- * @time: Current time.
  *
  * The way cpufreq is currently arranged requires it to evaluate the CPU
  * performance state (frequency/voltage) on a regular basis to prevent it from
@@ -1797,13 +1783,23 @@ static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned lo
  * but that really is a band-aid. Going forward it should be replaced with
  * solutions targeted more specifically at RT and DL tasks.
  */
-static inline void cpufreq_trigger_update(u64 time)
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
+{
+	struct update_util_data *data;
+
+	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
+	if (data)
+		data->func(data, rq_clock(rq), flags);
+}
+
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
 {
-	cpufreq_update_util(time, ULONG_MAX, 0);
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_update_util(rq, flags);
 }
 #else
-static inline void cpufreq_update_util(u64 time, unsigned long util, unsigned long max) {}
-static inline void cpufreq_trigger_update(u64 time) {}
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */

 #ifdef arch_scale_freq_capacity
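
With the sched.h change above, a cpufreq governor no longer receives util/max at every callsite; its hook gets (time, flags) and samples utilization itself. A minimal, illustrative consumer of that interface is sketched below; the struct and function names are invented for the sketch, while the registration call is the one used by sugov_start() above.

	/* Illustrative consumer of the (time, flags) update_util interface. */
	struct my_gov_cpu {
		struct update_util_data update_util;	/* embedded hook */
		u64 last_time;
	};

	static void my_gov_update(struct update_util_data *data, u64 time,
				  unsigned int flags)
	{
		struct my_gov_cpu *gc = container_of(data, struct my_gov_cpu, update_util);

		gc->last_time = time;
		if (flags & SCHED_CPUFREQ_RT_DL) {
			/* RT or DL activity: ask for the maximum frequency. */
		}
	}

	/* Per-CPU registration, as in sugov_start() above:
	 *	cpufreq_add_update_util_hook(cpu, &gc->update_util, my_gov_update);
	 */
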
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 22f4cd96acb0..afcc550877ff 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -76,8 +76,6 @@ config PAGE_POISONING_ZERO
 	   no longer necessary to write zeros when GFP_ZERO is used on
 	   allocation.

-	   Enabling page poisoning with this option will disable hibernation
-
 	   If unsure, say N

 	bool
diff --git a/mm/mmap.c b/mm/mmap.c
index ca9d91bca0d6..69cad562cd00 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -88,6 +88,11 @@ static void unmap_region(struct mm_struct *mm,
  * w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
  * x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
  *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE:
+ *								r: (no) no
+ *								w: (no) no
+ *								x: (yes) yes
  */
 pgprot_t protection_map[16] = {
 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index e73a79fce015..bc82596d7354 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -44,7 +44,6 @@
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acapps.h"
-#include <stdio.h>

 #define _COMPONENT          ACPI_TOOLS
 ACPI_MODULE_NAME("cmfsize")
@@ -69,24 +68,24 @@ u32 cm_get_file_size(ACPI_FILE file)

 	/* Save the current file pointer, seek to EOF to obtain file size */

-	current_offset = acpi_os_get_file_offset(file);
+	current_offset = ftell(file);
 	if (current_offset < 0) {
 		goto offset_error;
 	}

-	status = acpi_os_set_file_offset(file, 0, ACPI_FILE_END);
+	status = fseek(file, 0, SEEK_END);
 	if (ACPI_FAILURE(status)) {
 		goto seek_error;
 	}

-	file_size = acpi_os_get_file_offset(file);
+	file_size = ftell(file);
 	if (file_size < 0) {
 		goto offset_error;
 	}

 	/* Restore original file pointer */

-	status = acpi_os_set_file_offset(file, current_offset, ACPI_FILE_BEGIN);
+	status = fseek(file, current_offset, SEEK_SET);
 	if (ACPI_FAILURE(status)) {
 		goto seek_error;
 	}
@@ -94,10 +93,10 @@ u32 cm_get_file_size(ACPI_FILE file)
 	return ((u32)file_size);

 offset_error:
-	acpi_log_error("Could not get file offset");
+	fprintf(stderr, "Could not get file offset\n");
 	return (ACPI_UINT32_MAX);

 seek_error:
-	acpi_log_error("Could not set file offset");
+	fprintf(stderr, "Could not set file offset\n");
 	return (ACPI_UINT32_MAX);
 }
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 0bd343f136a4..3919970f5aea 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -57,7 +57,7 @@
 #include "acapps.h"

 #define ACPI_OPTION_ERROR(msg, badchar) \
-	if (acpi_gbl_opterr) {acpi_log_error ("%s%c\n", msg, badchar);}
+	if (acpi_gbl_opterr) {fprintf (stderr, "%s%c\n", msg, badchar);}

 int acpi_gbl_opterr = 1;
 int acpi_gbl_optind = 1;
@@ -94,7 +94,7 @@ int acpi_getopt_argument(int argc, char **argv)
 		acpi_gbl_optarg =
 		    &argv[acpi_gbl_optind++][(int)(current_char_ptr + 1)];
 	} else if (++acpi_gbl_optind >= argc) {
-		ACPI_OPTION_ERROR("Option requires an argument: -", 'v');
+		ACPI_OPTION_ERROR("\nOption requires an argument", 0);
 		current_char_ptr = 1;
 		return (-1);
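
The cm_get_file_size() conversion above swaps the ACPICA OSL wrappers for their direct stdio equivalents. Stripped of the ACPICA status macros, the underlying idiom is the portable save/seek/restore sequence; a self-contained version for reference (stream_size is an illustrative name, not part of the patch):

	#include <stdio.h>

	/* Save the position, seek to EOF to learn the size, then restore. */
	static long stream_size(FILE *file)
	{
		long current_offset, file_size;

		current_offset = ftell(file);
		if (current_offset < 0)
			return -1;
		if (fseek(file, 0, SEEK_END))
			return -1;
		file_size = ftell(file);
		/* Restore the original position even if ftell() failed. */
		if (fseek(file, current_offset, SEEK_SET))
			return -1;
		return file_size;
	}
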
diff --git a/tools/power/acpi/os_specific/service_layers/oslibcfs.c b/tools/power/acpi/os_specific/service_layers/oslibcfs.c
deleted file mode 100644
index 11f4aba55aab..000000000000
--- a/tools/power/acpi/os_specific/service_layers/oslibcfs.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/******************************************************************************
- *
- * Module Name: oslibcfs - C library OSL for file I/O
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2016, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#include <acpi/acpi.h>
-#include <stdio.h>
-#include <stdarg.h>
-
-#define _COMPONENT          ACPI_OS_SERVICES
-ACPI_MODULE_NAME("oslibcfs")
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_open_file
- *
- * PARAMETERS:  path                - File path
- *              modes               - File operation type
- *
- * RETURN:      File descriptor.
- *
- * DESCRIPTION: Open a file for reading (ACPI_FILE_READING) or/and writing
- *              (ACPI_FILE_WRITING).
- *
- ******************************************************************************/
-ACPI_FILE acpi_os_open_file(const char *path, u8 modes)
-{
-	ACPI_FILE file;
-	u32 i = 0;
-	char modes_str[4];
-
-	if (modes & ACPI_FILE_READING) {
-		modes_str[i++] = 'r';
-	}
-	if (modes & ACPI_FILE_WRITING) {
-		modes_str[i++] = 'w';
-	}
-
-	if (modes & ACPI_FILE_BINARY) {
-		modes_str[i++] = 'b';
-	}
-
-	modes_str[i++] = '\0';
-
-	file = fopen(path, modes_str);
-	if (!file) {
-		perror("Could not open file");
-	}
-
-	return (file);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_close_file
- *
- * PARAMETERS:  file                - An open file descriptor
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Close a file opened via acpi_os_open_file.
- *
- ******************************************************************************/
-
-void acpi_os_close_file(ACPI_FILE file)
-{
-
-	fclose(file);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_read_file
- *
- * PARAMETERS:  file                - An open file descriptor
- *              buffer              - Data buffer
- *              size                - Data block size
- *              count               - Number of data blocks
- *
- * RETURN:      Number of bytes actually read.
- *
- * DESCRIPTION: Read from a file.
- *
- ******************************************************************************/
-
-int
-acpi_os_read_file(ACPI_FILE file, void *buffer, acpi_size size, acpi_size count)
-{
-	int length;
-
-	length = fread(buffer, size, count, file);
-	if (length < 0) {
-		perror("Error reading file");
-	}
-
-	return (length);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_write_file
- *
- * PARAMETERS:  file                - An open file descriptor
- *              buffer              - Data buffer
- *              size                - Data block size
- *              count               - Number of data blocks
- *
- * RETURN:      Number of bytes actually written.
- *
- * DESCRIPTION: Write to a file.
- *
- ******************************************************************************/
-
-int
-acpi_os_write_file(ACPI_FILE file,
-		   void *buffer, acpi_size size, acpi_size count)
-{
-	int length;
-
-	length = fwrite(buffer, size, count, file);
-	if (length < 0) {
-		perror("Error writing file");
-	}
-
-	return (length);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_get_file_offset
- *
- * PARAMETERS:  file                - An open file descriptor
- *
- * RETURN:      Current file pointer position.
- *
- * DESCRIPTION: Get current file offset.
- *
- ******************************************************************************/
-
-long acpi_os_get_file_offset(ACPI_FILE file)
-{
-	long offset;
-
-	offset = ftell(file);
-	return (offset);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_os_set_file_offset
- *
- * PARAMETERS:  file                - An open file descriptor
- *              offset              - New file offset
- *              from                - From begin/end of file
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set current file offset.
- *
- ******************************************************************************/
-
-acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from)
-{
-	int ret = 0;
-
-	if (from == ACPI_FILE_BEGIN) {
-		ret = fseek(file, offset, SEEK_SET);
-	}
-
-	if (from == ACPI_FILE_END) {
-		ret = fseek(file, offset, SEEK_END);
-	}
-
-	if (ret < 0) {
-		return (AE_ERROR);
-	} else {
-		return (AE_OK);
-	}
-}
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 88aa66ef4ad5..8d8003c919d4 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -63,10 +63,7 @@
 #define _COMPONENT          ACPI_OS_SERVICES
 ACPI_MODULE_NAME("osunixxf")

-u8 acpi_gbl_debug_timeout = FALSE;
-
 /* Upcalls to acpi_exec */
-
 void
 ae_table_override(struct acpi_table_header *existing_table,
 		  struct acpi_table_header **new_table);
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
index 2942cdced2ad..04b5db7c7c0b 100644
--- a/tools/power/acpi/tools/acpidump/Makefile
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -36,12 +36,13 @@ TOOL_OBJS = \
 	utdebug.o\
 	utexcep.o\
 	utglobal.o\
+	uthex.o\
 	utmath.o\
 	utnonansi.o\
 	utprint.o\
 	utstring.o\
+	utstrtoul64.o\
 	utxferror.o\
-	oslibcfs.o\
 	oslinuxtbl.o\
 	cmfsize.o\
 	getopt.o
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index 025c232e920d..00423fc45e7c 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -55,11 +55,7 @@
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "actables.h"
-
-#include <stdio.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <sys/stat.h>
+#include "acapps.h"

 /* Globals */

@@ -72,12 +68,6 @@ EXTERN ACPI_FILE INIT_GLOBAL(gbl_output_file, NULL);
 EXTERN char INIT_GLOBAL(*gbl_output_filename, NULL);
 EXTERN u64 INIT_GLOBAL(gbl_rsdp_base, 0);

-/* Globals required for use with ACPICA modules */
-
-#ifdef _DECLARE_GLOBALS
-u8 acpi_gbl_integer_byte_width = 8;
-#endif
-
 /* Action table used to defer requested options */

 struct ap_dump_action {
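
With oslibcfs.c deleted above, every former OSL file call in the acpidump sources maps one-to-one onto plain stdio, which is exactly the substitution the remaining hunks perform:

	acpi_os_open_file(path, ACPI_FILE_READING)		-> fopen(path, "r")
	acpi_os_open_file(path, ACPI_FILE_WRITING)		-> fopen(path, "w")
	acpi_os_open_file(path, ... | ACPI_FILE_BINARY)		-> fopen(path, "rb") / fopen(path, "wb")
	acpi_os_read_file(file, buf, size, count)		-> fread(buf, size, count, file)
	acpi_os_write_file(file, buf, size, count)		-> fwrite(buf, size, count, file)
	acpi_os_get_file_offset(file)				-> ftell(file)
	acpi_os_set_file_offset(file, off, ACPI_FILE_BEGIN)	-> fseek(file, off, SEEK_SET)
	acpi_os_set_file_offset(file, off, ACPI_FILE_END)	-> fseek(file, off, SEEK_END)
	acpi_os_close_file(file)				-> fclose(file)
	acpi_log_error(...)					-> fprintf(stderr, ...)
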
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index fb8f1d9e3b1b..9031be1afe63 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -69,16 +69,17 @@ u8 ap_is_valid_header(struct acpi_table_header *table)

 		/* Make sure signature is all ASCII and a valid ACPI name */

 		if (!acpi_ut_valid_nameseg(table->signature)) {
-			acpi_log_error("Table signature (0x%8.8X) is invalid\n",
-				       *(u32 *)table->signature);
+			fprintf(stderr,
+				"Table signature (0x%8.8X) is invalid\n",
+				*(u32 *)table->signature);
 			return (FALSE);
 		}

 		/* Check for minimum table length */

 		if (table->length < sizeof(struct acpi_table_header)) {
-			acpi_log_error("Table length (0x%8.8X) is invalid\n",
-				       table->length);
+			fprintf(stderr, "Table length (0x%8.8X) is invalid\n",
+				table->length);
 			return (FALSE);
 		}
 	}
@@ -115,8 +116,8 @@ u8 ap_is_valid_checksum(struct acpi_table_header *table)
 	}

 	if (ACPI_FAILURE(status)) {
-		acpi_log_error("%4.4s: Warning: wrong checksum in table\n",
-			       table->signature);
+		fprintf(stderr, "%4.4s: Warning: wrong checksum in table\n",
+			table->signature);
 	}

 	return (AE_OK);
@@ -195,13 +196,13 @@ ap_dump_table_buffer(struct acpi_table_header *table,
 	 * Note: simplest to just always emit a 64-bit address. acpi_xtract
 	 * utility can handle this.
 	 */
-	acpi_ut_file_printf(gbl_output_file, "%4.4s @ 0x%8.8X%8.8X\n",
-			    table->signature, ACPI_FORMAT_UINT64(address));
+	fprintf(gbl_output_file, "%4.4s @ 0x%8.8X%8.8X\n",
+		table->signature, ACPI_FORMAT_UINT64(address));

 	acpi_ut_dump_buffer_to_file(gbl_output_file,
 				    ACPI_CAST_PTR(u8, table), table_length,
 				    DB_BYTE_DISPLAY, 0);
-	acpi_ut_file_printf(gbl_output_file, "\n");
+	fprintf(gbl_output_file, "\n");
 	return (0);
 }
@@ -239,14 +240,14 @@ int ap_dump_all_tables(void)
 			if (status == AE_LIMIT) {
 				return (0);
 			} else if (i == 0) {
-				acpi_log_error
-				    ("Could not get ACPI tables, %s\n",
-				     acpi_format_exception(status));
+				fprintf(stderr,
+					"Could not get ACPI tables, %s\n",
+					acpi_format_exception(status));
 				return (-1);
 			} else {
-				acpi_log_error
-				    ("Could not get ACPI table at index %u, %s\n",
-				     i, acpi_format_exception(status));
+				fprintf(stderr,
+					"Could not get ACPI table at index %u, %s\n",
+					i, acpi_format_exception(status));
 				continue;
 			}
 		}
@@ -286,20 +287,20 @@ int ap_dump_table_by_address(char *ascii_address)

 	/* Convert argument to an integer physical address */

-	status = acpi_ut_strtoul64(ascii_address, ACPI_ANY_BASE,
-				   ACPI_MAX64_BYTE_WIDTH, &long_address);
+	status = acpi_ut_strtoul64(ascii_address, ACPI_STRTOUL_64BIT,
+				   &long_address);
 	if (ACPI_FAILURE(status)) {
-		acpi_log_error("%s: Could not convert to a physical address\n",
-			       ascii_address);
+		fprintf(stderr, "%s: Could not convert to a physical address\n",
+			ascii_address);
 		return (-1);
 	}

 	address = (acpi_physical_address)long_address;
 	status = acpi_os_get_table_by_address(address, &table);
 	if (ACPI_FAILURE(status)) {
-		acpi_log_error("Could not get table at 0x%8.8X%8.8X, %s\n",
-			       ACPI_FORMAT_UINT64(address),
-			       acpi_format_exception(status));
+		fprintf(stderr, "Could not get table at 0x%8.8X%8.8X, %s\n",
+			ACPI_FORMAT_UINT64(address),
+			acpi_format_exception(status));
 		return (-1);
 	}

@@ -331,9 +332,9 @@ int ap_dump_table_by_name(char *signature)
 	int table_status;

 	if (strlen(signature) != ACPI_NAME_SIZE) {
-		acpi_log_error
-		    ("Invalid table signature [%s]: must be exactly 4 characters\n",
-		     signature);
+		fprintf(stderr,
+			"Invalid table signature [%s]: must be exactly 4 characters\n",
+			signature);
 		return (-1);
 	}

@@ -363,9 +364,9 @@ int ap_dump_table_by_name(char *signature)
 			return (0);
 		}

-		acpi_log_error
-		    ("Could not get ACPI table with signature [%s], %s\n",
-		     local_signature, acpi_format_exception(status));
+		fprintf(stderr,
+			"Could not get ACPI table with signature [%s], %s\n",
+			local_signature, acpi_format_exception(status));
 		return (-1);
 	}

@@ -408,24 +409,24 @@ int ap_dump_table_from_file(char *pathname)
 	}

 	if (!acpi_ut_valid_nameseg(table->signature)) {
-		acpi_log_error
-		    ("No valid ACPI signature was found in input file %s\n",
-		     pathname);
+		fprintf(stderr,
+			"No valid ACPI signature was found in input file %s\n",
+			pathname);
 	}

 	/* File must be at least as long as the table length */

 	if (table->length > file_size) {
-		acpi_log_error
-		    ("Table length (0x%X) is too large for input file (0x%X) %s\n",
-		     table->length, file_size, pathname);
+		fprintf(stderr,
+			"Table length (0x%X) is too large for input file (0x%X) %s\n",
+			table->length, file_size, pathname);
 		goto exit;
 	}

 	if (gbl_verbose_mode) {
-		acpi_log_error
-		    ("Input file: %s contains table [%4.4s], 0x%X (%u) bytes\n",
-		     pathname, table->signature, file_size, file_size);
+		fprintf(stderr,
+			"Input file: %s contains table [%4.4s], 0x%X (%u) bytes\n",
+			pathname, table->signature, file_size, file_size);
 	}

 	table_status = ap_dump_table_buffer(table, 0, 0);
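
apdump.c above now passes the single ACPI_STRTOUL_64BIT flag to acpi_ut_strtoul64() instead of the old base/width pair. Outside ACPICA, the closest libc analogue for accepting a decimal or 0x-prefixed address string is strtoull() with base 0; a sketch of that fallback (parse_address is an invented name, not part of the patch):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Rough libc analogue of the address parsing done via
	 * acpi_ut_strtoul64(): base 0 auto-detects a "0x" prefix, and the
	 * errno/end-pointer checks reject trailing junk and overflow. */
	static int parse_address(const char *s, unsigned long long *out)
	{
		char *end;

		errno = 0;
		*out = strtoull(s, &end, 0);
		if (errno || end == s || *end != '\0') {
			fprintf(stderr, "%s: Could not convert to a physical address\n", s);
			return -1;
		}
		return 0;
	}
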
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index 5fcd9700ac18..dd5b861dc4a8 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -42,7 +42,6 @@
  */

 #include "acpidump.h"
-#include "acapps.h"

 /* Local prototypes */

@@ -66,7 +65,8 @@ static int ap_is_existing_file(char *pathname)
 	struct stat stat_info;

 	if (!stat(pathname, &stat_info)) {
-		acpi_log_error("Target path already exists, overwrite? [y|n] ");
+		fprintf(stderr,
+			"Target path already exists, overwrite? [y|n] ");

 		if (getchar() != 'y') {
 			return (-1);
@@ -102,9 +102,9 @@ int ap_open_output_file(char *pathname)

 	/* Point stdout to the file */

-	file = acpi_os_open_file(pathname, ACPI_FILE_WRITING);
+	file = fopen(pathname, "w");
 	if (!file) {
-		acpi_log_error("Could not open output file: %s\n", pathname);
+		fprintf(stderr, "Could not open output file: %s\n", pathname);
 		return (-1);
 	}

@@ -134,7 +134,7 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
 	char filename[ACPI_NAME_SIZE + 16];
 	char instance_str[16];
 	ACPI_FILE file;
-	size_t actual;
+	acpi_size actual;
 	u32 table_length;

 	/* Obtain table length */

@@ -158,37 +158,36 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
 	/* Handle multiple SSDts - create different filenames for each */

 	if (instance > 0) {
-		acpi_ut_snprintf(instance_str, sizeof(instance_str), "%u",
-				 instance);
+		snprintf(instance_str, sizeof(instance_str), "%u", instance);
 		strcat(filename, instance_str);
 	}

 	strcat(filename, FILE_SUFFIX_BINARY_TABLE);

 	if (gbl_verbose_mode) {
-		acpi_log_error
-		    ("Writing [%4.4s] to binary file: %s 0x%X (%u) bytes\n",
-		     table->signature, filename, table->length, table->length);
+		fprintf(stderr,
+			"Writing [%4.4s] to binary file: %s 0x%X (%u) bytes\n",
+			table->signature, filename, table->length,
+			table->length);
 	}

 	/* Open the file and dump the entire table in binary mode */

-	file = acpi_os_open_file(filename,
-				 ACPI_FILE_WRITING | ACPI_FILE_BINARY);
+	file = fopen(filename, "wb");
 	if (!file) {
-		acpi_log_error("Could not open output file: %s\n", filename);
+		fprintf(stderr, "Could not open output file: %s\n", filename);
 		return (-1);
 	}

-	actual = acpi_os_write_file(file, table, 1, table_length);
+	actual = fwrite(table, 1, table_length, file);
 	if (actual != table_length) {
-		acpi_log_error("Error writing binary output file: %s\n",
-			       filename);
-		acpi_os_close_file(file);
+		fprintf(stderr, "Error writing binary output file: %s\n",
+			filename);
+		fclose(file);
 		return (-1);
 	}

-	acpi_os_close_file(file);
+	fclose(file);
 	return (0);
 }
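
After this conversion, ap_write_to_binary_file() above reduces to the standard open-write-verify stdio sequence. The essentials in a self-contained form (write_blob is an illustrative name, not part of the patch):

	#include <stdio.h>

	/* fwrite() returns the number of items written; writing `len`
	 * one-byte items makes the return value directly comparable to
	 * the intended byte count. */
	static int write_blob(const char *filename, const void *data, size_t len)
	{
		FILE *file = fopen(filename, "wb");
		size_t actual;

		if (!file) {
			fprintf(stderr, "Could not open output file: %s\n", filename);
			return -1;
		}
		actual = fwrite(data, 1, len, file);
		fclose(file);
		return actual == len ? 0 : -1;
	}
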
@@ -211,14 +210,13 @@ struct acpi_table_header *ap_get_table_from_file(char *pathname,
 	struct acpi_table_header *buffer = NULL;
 	ACPI_FILE file;
 	u32 file_size;
-	size_t actual;
+	acpi_size actual;

 	/* Must use binary mode */

-	file =
-	    acpi_os_open_file(pathname, ACPI_FILE_READING | ACPI_FILE_BINARY);
+	file = fopen(pathname, "rb");
 	if (!file) {
-		acpi_log_error("Could not open input file: %s\n", pathname);
+		fprintf(stderr, "Could not open input file: %s\n", pathname);
 		return (NULL);
 	}

@@ -226,7 +224,8 @@ struct acpi_table_header *ap_get_table_from_file(char *pathname,
 	file_size = cm_get_file_size(file);
 	if (file_size == ACPI_UINT32_MAX) {
-		acpi_log_error("Could not get input file size: %s\n", pathname);
+		fprintf(stderr,
+			"Could not get input file size: %s\n", pathname);
 		goto cleanup;
 	}

 	buffer = ACPI_ALLOCATE_ZEROED(file_size);
 	if (!buffer) {
-		acpi_log_error("Could not allocate file buffer of size: %u\n",
-			       file_size);
+		fprintf(stderr,
+			"Could not allocate file buffer of size: %u\n",
+			file_size);
 		goto cleanup;
 	}

 	/* Read the entire file */

-	actual = acpi_os_read_file(file, buffer, 1, file_size);
+	actual = fread(buffer, 1, file_size, file);
 	if (actual != file_size) {
-		acpi_log_error("Could not read input file: %s\n", pathname);
+		fprintf(stderr, "Could not read input file: %s\n", pathname);
 		ACPI_FREE(buffer);
 		buffer = NULL;
 		goto cleanup;
@@ -252,6 +252,6 @@ struct acpi_table_header *ap_get_table_from_file(char *pathname,
 	*out_file_size = file_size;

 cleanup:
-	acpi_os_close_file(file);
+	fclose(file);
 	return (buffer);
 }
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 7692e6b887e1..7ff46be908f0 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -43,7 +43,6 @@

 #define _DECLARE_GLOBALS
 #include "acpidump.h"
-#include "acapps.h"

 /*
  * acpidump - A portable utility for obtaining system ACPI tables and dumping
@@ -140,8 +139,8 @@ static int ap_insert_action(char *argument, u32 to_be_done)

 	current_action++;
 	if (current_action > AP_MAX_ACTIONS) {
-		acpi_log_error("Too many table options (max %u)\n",
-			       AP_MAX_ACTIONS);
+		fprintf(stderr, "Too many table options (max %u)\n",
+			AP_MAX_ACTIONS);
 		return (-1);
 	}

@@ -186,9 +185,9 @@ static int ap_do_options(int argc, char **argv)
 			} else if (!strcmp(acpi_gbl_optarg, "off")) {
 				gbl_dump_customized_tables = FALSE;
 			} else {
-				acpi_log_error
-				    ("%s: Cannot handle this switch, please use on|off\n",
-				     acpi_gbl_optarg);
+				fprintf(stderr,
+					"%s: Cannot handle this switch, please use on|off\n",
+					acpi_gbl_optarg);
 				return (-1);
 			}
 			continue;
@@ -209,13 +208,13 @@ static int ap_do_options(int argc, char **argv)
 		case 'r':	/* Dump tables from specified RSDP */

 			status =
-			    acpi_ut_strtoul64(acpi_gbl_optarg, ACPI_ANY_BASE,
-					      ACPI_MAX64_BYTE_WIDTH,
+			    acpi_ut_strtoul64(acpi_gbl_optarg,
+					      ACPI_STRTOUL_64BIT,
 					      &gbl_rsdp_base);
 			if (ACPI_FAILURE(status)) {
-				acpi_log_error
-				    ("%s: Could not convert to a physical address\n",
-				     acpi_gbl_optarg);
+				fprintf(stderr,
+					"%s: Could not convert to a physical address\n",
+					acpi_gbl_optarg);
 				return (-1);
 			}
 			continue;
@@ -242,7 +241,7 @@ static int ap_do_options(int argc, char **argv)
 		case 'z':	/* Verbose mode */

 			gbl_verbose_mode = TRUE;
-			acpi_log_error(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+			fprintf(stderr, ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
 			continue;

 		/*
@@ -315,6 +314,7 @@ int ACPI_SYSTEM_XFACE acpi_main(int argc, char *argv[])
 	ACPI_DEBUG_INITIALIZE();	/* For debug version only */
 	acpi_os_initialize();
 	gbl_output_file = ACPI_FILE_OUT;
+	acpi_gbl_integer_byte_width = 8;

 	/* Process command line options */

@@ -353,8 +353,9 @@ int ACPI_SYSTEM_XFACE acpi_main(int argc, char *argv[])

 		default:

-			acpi_log_error("Internal error, invalid action: 0x%X\n",
-				       action->to_be_done);
+			fprintf(stderr,
+				"Internal error, invalid action: 0x%X\n",
+				action->to_be_done);
 			return (-1);
 		}

@@ -369,12 +370,12 @@ int ACPI_SYSTEM_XFACE acpi_main(int argc, char *argv[])
 			/* Summary for the output file */

 			file_size = cm_get_file_size(gbl_output_file);
-			acpi_log_error
-			    ("Output file %s contains 0x%X (%u) bytes\n\n",
-			     gbl_output_filename, file_size, file_size);
+			fprintf(stderr,
+				"Output file %s contains 0x%X (%u) bytes\n\n",
+				gbl_output_filename, file_size, file_size);
 		}

-		acpi_os_close_file(gbl_output_file);
+		fclose(gbl_output_file);
 	}

 	return (status);
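
ap_get_table_from_file() above is the read-side mirror of the same conversion: open in binary mode, size the stream, allocate, and fread() the whole file. A self-contained sketch using calloc() in place of ACPI_ALLOCATE_ZEROED() (read_blob is an illustrative name, not part of the patch):

	#include <stdio.h>
	#include <stdlib.h>

	/* Read an entire file into a freshly allocated, zeroed buffer. */
	static void *read_blob(const char *pathname, long *out_size)
	{
		FILE *file = fopen(pathname, "rb");
		void *buffer = NULL;
		long size = -1;

		if (!file) {
			fprintf(stderr, "Could not open input file: %s\n", pathname);
			return NULL;
		}
		if (fseek(file, 0, SEEK_END) || (size = ftell(file)) < 0)
			goto cleanup;
		rewind(file);

		buffer = calloc(1, size);
		if (buffer && fread(buffer, 1, size, file) != (size_t)size) {
			fprintf(stderr, "Could not read input file: %s\n", pathname);
			free(buffer);
			buffer = NULL;
		}
		if (buffer)
			*out_size = size;
	cleanup:
		fclose(file);
		return buffer;
	}
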