From a9d887dc1c60ed67f2271d66560cdcf864c4a578 Mon Sep 17 00:00:00 2001 From: Guru Das Srinagesh Date: Tue, 2 Jun 2020 15:31:16 -0700 Subject: pwm: Convert period and duty cycle to u64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Because period and duty cycle are defined as ints with units of nanoseconds, the maximum time duration that can be set is limited to ~2.147 seconds. Change their definitions to u64 in the structs of the PWM framework so that higher durations may be set. Also use the right format specifiers in debug prints in core.c and pwm-stm32-lp.c, as well as in video/fbdev/ssd1307fb.c. Reported-by: kbuild test robot Signed-off-by: Guru Das Srinagesh Acked-by: Uwe Kleine-König Signed-off-by: Thierry Reding --- include/linux/pwm.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 2635b2a55090..a13ff383fa1d 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -39,7 +39,7 @@ enum pwm_polarity { * current PWM hardware state. */ struct pwm_args { - unsigned int period; + u64 period; enum pwm_polarity polarity; }; @@ -56,8 +56,8 @@ enum { * @enabled: PWM enabled status */ struct pwm_state { - unsigned int period; - unsigned int duty_cycle; + u64 period; + u64 duty_cycle; enum pwm_polarity polarity; bool enabled; }; @@ -107,13 +107,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm) return state.enabled; } -static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) +static inline void pwm_set_period(struct pwm_device *pwm, u64 period) { if (pwm) pwm->state.period = period; } -static inline unsigned int pwm_get_period(const struct pwm_device *pwm) +static inline u64 pwm_get_period(const struct pwm_device *pwm) { struct pwm_state state; @@ -128,7 +128,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) pwm->state.duty_cycle = duty; } -static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) +static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm) { struct pwm_state state; -- cgit v1.2.3 From e0bcc58d876c6ece9720310509a908b7637e37cf Mon Sep 17 00:00:00 2001 From: Benjamin Gaignard Date: Wed, 3 Jun 2020 14:54:36 +0200 Subject: mfd: stm32: Add defines to be used for clkevent purpose Add defines to be able to enable/clear the IRQ and configure one-shot mode.
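As a rough usage sketch only (illustrative, not part of this change), a clockevent driver could drive the new bits as follows; the priv->regmap handle is an assumption, and ARR programming plus interrupt wiring are omitted:

	/* Arm the autoreload-match interrupt while the timer is still disabled */
	regmap_write(priv->regmap, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);

	/* Enable the timer, then start it in one-shot mode */
	regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
			   STM32_LPTIM_ENABLE, STM32_LPTIM_ENABLE);
	regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
			   STM32_LPTIM_SNGSTRT, STM32_LPTIM_SNGSTRT);

	/* In the interrupt handler, acknowledge the autoreload match */
	regmap_write(priv->regmap, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF);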
Signed-off-by: Benjamin Gaignard Signed-off-by: Lee Jones --- include/linux/mfd/stm32-lptimer.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h index 605f62264825..90b20550c1c8 100644 --- a/include/linux/mfd/stm32-lptimer.h +++ b/include/linux/mfd/stm32-lptimer.h @@ -27,10 +27,15 @@ #define STM32_LPTIM_CMPOK BIT(3) /* STM32_LPTIM_ICR - bit fields */ +#define STM32_LPTIM_ARRMCF BIT(1) #define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3) +/* STM32_LPTIM_IER - bit fields */ +#define STM32_LPTIM_ARRMIE BIT(1) + /* STM32_LPTIM_CR - bit fields */ #define STM32_LPTIM_CNTSTRT BIT(2) +#define STM32_LPTIM_SNGSTRT BIT(1) #define STM32_LPTIM_ENABLE BIT(0) /* STM32_LPTIM_CFGR - bit fields */ -- cgit v1.2.3 From 7f8a137f736f7366820c485c5a0d34d65b9d0125 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Mon, 15 Jun 2020 14:53:22 +0100 Subject: mfd: madera: Remove unused forward declaration of madera_codec_pdata This forward declaration is redundant since the header that contains the full structure definition is already included. Signed-off-by: Charles Keepax Signed-off-by: Lee Jones --- include/linux/mfd/madera/pdata.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index fa9595dd42ba..601cbbc10370 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -21,7 +21,6 @@ struct gpio_desc; struct pinctrl_map; -struct madera_codec_pdata; /** * struct madera_pdata - Configuration data for Madera devices -- cgit v1.2.3 From 6c27219e34911601955b72c754adfc11c527ba7b Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Mon, 8 Jun 2020 11:17:36 +0200 Subject: mfd: Add support for the Khadas System control Microcontroller This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and Edge boards. It has multiple boot control features like password check, power-on options, power-off control and system FAN control on recent boards. This implements a very basic MFD driver with the fan control and User NVMEM cells. Signed-off-by: Neil Armstrong Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 21 ++++++ drivers/mfd/Makefile | 1 + drivers/mfd/khadas-mcu.c | 142 +++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/khadas-mcu.h | 91 ++++++++++++++++++++++++++ 4 files changed, 255 insertions(+) create mode 100644 drivers/mfd/khadas-mcu.c create mode 100644 include/linux/mfd/khadas-mcu.h (limited to 'include/linux') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index a37d7d171382..d13bb0abfd6f 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -2053,6 +2053,27 @@ config MFD_WCD934X This driver provides common support for the WCD934x audio codec and its associated Pin Controller, Soundwire Controller and Audio codec. +config MFD_KHADAS_MCU + tristate "Support for Khadas System control Microcontroller" + depends on I2C + depends on ARCH_MESON || ARCH_ROCKCHIP || COMPILE_TEST + select MFD_CORE + select REGMAP_I2C + help + Support for the Khadas System control Microcontroller interface + present on their VIM and Edge boards. + + This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and + Edge boards. + + It provides multiple boot control features like password check, + power-on options, power-off control and system FAN control on recent + boards.
+ + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the functionality + of the device. + menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 9367a92f795a..1c8d6be3347d 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -262,5 +262,6 @@ obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o obj-$(CONFIG_MFD_STMFX) += stmfx.o +obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c new file mode 100644 index 000000000000..44d5bb462dab --- /dev/null +++ b/drivers/mfd/khadas-mcu.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Khadas System control Microcontroller + * + * Copyright (C) 2020 BayLibre SAS + * + * Author(s): Neil Armstrong + */ +#include +#include +#include +#include +#include +#include + +static bool khadas_mcu_reg_volatile(struct device *dev, unsigned int reg) +{ + if (reg >= KHADAS_MCU_USER_DATA_0_REG && + reg < KHADAS_MCU_PWR_OFF_CMD_REG) + return true; + + switch (reg) { + case KHADAS_MCU_PWR_OFF_CMD_REG: + case KHADAS_MCU_PASSWD_START_REG: + case KHADAS_MCU_CHECK_VEN_PASSWD_REG: + case KHADAS_MCU_CHECK_USER_PASSWD_REG: + case KHADAS_MCU_WOL_INIT_START_REG: + case KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG: + return true; + default: + return false; + } +} + +static bool khadas_mcu_reg_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case KHADAS_MCU_PASSWD_VEN_0_REG: + case KHADAS_MCU_PASSWD_VEN_1_REG: + case KHADAS_MCU_PASSWD_VEN_2_REG: + case KHADAS_MCU_PASSWD_VEN_3_REG: + case KHADAS_MCU_PASSWD_VEN_4_REG: + case KHADAS_MCU_PASSWD_VEN_5_REG: + case KHADAS_MCU_MAC_0_REG: + case KHADAS_MCU_MAC_1_REG: + case KHADAS_MCU_MAC_2_REG: + case KHADAS_MCU_MAC_3_REG: + case KHADAS_MCU_MAC_4_REG: + case KHADAS_MCU_MAC_5_REG: + case KHADAS_MCU_USID_0_REG: + case KHADAS_MCU_USID_1_REG: + case KHADAS_MCU_USID_2_REG: + case KHADAS_MCU_USID_3_REG: + case KHADAS_MCU_USID_4_REG: + case KHADAS_MCU_USID_5_REG: + case KHADAS_MCU_VERSION_0_REG: + case KHADAS_MCU_VERSION_1_REG: + case KHADAS_MCU_DEVICE_NO_0_REG: + case KHADAS_MCU_DEVICE_NO_1_REG: + case KHADAS_MCU_FACTORY_TEST_REG: + case KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG: + return false; + default: + return true; + } +} + +static const struct regmap_config khadas_mcu_regmap_config = { + .reg_bits = 8, + .reg_stride = 1, + .val_bits = 8, + .max_register = KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG, + .volatile_reg = khadas_mcu_reg_volatile, + .writeable_reg = khadas_mcu_reg_writeable, + .cache_type = REGCACHE_RBTREE, +}; + +static struct mfd_cell khadas_mcu_fan_cells[] = { + /* VIM1/2 Rev13+ and VIM3 only */ + { .name = "khadas-mcu-fan-ctrl", }, +}; + +static struct mfd_cell khadas_mcu_cells[] = { + { .name = "khadas-mcu-user-mem", }, +}; + +static int khadas_mcu_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct khadas_mcu *ddata; + int ret; + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + i2c_set_clientdata(client, ddata); + + ddata->dev = dev; + + ddata->regmap = devm_regmap_init_i2c(client, &khadas_mcu_regmap_config); + if (IS_ERR(ddata->regmap)) { + ret = PTR_ERR(ddata->regmap); + dev_err(dev, "Failed to allocate register map: %d\n", ret); + return ret; + } + + ret 
= devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + khadas_mcu_cells, + ARRAY_SIZE(khadas_mcu_cells), + NULL, 0, NULL); + if (ret) + return ret; + + if (of_find_property(dev->of_node, "#cooling-cells", NULL)) + return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + khadas_mcu_fan_cells, + ARRAY_SIZE(khadas_mcu_fan_cells), + NULL, 0, NULL); + + return 0; +} + +static const struct of_device_id khadas_mcu_of_match[] = { + { .compatible = "khadas,mcu", }, + {}, +}; +MODULE_DEVICE_TABLE(of, khadas_mcu_of_match); + +static struct i2c_driver khadas_mcu_driver = { + .driver = { + .name = "khadas-mcu-core", + .of_match_table = of_match_ptr(khadas_mcu_of_match), + }, + .probe = khadas_mcu_probe, +}; +module_i2c_driver(khadas_mcu_driver); + +MODULE_DESCRIPTION("Khadas MCU core driver"); +MODULE_AUTHOR("Neil Armstrong "); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h new file mode 100644 index 000000000000..a99ba2ed0e4e --- /dev/null +++ b/include/linux/mfd/khadas-mcu.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Khadas System control Microcontroller Register map + * + * Copyright (C) 2020 BayLibre SAS + * + * Author(s): Neil Armstrong + */ + +#ifndef MFD_KHADAS_MCU_H +#define MFD_KHADAS_MCU_H + +#define KHADAS_MCU_PASSWD_VEN_0_REG 0x00 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_1_REG 0x01 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_2_REG 0x02 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_3_REG 0x03 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_4_REG 0x04 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_5_REG 0x05 /* RO */ +#define KHADAS_MCU_MAC_0_REG 0x06 /* RO */ +#define KHADAS_MCU_MAC_1_REG 0x07 /* RO */ +#define KHADAS_MCU_MAC_2_REG 0x08 /* RO */ +#define KHADAS_MCU_MAC_3_REG 0x09 /* RO */ +#define KHADAS_MCU_MAC_4_REG 0x0a /* RO */ +#define KHADAS_MCU_MAC_5_REG 0x0b /* RO */ +#define KHADAS_MCU_USID_0_REG 0x0c /* RO */ +#define KHADAS_MCU_USID_1_REG 0x0d /* RO */ +#define KHADAS_MCU_USID_2_REG 0x0e /* RO */ +#define KHADAS_MCU_USID_3_REG 0x0f /* RO */ +#define KHADAS_MCU_USID_4_REG 0x10 /* RO */ +#define KHADAS_MCU_USID_5_REG 0x11 /* RO */ +#define KHADAS_MCU_VERSION_0_REG 0x12 /* RO */ +#define KHADAS_MCU_VERSION_1_REG 0x13 /* RO */ +#define KHADAS_MCU_DEVICE_NO_0_REG 0x14 /* RO */ +#define KHADAS_MCU_DEVICE_NO_1_REG 0x15 /* RO */ +#define KHADAS_MCU_FACTORY_TEST_REG 0x16 /* R */ +#define KHADAS_MCU_BOOT_MODE_REG 0x20 /* RW */ +#define KHADAS_MCU_BOOT_EN_WOL_REG 0x21 /* RW */ +#define KHADAS_MCU_BOOT_EN_RTC_REG 0x22 /* RW */ +#define KHADAS_MCU_BOOT_EN_EXP_REG 0x23 /* RW */ +#define KHADAS_MCU_BOOT_EN_IR_REG 0x24 /* RW */ +#define KHADAS_MCU_BOOT_EN_DCIN_REG 0x25 /* RW */ +#define KHADAS_MCU_BOOT_EN_KEY_REG 0x26 /* RW */ +#define KHADAS_MCU_KEY_MODE_REG 0x27 /* RW */ +#define KHADAS_MCU_LED_MODE_ON_REG 0x28 /* RW */ +#define KHADAS_MCU_LED_MODE_OFF_REG 0x29 /* RW */ +#define KHADAS_MCU_SHUTDOWN_NORMAL_REG 0x2c /* RW */ +#define KHADAS_MCU_MAC_SWITCH_REG 0x2d /* RW */ +#define KHADAS_MCU_MCU_SLEEP_MODE_REG 0x2e /* RW */ +#define KHADAS_MCU_IR_CODE1_0_REG 0x2f /* RW */ +#define KHADAS_MCU_IR_CODE1_1_REG 0x30 /* RW */ +#define KHADAS_MCU_IR_CODE1_2_REG 0x31 /* RW */ +#define KHADAS_MCU_IR_CODE1_3_REG 0x32 /* RW */ +#define KHADAS_MCU_USB_PCIE_SWITCH_REG 0x33 /* RW */ +#define KHADAS_MCU_IR_CODE2_0_REG 0x34 /* RW */ +#define KHADAS_MCU_IR_CODE2_1_REG 0x35 /* RW */ +#define KHADAS_MCU_IR_CODE2_2_REG 0x36 /* RW */ +#define KHADAS_MCU_IR_CODE2_3_REG 0x37 /* RW */ +#define KHADAS_MCU_PASSWD_USER_0_REG 0x40 /* RW */ +#define 
KHADAS_MCU_PASSWD_USER_1_REG 0x41 /* RW */ +#define KHADAS_MCU_PASSWD_USER_2_REG 0x42 /* RW */ +#define KHADAS_MCU_PASSWD_USER_3_REG 0x43 /* RW */ +#define KHADAS_MCU_PASSWD_USER_4_REG 0x44 /* RW */ +#define KHADAS_MCU_PASSWD_USER_5_REG 0x45 /* RW */ +#define KHADAS_MCU_USER_DATA_0_REG 0x46 /* RW 56 bytes */ +#define KHADAS_MCU_PWR_OFF_CMD_REG 0x80 /* WO */ +#define KHADAS_MCU_PASSWD_START_REG 0x81 /* WO */ +#define KHADAS_MCU_CHECK_VEN_PASSWD_REG 0x82 /* WO */ +#define KHADAS_MCU_CHECK_USER_PASSWD_REG 0x83 /* WO */ +#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG 0x86 /* RO */ +#define KHADAS_MCU_WOL_INIT_START_REG 0x87 /* WO */ +#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG 0x88 /* WO */ + +enum { + KHADAS_BOARD_VIM1 = 0x1, + KHADAS_BOARD_VIM2, + KHADAS_BOARD_VIM3, + KHADAS_BOARD_EDGE = 0x11, + KHADAS_BOARD_EDGE_V, +}; + +/** + * struct khadas_mcu - Khadas MCU structure + * @dev: device reference used for logs + * @regmap: register map + */ +struct khadas_mcu { + struct device *dev; + struct regmap *regmap; +}; + +#endif /* MFD_KHADAS_MCU_H */ -- cgit v1.2.3 From e32b16c31339e37d3cdc814f2773b74c1dbf660c Mon Sep 17 00:00:00 2001 From: Prashant Malani Date: Thu, 28 May 2020 04:36:03 -0700 Subject: platform/chrome: cros_ec: Update mux state bits Sync the EC_CMD_USB_PD_MUX_INFO mux state bit fields with the Chrome EC code base. The newly added bit fields will be used for cros-ec-typec mux control. Signed-off-by: Prashant Malani Signed-off-by: Enric Balletbo i Serra --- include/linux/platform_data/cros_ec_commands.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 69210881ebac..a7b0fc440c35 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -5207,11 +5207,15 @@ struct ec_params_usb_pd_mux_info { } __ec_align1; /* Flags representing mux state */ -#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ -#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ -#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ -#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ -#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_NONE 0 /* Open switch */ +#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ +#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ +#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ +#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ +#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_SAFE_MODE BIT(5) /* DP is in safe mode */ +#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */ +#define USB_PD_MUX_USB4_ENABLED BIT(7) /* USB4 enabled */ struct ec_response_usb_pd_mux_info { uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ -- cgit v1.2.3 From 5e48a03bb9bff1728164040d71aa03cdb3cdfca2 Mon Sep 17 00:00:00 2001 From: Prashant Malani Date: Wed, 24 Jun 2020 01:09:23 -0700 Subject: platform/chrome: cros_ec: Add TBT pd_ctrl fields To support Thunderbolt compatibility mode, synchronize ec_response_usb_pd_control_v2 with the Chrome EC version, so that we get the Thunderbolt related control fields and macros.
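As an illustrative sketch only (not part of this change), a consumer such as cros-ec-typec could decode the new fields like below; the hypothetical helper assumes a struct ec_response_usb_pd_control_v2 already filled in by an EC_CMD_USB_PD_CONTROL host command:

	/* Hypothetical helper, assumes <linux/platform_data/cros_ec_commands.h>:
	 * true if an active (vs. passive) cable is in use on this port.
	 */
	static bool pd_cable_is_active(const struct ec_response_usb_pd_control_v2 *resp)
	{
		return resp->control_flags & USB_PD_CTRL_ACTIVE_CABLE;
	}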
Signed-off-by: Prashant Malani Signed-off-by: Enric Balletbo i Serra --- include/linux/platform_data/cros_ec_commands.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index a7b0fc440c35..b808570bdd04 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4917,15 +4917,26 @@ struct ec_response_usb_pd_control_v1 { #define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */ #define USBC_PD_CC_DFP_ATTACHED 5 /* DFP attached to usbc */ +/* Active/Passive Cable */ +#define USB_PD_CTRL_ACTIVE_CABLE BIT(0) +/* Optical/Non-optical cable */ +#define USB_PD_CTRL_OPTICAL_CABLE BIT(1) +/* 3rd Gen TBT device (or AMA)/2nd Gen TBT adapter */ +#define USB_PD_CTRL_TBT_LEGACY_ADAPTER BIT(2) +/* Active Link Uni-Direction */ +#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR BIT(3) + struct ec_response_usb_pd_control_v2 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; - uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */ - uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ - /* CL:1500994 Current cable type */ - uint8_t reserved_cable_type; + uint8_t cc_state; /* enum pd_cc_states representing cc state */ + uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ + uint8_t reserved; /* Reserved for future use */ + uint8_t control_flags; /* USB_PD_CTRL_* flags */ + uint8_t cable_speed; /* TBT_SS_* cable speed */ + uint8_t cable_gen; /* TBT_GEN3_* cable rounded support */ } __ec_align1; #define EC_CMD_USB_PD_PORTS 0x0102 -- cgit v1.2.3 From 33d226f504ed72cba3a2b42bbe2a993b3d6d9548 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 02:25:06 +0200 Subject: mtd: nand: Move nand_device forward declaration to the top This structure might be used earlier in this file, so let's move the forward declaration to the top. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529002517.3546-10-miquel.raynal@bootlin.com --- include/linux/mtd/nand.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 0c7483843a32..a1f38c778d0e 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -12,6 +12,8 @@ #include +struct nand_device; + /** * struct nand_memory_organization - Memory organization structure * @bits_per_cell: number of bits per NAND cell @@ -133,8 +135,6 @@ struct nand_bbt { unsigned long *cache; }; -struct nand_device; - /** * struct nand_ops - NAND operations * @erase: erase a specific block. No need to check if the block is bad before -- cgit v1.2.3 From 85f54c5588885cc3b5be4a07498dd0755de9f5cf Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 02:25:10 +0200 Subject: mtd: nand: Rename a core structure Prepare the migration to a generic ECC engine by renaming the nand_ecc_req structure to nand_ecc_props. This structure will be the base of a wider 'nand_ecc' structure. In nand_device, these properties are still named "eccreq" even if "eccprops" might be more descriptive. This is just a transition step; this field is being replaced very soon by a much wider structure. The impact of renaming this field would be huge compared to its interest.
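For illustration only (not part of this change), a chip requiring 8-bit correction per 512-byte codeword would now be described with the renamed structure as:

	struct nand_ecc_props reqs = {
		.strength = 8,		/* correctable bitflips per ECC step */
		.step_size = 512,	/* data bytes covered by one ECC step */
	};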
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529002517.3546-14-miquel.raynal@bootlin.com --- include/linux/mtd/nand.h | 8 ++++---- include/linux/mtd/spinand.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index a1f38c778d0e..af99041ceaa9 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -116,11 +116,11 @@ struct nand_page_io_req { }; /** - * struct nand_ecc_req - NAND ECC requirements + * struct nand_ecc_props - NAND ECC properties * @strength: ECC strength - * @step_size: ECC step/block size + * @step_size: Number of bytes per step */ -struct nand_ecc_req { +struct nand_ecc_props { unsigned int strength; unsigned int step_size; }; @@ -179,7 +179,7 @@ struct nand_ops { struct nand_device { struct mtd_info mtd; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct nand_row_converter rowconv; struct nand_bbt bbt; const struct nand_ops *ops; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 1077c45721ff..7b78c4ba9b3e 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -309,7 +309,7 @@ struct spinand_info { struct spinand_devid devid; u32 flags; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct spinand_ecc_info eccinfo; struct { const struct spinand_op_variants *read_cache; -- cgit v1.2.3 From c4cabc08d09e4b107b685e08bdec8a38a91089d8 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:55 +0200 Subject: mtd: rawnand: Use unsigned types for nand_chip unsigned values page_shift, phys_erase_shift, bbt_erase_shift, chip_shift, pagemask, subpagesize and badblockbits are all positive values, so declare them as unsigned. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-2-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 65b1c1c18b41..830f2d08937f 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1110,11 +1110,11 @@ struct nand_chip { unsigned int options; unsigned int bbt_options; - int page_shift; - int phys_erase_shift; - int bbt_erase_shift; - int chip_shift; - int pagemask; + unsigned int page_shift; + unsigned int phys_erase_shift; + unsigned int bbt_erase_shift; + unsigned int chip_shift; + unsigned int pagemask; u8 *data_buf; struct { @@ -1122,10 +1122,10 @@ struct nand_chip { int page; } pagecache; - int subpagesize; + unsigned int subpagesize; int onfi_timing_mode_default; unsigned int badblockpos; - int badblockbits; + unsigned int badblockbits; struct nand_id id; struct nand_parameters parameters; -- cgit v1.2.3 From d1f3837a507d73746f9e2118fad20ee5e57e86cc Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:56 +0200 Subject: mtd: rawnand: Only use u8 instead of uint8_t in nand_chip structure Mechanical change to avoid using old types. 
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-3-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 830f2d08937f..cea137778224 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1141,13 +1141,13 @@ struct nand_chip { int (*suspend)(struct nand_chip *chip); void (*resume)(struct nand_chip *chip); - uint8_t *oob_poi; + u8 *oob_poi; struct nand_controller *controller; struct nand_ecc_ctrl ecc; unsigned long buf_align; - uint8_t *bbt; + u8 *bbt; struct nand_bbt_descr *bbt_td; struct nand_bbt_descr *bbt_md; -- cgit v1.2.3 From 8e8b2706e15d16443b1ea61a6f994c08ec5b9486 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:57 +0200 Subject: mtd: rawnand: Create a nand_chip operations structure And move nand_chip hooks there. While moving entries from one structure to the other, adapt the documentation style. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-4-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 20 ++++++++++---------- drivers/mtd/nand/raw/nand_hynix.c | 2 +- drivers/mtd/nand/raw/nand_macronix.c | 10 +++++----- drivers/mtd/nand/raw/nand_micron.c | 2 +- include/linux/mtd/rawnand.h | 32 ++++++++++++++++++-------------- 5 files changed, 35 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 45124dbb1835..d9cb71e7c0ed 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -3215,10 +3215,10 @@ static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode) if (retry_mode >= chip->read_retries) return -EINVAL; - if (!chip->setup_read_retry) + if (!chip->ops.setup_read_retry) return -EOPNOTSUPP; - return chip->setup_read_retry(chip, retry_mode); + return chip->ops.setup_read_retry(chip, retry_mode); } static void nand_wait_readrdy(struct nand_chip *chip) @@ -4462,8 +4462,8 @@ static int nand_suspend(struct mtd_info *mtd) int ret = 0; mutex_lock(&chip->lock); - if (chip->suspend) - ret = chip->suspend(chip); + if (chip->ops.suspend) + ret = chip->ops.suspend(chip); if (!ret) chip->suspended = 1; mutex_unlock(&chip->lock); @@ -4481,8 +4481,8 @@ static void nand_resume(struct mtd_info *mtd) mutex_lock(&chip->lock); if (chip->suspended) { - if (chip->resume) - chip->resume(chip); + if (chip->ops.resume) + chip->ops.resume(chip); chip->suspended = 0; } else { pr_err("%s called for a chip which is not in suspended state\n", @@ -4511,10 +4511,10 @@ static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct nand_chip *chip = mtd_to_nand(mtd); - if (!chip->lock_area) + if (!chip->ops.lock_area) return -ENOTSUPP; - return chip->lock_area(chip, ofs, len); + return chip->ops.lock_area(chip, ofs, len); } /** @@ -4527,10 +4527,10 @@ static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct nand_chip *chip = mtd_to_nand(mtd); - if (!chip->unlock_area) + if (!chip->ops.unlock_area) return -ENOTSUPP; - return chip->unlock_area(chip, ofs, len); + return chip->ops.unlock_area(chip, ofs, len); } /* Set default functions */ diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c index 7caedaa5b9e5..7d1be53f27f3 100644 --- 
a/drivers/mtd/nand/raw/nand_hynix.c +++ b/drivers/mtd/nand/raw/nand_hynix.c @@ -337,7 +337,7 @@ static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip, rr->nregs = nregs; rr->regs = hynix_1xnm_mlc_read_retry_regs; hynix->read_retry = rr; - chip->setup_read_retry = hynix_nand_setup_read_retry; + chip->ops.setup_read_retry = hynix_nand_setup_read_retry; chip->read_retries = nmodes; out: diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index 09c254c97b5c..1472f925f386 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -130,7 +130,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip) return; chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES; - chip->setup_read_retry = macronix_nand_setup_read_retry; + chip->ops.setup_read_retry = macronix_nand_setup_read_retry; if (p->supports_set_get_features) { bitmap_set(p->set_feature_list, @@ -242,8 +242,8 @@ static void macronix_nand_block_protection_support(struct nand_chip *chip) bitmap_set(chip->parameters.set_feature_list, ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1); - chip->lock_area = mxic_nand_lock; - chip->unlock_area = mxic_nand_unlock; + chip->ops.lock_area = mxic_nand_lock; + chip->ops.unlock_area = mxic_nand_unlock; } static int nand_power_down_op(struct nand_chip *chip) @@ -312,8 +312,8 @@ static void macronix_nand_deep_power_down_support(struct nand_chip *chip) if (i < 0) return; - chip->suspend = mxic_nand_suspend; - chip->resume = mxic_nand_resume; + chip->ops.suspend = mxic_nand_suspend; + chip->ops.resume = mxic_nand_resume; } static int macronix_nand_init(struct nand_chip *chip) diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 3589b4fce0d4..4385092a9325 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -84,7 +84,7 @@ static int micron_nand_onfi_init(struct nand_chip *chip) struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor; chip->read_retries = micron->read_retry_options; - chip->setup_read_retry = micron_nand_setup_read_retry; + chip->ops.setup_read_retry = micron_nand_setup_read_retry; } if (p->supports_set_get_features) { diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index cea137778224..7f9be95ca8dc 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1027,16 +1027,31 @@ struct nand_legacy { struct nand_controller dummy_controller; }; +/** + * struct nand_chip_ops - NAND chip operations + * @suspend: Suspend operation + * @resume: Resume operation + * @lock_area: Lock operation + * @unlock_area: Unlock operation + * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs) + */ +struct nand_chip_ops { + int (*suspend)(struct nand_chip *chip); + void (*resume)(struct nand_chip *chip); + int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); +}; + /** * struct nand_chip - NAND Private Flash Chip Data * @base: Inherit from the generic NAND device + * @ops: NAND chip operations * @legacy: All legacy fields/hooks. If you develop a new driver, * don't even try to use any of these fields/hooks, and if * you're modifying an existing driver that is using those * fields/hooks, you should consider reworking the driver * avoid using them. 
* and avoid using them.
- * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for - * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform * @oob_poi: "poison value buffer," used for laying out OOB data @@ -1081,8 +1096,6 @@ struct nand_legacy { * @lock: lock protecting the suspended field. Also used to * serialize accesses to the NAND device. * @suspended: set to 1 when the device is suspended, 0 when it's not. - * @suspend: [REPLACEABLE] specific NAND device suspend operation - * @resume: [REPLACEABLE] specific NAND device resume operation * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. @@ -1096,17 +1109,13 @@ struct nand_legacy { * @manufacturer: [INTERN] Contains manufacturer information * @manufacturer.desc: [INTERN] Contains manufacturer's description * @manufacturer.priv: [INTERN] Contains manufacturer private information - * @lock_area: [REPLACEABLE] specific NAND chip lock operation - * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation */ struct nand_chip { struct nand_device base; - + struct nand_chip_ops ops; struct nand_legacy legacy; - int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); - unsigned int options; unsigned int bbt_options; @@ -1138,8 +1147,6 @@ struct nand_chip { struct mutex lock; unsigned int suspended : 1; - int (*suspend)(struct nand_chip *chip); - void (*resume)(struct nand_chip *chip); u8 *oob_poi; struct nand_controller *controller; @@ -1159,9 +1166,6 @@ struct nand_chip { const struct nand_manufacturer *desc; void *priv; } manufacturer; - - int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); - int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; -- cgit v1.2.3 From 271de009b7c0c1c15f63491a352ab08835462977 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:58 +0200 Subject: mtd: rawnand: Rename the manufacturer structure It is currently called nand_manufacturer but could actually be called nand_manufacturer_desc, like its instances, so that the former name is left unused for now. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-5-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/internals.h | 6 +++--- drivers/mtd/nand/raw/nand_base.c | 14 +++++++------- drivers/mtd/nand/raw/nand_ids.c | 16 ++++++++-------- include/linux/mtd/rawnand.h | 2 +- 4 files changed, 19 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 03866b0aadea..a518acfd9b3f 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -53,12 +53,12 @@ struct nand_manufacturer_ops { }; /** - * struct nand_manufacturer - NAND Flash Manufacturer structure + * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor * @name: Manufacturer name * @id: manufacturer ID code of device. 
* @ops: manufacturer operations */ -struct nand_manufacturer { +struct nand_manufacturer_desc { int id; char *name; const struct nand_manufacturer_ops *ops; @@ -79,7 +79,7 @@ extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; extern const struct mtd_pairing_scheme dist3_pairing_scheme; /* Core functions */ -const struct nand_manufacturer *nand_get_manufacturer(u8 id); +const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id); int nand_bbm_get_next_page(struct nand_chip *chip, int page); int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs); int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index d9cb71e7c0ed..534ee75d0f2b 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -4810,9 +4810,9 @@ static void nand_manufacturer_cleanup(struct nand_chip *chip) } static const char * -nand_manufacturer_name(const struct nand_manufacturer *manufacturer) +nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc) { - return manufacturer ? manufacturer->name : "Unknown"; + return manufacturer_desc ? manufacturer_desc->name : "Unknown"; } /* @@ -4820,7 +4820,7 @@ nand_manufacturer_name(const struct nand_manufacturer *manufacturer) */ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) { - const struct nand_manufacturer *manufacturer; + const struct nand_manufacturer_desc *manufacturer_desc; struct mtd_info *mtd = nand_to_mtd(chip); struct nand_memory_organization *memorg; int busw, ret; @@ -4877,8 +4877,8 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data)); /* Try to identify manufacturer */ - manufacturer = nand_get_manufacturer(maf_id); - chip->manufacturer.desc = manufacturer; + manufacturer_desc = nand_get_manufacturer_desc(maf_id); + chip->manufacturer.desc = manufacturer_desc; if (!type) type = nand_flash_ids; @@ -4957,7 +4957,7 @@ ident_done: */ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), mtd->name); pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8, (chip->options & NAND_BUSWIDTH_16) ? 16 : 8); @@ -4992,7 +4992,7 @@ ident_done: pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), chip->parameters.model); pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", (int)(targetsize >> 20), nand_is_slc(chip) ? 
"SLC" : "MLC", diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c index ba27902fc54b..e0dbc2e316c7 100644 --- a/drivers/mtd/nand/raw/nand_ids.c +++ b/drivers/mtd/nand/raw/nand_ids.c @@ -166,7 +166,7 @@ struct nand_flash_dev nand_flash_ids[] = { }; /* Manufacturer IDs */ -static const struct nand_manufacturer nand_manufacturers[] = { +static const struct nand_manufacturer_desc nand_manufacturer_descs[] = { {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops}, {NAND_MFR_ATO, "ATO"}, {NAND_MFR_EON, "Eon"}, @@ -186,20 +186,20 @@ static const struct nand_manufacturer nand_manufacturers[] = { }; /** - * nand_get_manufacturer - Get manufacturer information from the manufacturer - * ID + * nand_get_manufacturer_desc - Get manufacturer information from the + * manufacturer ID * @id: manufacturer ID * - * Returns a pointer a nand_manufacturer object if the manufacturer is defined + * Returns a nand_manufacturer_desc object if the manufacturer is defined * in the NAND manufacturers database, NULL otherwise. */ -const struct nand_manufacturer *nand_get_manufacturer(u8 id) +const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id) { int i; - for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++) - if (nand_manufacturers[i].id == id) - return &nand_manufacturers[i]; + for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++) + if (nand_manufacturer_descs[i].id == id) + return &nand_manufacturer_descs[i]; return NULL; } diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 7f9be95ca8dc..860d3c1020ef 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1163,7 +1163,7 @@ struct nand_chip { void *priv; struct { - const struct nand_manufacturer *desc; + const struct nand_manufacturer_desc *desc; void *priv; } manufacturer; }; -- cgit v1.2.3 From 36017af430e6b2fad0b2ee5476103706160f1379 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:12:59 +0200 Subject: mtd: rawnand: Declare the nand_manufacturer structure out of nand_chip Now that struct nand_manufacturer type is free, use it to store the nand_manufacturer_desc and the manufacturer's private data. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-6-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 860d3c1020ef..a3dfa36a9fd5 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1043,10 +1043,21 @@ struct nand_chip_ops { int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); }; +/** + * struct nand_manufacturer - NAND manufacturer structure + * @desc: The manufacturer description + * @priv: Private information for the manufacturer driver + */ +struct nand_manufacturer { + const struct nand_manufacturer_desc *desc; + void *priv; +}; + /** * struct nand_chip - NAND Private Flash Chip Data * @base: Inherit from the generic NAND device * @ops: NAND chip operations + * @manufacturer: Manufacturer information * @legacy: All legacy fields/hooks. If you develop a new driver, * don't even try to use any of these fields/hooks, and if * you're modifying an existing driver that is using those @@ -1106,13 +1117,11 @@ struct nand_chip_ops { * structure which is shared among multiple independent * devices. 
* @priv: [OPTIONAL] pointer to private chip data - * @manufacturer: [INTERN] Contains manufacturer information - * @manufacturer.desc: [INTERN] Contains manufacturer's description - * @manufacturer.priv: [INTERN] Contains manufacturer private information */ struct nand_chip { struct nand_device base; + struct nand_manufacturer manufacturer; struct nand_chip_ops ops; struct nand_legacy legacy; @@ -1161,11 +1170,6 @@ struct nand_chip { struct nand_bbt_descr *badblock_pattern; void *priv; - - struct { - const struct nand_manufacturer_desc *desc; - void *priv; - } manufacturer; }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; -- cgit v1.2.3 From a63674c7cfe62221e05ba73107d9e15d73ff8bbd Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:00 +0200 Subject: mtd: rawnand: Reorganize the nand_chip structure Reorder fields in this structure and pack entries by theme: * The main descriptive structures * The data interface details * Bad block information * The device layout * Extra buffers matching the device layout * Internal values * External objects like the ECC controller, the ECC engine and a private data pointer. While at it, adapt the documentation style. I changed on purpose the description of @oob_poi which was weird. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-7-miquel.raynal@bootlin.com --- include/linux/mtd/rawnand.h | 166 ++++++++++++++++++++------------------------ 1 file changed, 76 insertions(+), 90 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a3dfa36a9fd5..544ec8736793 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1055,120 +1055,106 @@ struct nand_manufacturer { /** * struct nand_chip - NAND Private Flash Chip Data - * @base: Inherit from the generic NAND device - * @ops: NAND chip operations - * @manufacturer: Manufacturer information - * @legacy: All legacy fields/hooks. If you develop a new driver, - * don't even try to use any of these fields/hooks, and if - * you're modifying an existing driver that is using those - * fields/hooks, you should consider reworking the driver - * avoid using them. - * @ecc: [BOARDSPECIFIC] ECC control structure - * @buf_align: minimum buffer alignment required by a platform - * @oob_poi: "poison value buffer," used for laying out OOB data - * before writing - * @page_shift: [INTERN] number of address bits in a page (column - * address bits). - * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock - * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry - * @chip_shift: [INTERN] number of address bits in one chip - * @options: [BOARDSPECIFIC] various chip options. They can partly - * be set to inform nand_scan about special functionality. - * See the defines for further explanation. - * @bbt_options: [INTERN] bad block specific options. All options used - * here must come from bbm.h. By default, these options - * will be copied to the appropriate nand_bbt_descr's. - * @badblockpos: [INTERN] position of the bad block marker in the oob - * area. - * @badblockbits: [INTERN] minimum number of set bits in a good block's - * bad block marker position; i.e., BBM == 11110111b is - * not bad when badblockbits == 7 - * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. 
This field is - * set to the actually used ONFI mode if the chip is - * ONFI compliant or deduced from the datasheet if - * the NAND chip is not ONFI compliant. - * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 - * @data_buf: [INTERN] buffer for data, size is (page size + oobsize). - * @pagecache: Structure containing page cache related fields - * @pagecache.bitflips: Number of bitflips of the cached page - * @pagecache.page: Page number currently in the cache. -1 means no page is - * currently cached - * @subpagesize: [INTERN] holds the subpagesize - * @id: [INTERN] holds NAND ID - * @parameters: [INTERN] holds generic parameters under an easily - * readable form. - * @data_interface: [INTERN] NAND interface timing information - * @cur_cs: currently selected target. -1 means no target selected, - * otherwise we should always have cur_cs >= 0 && - * cur_cs < nanddev_ntargets(). NAND Controller drivers - * should not modify this value, but they're allowed to - * read it. - * @read_retries: [INTERN] the number of read retry modes supported - * @lock: lock protecting the suspended field. Also used to - * serialize accesses to the NAND device. - * @suspended: set to 1 when the device is suspended, 0 when it's not. - * @bbt: [INTERN] bad block table pointer - * @bbt_td: [REPLACEABLE] bad block table descriptor for flash - * lookup. - * @bbt_md: [REPLACEABLE] bad block table mirror descriptor - * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial - * bad block scan. - * @controller: [REPLACEABLE] a pointer to a hardware controller - * structure which is shared among multiple independent - * devices. - * @priv: [OPTIONAL] pointer to private chip data + * @base: Inherit from the generic NAND device + * @id: Holds NAND ID + * @parameters: Holds generic parameters under an easily readable form + * @manufacturer: Manufacturer information + * @ops: NAND chip operations + * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try + * to use any of these fields/hooks, and if you're modifying an + * existing driver that is using those fields/hooks, you should + * consider reworking the driver and avoid using them. + * @options: Various chip options. They can partly be set to inform nand_scan + * about special functionality. See the defines for further + * explanation. + * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the + * actually used ONFI mode if the chip is ONFI + * compliant or deduced from the datasheet otherwise + * @data_interface: NAND interface timing information + * @bbt_erase_shift: Number of address bits in a bbt entry + * @bbt_options: Bad block table specific options. All options used here must + * come from bbm.h. By default, these options will be copied to + * the appropriate nand_bbt_descr's. 
+ * @badblockpos: Bad block marker position in the oob area + * @badblockbits: Minimum number of set bits in a good block's bad block marker + * position; i.e., BBM = 11110111b is good when badblockbits = 7 + * @bbt_td: Bad block table descriptor for flash lookup + * @bbt_md: Bad block table mirror descriptor + * @badblock_pattern: Bad block scan pattern used for initial bad block scan + * @bbt: Bad block table pointer + * @page_shift: Number of address bits in a page (column address bits) + * @phys_erase_shift: Number of address bits in a physical eraseblock + * @chip_shift: Number of address bits in one chip + * @pagemask: Page number mask = number of (pages / chip) - 1 + * @subpagesize: Holds the subpagesize + * @data_buf: Buffer for data, size is (page size + oobsize) + * @oob_poi: pointer on the OOB area covered by data_buf + * @pagecache: Structure containing page cache related fields + * @pagecache.bitflips: Number of bitflips of the cached page + * @pagecache.page: Page number currently in the cache. -1 means no page is + * currently cached + * @buf_align: Minimum buffer alignment required by a platform + * @lock: Lock protecting the suspended field. Also used to serialize accesses + * to the NAND device + * @suspended: Set to 1 when the device is suspended, 0 when it's not + * @cur_cs: Currently selected target. -1 means no target selected, otherwise we + * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets(). + * NAND Controller drivers should not modify this value, but they're + * allowed to read it. + * @read_retries: The number of read retry modes supported + * @controller: The hardware controller structure which is shared among multiple + * independent devices + * @ecc: The ECC controller structure + * @priv: Chip private data */ - struct nand_chip { struct nand_device base; + struct nand_id id; + struct nand_parameters parameters; struct nand_manufacturer manufacturer; struct nand_chip_ops ops; struct nand_legacy legacy; - unsigned int options; + + /* Data interface */ + int onfi_timing_mode_default; + struct nand_data_interface data_interface; + + /* Bad block information */ + unsigned int bbt_erase_shift; unsigned int bbt_options; + unsigned int badblockpos; + unsigned int badblockbits; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + struct nand_bbt_descr *badblock_pattern; + u8 *bbt; + /* Device internal layout */ unsigned int page_shift; unsigned int phys_erase_shift; - unsigned int bbt_erase_shift; unsigned int chip_shift; unsigned int pagemask; - u8 *data_buf; + unsigned int subpagesize; + /* Buffers */ + u8 *data_buf; + u8 *oob_poi; struct { unsigned int bitflips; int page; } pagecache; + unsigned long buf_align; - unsigned int subpagesize; - int onfi_timing_mode_default; - unsigned int badblockpos; - unsigned int badblockbits; - - struct nand_id id; - struct nand_parameters parameters; - - struct nand_data_interface data_interface; - - int cur_cs; - - int read_retries; - + /* Internals */ struct mutex lock; unsigned int suspended : 1; + int cur_cs; + int read_retries; - u8 *oob_poi; + /* Externals */ struct nand_controller *controller; - struct nand_ecc_ctrl ecc; - unsigned long buf_align; - - u8 *bbt; - struct nand_bbt_descr *bbt_td; - struct nand_bbt_descr *bbt_md; - - struct nand_bbt_descr *badblock_pattern; - void *priv; }; -- cgit v1.2.3 From e0160cd41fb81fde9ee4612a7ea2dfd631de2638 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:12 +0200 Subject: mtd: rawnand: Hide the chip->data_interface indirection As a 
preparation for allocating the data interface structure dynamically (and rename it), let's avoid accessing chip->data_interface directly. Instead, we introduce a helper, nand_get_interface_config(), and use it to retrieve the current data interface configuration out of a nand_chip object. Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-19-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/marvell_nand.c | 9 ++++++--- drivers/mtd/nand/raw/meson_nand.c | 8 ++++---- drivers/mtd/nand/raw/nand_base.c | 34 +++++++++++++++++----------------- drivers/mtd/nand/raw/nand_legacy.c | 5 ++++- drivers/mtd/nand/raw/nand_toshiba.c | 2 +- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 2 +- drivers/mtd/nand/raw/tango_nand.c | 2 +- include/linux/mtd/rawnand.h | 11 +++++++++++ 8 files changed, 45 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 260a0430313e..df859889e4eb 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -1096,6 +1096,8 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, const u8 *oob_buf, bool raw, int page) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; @@ -1141,7 +1143,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, return ret; ret = marvell_nfc_wait_op(chip, - PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); + PSEC_TO_MSEC(sdr->tPROG_max)); return ret; } @@ -1562,6 +1564,8 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, const u8 *buf, int oob_required, int page) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); struct mtd_info *mtd = nand_to_mtd(chip); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; const u8 *data = buf; @@ -1598,8 +1602,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, marvell_nfc_wait_ndrun(chip); } - ret = marvell_nfc_wait_op(chip, - PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); + ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max)); marvell_nfc_disable_hw_ecc(chip); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 3f376471f3f7..580b7be0719f 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -573,10 +573,10 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len) static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, int page, bool in) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(nand)); struct mtd_info *mtd = nand_to_mtd(nand); struct meson_nfc *nfc = nand_get_controller_data(nand); - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&nand->data_interface); u32 *addrs = nfc->cmdfifo.rw.addrs; u32 cs = nfc->param.chip_select; u32 cmd0, cmd_num, row_start; @@ -626,9 +626,9 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, static int meson_nfc_write_page_sub(struct nand_chip *nand, int page, int raw) { - struct mtd_info *mtd = nand_to_mtd(nand); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&nand->data_interface); + 
nand_get_sdr_timings(nand_get_interface_config(nand)); + struct mtd_info *mtd = nand_to_mtd(nand); struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); struct meson_nfc *nfc = nand_get_controller_data(nand); int data_len, info_len; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index b4de85794e07..7d393e1d0252 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -773,7 +773,7 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) return -ENOTSUPP; /* Wait tWB before polling the STATUS reg. */ - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); ndelay(PSEC_TO_NSEC(timings->tWB_max)); ret = nand_status_op(chip, NULL); @@ -1119,9 +1119,9 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len) { - struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[4]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), @@ -1163,7 +1163,7 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int len) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[5]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), @@ -1260,7 +1260,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PARAM, 0), NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1315,7 +1315,7 @@ int nand_change_read_column_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[2] = {}; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDOUT, 0), @@ -1389,9 +1389,9 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, const void *buf, unsigned int len, bool prog) { - struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[5] = {}; struct nand_op_instr instrs[] = { /* @@ -1514,7 +1514,7 @@ int nand_prog_page_end_op(struct nand_chip *chip) if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1621,7 +1621,7 @@ int nand_change_write_column_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[2]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDIN, 0), @@ -1676,7 +1676,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, if 
(nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READID, 0), NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1715,7 +1715,7 @@ int nand_status_op(struct nand_chip *chip, u8 *status) { if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_STATUS, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1784,7 +1784,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[3] = { page, page >> 8, page >> 16 }; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_ERASE1, 0), @@ -1843,7 +1843,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0), NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1890,7 +1890,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0), NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1947,7 +1947,7 @@ int nand_reset_op(struct nand_chip *chip) { if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)), NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0), @@ -3226,7 +3226,7 @@ static void nand_wait_readrdy(struct nand_chip *chip) if (!(chip->options & NAND_NEED_READRDY)) return; - sdr = nand_get_sdr_timings(&chip->data_interface); + sdr = nand_get_sdr_timings(nand_get_interface_config(chip)); WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0)); } diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index 848403dcae03..fe769762e1d8 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -354,6 +354,9 @@ static void nand_command(struct nand_chip *chip, unsigned int command, static void nand_ccs_delay(struct nand_chip *chip) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); + /* * The controller already takes care of waiting for tCCS when the RNDIN * or RNDOUT command is sent, return directly. @@ -366,7 +369,7 @@ static void nand_ccs_delay(struct nand_chip *chip) * (which should be safe for all NANDs). 
*/ if (nand_controller_can_setup_data_iface(chip)) - ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000); + ndelay(sdr->tCCS_min / 1000); else ndelay(500); } diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c index ae069905d7e4..333037bdca41 100644 --- a/drivers/mtd/nand/raw/nand_toshiba.c +++ b/drivers/mtd/nand/raw/nand_toshiba.c @@ -33,7 +33,7 @@ static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ, PSEC_TO_NSEC(sdr->tADL_min)), diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 65c9d17b25a3..7320c0fc19ec 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1308,7 +1308,7 @@ static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip, dev_warn(nfc->dev, "Waitrdy timeout\n"); /* Wait tWB before R/B# signal is low */ - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); ndelay(PSEC_TO_NSEC(timings->tWB_max)); /* R/B# signal is low, clear high level flag */ diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index b3a0d08f1733..648dc7e77f6a 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -336,7 +336,7 @@ static int tango_write_page(struct nand_chip *chip, const u8 *buf, if (err) return err; - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); err = tango_waitrdy(chip, PSEC_TO_MSEC(timings->tR_max)); if (err) return err; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 544ec8736793..0852df941130 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1203,6 +1203,17 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) return mtd_get_of_node(nand_to_mtd(chip)); } +/** + * nand_get_interface_config - Retrieve the current interface configuration + * of a NAND chip + * @chip: The NAND chip + */ +static inline const struct nand_data_interface * +nand_get_interface_config(struct nand_chip *chip) +{ + return &chip->data_interface; +} + /* * A helper for defining older NAND chips where the second ID byte fully * defined the chip, including the geometry (chip size, eraseblock size, page -- cgit v1.2.3 From 4c46667b3d67253604ee42840917844548c86657 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:13 +0200 Subject: mtd: rawnand: s/data_interface/interface_config/ The name/suffix data_interface is a bit misleading in that the field or functions actually represent a configuration that can be applied by the controller/chip. Let's rename all fields/functions/hooks that are worth renaming. 
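For illustration, a minimal controller-side hook now reads as follows (a sketch only: "foo_nfc" is a made-up driver name and the register programming is elided; nand_get_sdr_timings() and the NAND_DATA_IFACE_CHECK_ONLY convention are the real interfaces used by the drivers below):

    static int foo_nfc_setup_interface(struct nand_chip *chip, int csline,
                                       const struct nand_interface_config *conf)
    {
        const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

        if (IS_ERR(sdr))
            return PTR_ERR(sdr);

        /* Only validate the requested timings, do not touch the hardware */
        if (csline == NAND_DATA_IFACE_CHECK_ONLY)
            return 0;

        /* Program the controller timing registers from sdr->tWB_max, sdr->tADL_min, ... */
        return 0;
    }

    static const struct nand_controller_ops foo_nfc_ops = {
        .setup_interface = foo_nfc_setup_interface,
    };

Only the spelling changes; the semantics of the hook are untouched.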
Signed-off-by: Boris Brezillon Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/ams-delta.c | 6 +-- drivers/mtd/nand/raw/arasan-nand-controller.c | 6 +-- drivers/mtd/nand/raw/atmel/nand-controller.c | 34 ++++++------- drivers/mtd/nand/raw/cadence-nand-controller.c | 6 +-- drivers/mtd/nand/raw/denali.c | 8 +-- drivers/mtd/nand/raw/fsmc_nand.c | 6 +-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 6 +-- drivers/mtd/nand/raw/internals.h | 12 ++--- drivers/mtd/nand/raw/marvell_nand.c | 9 ++-- drivers/mtd/nand/raw/meson_nand.c | 6 +-- drivers/mtd/nand/raw/mtk_nand.c | 6 +-- drivers/mtd/nand/raw/mxc_nand.c | 20 ++++---- drivers/mtd/nand/raw/mxic_nand.c | 6 +-- drivers/mtd/nand/raw/nand_base.c | 69 +++++++++++++------------- drivers/mtd/nand/raw/nand_legacy.c | 2 +- drivers/mtd/nand/raw/nand_timings.c | 17 ++++--- drivers/mtd/nand/raw/s3c2410.c | 6 +-- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 4 +- drivers/mtd/nand/raw/sunxi_nand.c | 6 +-- drivers/mtd/nand/raw/tango_nand.c | 4 +- drivers/mtd/nand/raw/tegra_nand.c | 6 +-- include/linux/mtd/rawnand.h | 33 ++++++------ 22 files changed, 139 insertions(+), 139 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index 3711e7a0436c..fdba155416d2 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -191,8 +191,8 @@ static int gpio_nand_exec_op(struct nand_chip *this, return ret; } -static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline, - const struct nand_data_interface *cf) +static int gpio_nand_setup_interface(struct nand_chip *this, int csline, + const struct nand_interface_config *cf) { struct gpio_nand *priv = nand_get_controller_data(this); const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf); @@ -217,7 +217,7 @@ static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline, static const struct nand_controller_ops gpio_nand_ops = { .exec_op = gpio_nand_exec_op, - .setup_data_interface = gpio_nand_setup_data_interface, + .setup_interface = gpio_nand_setup_interface, }; /* diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 7141dcccba3c..12c643e97c85 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -854,8 +854,8 @@ static int anfc_exec_op(struct nand_chip *chip, return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only); } -static int anfc_setup_data_interface(struct nand_chip *chip, int target, - const struct nand_data_interface *conf) +static int anfc_setup_interface(struct nand_chip *chip, int target, + const struct nand_interface_config *conf) { struct anand *anand = to_anand(chip); struct arasan_nfc *nfc = to_anfc(chip->controller); @@ -1083,7 +1083,7 @@ static void anfc_detach_chip(struct nand_chip *chip) static const struct nand_controller_ops anfc_ops = { .exec_op = anfc_exec_op, - .setup_data_interface = anfc_setup_data_interface, + .setup_interface = anfc_setup_interface, .attach_chip = anfc_attach_chip, .detach_chip = anfc_detach_chip, }; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 46a3724a788e..c9818f548d07 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -200,8 +200,8 @@ struct atmel_nand_controller_ops { void (*nand_init)(struct atmel_nand_controller *nc, struct atmel_nand *nand); int (*ecc_init)(struct nand_chip *chip); - 
int (*setup_data_interface)(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf); + int (*setup_interface)(struct atmel_nand *nand, int csline, + const struct nand_interface_config *conf); }; struct atmel_nand_controller_caps { @@ -1168,7 +1168,7 @@ static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip) } static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, - const struct nand_data_interface *conf, + const struct nand_interface_config *conf, struct atmel_smc_cs_conf *smcconf) { u32 ncycles, totalcycles, timeps, mckperiodps; @@ -1397,9 +1397,9 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, return 0; } -static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, +static int atmel_smc_nand_setup_interface(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { struct atmel_nand_controller *nc; struct atmel_smc_cs_conf smcconf; @@ -1422,9 +1422,9 @@ static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, return 0; } -static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, +static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { struct atmel_hsmc_nand_controller *nc; struct atmel_smc_cs_conf smcconf; @@ -1452,8 +1452,8 @@ static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, return 0; } -static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int atmel_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct atmel_nand *nand = to_atmel_nand(chip); struct atmel_nand_controller *nc; @@ -1464,7 +1464,7 @@ static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline, (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY)) return -EINVAL; - return nc->caps->ops->setup_data_interface(nand, csline, conf); + return nc->caps->ops->setup_interface(nand, csline, conf); } static void atmel_nand_init(struct atmel_nand_controller *nc, @@ -1483,7 +1483,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc, chip->legacy.write_buf = atmel_nand_write_buf; chip->legacy.select_chip = atmel_nand_select_chip; - if (!nc->mck || !nc->caps->ops->setup_data_interface) + if (!nc->mck || !nc->caps->ops->setup_interface) chip->options |= NAND_KEEP_TIMINGS; /* Some NANDs require a longer delay than the default one (20us). 
*/ @@ -1956,7 +1956,7 @@ static int atmel_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops atmel_nand_controller_ops = { .attach_chip = atmel_nand_attach_chip, - .setup_data_interface = atmel_nand_setup_data_interface, + .setup_interface = atmel_nand_setup_interface, }; static int atmel_nand_controller_init(struct atmel_nand_controller *nc, @@ -2318,7 +2318,7 @@ static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = { .remove = atmel_hsmc_nand_controller_remove, .ecc_init = atmel_hsmc_nand_ecc_init, .nand_init = atmel_hsmc_nand_init, - .setup_data_interface = atmel_hsmc_nand_setup_data_interface, + .setup_interface = atmel_hsmc_nand_setup_interface, }; static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = { @@ -2375,10 +2375,10 @@ atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc) /* * The SMC reg layout of at91rm9200 is completely different which prevents us - * from re-using atmel_smc_nand_setup_data_interface() for the - * ->setup_data_interface() hook. + * from re-using atmel_smc_nand_setup_interface() for the + * ->setup_interface() hook. * At this point, there's no support for the at91rm9200 SMC IP, so we leave - * ->setup_data_interface() unassigned. + * ->setup_interface() unassigned. */ static const struct atmel_nand_controller_ops at91rm9200_nc_ops = { .probe = atmel_smc_nand_controller_probe, @@ -2399,7 +2399,7 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = { .remove = atmel_smc_nand_controller_remove, .ecc_init = atmel_nand_ecc_init, .nand_init = atmel_smc_nand_init, - .setup_data_interface = atmel_smc_nand_setup_data_interface, + .setup_interface = atmel_smc_nand_setup_interface, }; static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = { diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index c405722adfe1..574cbd3446e5 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2303,8 +2303,8 @@ static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min, } static int -cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdr; struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); @@ -2690,7 +2690,7 @@ static int cadence_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops cadence_nand_controller_ops = { .attach_chip = cadence_nand_attach_chip, .exec_op = cadence_nand_exec_op, - .setup_data_interface = cadence_nand_setup_data_interface, + .setup_interface = cadence_nand_setup_interface, }; static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 4e6e1578aa2d..9d99dade95ce 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -761,8 +761,8 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf, return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true); } -static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int denali_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { static const unsigned int data_setup_on_host = 10000; struct 
denali_controller *denali = to_denali_controller(chip); @@ -1173,7 +1173,7 @@ static int denali_exec_op(struct nand_chip *chip, static const struct nand_controller_ops denali_controller_ops = { .attach_chip = denali_attach_chip, .exec_op = denali_exec_op, - .setup_data_interface = denali_setup_data_interface, + .setup_interface = denali_setup_interface, }; int denali_chip_init(struct denali_controller *denali, @@ -1230,7 +1230,7 @@ int denali_chip_init(struct denali_controller *denali, chip->buf_align = 16; } - /* clk rate info is needed for setup_data_interface */ + /* clk rate info is needed for setup_interface */ if (!denali->clk_rate || !denali->clk_x_rate) chip->options |= NAND_KEEP_TIMINGS; diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 3909752b14c5..92ddc41d0ff0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -327,8 +327,8 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, return 0; } -static int fsmc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +static int fsmc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct fsmc_nand_data *host = nand_to_fsmc(nand); struct fsmc_nand_timings tims; @@ -951,7 +951,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand) static const struct nand_controller_ops fsmc_nand_controller_ops = { .attach_chip = fsmc_nand_attach_chip, .exec_op = fsmc_exec_op, - .setup_data_interface = fsmc_setup_data_interface, + .setup_interface = fsmc_setup_interface, }; /** diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 061a8ddda275..5d4aee46cc55 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -736,8 +736,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) udelay(dll_wait_time_us); } -static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int gpmi_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct gpmi_nand_data *this = nand_get_controller_data(chip); const struct nand_sdr_timings *sdr; @@ -2400,7 +2400,7 @@ unmap: static const struct nand_controller_ops gpmi_nand_controller_ops = { .attach_chip = gpmi_nand_attach_chip, - .setup_data_interface = gpmi_setup_data_interface, + .setup_interface = gpmi_setup_interface, .exec_op = gpmi_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index f851ed210d70..114c63a6a349 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -84,10 +84,10 @@ int nand_bbm_get_next_page(struct nand_chip *chip, int page); int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs); int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, int allowbbt); -int onfi_fill_data_interface(struct nand_chip *chip, - struct nand_data_interface *iface, - enum nand_data_interface_type type, - unsigned int timing_mode); +int onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode); unsigned int onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings); int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); @@ -133,10 +133,10 @@ static inline int nand_exec_op(struct nand_chip *chip, return 
chip->controller->ops->exec_op(chip, op, false); } -static inline bool nand_controller_can_setup_data_iface(struct nand_chip *chip) +static inline bool nand_controller_can_setup_interface(struct nand_chip *chip) { if (!chip->controller || !chip->controller->ops || - !chip->controller->ops->setup_data_interface) + !chip->controller->ops->setup_interface) return false; if (chip->options & NAND_KEEP_TIMINGS) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index df859889e4eb..8482d3bd8b1f 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2308,9 +2308,8 @@ static struct nand_bbt_descr bbt_mirror_descr = { .pattern = bbt_mirror_pattern }; -static int marvell_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface - *conf) +static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); @@ -2511,7 +2510,7 @@ static int marvell_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops marvell_nand_controller_ops = { .attach_chip = marvell_nand_attach_chip, .exec_op = marvell_nfc_exec_op, - .setup_data_interface = marvell_nfc_setup_data_interface, + .setup_interface = marvell_nfc_setup_interface, }; static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, @@ -2647,7 +2646,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, /* * Save a reference value for timing registers before - * ->setup_data_interface() is called. + * ->setup_interface() is called. */ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0); marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 580b7be0719f..0e5829a2b54f 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1097,8 +1097,8 @@ static int meson_chip_buffer_init(struct nand_chip *nand) } static -int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +int meson_nfc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); const struct nand_sdr_timings *timings; @@ -1222,7 +1222,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand) static const struct nand_controller_ops meson_nand_controller_ops = { .attach_chip = meson_nand_attach_chip, .detach_chip = meson_nand_detach_chip, - .setup_data_interface = meson_nfc_setup_data_interface, + .setup_interface = meson_nfc_setup_interface, .exec_op = meson_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index ca8457626d53..ad1b55dab211 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -531,8 +531,8 @@ static int mtk_nfc_exec_op(struct nand_chip *chip, return ret; } -static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mtk_nfc *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *timings; @@ -1357,7 +1357,7 @@ static int mtk_nfc_attach_chip(struct nand_chip *chip) static const 
struct nand_controller_ops mtk_nfc_controller_ops = { .attach_chip = mtk_nfc_attach_chip, - .setup_data_interface = mtk_nfc_setup_data_interface, + .setup_interface = mtk_nfc_setup_interface, .exec_op = mtk_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 09dacb83cb5a..07c41e8bae2d 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -137,8 +137,8 @@ struct mxc_nand_devtype_data { u32 (*get_ecc_status)(struct mxc_nand_host *); const struct mtd_ooblayout_ops *ooblayout; void (*select_chip)(struct nand_chip *chip, int cs); - int (*setup_data_interface)(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf); + int (*setup_interface)(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf); void (*enable_hwecc)(struct nand_chip *chip, bool enable); /* @@ -1139,8 +1139,8 @@ static void preset_v1(struct mtd_info *mtd) writew(0x4, NFC_V1_V2_WRPROT); } -static int mxc_nand_v2_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mxc_nand_host *host = nand_get_controller_data(chip); int tRC_min_ns, tRC_ps, ret; @@ -1521,7 +1521,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = { .get_ecc_status = get_ecc_status_v2, .ooblayout = &mxc_v2_ooblayout_ops, .select_chip = mxc_nand_select_chip_v2, - .setup_data_interface = mxc_nand_v2_setup_data_interface, + .setup_interface = mxc_nand_v2_setup_interface, .enable_hwecc = mxc_nand_enable_hwecc_v1_v2, .irqpending_quirk = 0, .needs_ip = 0, @@ -1738,17 +1738,17 @@ static int mxcnd_attach_chip(struct nand_chip *chip) return 0; } -static int mxcnd_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct mxc_nand_host *host = nand_get_controller_data(chip); - return host->devtype_data->setup_data_interface(chip, chipnr, conf); + return host->devtype_data->setup_interface(chip, chipnr, conf); } static const struct nand_controller_ops mxcnd_controller_ops = { .attach_chip = mxcnd_attach_chip, - .setup_data_interface = mxcnd_setup_data_interface, + .setup_interface = mxcnd_setup_interface, }; static int mxcnd_probe(struct platform_device *pdev) @@ -1809,7 +1809,7 @@ static int mxcnd_probe(struct platform_device *pdev) if (err < 0) return err; - if (!host->devtype_data->setup_data_interface) + if (!host->devtype_data->setup_interface) this->options |= NAND_KEEP_TIMINGS; if (host->devtype_data->needs_ip) { diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index 57f36721f4c6..d66b5b0971fa 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -451,8 +451,8 @@ static int mxic_nfc_exec_op(struct nand_chip *chip, return ret; } -static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *sdr; @@ -480,7 +480,7 @@ static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, static const struct nand_controller_ops mxic_nand_controller_ops = { 
.exec_op = mxic_nfc_exec_op, - .setup_data_interface = mxic_nfc_setup_data_interface, + .setup_interface = mxic_nfc_setup_interface, }; static int mxic_nfc_probe(struct platform_device *pdev) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 7d393e1d0252..4fa18fb68d62 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -898,7 +898,7 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr) } /** - * nand_reset_data_interface - Reset data interface and timings + * nand_reset_interface - Reset data interface and timings * @chip: The NAND chip * @chipnr: Internal die id * @@ -906,11 +906,12 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr) * * Returns 0 for success or negative error code otherwise. */ -static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) +static int nand_reset_interface(struct nand_chip *chip, int chipnr) { + const struct nand_controller_ops *ops = chip->controller->ops; int ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* @@ -927,9 +928,9 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) * timings to timing mode 0. */ - onfi_fill_data_interface(chip, &chip->data_interface, NAND_SDR_IFACE, 0); - ret = chip->controller->ops->setup_data_interface(chip, chipnr, - &chip->data_interface); + onfi_fill_interface_config(chip, &chip->interface_config, + NAND_SDR_IFACE, 0); + ret = ops->setup_interface(chip, chipnr, &chip->interface_config); if (ret) pr_err("Failed to configure data interface to SDR timing mode 0\n"); @@ -937,7 +938,7 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) } /** - * nand_setup_data_interface - Setup the best data interface and timings + * nand_setup_interface - Setup the best data interface and timings * @chip: The NAND chip * @chipnr: Internal die id * @@ -946,13 +947,13 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) * * Returns 0 for success or negative error code otherwise. */ -static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) +static int nand_setup_interface(struct nand_chip *chip, int chipnr) { - u8 mode = chip->data_interface.timings.mode; + u8 mode = chip->interface_config.timings.mode; u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, }; int ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* Change the mode on the chip side (if supported by the NAND chip) */ @@ -966,8 +967,8 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) } /* Change the mode on the controller side */ - ret = chip->controller->ops->setup_data_interface(chip, chipnr, - &chip->data_interface); + ret = chip->controller->ops->setup_interface(chip, chipnr, + &chip->interface_config); if (ret) return ret; @@ -996,7 +997,7 @@ err_reset_chip: * Fallback to mode 0 if the chip explicitly did not ack the chosen * timing mode. 
*/ - nand_reset_data_interface(chip, chipnr); + nand_reset_interface(chip, chipnr); nand_select_target(chip, chipnr); nand_reset_op(chip); nand_deselect_target(chip); @@ -1005,7 +1006,7 @@ err_reset_chip: } /** - * nand_choose_data_interface - find the best data interface and timings + * nand_choose_interface_config - find the best data interface and timings * @chip: The NAND chip * * Find the best data interface and NAND timings supported by the chip @@ -1013,16 +1014,16 @@ err_reset_chip: * First tries to retrieve supported timing modes from ONFI information, * and if the NAND chip does not support ONFI, relies on the * ->onfi_timing_mode_default specified in the nand_ids table. After this - * function nand_chip->data_interface is initialized with the best timing mode + * function nand_chip->interface_config is initialized with the best timing mode * available. * * Returns 0 for success or negative error code otherwise. */ -static int nand_choose_data_interface(struct nand_chip *chip) +static int nand_choose_interface_config(struct nand_chip *chip) { int modes, mode, ret; - if (!nand_controller_can_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* @@ -1040,8 +1041,8 @@ static int nand_choose_data_interface(struct nand_chip *chip) } for (mode = fls(modes) - 1; mode >= 0; mode--) { - ret = onfi_fill_data_interface(chip, &chip->data_interface, - NAND_SDR_IFACE, mode); + ret = onfi_fill_interface_config(chip, &chip->interface_config, + NAND_SDR_IFACE, mode); if (ret) continue; /* @@ -1049,9 +1050,9 @@ static int nand_choose_data_interface(struct nand_chip *chip) * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the * controller supports the requested timings. */ - ret = chip->controller->ops->setup_data_interface(chip, + ret = chip->controller->ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, - &chip->data_interface); + &chip->interface_config); if (!ret) { chip->onfi_timing_mode_default = mode; break; } @@ -2477,17 +2478,17 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len); * @chipnr: Internal die id * * Save the timings data structure, then apply SDR timings mode 0 (see - * nand_reset_data_interface for details), do the reset operation, and - * apply back the previous timings. + * nand_reset_interface for details), do the reset operation, and apply + * back the previous timings. * * Returns 0 on success, a negative error code otherwise. */ int nand_reset(struct nand_chip *chip, int chipnr) { - struct nand_data_interface saved_data_intf = chip->data_interface; + struct nand_interface_config saved_intf_config = chip->interface_config; int ret; - ret = nand_reset_data_interface(chip, chipnr); + ret = nand_reset_interface(chip, chipnr); if (ret) return ret; @@ -2503,18 +2504,18 @@ int nand_reset(struct nand_chip *chip, int chipnr) return ret; /* - * A nand_reset_data_interface() put both the NAND chip and the NAND + * A nand_reset_interface() put both the NAND chip and the NAND * controller in timings mode 0. If the default mode for this chip is * also 0, no need to proceed to the change again. Plus, at probe time, - * nand_setup_data_interface() uses ->set/get_features() which would + * nand_setup_interface() uses ->set/get_features() which would * fail anyway as the parameter page is not available yet.
*/ - if (!memcmp(&chip->data_interface, &saved_data_intf, - sizeof(saved_data_intf))) + if (!memcmp(&chip->interface_config, &saved_intf_config, + sizeof(saved_intf_config))) return 0; - chip->data_interface = saved_data_intf; - ret = nand_setup_data_interface(chip, chipnr); + chip->interface_config = saved_intf_config; + ret = nand_setup_interface(chip, chipnr); if (ret) return ret; @@ -5183,7 +5184,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, mutex_init(&chip->lock); /* Enforce the right timings for reset/detection */ - onfi_fill_data_interface(chip, &chip->data_interface, NAND_SDR_IFACE, 0); + onfi_fill_interface_config(chip, &chip->interface_config, NAND_SDR_IFACE, 0); ret = nand_dt_init(chip); if (ret) @@ -5971,13 +5972,13 @@ static int nand_scan_tail(struct nand_chip *chip) mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); /* Find the fastest data interface for this chip */ - ret = nand_choose_data_interface(chip); + ret = nand_choose_interface_config(chip); if (ret) goto err_nanddev_cleanup; /* Enter fastest possible mode on all dies. */ for (i = 0; i < nanddev_ntargets(&chip->base); i++) { - ret = nand_setup_data_interface(chip, i); + ret = nand_setup_interface(chip, i); if (ret) goto err_nanddev_cleanup; } diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index fe769762e1d8..2bcc03714432 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -368,7 +368,7 @@ static void nand_ccs_delay(struct nand_chip *chip) * Wait tCCS_min if it is correctly defined, otherwise wait 500ns * (which should be safe for all NANDs). */ - if (nand_controller_can_setup_data_iface(chip)) + if (nand_controller_can_setup_interface(chip)) ndelay(sdr->tCCS_min / 1000); else ndelay(500); diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index efff3583c549..bf05b4bceaa0 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -19,7 +19,7 @@ * * These four values are tweaked to be more accurate in the case of ONFI chips. 
*/ -static const struct nand_data_interface onfi_sdr_timings[] = { +static const struct nand_interface_config onfi_sdr_timings[] = { /* Mode 0 */ { .type = NAND_SDR_IFACE, @@ -340,16 +340,17 @@ onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings) } /** - * onfi_fill_data_interface - Initialize a data interface from a given ONFI mode + * onfi_fill_interface_config - Initialize an interface config from a given + * ONFI mode * @chip: The NAND chip - * @iface: The data interface to fill - * @type: The data interface type + * @iface: The interface configuration to fill + * @type: The interface type * @timing_mode: The ONFI timing mode */ -int onfi_fill_data_interface(struct nand_chip *chip, - struct nand_data_interface *iface, - enum nand_data_interface_type type, - unsigned int timing_mode) +int onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode) { struct onfi_params *onfi = chip->parameters.onfi; diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index f86dff311464..f121a3ae294c 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -808,8 +808,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, return -ENODEV; } -static int s3c2410_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mtd_info *mtd = nand_to_mtd(chip); struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); @@ -999,7 +999,7 @@ static int s3c2410_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops s3c24xx_nand_controller_ops = { .attach_chip = s3c2410_nand_attach_chip, - .setup_data_interface = s3c2410_nand_setup_data_interface, + .setup_interface = s3c2410_nand_setup_interface, }; static const struct of_device_id s3c24xx_nand_dt_ids[] = { diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 7320c0fc19ec..a4140af43ed4 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1546,7 +1546,7 @@ static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip, } static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdrt; @@ -1764,7 +1764,7 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = { .attach_chip = stm32_fmc2_nfc_attach_chip, .exec_op = stm32_fmc2_nfc_exec_op, - .setup_data_interface = stm32_fmc2_nfc_setup_interface, + .setup_interface = stm32_fmc2_nfc_setup_interface, }; static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index ffbc1651fadc..9c50c2b965e1 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1376,8 +1376,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration, #define sunxi_nand_lookup_timing(l, p, c) \ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c) -static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +static int sunxi_nfc_setup_interface(struct nand_chip 
*nand, int csline, + const struct nand_interface_config *conf) { struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); @@ -1920,7 +1920,7 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand, static const struct nand_controller_ops sunxi_nand_controller_ops = { .attach_chip = sunxi_nand_attach_chip, - .setup_data_interface = sunxi_nfc_setup_data_interface, + .setup_interface = sunxi_nfc_setup_interface, .exec_op = sunxi_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index 648dc7e77f6a..bdb965ae7a4a 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -515,7 +515,7 @@ static u32 to_ticks(int kHz, int ps) } static int tango_set_timings(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf); struct tango_nfc *nfc = to_tango_nfc(chip->controller); @@ -565,7 +565,7 @@ static int tango_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops tango_controller_ops = { .attach_chip = tango_attach_chip, - .setup_data_interface = tango_set_timings, + .setup_interface = tango_set_timings, .exec_op = tango_exec_op, }; diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index f9d046b2cd3b..6b6212ffa01c 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -813,8 +813,8 @@ static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl, writel_relaxed(reg, ctrl->regs + TIMING_2); } -static int tegra_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int tegra_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller); const struct nand_sdr_timings *timings; @@ -1053,7 +1053,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops tegra_nand_controller_ops = { .attach_chip = &tegra_nand_attach_chip, .exec_op = tegra_nand_exec_op, - .setup_data_interface = tegra_nand_setup_data_interface, + .setup_interface = tegra_nand_setup_interface, }; static int tegra_nand_chips_init(struct device *dev, diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 0852df941130..2ca56eef0f07 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -492,22 +492,22 @@ struct nand_sdr_timings { }; /** - * enum nand_data_interface_type - NAND interface timing type + * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface */ -enum nand_data_interface_type { +enum nand_interface_type { NAND_SDR_IFACE, }; /** - * struct nand_data_interface - NAND interface timing + * struct nand_interface_config - NAND interface timing * @type: type of the timing * @timings: The timing information * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. 
*/ -struct nand_data_interface { - enum nand_data_interface_type type; +struct nand_interface_config { + enum nand_interface_type type; struct nand_timings { unsigned int mode; union { @@ -521,7 +521,7 @@ struct nand_data_interface { * @conf: The data interface */ static inline const struct nand_sdr_timings * -nand_get_sdr_timings(const struct nand_data_interface *conf) +nand_get_sdr_timings(const struct nand_interface_config *conf) { if (conf->type != NAND_SDR_IFACE) return ERR_PTR(-EINVAL); @@ -944,11 +944,10 @@ static inline void nand_op_trace(const char *prefix, * This method replaces chip->legacy.cmdfunc(), * chip->legacy.{read,write}_{buf,byte,word}(), * chip->legacy.dev_ready() and chip->legacy.waifunc(). - * @setup_data_interface: setup the data interface and timing. If - * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this - * means the configuration should not be applied but - * only checked. - * This hook is optional. + * @setup_interface: setup the data interface and timing. If chipnr is set to + * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration + * should not be applied but only checked. + * This hook is optional. */ struct nand_controller_ops { int (*attach_chip)(struct nand_chip *chip); @@ -956,8 +955,8 @@ struct nand_controller_ops { int (*exec_op)(struct nand_chip *chip, const struct nand_operation *op, bool check_only); - int (*setup_data_interface)(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf); + int (*setup_interface)(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf); }; /** @@ -1070,7 +1069,7 @@ struct nand_manufacturer { * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the * actually used ONFI mode if the chip is ONFI * compliant or deduced from the datasheet otherwise - * @data_interface: NAND interface timing information + * @interface_config: NAND interface timing information * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must * come from bbm.h. By default, these options will be copied to @@ -1118,7 +1117,7 @@ struct nand_chip { /* Data interface */ int onfi_timing_mode_default; - struct nand_data_interface data_interface; + struct nand_interface_config interface_config; /* Bad block information */ unsigned int bbt_erase_shift; @@ -1208,10 +1207,10 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) * of a NAND chip * @chip: The NAND chip */ -static inline const struct nand_data_interface * +static inline const struct nand_interface_config * nand_get_interface_config(struct nand_chip *chip) { - return &chip->data_interface; + return &chip->interface_config; } /* -- cgit v1.2.3 From 26d014f0400e5ff54cc80c8329e3adbd74db1e04 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:16 +0200 Subject: mtd: rawnand: Add the ->choose_interface_config() hook This hook can be overloaded by NAND manufacturer drivers to propose alternative timings when not following the main standards. In this case, the manufacturer driver is responsible for choosing the best interface configuration that fits both the controller and chip capabilities.
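As a sketch of the intended use ("foo" names and the empty timings are made up; the hook signature and nand_choose_best_sdr_timings() are the real interfaces used here), a manufacturer driver could provide:

    /* Hypothetical datasheet-derived timings for a non-ONFI chip */
    static struct nand_sdr_timings foo_sdr_timings = {
        /* tCCS_min, tWB_max, ... filled from the datasheet */
    };

    static int foo_nand_choose_interface_config(struct nand_chip *chip,
                                                struct nand_interface_config *iface)
    {
        return nand_choose_best_sdr_timings(chip, iface, &foo_sdr_timings);
    }

and install it from its manufacturer ->init() routine with:

    chip->ops.choose_interface_config = foo_nand_choose_interface_config;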
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-23-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 17 +++++++++++------ include/linux/mtd/rawnand.h | 3 +++ 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 742d099df5c6..2f4eba1a1082 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1066,18 +1066,23 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, * @chip: The NAND chip * * Find the best data interface and NAND timings supported by the chip - * and the driver. - * First tries to retrieve supported timing modes from ONFI information, - * and if the NAND chip does not support ONFI, relies on the - * ->onfi_timing_mode_default specified in the nand_ids table. After this - * function nand_chip->interface_config is initialized with the best timing mode - * available. + * and the driver. Eventually let the NAND manufacturer driver propose his own + * set of timings. + * + * After this function nand_chip->interface_config is initialized with the best + * timing mode available. + * + * Returns 0 for success or negative error code otherwise. */ static int nand_choose_interface_config(struct nand_chip *chip) { if (!nand_controller_can_setup_interface(chip)) return 0; + if (chip->ops.choose_interface_config) + return chip->ops.choose_interface_config(chip, + &chip->interface_config); + return nand_choose_best_sdr_timings(chip, &chip->interface_config, NULL); } diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 2ca56eef0f07..316a02189da1 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1033,6 +1033,7 @@ struct nand_legacy { * @lock_area: Lock operation * @unlock_area: Unlock operation * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs) + * @choose_interface_config: Choose the best interface configuration */ struct nand_chip_ops { int (*suspend)(struct nand_chip *chip); @@ -1040,6 +1041,8 @@ struct nand_chip_ops { int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); + int (*choose_interface_config)(struct nand_chip *chip, + struct nand_interface_config *iface); }; /** -- cgit v1.2.3 From a69ad11168dca68b3f0adc6882422f4a2e2cb050 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:21 +0200 Subject: mtd: rawnand: Get rid of the default ONFI timing mode The ->choose_interface_config() hook is here for manufacturer drivers to provide a better timing interface than the default one, so the onfi_timing_mode_default field is not needed anymore.
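To make the remaining logic concrete: async_timing_mode is a bitmask with bit n set when ONFI timing mode n is supported, so a chip advertising modes 0-4 reports 0x1f, and fls(0x1f) - 1 = 4; nand_choose_best_sdr_timings() then starts from mode 4 and falls back to slower modes until the controller accepts one.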
Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-28-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/nand_base.c | 19 ++++--------------- include/linux/mtd/rawnand.h | 9 --------- 2 files changed, 4 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 2f4eba1a1082..753328f106c1 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1012,10 +1012,8 @@ err_reset_chip: * @iface: the interface configuration (can eventually be updated) * @spec_timings: specific timings, when not fitting the ONFI specification * - * If specific timings are provided, use them. Otherwise, try to retrieve - * supported timing modes from ONFI information. Finally, if the NAND chip does - * not follow the ONFI specification, rely on the ->default_timing_mode - * specified in the nand_ids table. + * If specific timings are provided, use them. Otherwise, retrieve supported + * timing modes from ONFI information. */ int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_interface_config *iface, @@ -1038,15 +1036,8 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, /* Fallback to slower modes */ best_mode = iface->timings.mode; - } else { - if (chip->parameters.onfi) { - unsigned int onfi_modes; - - onfi_modes = chip->parameters.onfi->async_timing_mode; - best_mode = fls(onfi_modes) - 1; - } else { - best_mode = chip->onfi_timing_mode_default; - } + } else if (chip->parameters.onfi) { + best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1; } for (mode = best_mode; mode >= 0; mode--) { @@ -4767,8 +4758,6 @@ static bool find_full_id_nand(struct nand_chip *chip, chip->options |= type->options; chip->base.eccreq.strength = NAND_ECC_STRENGTH(type); chip->base.eccreq.step_size = NAND_ECC_STEP(type); - chip->onfi_timing_mode_default = - type->onfi_timing_mode_default; chip->parameters.model = kstrdup(type->name, GFP_KERNEL); if (!chip->parameters.model) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 316a02189da1..a2427c67d38b 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1069,9 +1069,6 @@ struct nand_manufacturer { * @options: Various chip options. They can partly be set to inform nand_scan * about special functionality. See the defines for further * explanation. - * @onfi_timing_mode_default: Default ONFI timing mode. This field is set to the - * actually used ONFI mode if the chip is ONFI - * compliant or deduced from the datasheet otherwise * @interface_config: NAND interface timing information * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must @@ -1119,7 +1116,6 @@ struct nand_chip { unsigned int options; /* Data interface */ - int onfi_timing_mode_default; struct nand_interface_config interface_config; /* Bad block information */ @@ -1268,10 +1264,6 @@ nand_get_interface_config(struct nand_chip *chip) * @ecc_step_ds in nand_chip{}, also from the datasheet. * For example, the "4bit ECC for each 512Byte" can be set with * NAND_ECC_INFO(4, 512). - * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND - * reset. Should be deduced from timings described - * in the datasheet. 
- * */ struct nand_flash_dev { char *name; @@ -1292,7 +1284,6 @@ struct nand_flash_dev { uint16_t strength_ds; uint16_t step_ds; } ecc; - int onfi_timing_mode_default; }; int nand_create_bbt(struct nand_chip *chip); -- cgit v1.2.3 From 35b6bcc970f759d4a86d6221d09ca28ea20467c8 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 29 May 2020 13:13:22 +0200 Subject: mtd: rawnand: Allocate the interface configurations dynamically Instead of manipulating the statically allocated structure and copying timings around, allocate one at identification time and save it in the nand_chip structure once it has been initialized. Since all NAND chips use the same interface configuration during reset and startup, we define a helper to retrieve a single reset interface configuration object, shared across all NAND chips. We use a second pointer to always keep a reference to the currently applied interface configuration, which may either point to the "best interface configuration" or to the "default reset interface configuration". Signed-off-by: Miquel Raynal Reviewed-by: Boris Brezillon Link: https://lore.kernel.org/linux-mtd/20200529111322.7184-29-miquel.raynal@bootlin.com --- drivers/mtd/nand/raw/internals.h | 1 + drivers/mtd/nand/raw/nand_base.c | 84 +++++++++++++++++++++++-------------- drivers/mtd/nand/raw/nand_timings.c | 6 +++ include/linux/mtd/rawnand.h | 11 +++-- 4 files changed, 67 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 5ebfbb89e572..012876e14317 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -93,6 +93,7 @@ onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings); int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_interface_config *iface, struct nand_sdr_timings *spec_timings); +const struct nand_interface_config *nand_get_reset_interface_config(void); int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf, diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 753328f106c1..0c768cb88f96 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -928,9 +928,9 @@ static int nand_reset_interface(struct nand_chip *chip, int chipnr) * timings to timing mode 0. */ - onfi_fill_interface_config(chip, &chip->interface_config, - NAND_SDR_IFACE, 0); - ret = ops->setup_interface(chip, chipnr, &chip->interface_config); + chip->current_interface_config = nand_get_reset_interface_config(); + ret = ops->setup_interface(chip, chipnr, + chip->current_interface_config); if (ret) pr_err("Failed to configure data interface to SDR timing mode 0\n"); @@ -949,13 +949,25 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) */ static int nand_setup_interface(struct nand_chip *chip, int chipnr) { - u8 mode = chip->interface_config.timings.mode; - u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, }; + const struct nand_controller_ops *ops = chip->controller->ops; + u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }; int ret; if (!nand_controller_can_setup_interface(chip)) return 0; + /* + * A nand_reset_interface() put both the NAND chip and the NAND + * controller in timings mode 0. If the default mode for this chip is + * also 0, no need to proceed to the change again.
Plus, at probe time, + * nand_setup_interface() uses ->set/get_features() which would + * fail anyway as the parameter page is not available yet. + */ + if (!chip->best_interface_config) + return 0; + + tmode_param[0] = chip->best_interface_config->timings.mode; + /* Change the mode on the chip side (if supported by the NAND chip) */ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) { nand_select_target(chip, chipnr); @@ -967,14 +979,13 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) } /* Change the mode on the controller side */ - ret = chip->controller->ops->setup_interface(chip, chipnr, - &chip->interface_config); + ret = ops->setup_interface(chip, chipnr, chip->best_interface_config); if (ret) return ret; /* Check the mode has been accepted by the chip, if supported */ if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) - return 0; + goto update_interface_config; memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN); nand_select_target(chip, chipnr); @@ -984,12 +995,15 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) if (ret) goto err_reset_chip; - if (tmode_param[0] != mode) { + if (tmode_param[0] != chip->best_interface_config->timings.mode) { pr_warn("timing mode %d not acknowledged by the NAND chip\n", - mode); + chip->best_interface_config->timings.mode); goto err_reset_chip; } +update_interface_config: + chip->current_interface_config = chip->best_interface_config; + return 0; err_reset_chip: @@ -1031,8 +1045,10 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, /* Verify the controller supports the requested interface */ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, iface); - if (!ret) + if (!ret) { + chip->best_interface_config = iface; return ret; + } /* Fallback to slower modes */ best_mode = iface->timings.mode; @@ -1046,9 +1062,11 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, iface); if (!ret) - return 0; + break; } + chip->best_interface_config = iface; + return 0; } @@ -1067,15 +1085,25 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, */ static int nand_choose_interface_config(struct nand_chip *chip) { + struct nand_interface_config *iface; + int ret; + if (!nand_controller_can_setup_interface(chip)) return 0; + iface = kzalloc(sizeof(*iface), GFP_KERNEL); + if (!iface) + return -ENOMEM; + if (chip->ops.choose_interface_config) - return chip->ops.choose_interface_config(chip, - &chip->interface_config); + ret = chip->ops.choose_interface_config(chip, iface); + else + ret = nand_choose_best_sdr_timings(chip, iface, NULL); - return nand_choose_best_sdr_timings(chip, &chip->interface_config, - NULL); + if (ret) + kfree(iface); + + return ret; } /** @@ -2501,7 +2529,6 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len); */ int nand_reset(struct nand_chip *chip, int chipnr) { - struct nand_interface_config saved_intf_config = chip->interface_config; int ret; ret = nand_reset_interface(chip, chipnr); @@ -2519,18 +2546,6 @@ int nand_reset(struct nand_chip *chip, int chipnr) if (ret) return ret; - /* - * A nand_reset_interface() put both the NAND chip and the NAND - * controller in timings mode 0. If the default mode for this chip is - * also 0, no need to proceed to the change again. Plus, at probe time, - * nand_setup_interface() uses ->set/get_features() which would - * fail anyway as the parameter page is not available yet. 
- */ - if (!memcmp(&chip->interface_config, &saved_intf_config, - sizeof(saved_intf_config))) - return 0; - - chip->interface_config = saved_intf_config; ret = nand_setup_interface(chip, chipnr); if (ret) return ret; @@ -5198,7 +5213,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, mutex_init(&chip->lock); /* Enforce the right timings for reset/detection */ - onfi_fill_interface_config(chip, &chip->interface_config, NAND_SDR_IFACE, 0); + chip->current_interface_config = nand_get_reset_interface_config(); ret = nand_dt_init(chip); if (ret) @@ -5994,7 +6009,7 @@ static int nand_scan_tail(struct nand_chip *chip) for (i = 0; i < nanddev_ntargets(&chip->base); i++) { ret = nand_setup_interface(chip, i); if (ret) - goto err_nanddev_cleanup; + goto err_free_interface_config; } /* Check, if we should skip the bad block table scan */ @@ -6004,10 +6019,12 @@ static int nand_scan_tail(struct nand_chip *chip) /* Build bad block table */ ret = nand_create_bbt(chip); if (ret) - goto err_nanddev_cleanup; + goto err_free_interface_config; return 0; +err_free_interface_config: + kfree(chip->best_interface_config); err_nanddev_cleanup: nanddev_cleanup(&chip->base); @@ -6101,6 +6118,9 @@ void nand_cleanup(struct nand_chip *chip) & NAND_BBT_DYNAMICSTRUCT) kfree(chip->badblock_pattern); + /* Free the data interface */ + kfree(chip->best_interface_config); + /* Free manufacturer priv data. */ nand_manufacturer_cleanup(chip); diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index 1e22006c79ba..94d832646487 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -292,6 +292,12 @@ static const struct nand_interface_config onfi_sdr_timings[] = { }, }; +/* All NAND chips share the same reset data interface: SDR mode 0 */ +const struct nand_interface_config *nand_get_reset_interface_config(void) +{ + return &onfi_sdr_timings[0]; +} + /** * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a * set of timings diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index a2427c67d38b..a725b620aca2 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1069,7 +1069,11 @@ struct nand_manufacturer { * @options: Various chip options. They can partly be set to inform nand_scan * about special functionality. See the defines for further * explanation. - * @interface_config: NAND interface timing information + * @current_interface_config: The currently used NAND interface configuration + * @best_interface_config: The best NAND interface configuration which fits both + * the NAND chip and NAND controller constraints. If + * unset, the default reset interface configuration must + * be used. * @bbt_erase_shift: Number of address bits in a bbt entry * @bbt_options: Bad block table specific options. All options used here must * come from bbm.h. 
By default, these options will be copied to
@@ -1116,7 +1120,8 @@ struct nand_chip {
 	unsigned int options;
 
 	/* Data interface */
-	struct nand_interface_config interface_config;
+	const struct nand_interface_config *current_interface_config;
+	struct nand_interface_config *best_interface_config;
 
 	/* Bad block information */
 	unsigned int bbt_erase_shift;
@@ -1209,7 +1214,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
 static inline const struct nand_interface_config *
 nand_get_interface_config(struct nand_chip *chip)
 {
-	return &chip->interface_config;
+	return chip->current_interface_config;
 }
 
 /*
-- cgit v1.2.3


From ca37faf3d7005b5588f045edfac1d82799c408a7 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski 
Date: Tue, 30 Jun 2020 10:17:56 +0200
Subject: iommu: Move sg_table wrapper out of CONFIG_IOMMU_SUPPORT

Move the recently added sg_table wrapper out of CONFIG_IOMMU_SUPPORT to
let the client code compile even when IOMMU support is disabled.

Fixes: 48530d9fab0d ("iommu: add generic helper for mapping sgtable objects")
Signed-off-by: Marek Szyprowski 
Link: https://lore.kernel.org/r/20200630081756.18526-1-m.szyprowski@samsung.com
Signed-off-by: Joerg Roedel 
---
 include/linux/iommu.h | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 5f0b7859d2eb..5657d4fef9f2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -457,22 +457,6 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
 
-/**
- * iommu_map_sgtable - Map the given buffer to the IOMMU domain
- * @domain:	The IOMMU domain to perform the mapping
- * @iova:	The start address to map the buffer
- * @sgt:	The sg_table object describing the buffer
- * @prot:	IOMMU protection bits
- *
- * Creates a mapping at @iova for the buffer described by a scatterlist
- * stored in the given sg_table object in the provided IOMMU domain.
- */
-static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
-			unsigned long iova, struct sg_table *sgt, int prot)
-{
-	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
-}
-
 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern void generic_iommu_put_resv_regions(struct device *dev,
@@ -1079,6 +1063,22 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
 }
 #endif /* CONFIG_IOMMU_API */
 
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain:	The IOMMU domain to perform the mapping
+ * @iova:	The start address to map the buffer
+ * @sgt:	The sg_table object describing the buffer
+ * @prot:	IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */ +static inline size_t iommu_map_sgtable(struct iommu_domain *domain, + unsigned long iova, struct sg_table *sgt, int prot) +{ + return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); +} + #ifdef CONFIG_IOMMU_DEBUGFS extern struct dentry *iommu_debugfs_dir; void iommu_debugfs_setup(void); -- cgit v1.2.3 From e1915eec54a6b4902664eaf6fb7a20de5624c4dd Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 26 Jun 2020 22:37:41 +0200 Subject: backlight: sky81452: Convert to GPIO descriptors The SKY81452 backlight driver just obtains a GPIO (named "gpios" in the device tree) drives it high and leaves it high until the driver is removed. Switch to use GPIO descriptors for this, simple and straight-forward. Cc: Gyungoh Yoo Signed-off-by: Linus Walleij Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- drivers/video/backlight/sky81452-backlight.c | 18 ++++-------------- include/linux/platform_data/sky81452-backlight.h | 6 ++++-- 2 files changed, 8 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c index 2355f00f5773..81d2c8f3ca50 100644 --- a/drivers/video/backlight/sky81452-backlight.c +++ b/drivers/video/backlight/sky81452-backlight.c @@ -8,12 +8,11 @@ #include #include -#include +#include #include #include #include #include -#include #include #include #include @@ -182,7 +181,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt( pdata->ignore_pwm = of_property_read_bool(np, "skyworks,ignore-pwm"); pdata->dpwm_mode = of_property_read_bool(np, "skyworks,dpwm-mode"); pdata->phase_shift = of_property_read_bool(np, "skyworks,phase-shift"); - pdata->gpio_enable = of_get_gpio(np, 0); + pdata->gpiod_enable = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); ret = of_property_count_u32_elems(np, "led-sources"); if (ret < 0) { @@ -264,15 +263,6 @@ static int sky81452_bl_probe(struct platform_device *pdev) return PTR_ERR(pdata); } - if (gpio_is_valid(pdata->gpio_enable)) { - ret = devm_gpio_request_one(dev, pdata->gpio_enable, - GPIOF_OUT_INIT_HIGH, "sky81452-en"); - if (ret < 0) { - dev_err(dev, "failed to request GPIO. err=%d\n", ret); - return ret; - } - } - ret = sky81452_bl_init_device(regmap, pdata); if (ret < 0) { dev_err(dev, "failed to initialize. err=%d\n", ret); @@ -312,8 +302,8 @@ static int sky81452_bl_remove(struct platform_device *pdev) bd->props.brightness = 0; backlight_update_status(bd); - if (gpio_is_valid(pdata->gpio_enable)) - gpio_set_value_cansleep(pdata->gpio_enable, 0); + if (pdata->gpiod_enable) + gpiod_set_value_cansleep(pdata->gpiod_enable, 0); return 0; } diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h index 02653d92d84f..d6f46670d923 100644 --- a/include/linux/platform_data/sky81452-backlight.h +++ b/include/linux/platform_data/sky81452-backlight.h @@ -9,11 +9,13 @@ #ifndef _SKY81452_BACKLIGHT_H #define _SKY81452_BACKLIGHT_H +#include + /** * struct sky81452_platform_data * @name: backlight driver name. If it is not defined, default name is lcd-backlight. - * @gpio_enable:GPIO number which control EN pin + * @gpios_enable:GPIO descriptor which control EN pin * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. * @ignore_pwm: true if DPWMI should be ignored. * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. 
@@ -23,7 +25,7 @@ */ struct sky81452_bl_platform_data { const char *name; - int gpio_enable; + struct gpio_desc *gpiod_enable; unsigned int enable; bool ignore_pwm; bool dpwm_mode; -- cgit v1.2.3 From 08bf73a6f056d84fd52a58c5d165523dd84be535 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 26 Jun 2020 22:37:42 +0200 Subject: backlight: sky81452: Privatize platform data The only way the platform data for the SKY81452 ever gets populated is through the device tree. The MFD device is bothered with this for no reason at all. Just allocate the platform data in the driver and be happy. Cc: Gyungoh Yoo Signed-off-by: Linus Walleij Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- drivers/mfd/sky81452.c | 2 -- drivers/video/backlight/sky81452-backlight.c | 34 +++++++++++++++++----- include/linux/mfd/sky81452.h | 2 -- include/linux/platform_data/sky81452-backlight.h | 37 ------------------------ 4 files changed, 27 insertions(+), 48 deletions(-) delete mode 100644 include/linux/platform_data/sky81452-backlight.h (limited to 'include/linux') diff --git a/drivers/mfd/sky81452.c b/drivers/mfd/sky81452.c index 76eedfae8553..3ad35bf0c015 100644 --- a/drivers/mfd/sky81452.c +++ b/drivers/mfd/sky81452.c @@ -47,8 +47,6 @@ static int sky81452_probe(struct i2c_client *client, memset(cells, 0, sizeof(cells)); cells[0].name = "sky81452-backlight"; cells[0].of_compatible = "skyworks,sky81452-backlight"; - cells[0].platform_data = pdata->bl_pdata; - cells[0].pdata_size = sizeof(*pdata->bl_pdata); cells[1].name = "sky81452-regulator"; cells[1].platform_data = pdata->regulator_init_data; cells[1].pdata_size = sizeof(*pdata->regulator_init_data); diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c index 81d2c8f3ca50..83ccb3d940fa 100644 --- a/drivers/video/backlight/sky81452-backlight.c +++ b/drivers/video/backlight/sky81452-backlight.c @@ -15,7 +15,6 @@ #include #include #include -#include #include /* registers */ @@ -41,6 +40,29 @@ #define SKY81452_DEFAULT_NAME "lcd-backlight" #define SKY81452_MAX_BRIGHTNESS (SKY81452_CS + 1) +/** + * struct sky81452_platform_data + * @name: backlight driver name. + If it is not defined, default name is lcd-backlight. + * @gpios_enable:GPIO descriptor which control EN pin + * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. + * @ignore_pwm: true if DPWMI should be ignored. + * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. + * @phase_shift:true is phase shift mode. + * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V. + * @boost_current_limit: It should be one of 2300, 2750mA. 
+ */ +struct sky81452_bl_platform_data { + const char *name; + struct gpio_desc *gpiod_enable; + unsigned int enable; + bool ignore_pwm; + bool dpwm_mode; + bool phase_shift; + unsigned int short_detection_threshold; + unsigned int boost_current_limit; +}; + #define CTZ(b) __builtin_ctz(b) static int sky81452_bl_update_status(struct backlight_device *bd) @@ -251,17 +273,15 @@ static int sky81452_bl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct regmap *regmap = dev_get_drvdata(dev->parent); - struct sky81452_bl_platform_data *pdata = dev_get_platdata(dev); + struct sky81452_bl_platform_data *pdata; struct backlight_device *bd; struct backlight_properties props; const char *name; int ret; - if (!pdata) { - pdata = sky81452_bl_parse_dt(dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); - } + pdata = sky81452_bl_parse_dt(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); ret = sky81452_bl_init_device(regmap, pdata); if (ret < 0) { diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h index d469aa481243..b08570ff34df 100644 --- a/include/linux/mfd/sky81452.h +++ b/include/linux/mfd/sky81452.h @@ -9,11 +9,9 @@ #ifndef _SKY81452_H #define _SKY81452_H -#include #include struct sky81452_platform_data { - struct sky81452_bl_platform_data *bl_pdata; struct regulator_init_data *regulator_init_data; }; diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h deleted file mode 100644 index d6f46670d923..000000000000 --- a/include/linux/platform_data/sky81452-backlight.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * sky81452.h SKY81452 backlight driver - * - * Copyright 2014 Skyworks Solutions Inc. - * Author : Gyungoh Yoo - */ - -#ifndef _SKY81452_BACKLIGHT_H -#define _SKY81452_BACKLIGHT_H - -#include - -/** - * struct sky81452_platform_data - * @name: backlight driver name. - If it is not defined, default name is lcd-backlight. - * @gpios_enable:GPIO descriptor which control EN pin - * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. - * @ignore_pwm: true if DPWMI should be ignored. - * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. - * @phase_shift:true is phase shift mode. - * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V. - * @boost_current_limit: It should be one of 2300, 2750mA. - */ -struct sky81452_bl_platform_data { - const char *name; - struct gpio_desc *gpiod_enable; - unsigned int enable; - bool ignore_pwm; - bool dpwm_mode; - bool phase_shift; - unsigned int short_detection_threshold; - unsigned int boost_current_limit; -}; - -#endif -- cgit v1.2.3 From ecd7274fb4cd2d6c035a52d59ca3d6ec936a07be Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 4 Jun 2020 18:11:15 +0100 Subject: iommu: Remove unused IOMMU_SYS_CACHE_ONLY flag The IOMMU_SYS_CACHE_ONLY flag was never exposed via the DMA API and has no in-tree users. Remove it. Cc: Robin Murphy Cc: "Isaac J. 
Manjarres" 
Cc: Joerg Roedel 
Cc: Rob Clark 
Reviewed-by: Christoph Hellwig 
Reviewed-by: Sai Prakash Ranjan 
Signed-off-by: Will Deacon 
---
 drivers/iommu/io-pgtable-arm.c | 3 ---
 include/linux/iommu.h          | 6 ------
 2 files changed, 9 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 04fbd4bf0ff9..8f175c02f8e3 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -438,9 +438,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 		else if (prot & IOMMU_CACHE)
 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
-		else if (prot & IOMMU_SYS_CACHE_ONLY)
-			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
-				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 	}
 
 	if (prot & IOMMU_CACHE)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 5f0b7859d2eb..bee1a8fa1fb1 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,12 +31,6 @@
  * if the IOMMU page table format is equivalent.
 */
 #define IOMMU_PRIV	(1 << 5)
-/*
- * Non-coherent masters can use this page protection flag to set cacheable
- * memory attributes for only a transparent outer level of cache, also known as
- * the last-level or system cache.
- */
-#define IOMMU_SYS_CACHE_ONLY	(1 << 6)
 
 struct iommu_ops;
 struct iommu_group;
-- cgit v1.2.3


From 5abfe5cf0b8358b8ad0da99e4188c2519839d67c Mon Sep 17 00:00:00 2001
From: Rishabh Bhatnagar 
Date: Tue, 23 Jun 2020 19:23:27 -0700
Subject: remoteproc: qcom: Add per subsystem SSR notification

Currently there is a single notification chain which is called whenever
any remoteproc shuts down. This leads to all the listeners being
notified, and is not an optimal design as kernel drivers might only be
interested in listening to notifications from a particular remoteproc.
Create a global list of remoteproc notification info data structures.
This will hold the name and notifier_list information for a particular
remoteproc. The API to register for notifications will use the name
argument to retrieve the notification info data structure, and the
notifier block will be added to that data structure's notification
chain. Also move from a blocking notifier to an srcu notifier based
implementation to support dynamic notifier head creation.
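
As an illustration of the new interface (a sketch only: the "mpss"
subsystem name and the example_*()/ssr_event_cb() helpers below are
hypothetical, while the register/unregister calls are the ones this
patch introduces), a client driver registers against one remoteproc
and keeps the returned cookie for unregistration:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/remoteproc/qcom_rproc.h>

static int ssr_event_cb(struct notifier_block *nb, unsigned long action,
			void *data)
{
	struct qcom_ssr_notify_data *notify_data = data;

	pr_info("remoteproc %s stopped (crashed: %d)\n",
		notify_data->name, notify_data->crashed);
	return NOTIFY_OK;
}

static struct notifier_block ssr_nb = {
	.notifier_call = ssr_event_cb,
};

static void *ssr_cookie;

static int example_register(void)
{
	/* Only events from the "mpss" remoteproc reach ssr_event_cb(). */
	ssr_cookie = qcom_register_ssr_notifier("mpss", &ssr_nb);
	if (IS_ERR(ssr_cookie))
		return PTR_ERR(ssr_cookie);

	return 0;
}

static void example_unregister(void)
{
	/* The cookie selects the per-subsystem chain to unregister from. */
	qcom_unregister_ssr_notifier(ssr_cookie, &ssr_nb);
}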
Reviewed-by: Alex Elder Co-developed-by: Siddharth Gupta Signed-off-by: Siddharth Gupta Signed-off-by: Rishabh Bhatnagar Link: https://lore.kernel.org/r/1592965408-16908-2-git-send-email-rishabhb@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/qcom_common.c | 90 ++++++++++++++++++++++++++++++----- drivers/remoteproc/qcom_common.h | 5 +- include/linux/remoteproc/qcom_rproc.h | 20 ++++++-- 3 files changed, 95 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c index 9028cea2d81e..48b399af9c83 100644 --- a/drivers/remoteproc/qcom_common.c +++ b/drivers/remoteproc/qcom_common.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -23,7 +24,14 @@ #define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev) #define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev) -static BLOCKING_NOTIFIER_HEAD(ssr_notifiers); +struct qcom_ssr_subsystem { + const char *name; + struct srcu_notifier_head notifier_list; + struct list_head list; +}; + +static LIST_HEAD(qcom_ssr_subsystem_list); +static DEFINE_MUTEX(qcom_ssr_subsys_lock); static int glink_subdev_start(struct rproc_subdev *subdev) { @@ -189,37 +197,83 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd) } EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev); +static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name) +{ + struct qcom_ssr_subsystem *info; + + mutex_lock(&qcom_ssr_subsys_lock); + /* Match in the global qcom_ssr_subsystem_list with name */ + list_for_each_entry(info, &qcom_ssr_subsystem_list, list) + if (!strcmp(info->name, name)) + goto out; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + info = ERR_PTR(-ENOMEM); + goto out; + } + info->name = kstrdup_const(name, GFP_KERNEL); + srcu_init_notifier_head(&info->notifier_list); + + /* Add to global notification list */ + list_add_tail(&info->list, &qcom_ssr_subsystem_list); + +out: + mutex_unlock(&qcom_ssr_subsys_lock); + return info; +} + /** * qcom_register_ssr_notifier() - register SSR notification handler - * @nb: notifier_block to notify for restart notifications + * @name: Subsystem's SSR name + * @nb: notifier_block to be invoked upon subsystem's state change * - * Returns 0 on success, negative errno on failure. + * This registers the @nb notifier block as part the notifier chain for a + * remoteproc associated with @name. The notifier block's callback + * will be invoked when the remote processor's SSR events occur + * (pre/post startup and pre/post shutdown). * - * This register the @notify function as handler for restart notifications. As - * remote processors are stopped this function will be called, with the SSR - * name passed as a parameter. + * Return: a subsystem cookie on success, ERR_PTR on failure. 
*/ -int qcom_register_ssr_notifier(struct notifier_block *nb) +void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb) { - return blocking_notifier_chain_register(&ssr_notifiers, nb); + struct qcom_ssr_subsystem *info; + + info = qcom_ssr_get_subsys(name); + if (IS_ERR(info)) + return info; + + srcu_notifier_chain_register(&info->notifier_list, nb); + + return &info->notifier_list; } EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier); /** * qcom_unregister_ssr_notifier() - unregister SSR notification handler + * @notify: subsystem cookie returned from qcom_register_ssr_notifier * @nb: notifier_block to unregister + * + * This function will unregister the notifier from the particular notifier + * chain. + * + * Return: 0 on success, %ENOENT otherwise. */ -void qcom_unregister_ssr_notifier(struct notifier_block *nb) +int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb) { - blocking_notifier_chain_unregister(&ssr_notifiers, nb); + return srcu_notifier_chain_unregister(notify, nb); } EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier); static void ssr_notify_unprepare(struct rproc_subdev *subdev) { struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; - blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr->name); + srcu_notifier_call_chain(&ssr->info->notifier_list, 0, &data); } /** @@ -229,12 +283,21 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev) * @ssr_name: identifier to use for notifications originating from @rproc * * As the @ssr is registered with the @rproc SSR events will be sent to all - * registered listeners in the system as the remoteproc is shut down. + * registered listeners for the remoteproc when it's SSR events occur + * (pre/post startup and pre/post shutdown). 
 */
 void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
			 const char *ssr_name)
 {
-	ssr->name = ssr_name;
+	struct qcom_ssr_subsystem *info;
+
+	info = qcom_ssr_get_subsys(ssr_name);
+	if (IS_ERR(info)) {
+		dev_err(&rproc->dev, "Failed to add ssr subdevice\n");
+		return;
+	}
+
+	ssr->info = info;
 	ssr->subdev.unprepare = ssr_notify_unprepare;
 
 	rproc_add_subdev(rproc, &ssr->subdev);
@@ -249,6 +312,7 @@ EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev);
 void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
 {
 	rproc_remove_subdev(rproc, &ssr->subdev);
+	ssr->info = NULL;
 }
 EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
 
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index 34e5188187dc..dfc641c3a98b 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -26,10 +26,11 @@ struct qcom_rproc_subdev {
 	struct qcom_smd_edge *edge;
 };
 
+struct qcom_ssr_subsystem;
+
 struct qcom_rproc_ssr {
 	struct rproc_subdev subdev;
-
-	const char *name;
+	struct qcom_ssr_subsystem *info;
 };
 
 void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h
index fa8e38681b4b..2a1d6d0249d9 100644
--- a/include/linux/remoteproc/qcom_rproc.h
+++ b/include/linux/remoteproc/qcom_rproc.h
@@ -5,17 +5,27 @@ struct notifier_block;
 
 #if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
 
-int qcom_register_ssr_notifier(struct notifier_block *nb);
-void qcom_unregister_ssr_notifier(struct notifier_block *nb);
+struct qcom_ssr_notify_data {
+	const char *name;
+	bool crashed;
+};
+
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
 
 #else
 
-static inline int qcom_register_ssr_notifier(struct notifier_block *nb)
+static inline void *qcom_register_ssr_notifier(const char *name,
+					       struct notifier_block *nb)
 {
-	return 0;
+	return NULL;
 }
 
-static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {}
+static inline int qcom_unregister_ssr_notifier(void *notify,
+					       struct notifier_block *nb)
+{
+	return 0;
+}
 
 #endif
-- cgit v1.2.3


From 62495d778439a4e47571293511a785cba754874c Mon Sep 17 00:00:00 2001
From: Rishabh Bhatnagar 
Date: Tue, 23 Jun 2020 19:23:28 -0700
Subject: remoteproc: qcom: Add notification types to SSR

The SSR subdevice only adds a callback for the unprepare event. Add
callbacks for the prepare, start and stop events. The client driver for
a particular remoteproc might be interested in knowing the status of
the remoteproc while undergoing SSR, not just when the remoteproc has
finished shutting down.
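
With all four events in place, a listener's callback can key off the
stage it cares about. A minimal sketch (the handler body is
hypothetical; the event values are the ones added by this patch):

static int ssr_event_cb(struct notifier_block *nb, unsigned long action,
			void *data)
{
	struct qcom_ssr_notify_data *notify_data = data;

	switch (action) {
	case QCOM_SSR_BEFORE_POWERUP:
		/* About to start: set up shared resources. */
		break;
	case QCOM_SSR_AFTER_POWERUP:
		/* Running again: reopen communication channels. */
		break;
	case QCOM_SSR_BEFORE_SHUTDOWN:
		/* Going down; notify_data->crashed says whether it crashed. */
		break;
	case QCOM_SSR_AFTER_SHUTDOWN:
		/* Fully stopped: tear down client state. */
		break;
	}

	return NOTIFY_OK;
}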
Reviewed-by: Alex Elder Signed-off-by: Siddharth Gupta Signed-off-by: Rishabh Bhatnagar Link: https://lore.kernel.org/r/1592965408-16908-3-git-send-email-rishabhb@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/qcom_common.c | 44 ++++++++++++++++++++++++++++++++++- include/linux/remoteproc/qcom_rproc.h | 16 +++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c index 48b399af9c83..2f45f0c79914 100644 --- a/drivers/remoteproc/qcom_common.c +++ b/drivers/remoteproc/qcom_common.c @@ -265,6 +265,44 @@ int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb) } EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier); +static int ssr_notify_prepare(struct rproc_subdev *subdev) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_BEFORE_POWERUP, &data); + return 0; +} + +static int ssr_notify_start(struct rproc_subdev *subdev) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_AFTER_POWERUP, &data); + return 0; +} + +static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = crashed, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_BEFORE_SHUTDOWN, &data); +} + static void ssr_notify_unprepare(struct rproc_subdev *subdev) { struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); @@ -273,7 +311,8 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev) .crashed = false, }; - srcu_notifier_call_chain(&ssr->info->notifier_list, 0, &data); + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_AFTER_SHUTDOWN, &data); } /** @@ -298,6 +337,9 @@ void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr, } ssr->info = info; + ssr->subdev.prepare = ssr_notify_prepare; + ssr->subdev.start = ssr_notify_start; + ssr->subdev.stop = ssr_notify_stop; ssr->subdev.unprepare = ssr_notify_unprepare; rproc_add_subdev(rproc, &ssr->subdev); diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h index 2a1d6d0249d9..647051662174 100644 --- a/include/linux/remoteproc/qcom_rproc.h +++ b/include/linux/remoteproc/qcom_rproc.h @@ -5,6 +5,22 @@ struct notifier_block; #if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) +/** + * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc + * processor. 
+ * + * @QCOM_SSR_BEFORE_POWERUP: Remoteproc about to start (prepare stage) + * @QCOM_SSR_AFTER_POWERUP: Remoteproc is running (start stage) + * @QCOM_SSR_BEFORE_SHUTDOWN: Remoteproc crashed or shutting down (stop stage) + * @QCOM_SSR_AFTER_SHUTDOWN: Remoteproc is down (unprepare stage) + */ +enum qcom_ssr_notify_type { + QCOM_SSR_BEFORE_POWERUP, + QCOM_SSR_AFTER_POWERUP, + QCOM_SSR_BEFORE_SHUTDOWN, + QCOM_SSR_AFTER_SHUTDOWN, +}; + struct qcom_ssr_notify_data { const char *name; bool crashed; -- cgit v1.2.3 From c1326210477ecc06c53221f0005c64419aba30d6 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:20 +0000 Subject: nfs,nfsd: NFSv4.2 extended attribute protocol definitions Add definitions for the new operations, errors and flags as defined in RFC 8276 (File System Extended Attributes in NFSv4). Signed-off-by: Frank van der Linden Signed-off-by: Chuck Lever --- include/linux/nfs4.h | 20 ++++++++++++++++++++ include/uapi/linux/nfs4.h | 3 +++ 2 files changed, 23 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 4dba3c948932..e6ca9d1d2e76 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -150,6 +150,12 @@ enum nfs_opnum4 { OP_WRITE_SAME = 70, OP_CLONE = 71, + /* xattr support (RFC8726) */ + OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, }; @@ -280,6 +286,10 @@ enum nfsstat4 { NFS4ERR_WRONG_LFS = 10092, NFS4ERR_BADLABEL = 10093, NFS4ERR_OFFLOAD_NO_REQS = 10094, + + /* xattr (RFC8276) */ + NFS4ERR_NOXATTR = 10095, + NFS4ERR_XATTR2BIG = 10096, }; static inline bool seqid_mutating_err(u32 err) @@ -452,6 +462,7 @@ enum change_attr_type4 { #define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) #define FATTR4_WORD2_MODE_UMASK (1UL << 17) +#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) @@ -700,4 +711,13 @@ struct nl4_server { struct nfs42_netaddr nl4_addr; /* NL4_NETADDR */ } u; }; + +/* + * Options for setxattr. These match the flags for setxattr(2). + */ +enum nfs4_setxattr_options { + SETXATTR4_EITHER = 0, + SETXATTR4_CREATE = 1, + SETXATTR4_REPLACE = 2, +}; #endif diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index 8572930cf5b0..bf197e99b98f 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -33,6 +33,9 @@ #define NFS4_ACCESS_EXTEND 0x0008 #define NFS4_ACCESS_DELETE 0x0010 #define NFS4_ACCESS_EXECUTE 0x0020 +#define NFS4_ACCESS_XAREAD 0x0040 +#define NFS4_ACCESS_XAWRITE 0x0080 +#define NFS4_ACCESS_XALIST 0x0100 #define NFS4_FH_PERSISTENT 0x0000 #define NFS4_FH_NOEXPIRE_WITH_OPEN 0x0001 -- cgit v1.2.3 From 08b5d5014a27e717826999ad20e394a8811aae92 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:18 +0000 Subject: xattr: break delegations in {set,remove}xattr set/removexattr on an exported filesystem should break NFS delegations. This is true in general, but also for the upcoming support for RFC 8726 (NFSv4 extended attribute support). Make sure that they do. Additionally, they need to grow a _locked variant, since callers might call this with i_rwsem held (like the NFS server code). 
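
A minimal sketch of the intended calling convention (example_set_xattr()
is hypothetical; the retry loop mirrors the reworked vfs_setxattr()
below): a caller takes i_rwsem itself, uses the _locked variant, and
retries once any delegation has been broken:

#include <linux/fs.h>
#include <linux/xattr.h>

static int example_set_xattr(struct dentry *dentry, const char *name,
			     const void *value, size_t size, int flags)
{
	struct inode *inode = d_inode(dentry);
	struct inode *delegated_inode = NULL;
	int error;

retry_deleg:
	inode_lock(inode);
	error = __vfs_setxattr_locked(dentry, name, value, size, flags,
				      &delegated_inode);
	inode_unlock(inode);

	if (delegated_inode) {
		/* i_rwsem is dropped; wait for the delegation, then retry. */
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}

	return error;
}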
Cc: stable@vger.kernel.org # v4.9+ Cc: linux-fsdevel@vger.kernel.org Cc: Al Viro Signed-off-by: Frank van der Linden Signed-off-by: Chuck Lever --- fs/xattr.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++----- include/linux/xattr.h | 2 ++ 2 files changed, 79 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/xattr.c b/fs/xattr.c index 91608d9bfc6a..95f38f57347f 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -204,10 +204,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, return error; } - +/** + * __vfs_setxattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - xattr name to set + * @value - value to set @name to + * @size - size of @value + * @flags - flags to pass into filesystem operations + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. + */ int -vfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) +__vfs_setxattr_locked(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -216,15 +228,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (error) return error; - inode_lock(inode); error = security_inode_setxattr(dentry, name, value, size, flags); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_setxattr_noperm(dentry, name, value, size, flags); out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_setxattr_locked); + +int +vfs_setxattr(struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_setxattr_locked(dentry, name, value, size, flags, + &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } return error; } EXPORT_SYMBOL_GPL(vfs_setxattr); @@ -378,8 +415,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name) } EXPORT_SYMBOL(__vfs_removexattr); +/** + * __vfs_removexattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - name of xattr to remove + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. 
+ */
 int
-vfs_removexattr(struct dentry *dentry, const char *name)
+__vfs_removexattr_locked(struct dentry *dentry, const char *name,
+		struct inode **delegated_inode)
 {
 	struct inode *inode = dentry->d_inode;
 	int error;
@@ -388,11 +435,14 @@ vfs_removexattr(struct dentry *dentry, const char *name)
 	if (error)
 		return error;
 
-	inode_lock(inode);
 	error = security_inode_removexattr(dentry, name);
 	if (error)
 		goto out;
 
+	error = try_break_deleg(inode, delegated_inode);
+	if (error)
+		goto out;
+
 	error = __vfs_removexattr(dentry, name);
 
 	if (!error) {
@@ -401,12 +451,32 @@ vfs_removexattr(struct dentry *dentry, const char *name)
 	}
 
 out:
+	return error;
+}
+EXPORT_SYMBOL_GPL(__vfs_removexattr_locked);
+
+int
+vfs_removexattr(struct dentry *dentry, const char *name)
+{
+	struct inode *inode = dentry->d_inode;
+	struct inode *delegated_inode = NULL;
+	int error;
+
+retry_deleg:
+	inode_lock(inode);
+	error = __vfs_removexattr_locked(dentry, name, &delegated_inode);
 	inode_unlock(inode);
+
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry_deleg;
+	}
+
 	return error;
 }
 EXPORT_SYMBOL_GPL(vfs_removexattr);
 
-
 /*
  * Extended attribute SET operations
  */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 47eaa34f8761..a2f3cd02653c 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -51,8 +51,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
 int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
+int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **);
 int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
 int __vfs_removexattr(struct dentry *, const char *);
+int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **);
 int vfs_removexattr(struct dentry *, const char *);
 
 ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-- cgit v1.2.3


From cab8d289c5ad541a5351a651d95c4086b7f84d7c Mon Sep 17 00:00:00 2001
From: Frank van der Linden 
Date: Tue, 23 Jun 2020 22:39:19 +0000
Subject: xattr: add a function to check if a namespace is supported

Add a function that checks if an extended attribute namespace is
supported for an inode, meaning that a handler must be present for
either the whole namespace, or at least one synthetic xattr in the
namespace.

To be used by the nfs server code when it is queried for extended
attribute support.

Cc: linux-fsdevel@vger.kernel.org
Cc: Al Viro 
Signed-off-by: Frank van der Linden 
Signed-off-by: Chuck Lever 
---
 fs/xattr.c            | 27 +++++++++++++++++++++++++++
 include/linux/xattr.h |  2 ++
 2 files changed, 29 insertions(+)

(limited to 'include/linux')

diff --git a/fs/xattr.c b/fs/xattr.c
index 95f38f57347f..386b45676d7e 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -134,6 +134,33 @@ xattr_permission(struct inode *inode, const char *name, int mask)
 	return inode_permission(inode, mask);
 }
 
+/*
+ * Look for any handler that deals with the specified namespace.
+ */ +int +xattr_supported_namespace(struct inode *inode, const char *prefix) +{ + const struct xattr_handler **handlers = inode->i_sb->s_xattr; + const struct xattr_handler *handler; + size_t preflen; + + if (!(inode->i_opflags & IOP_XATTR)) { + if (unlikely(is_bad_inode(inode))) + return -EIO; + return -EOPNOTSUPP; + } + + preflen = strlen(prefix); + + for_each_xattr_handler(handlers, handler) { + if (!strncmp(xattr_prefix(handler), prefix, preflen)) + return 0; + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(xattr_supported_namespace); + int __vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) diff --git a/include/linux/xattr.h b/include/linux/xattr.h index a2f3cd02653c..fac75810d9d3 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -61,6 +61,8 @@ ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_siz ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); +int xattr_supported_namespace(struct inode *inode, const char *prefix); + static inline const char *xattr_prefix(const struct xattr_handler *handler) { return handler->prefix ?: handler->name; -- cgit v1.2.3 From 23e50fe3a5e6045a573c69d4b0e3d78aa6183323 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:26 +0000 Subject: nfsd: implement the xattr functions and en/decode logic Implement the main entry points for the *XATTR operations. Add functions to calculate the reply size for the user extended attribute operations, and implement the XDR encode / decode logic for these operations. Add the user extended attributes operations to nfsd4_ops. Signed-off-by: Frank van der Linden Signed-off-by: Chuck Lever --- fs/nfsd/nfs4proc.c | 120 ++++++++++++++ fs/nfsd/nfs4xdr.c | 450 +++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/nfs4.h | 2 +- 3 files changed, 571 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 841aad772798..a527da3d8052 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -2097,6 +2097,68 @@ out: } #endif /* CONFIG_NFSD_PNFS */ +static __be32 +nfsd4_getxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_getxattr *getxattr = &u->getxattr; + + return nfsd_getxattr(rqstp, &cstate->current_fh, + getxattr->getxa_name, &getxattr->getxa_buf, + &getxattr->getxa_len); +} + +static __be32 +nfsd4_setxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_setxattr *setxattr = &u->setxattr; + __be32 ret; + + if (opens_in_grace(SVC_NET(rqstp))) + return nfserr_grace; + + ret = nfsd_setxattr(rqstp, &cstate->current_fh, setxattr->setxa_name, + setxattr->setxa_buf, setxattr->setxa_len, + setxattr->setxa_flags); + + if (!ret) + set_change_info(&setxattr->setxa_cinfo, &cstate->current_fh); + + return ret; +} + +static __be32 +nfsd4_listxattrs(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + /* + * Get the entire list, then copy out only the user attributes + * in the encode function. 
+ */ + return nfsd_listxattr(rqstp, &cstate->current_fh, + &u->listxattrs.lsxa_buf, &u->listxattrs.lsxa_len); +} + +static __be32 +nfsd4_removexattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_removexattr *removexattr = &u->removexattr; + __be32 ret; + + if (opens_in_grace(SVC_NET(rqstp))) + return nfserr_grace; + + ret = nfsd_removexattr(rqstp, &cstate->current_fh, + removexattr->rmxa_name); + + if (!ret) + set_change_info(&removexattr->rmxa_cinfo, &cstate->current_fh); + + return ret; +} + /* * NULL call. */ @@ -2706,6 +2768,42 @@ static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) return (op_encode_hdr_size + 3) * sizeof(__be32); } +static inline u32 nfsd4_getxattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + u32 maxcount, rlen; + + maxcount = svc_max_payload(rqstp); + rlen = min_t(u32, XATTR_SIZE_MAX, maxcount); + + return (op_encode_hdr_size + 1 + XDR_QUADLEN(rlen)) * sizeof(__be32); +} + +static inline u32 nfsd4_setxattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + return (op_encode_hdr_size + op_encode_change_info_maxsz) + * sizeof(__be32); +} +static inline u32 nfsd4_listxattrs_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + u32 maxcount, rlen; + + maxcount = svc_max_payload(rqstp); + rlen = min(op->u.listxattrs.lsxa_maxcount, maxcount); + + return (op_encode_hdr_size + 4 + XDR_QUADLEN(rlen)) * sizeof(__be32); +} + +static inline u32 nfsd4_removexattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + return (op_encode_hdr_size + op_encode_change_info_maxsz) + * sizeof(__be32); +} + + static const struct nfsd4_operation nfsd4_ops[] = { [OP_ACCESS] = { .op_func = nfsd4_access, @@ -3087,6 +3185,28 @@ static const struct nfsd4_operation nfsd4_ops[] = { .op_name = "OP_COPY_NOTIFY", .op_rsize_bop = nfsd4_copy_notify_rsize, }, + [OP_GETXATTR] = { + .op_func = nfsd4_getxattr, + .op_name = "OP_GETXATTR", + .op_rsize_bop = nfsd4_getxattr_rsize, + }, + [OP_SETXATTR] = { + .op_func = nfsd4_setxattr, + .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, + .op_name = "OP_SETXATTR", + .op_rsize_bop = nfsd4_setxattr_rsize, + }, + [OP_LISTXATTRS] = { + .op_func = nfsd4_listxattrs, + .op_name = "OP_LISTXATTRS", + .op_rsize_bop = nfsd4_listxattrs_rsize, + }, + [OP_REMOVEXATTR] = { + .op_func = nfsd4_removexattr, + .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, + .op_name = "OP_REMOVEXATTR", + .op_rsize_bop = nfsd4_removexattr_rsize, + }, }; /** diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 48806b493eba..8bacc0ceae19 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -41,6 +41,8 @@ #include #include #include +#include +#include #include "idmap.h" #include "acl.h" @@ -1877,6 +1879,208 @@ nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek) DECODE_TAIL; } +/* + * XDR data that is more than PAGE_SIZE in size is normally part of a + * read or write. However, the size of extended attributes is limited + * by the maximum request size, and then further limited by the underlying + * filesystem limits. This can exceed PAGE_SIZE (currently, XATTR_SIZE_MAX + * is 64k). Since there is no kvec- or page-based interface to xattrs, + * and we're not dealing with contiguous pages, we need to do some copying. + */ + +/* + * Decode data into buffer. Uses head and pages constructed by + * svcxdr_construct_vector. 
+ */ +static __be32 +nfsd4_vbuf_from_vector(struct nfsd4_compoundargs *argp, struct kvec *head, + struct page **pages, char **bufp, u32 buflen) +{ + char *tmp, *dp; + u32 len; + + if (buflen <= head->iov_len) { + /* + * We're in luck, the head has enough space. Just return + * the head, no need for copying. + */ + *bufp = head->iov_base; + return 0; + } + + tmp = svcxdr_tmpalloc(argp, buflen); + if (tmp == NULL) + return nfserr_jukebox; + + dp = tmp; + memcpy(dp, head->iov_base, head->iov_len); + buflen -= head->iov_len; + dp += head->iov_len; + + while (buflen > 0) { + len = min_t(u32, buflen, PAGE_SIZE); + memcpy(dp, page_address(*pages), len); + + buflen -= len; + dp += len; + pages++; + } + + *bufp = tmp; + return 0; +} + +/* + * Get a user extended attribute name from the XDR buffer. + * It will not have the "user." prefix, so prepend it. + * Lastly, check for nul characters in the name. + */ +static __be32 +nfsd4_decode_xattr_name(struct nfsd4_compoundargs *argp, char **namep) +{ + DECODE_HEAD; + char *name, *sp, *dp; + u32 namelen, cnt; + + READ_BUF(4); + namelen = be32_to_cpup(p++); + + if (namelen > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) + return nfserr_nametoolong; + + if (namelen == 0) + goto xdr_error; + + READ_BUF(namelen); + + name = svcxdr_tmpalloc(argp, namelen + XATTR_USER_PREFIX_LEN + 1); + if (!name) + return nfserr_jukebox; + + memcpy(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); + + /* + * Copy the extended attribute name over while checking for 0 + * characters. + */ + sp = (char *)p; + dp = name + XATTR_USER_PREFIX_LEN; + cnt = namelen; + + while (cnt-- > 0) { + if (*sp == '\0') + goto xdr_error; + *dp++ = *sp++; + } + *dp = '\0'; + + *namep = name; + + DECODE_TAIL; +} + +/* + * A GETXATTR op request comes without a length specifier. We just set the + * maximum length for the reply based on XATTR_SIZE_MAX and the maximum + * channel reply size. nfsd_getxattr will probe the length of the xattr, + * check it against getxa_len, and allocate + return the value. 
+ */ +static __be32 +nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp, + struct nfsd4_getxattr *getxattr) +{ + __be32 status; + u32 maxcount; + + status = nfsd4_decode_xattr_name(argp, &getxattr->getxa_name); + if (status) + return status; + + maxcount = svc_max_payload(argp->rqstp); + maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount); + + getxattr->getxa_len = maxcount; + + return status; +} + +static __be32 +nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp, + struct nfsd4_setxattr *setxattr) +{ + DECODE_HEAD; + u32 flags, maxcount, size; + struct kvec head; + struct page **pagelist; + + READ_BUF(4); + flags = be32_to_cpup(p++); + + if (flags > SETXATTR4_REPLACE) + return nfserr_inval; + setxattr->setxa_flags = flags; + + status = nfsd4_decode_xattr_name(argp, &setxattr->setxa_name); + if (status) + return status; + + maxcount = svc_max_payload(argp->rqstp); + maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount); + + READ_BUF(4); + size = be32_to_cpup(p++); + if (size > maxcount) + return nfserr_xattr2big; + + setxattr->setxa_len = size; + if (size > 0) { + status = svcxdr_construct_vector(argp, &head, &pagelist, size); + if (status) + return status; + + status = nfsd4_vbuf_from_vector(argp, &head, pagelist, + &setxattr->setxa_buf, size); + } + + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp, + struct nfsd4_listxattrs *listxattrs) +{ + DECODE_HEAD; + u32 maxcount; + + READ_BUF(12); + p = xdr_decode_hyper(p, &listxattrs->lsxa_cookie); + + /* + * If the cookie is too large to have even one user.x attribute + * plus trailing '\0' left in a maximum size buffer, it's invalid. + */ + if (listxattrs->lsxa_cookie >= + (XATTR_LIST_MAX / (XATTR_USER_PREFIX_LEN + 2))) + return nfserr_badcookie; + + maxcount = be32_to_cpup(p++); + if (maxcount < 8) + /* Always need at least 2 words (length and one character) */ + return nfserr_inval; + + maxcount = min(maxcount, svc_max_payload(argp->rqstp)); + listxattrs->lsxa_maxcount = maxcount; + + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_removexattr(struct nfsd4_compoundargs *argp, + struct nfsd4_removexattr *removexattr) +{ + return nfsd4_decode_xattr_name(argp, &removexattr->rmxa_name); +} + static __be32 nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p) { @@ -1973,6 +2177,11 @@ static const nfsd4_dec nfsd4_dec_ops[] = { [OP_SEEK] = (nfsd4_dec)nfsd4_decode_seek, [OP_WRITE_SAME] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_CLONE] = (nfsd4_dec)nfsd4_decode_clone, + /* RFC 8276 extended atributes operations */ + [OP_GETXATTR] = (nfsd4_dec)nfsd4_decode_getxattr, + [OP_SETXATTR] = (nfsd4_dec)nfsd4_decode_setxattr, + [OP_LISTXATTRS] = (nfsd4_dec)nfsd4_decode_listxattrs, + [OP_REMOVEXATTR] = (nfsd4_dec)nfsd4_decode_removexattr, }; static inline bool @@ -4458,6 +4667,241 @@ nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p) return nfserr; } +/* + * Encode kmalloc-ed buffer in to XDR stream. 
+ */ +static int +nfsd4_vbuf_to_stream(struct xdr_stream *xdr, char *buf, u32 buflen) +{ + u32 cplen; + __be32 *p; + + cplen = min_t(unsigned long, buflen, + ((void *)xdr->end - (void *)xdr->p)); + p = xdr_reserve_space(xdr, cplen); + if (!p) + return nfserr_resource; + + memcpy(p, buf, cplen); + buf += cplen; + buflen -= cplen; + + while (buflen) { + cplen = min_t(u32, buflen, PAGE_SIZE); + p = xdr_reserve_space(xdr, cplen); + if (!p) + return nfserr_resource; + + memcpy(p, buf, cplen); + + if (cplen < PAGE_SIZE) { + /* + * We're done, with a length that wasn't page + * aligned, so possibly not word aligned. Pad + * any trailing bytes with 0. + */ + xdr_encode_opaque_fixed(p, NULL, cplen); + break; + } + + buflen -= PAGE_SIZE; + buf += PAGE_SIZE; + } + + return 0; +} + +static __be32 +nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_getxattr *getxattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p, err; + + p = xdr_reserve_space(xdr, 4); + if (!p) + return nfserr_resource; + + *p = cpu_to_be32(getxattr->getxa_len); + + if (getxattr->getxa_len == 0) + return 0; + + err = nfsd4_vbuf_to_stream(xdr, getxattr->getxa_buf, + getxattr->getxa_len); + + kvfree(getxattr->getxa_buf); + + return err; +} + +static __be32 +nfsd4_encode_setxattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_setxattr *setxattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p; + + p = xdr_reserve_space(xdr, 20); + if (!p) + return nfserr_resource; + + encode_cinfo(p, &setxattr->setxa_cinfo); + + return 0; +} + +/* + * See if there are cookie values that can be rejected outright. + */ +static __be32 +nfsd4_listxattr_validate_cookie(struct nfsd4_listxattrs *listxattrs, + u32 *offsetp) +{ + u64 cookie = listxattrs->lsxa_cookie; + + /* + * If the cookie is larger than the maximum number we can fit + * in either the buffer we just got back from vfs_listxattr, or, + * XDR-encoded, in the return buffer, it's invalid. + */ + if (cookie > (listxattrs->lsxa_len) / (XATTR_USER_PREFIX_LEN + 2)) + return nfserr_badcookie; + + if (cookie > (listxattrs->lsxa_maxcount / + (XDR_QUADLEN(XATTR_USER_PREFIX_LEN + 2) + 4))) + return nfserr_badcookie; + + *offsetp = (u32)cookie; + return 0; +} + +static __be32 +nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_listxattrs *listxattrs) +{ + struct xdr_stream *xdr = &resp->xdr; + u32 cookie_offset, count_offset, eof; + u32 left, xdrleft, slen, count; + u32 xdrlen, offset; + u64 cookie; + char *sp; + __be32 status; + __be32 *p; + u32 nuser; + + eof = 1; + + status = nfsd4_listxattr_validate_cookie(listxattrs, &offset); + if (status) + goto out; + + /* + * Reserve space for the cookie and the name array count. Record + * the offsets to save them later. + */ + cookie_offset = xdr->buf->len; + count_offset = cookie_offset + 8; + p = xdr_reserve_space(xdr, 12); + if (!p) { + status = nfserr_resource; + goto out; + } + + count = 0; + left = listxattrs->lsxa_len; + sp = listxattrs->lsxa_buf; + nuser = 0; + + xdrleft = listxattrs->lsxa_maxcount; + + while (left > 0 && xdrleft > 0) { + slen = strlen(sp); + + /* + * Check if this a user. attribute, skip it if not. + */ + if (strncmp(sp, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) + goto contloop; + + slen -= XATTR_USER_PREFIX_LEN; + xdrlen = 4 + ((slen + 3) & ~3); + if (xdrlen > xdrleft) { + if (count == 0) { + /* + * Can't even fit the first attribute name. 
+ */ + status = nfserr_toosmall; + goto out; + } + eof = 0; + goto wreof; + } + + left -= XATTR_USER_PREFIX_LEN; + sp += XATTR_USER_PREFIX_LEN; + if (nuser++ < offset) + goto contloop; + + + p = xdr_reserve_space(xdr, xdrlen); + if (!p) { + status = nfserr_resource; + goto out; + } + + p = xdr_encode_opaque(p, sp, slen); + + xdrleft -= xdrlen; + count++; +contloop: + sp += slen + 1; + left -= slen + 1; + } + + /* + * If there were user attributes to copy, but we didn't copy + * any, the offset was too large (e.g. the cookie was invalid). + */ + if (nuser > 0 && count == 0) { + status = nfserr_badcookie; + goto out; + } + +wreof: + p = xdr_reserve_space(xdr, 4); + if (!p) { + status = nfserr_resource; + goto out; + } + *p = cpu_to_be32(eof); + + cookie = offset + count; + + write_bytes_to_xdr_buf(xdr->buf, cookie_offset, &cookie, 8); + count = htonl(count); + write_bytes_to_xdr_buf(xdr->buf, count_offset, &count, 4); +out: + if (listxattrs->lsxa_len) + kvfree(listxattrs->lsxa_buf); + return status; +} + +static __be32 +nfsd4_encode_removexattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_removexattr *removexattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p; + + p = xdr_reserve_space(xdr, 20); + if (!p) + return nfserr_resource; + + p = encode_cinfo(p, &removexattr->rmxa_cinfo); + return 0; +} + typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *); /* @@ -4547,6 +4991,12 @@ static const nfsd4_enc nfsd4_enc_ops[] = { [OP_SEEK] = (nfsd4_enc)nfsd4_encode_seek, [OP_WRITE_SAME] = (nfsd4_enc)nfsd4_encode_noop, [OP_CLONE] = (nfsd4_enc)nfsd4_encode_noop, + + /* RFC 8276 extended atributes operations */ + [OP_GETXATTR] = (nfsd4_enc)nfsd4_encode_getxattr, + [OP_SETXATTR] = (nfsd4_enc)nfsd4_encode_setxattr, + [OP_LISTXATTRS] = (nfsd4_enc)nfsd4_encode_listxattrs, + [OP_REMOVEXATTR] = (nfsd4_enc)nfsd4_encode_removexattr, }; /* diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index e6ca9d1d2e76..33ebe476428e 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -165,7 +165,7 @@ Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS #define LAST_NFS40_OP OP_RELEASE_LOCKOWNER #define LAST_NFS41_OP OP_RECLAIM_COMPLETE -#define LAST_NFS42_OP OP_CLONE +#define LAST_NFS42_OP OP_REMOVEXATTR #define LAST_NFS4_OP LAST_NFS42_OP enum nfsstat4 { -- cgit v1.2.3 From c65b326b1eb983bca35ed43d0e453d1b15705f10 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 25 Mar 2020 14:41:46 -0400 Subject: svcrdma: Make svc_rdma_send_error_msg() a global function Prepare for svc_rdma_send_error_msg() to be invoked from another source file. 
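
A sketch of the intended external use (example_reject_call() is
hypothetical; svc_rdma_send_ctxt_get() is assumed to be the existing
send-context allocator):

static void example_reject_call(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt, int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;

	/* svc_rdma_send_error_msg() consumes @sctxt: it is released by
	 * Send completion, or by the function itself on error.
	 */
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}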
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 4 ++++ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 28 +++++++++++++++++++--------- 2 files changed, 23 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 7ed82625dc0b..1579f7a14ab4 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -195,6 +195,10 @@ extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, struct xdr_buf *xdr); +extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *sctxt, + struct svc_rdma_recv_ctxt *rctxt, + int status); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index fb548b548c4b..57041298fe4f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -804,16 +804,25 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, return svc_rdma_send(rdma, &sctxt->sc_send_wr); } -/* Given the client-provided Write and Reply chunks, the server was not - * able to form a complete reply. Return an RDMA_ERROR message so the - * client can retire this RPC transaction. - * - * Remote Invalidation is skipped for simplicity. +/** + * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response + * @rdma: controlling transport context + * @sctxt: Send context for the response + * @rctxt: Receive context for incoming bad message + * @status: negative errno indicating error that occurred + * + * Given the client-provided Read, Write, and Reply chunks, the + * server was not able to parse the Call or form a complete Reply. + * Return an RDMA_ERROR message so the client can retire the RPC + * transaction. + * + * The caller does not have to release @sctxt. It is released by + * Send completion, or by this function on error. */ -static void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *sctxt, - struct svc_rdma_recv_ctxt *rctxt, - int status) +void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *sctxt, + struct svc_rdma_recv_ctxt *rctxt, + int status) { __be32 *rdma_argp = rctxt->rc_recv_buf; __be32 *p; @@ -852,6 +861,7 @@ static void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, trace_svcrdma_err_chunk(*rdma_argp); } + /* Remote Invalidation is skipped for simplicity. */ sctxt->sc_send_wr.num_sge = 1; sctxt->sc_send_wr.opcode = IB_WR_SEND; sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len; -- cgit v1.2.3 From 0b8dc1b69995cbd81c2c9a2f1730c46cce085f62 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 18 May 2020 11:34:47 -0400 Subject: svcrdma: Remove declarations for functions long removed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pavane pour une infante défunte. 
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 1579f7a14ab4..d28ca1b6f2eb 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -204,10 +204,6 @@ extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); /* svc_rdma_transport.c */ -extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); -extern void svc_sq_reap(struct svcxprt_rdma *); -extern void svc_rq_reap(struct svcxprt_rdma *); - extern struct svc_xprt_class svc_rdma_class; #ifdef CONFIG_SUNRPC_BACKCHANNEL extern struct svc_xprt_class svc_rdma_bc_class; -- cgit v1.2.3 From 07e9a6325a35fb9655f7b52e2b9dc632da6eef51 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sat, 28 Mar 2020 13:43:22 -0400 Subject: SUNRPC: Add helpers for decoding list discriminators symbolically Use these helpers in a few spots to demonstrate their use. The remaining open-coded discriminator checks in rpcrdma will be addressed in subsequent patches. Signed-off-by: Chuck Lever --- include/linux/sunrpc/xdr.h | 26 ++++++++++++++++++++++++++ net/sunrpc/xprtrdma/rpc_rdma.c | 12 ++++++------ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 17 ++++++++--------- 3 files changed, 40 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 22c207b2425f..5a6a81b7cd9f 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -474,6 +474,32 @@ xdr_stream_encode_uint32_array(struct xdr_stream *xdr, return ret; } +/** + * xdr_item_is_absent - symbolically handle XDR discriminators + * @p: pointer to undecoded discriminator + * + * Return values: + * %true if the following XDR item is absent + * %false if the following XDR item is present + */ +static inline bool xdr_item_is_absent(const __be32 *p) +{ + return *p == xdr_zero; +} + +/** + * xdr_item_is_present - symbolically handle XDR discriminators + * @p: pointer to undecoded discriminator + * + * Return values: + * %true if the following XDR item is present + * %false if the following XDR item is absent + */ +static inline bool xdr_item_is_present(const __be32 *p) +{ + return *p != xdr_zero; +} + /** * xdr_stream_decode_u32 - Decode a 32-bit integer * @xdr: pointer to xdr_stream diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 935bbef2f7be..feecd1f55f18 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1133,11 +1133,11 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) p = xdr_inline_decode(xdr, 0); /* Chunk lists */ - if (*p++ != xdr_zero) + if (xdr_item_is_present(p++)) return false; - if (*p++ != xdr_zero) + if (xdr_item_is_present(p++)) return false; - if (*p++ != xdr_zero) + if (xdr_item_is_present(p++)) return false; /* RPC header */ @@ -1215,7 +1215,7 @@ static int decode_read_list(struct xdr_stream *xdr) p = xdr_inline_decode(xdr, sizeof(*p)); if (unlikely(!p)) return -EIO; - if (unlikely(*p != xdr_zero)) + if (unlikely(xdr_item_is_present(p))) return -EIO; return 0; } @@ -1234,7 +1234,7 @@ static int decode_write_list(struct xdr_stream *xdr, u32 *length) p = xdr_inline_decode(xdr, sizeof(*p)); if (unlikely(!p)) return -EIO; - if (*p == xdr_zero) + if (xdr_item_is_absent(p)) break; if (!first) return -EIO; @@ -1256,7 +1256,7 @@ static int decode_reply_chunk(struct xdr_stream *xdr, 
u32 *length) return -EIO; *length = 0; - if (*p != xdr_zero) + if (xdr_item_is_present(p)) if (decode_write_chunk(xdr, length)) return -EIO; return 0; diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index c072ce61b393..5e78067889f3 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -419,7 +419,7 @@ static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt) len = 0; first = true; - while (*p != xdr_zero) { + while (xdr_item_is_present(p)) { p = xdr_inline_decode(&rctxt->rc_stream, rpcrdma_readseg_maxsz * sizeof(*p)); if (!p) @@ -500,7 +500,7 @@ static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt) if (!p) return false; rctxt->rc_write_list = p; - while (*p != xdr_zero) { + while (xdr_item_is_present(p)) { if (!xdr_check_write_chunk(rctxt, MAX_BYTES_WRITE_CHUNK)) return false; ++chcount; @@ -532,12 +532,11 @@ static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt) p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p)); if (!p) return false; - rctxt->rc_reply_chunk = p; - if (*p != xdr_zero) { + rctxt->rc_reply_chunk = NULL; + if (xdr_item_is_present(p)) { if (!xdr_check_write_chunk(rctxt, MAX_BYTES_SPECIAL_CHUNK)) return false; - } else { - rctxt->rc_reply_chunk = NULL; + rctxt->rc_reply_chunk = p; } return true; } @@ -568,7 +567,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma, p += rpcrdma_fixed_maxsz; /* Read list */ - while (*p++ != xdr_zero) { + while (xdr_item_is_present(p++)) { p++; /* position */ if (inv_rkey == xdr_zero) inv_rkey = *p; @@ -578,7 +577,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma, } /* Write list */ - while (*p++ != xdr_zero) { + while (xdr_item_is_present(p++)) { segcount = be32_to_cpup(p++); for (i = 0; i < segcount; i++) { if (inv_rkey == xdr_zero) @@ -590,7 +589,7 @@ static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma, } /* Reply chunk */ - if (*p++ != xdr_zero) { + if (xdr_item_is_present(p++)) { segcount = be32_to_cpup(p++); for (i = 0; i < segcount; i++) { if (inv_rkey == xdr_zero) -- cgit v1.2.3 From f60a08697d28b138c73b14c3204947bc8e637197 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 29 Mar 2020 16:44:13 -0400 Subject: svcrdma: Add common XDR decoders for RDMA and Read segments Clean up: De-duplicate some code. 
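For reference, an RDMA segment on the wire is three XDR items -- a 32-bit handle, a 32-bit length, and a 64-bit offset encoded as an XDR hyper -- and a Read segment prepends a 32-bit position. Below is a minimal standalone sketch of that layout (illustrative only, not part of the patch; the struct and function names are hypothetical). The kernel helpers introduced in the diff that follows perform the same walk over __be32 words:

    /* Decode one RDMA segment from a stream of big-endian XDR words
     * and return a pointer to the XDR item that follows it. */
    #include <stdint.h>
    #include <arpa/inet.h>      /* ntohl() */

    struct rdma_segment {
            uint32_t handle;    /* registered memory handle (R_key) */
            uint32_t length;    /* length of the target region, in octets */
            uint64_t offset;    /* XDR hyper: two 32-bit words, high word first */
    };

    static const uint32_t *decode_rdma_segment(const uint32_t *p,
                                               struct rdma_segment *seg)
    {
            seg->handle = ntohl(*p++);
            seg->length = ntohl(*p++);
            seg->offset = (uint64_t)ntohl(*p++) << 32;
            seg->offset |= ntohl(*p++);
            return p;
    }

    static const uint32_t *decode_read_segment(const uint32_t *p,
                                               uint32_t *position,
                                               struct rdma_segment *seg)
    {
            *position = ntohl(*p++);    /* Read segments add a position */
            return decode_rdma_segment(p, seg);
    }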
Signed-off-by: Chuck Lever --- include/linux/sunrpc/rpc_rdma.h | 37 ++++++++++++++++++++++++++++++++ net/sunrpc/xprtrdma/frwr_ops.c | 1 - net/sunrpc/xprtrdma/rpc_rdma.c | 5 +---- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 4 +--- net/sunrpc/xprtrdma/svc_rdma_rw.c | 37 ++++++++++++++------------------ net/sunrpc/xprtrdma/svc_rdma_sendto.c | 5 +---- net/sunrpc/xprtrdma/svc_rdma_transport.c | 1 - 7 files changed, 56 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 320c672d84de..db50380f64f4 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -124,4 +124,41 @@ rpcrdma_decode_buffer_size(u8 val) return ((unsigned int)val + 1) << 10; } +/** + * xdr_decode_rdma_segment - Decode contents of an RDMA segment + * @p: Pointer to the undecoded RDMA segment + * @handle: Upon return, the RDMA handle + * @length: Upon return, the RDMA length + * @offset: Upon return, the RDMA offset + * + * Return value: + * Pointer to the XDR item that follows the RDMA segment + */ +static inline __be32 *xdr_decode_rdma_segment(__be32 *p, u32 *handle, + u32 *length, u64 *offset) +{ + *handle = be32_to_cpup(p++); + *length = be32_to_cpup(p++); + return xdr_decode_hyper(p, offset); +} + +/** + * xdr_decode_read_segment - Decode contents of a Read segment + * @p: Pointer to the undecoded Read segment + * @position: Upon return, the segment's position + * @handle: Upon return, the RDMA handle + * @length: Upon return, the RDMA length + * @offset: Upon return, the RDMA offset + * + * Return value: + * Pointer to the XDR item that follows the Read segment + */ +static inline __be32 *xdr_decode_read_segment(__be32 *p, u32 *position, + u32 *handle, u32 *length, + u64 *offset) +{ + *position = be32_to_cpup(p++); + return xdr_decode_rdma_segment(p, handle, length, offset); +} + #endif /* _LINUX_SUNRPC_RPC_RDMA_H */ diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index b647562a26dd..7f94c9a19fd3 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -40,7 +40,6 @@ * New MRs are created on demand. 
*/ -#include #include #include "xprt_rdma.h" diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index feecd1f55f18..5461f01eeca6 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1176,10 +1176,7 @@ static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length) if (unlikely(!p)) return -EIO; - handle = be32_to_cpup(p++); - *length = be32_to_cpup(p++); - xdr_decode_hyper(p, &offset); - + xdr_decode_rdma_segment(p, &handle, length, &offset); trace_xprtrdma_decode_seg(handle, *length, offset); return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 5e78067889f3..c0587d3cd389 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -466,9 +466,7 @@ static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt, u32 maxlen) if (!p) return false; - handle = be32_to_cpup(p++); - length = be32_to_cpup(p++); - xdr_decode_hyper(p, &offset); + xdr_decode_rdma_segment(p, &handle, &length, &offset); trace_svcrdma_decode_wseg(handle, length, offset); total += length; diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 83806fa94def..2038b1b286dd 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -7,6 +7,7 @@ #include +#include #include #include @@ -441,34 +442,32 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info, seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz; do { unsigned int write_len; - u32 seg_length, seg_handle; - u64 seg_offset; + u32 handle, length; + u64 offset; if (info->wi_seg_no >= info->wi_nsegs) goto out_overflow; - seg_handle = be32_to_cpup(seg); - seg_length = be32_to_cpup(seg + 1); - xdr_decode_hyper(seg + 2, &seg_offset); - seg_offset += info->wi_seg_off; + xdr_decode_rdma_segment(seg, &handle, &length, &offset); + offset += info->wi_seg_off; - write_len = min(remaining, seg_length - info->wi_seg_off); + write_len = min(remaining, length - info->wi_seg_off); ctxt = svc_rdma_get_rw_ctxt(rdma, (write_len >> PAGE_SHIFT) + 2); if (!ctxt) return -ENOMEM; constructor(info, write_len, ctxt); - ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle, + ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle, DMA_TO_DEVICE); if (ret < 0) return -EIO; - trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset); + trace_svcrdma_send_wseg(handle, write_len, offset); list_add(&ctxt->rw_list, &cc->cc_rwctxts); cc->cc_sqecount += ret; - if (write_len == seg_length - info->wi_seg_off) { + if (write_len == length - info->wi_seg_off) { seg += 4; info->wi_seg_no++; info->wi_seg_off = 0; @@ -689,21 +688,17 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, ret = -EINVAL; info->ri_chunklen = 0; while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) { - u32 rs_handle, rs_length; - u64 rs_offset; + u32 handle, length; + u64 offset; - rs_handle = be32_to_cpup(p++); - rs_length = be32_to_cpup(p++); - p = xdr_decode_hyper(p, &rs_offset); - - ret = svc_rdma_build_read_segment(info, rqstp, - rs_handle, rs_length, - rs_offset); + p = xdr_decode_rdma_segment(p, &handle, &length, &offset); + ret = svc_rdma_build_read_segment(info, rqstp, handle, length, + offset); if (ret < 0) break; - trace_svcrdma_send_rseg(rs_handle, rs_length, rs_offset); - info->ri_chunklen += rs_length; + trace_svcrdma_send_rseg(handle, length, offset); + info->ri_chunklen += length; } return ret; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c 
b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index f985f548346a..a78f1d22e9bb 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -106,7 +106,6 @@ #include #include -#include #include #include "xprt_rdma.h" @@ -375,9 +374,7 @@ static ssize_t svc_rdma_encode_write_segment(__be32 *src, if (!p) return -EMSGSIZE; - handle = be32_to_cpup(src++); - length = be32_to_cpup(src++); - xdr_decode_hyper(src, &offset); + xdr_decode_rdma_segment(src, &handle, &length, &offset); *p++ = cpu_to_be32(handle); if (*remaining < length) { diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index d38be57b00ed..3da7901a49e6 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -55,7 +55,6 @@ #include #include -#include #include #include -- cgit v1.2.3 From 379c3bc6b4eb989ee37c4ce8ab403719e06fe35f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 7 Apr 2020 15:32:14 -0400 Subject: svcrdma: Add common XDR encoders for RDMA and Read segments Clean up: De-duplicate some code. Signed-off-by: Chuck Lever --- include/linux/sunrpc/rpc_rdma.h | 37 +++++++++++++++++++++++++++++++++++ net/sunrpc/xprtrdma/rpc_rdma.c | 14 +++---------- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 4 +--- 3 files changed, 41 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index db50380f64f4..4af31bbc8802 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -124,6 +124,43 @@ rpcrdma_decode_buffer_size(u8 val) return ((unsigned int)val + 1) << 10; } +/** + * xdr_encode_rdma_segment - Encode contents of an RDMA segment + * @p: Pointer into a send buffer + * @handle: The RDMA handle to encode + * @length: The RDMA length to encode + * @offset: The RDMA offset to encode + * + * Return value: + * Pointer to the XDR position that follows the encoded RDMA segment + */ +static inline __be32 *xdr_encode_rdma_segment(__be32 *p, u32 handle, + u32 length, u64 offset) +{ + *p++ = cpu_to_be32(handle); + *p++ = cpu_to_be32(length); + return xdr_encode_hyper(p, offset); +} + +/** + * xdr_encode_read_segment - Encode contents of a Read segment + * @p: Pointer into a send buffer + * @position: The position to encode + * @handle: The RDMA handle to encode + * @length: The RDMA length to encode + * @offset: The RDMA offset to encode + * + * Return value: + * Pointer to the XDR position that follows the encoded Read segment + */ +static inline __be32 *xdr_encode_read_segment(__be32 *p, u32 position, + u32 handle, u32 length, + u64 offset) +{ + *p++ = cpu_to_be32(position); + return xdr_encode_rdma_segment(p, handle, length, offset); +} + /** * xdr_decode_rdma_segment - Decode contents of an RDMA segment * @p: Pointer to the undecoded RDMA segment diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 5461f01eeca6..73ed51893175 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -275,14 +275,6 @@ out: return n; } -static void -xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr) -{ - *iptr++ = cpu_to_be32(mr->mr_handle); - *iptr++ = cpu_to_be32(mr->mr_length); - xdr_encode_hyper(iptr, mr->mr_offset); -} - static int encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr) { @@ -292,7 +284,7 @@ encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr) if (unlikely(!p)) return -EMSGSIZE; - xdr_encode_rdma_segment(p, mr); + 
xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset); return 0; } @@ -307,8 +299,8 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr, return -EMSGSIZE; *p++ = xdr_one; /* Item present */ - *p++ = cpu_to_be32(position); - xdr_encode_rdma_segment(p, mr); + xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length, + mr->mr_offset); return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index a78f1d22e9bb..38d8f0ee35ec 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -376,7 +376,6 @@ static ssize_t svc_rdma_encode_write_segment(__be32 *src, xdr_decode_rdma_segment(src, &handle, &length, &offset); - *p++ = cpu_to_be32(handle); if (*remaining < length) { /* segment only partly filled */ length = *remaining; @@ -385,8 +384,7 @@ static ssize_t svc_rdma_encode_write_segment(__be32 *src, /* entire segment was consumed */ *remaining -= length; } - *p++ = cpu_to_be32(length); - xdr_encode_hyper(p, offset); + xdr_encode_rdma_segment(p, handle, length, offset); trace_svcrdma_encode_wseg(handle, length, offset); return len; -- cgit v1.2.3 From f7bd657b55e3484cadc37a6439de23d2fd703bd6 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 19 May 2020 09:30:32 -0400 Subject: svcrdma: Introduce infrastructure to support completion IDs The goal is to replace CQE kernel memory addresses in completion- related tracepoints. Each completion ID matches an incoming Send or Receive completion to a Completion Queue and to a previous ib_post_*(). The ID can then be displayed in an error message or recorded in a trace record. Signed-off-by: Chuck Lever --- include/linux/sunrpc/rpc_rdma_cid.h | 24 +++++++++++++++++++++ include/trace/events/rpcrdma.h | 43 +++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 include/linux/sunrpc/rpc_rdma_cid.h (limited to 'include/linux') diff --git a/include/linux/sunrpc/rpc_rdma_cid.h b/include/linux/sunrpc/rpc_rdma_cid.h new file mode 100644 index 000000000000..be24ab2baa6a --- /dev/null +++ b/include/linux/sunrpc/rpc_rdma_cid.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * * Copyright (c) 2020, Oracle and/or its affiliates. + */ + +#ifndef RPC_RDMA_CID_H +#define RPC_RDMA_CID_H + +/* + * The rpc_rdma_cid struct records completion ID information. A + * completion ID matches an incoming Send or Receive completion + * to a Completion Queue and to a previous ib_post_*(). The ID + * can then be displayed in an error message or recorded in a + * trace record. + * + * This struct is shared between the server and client RPC/RDMA + * transport implementations. 
+ */ +struct rpc_rdma_cid { + u32 ci_queue_id; + int ci_completion_id; +}; + +#endif /* RPC_RDMA_CID_H */ diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 0eff80dee066..70ab989aa3b7 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -11,6 +11,7 @@ #define _TRACE_RPCRDMA_H #include +#include #include #include @@ -18,6 +19,48 @@ ** Event classes **/ +DECLARE_EVENT_CLASS(rpcrdma_completion_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(unsigned long, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; + if (wc->status) + __entry->vendor_err = wc->vendor_err; + else + __entry->vendor_err = 0; + ), + + TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +#define DEFINE_COMPLETION_EVENT(name) \ + DEFINE_EVENT(rpcrdma_completion_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + +DEFINE_COMPLETION_EVENT(dummy); + DECLARE_EVENT_CLASS(xprtrdma_reply_event, TP_PROTO( const struct rpcrdma_rep *rep -- cgit v1.2.3 From 9b3bcf8c5c134038e30624db5b57992ae50b80a9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 29 Apr 2020 16:22:26 -0400 Subject: svcrdma: Introduce Receive completion IDs Set up a completion ID in each svc_rdma_recv_ctxt. The ID is used to match an incoming Receive completion to a transport and to a previous ib_post_recv(). 
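The idea, sketched standalone below (illustrative only; all names and values are hypothetical, and the real code lives in the diff that follows): tag every posted Receive with a (CQ id, monotonic counter) pair and emit the same pair when the completion fires, so a trace consumer can pair an ib_post_recv() with its completion without logging kernel memory addresses.

    #include <stdio.h>
    #include <stdint.h>

    struct cid { uint32_t queue_id; int completion_id; };

    static int next_completion_id;      /* per-transport monotonic counter */

    static struct cid post_recv(uint32_t queue_id)
    {
            struct cid cid = { queue_id, ++next_completion_id };
            printf("post_recv:  cq.id=%u cid=%d\n",
                   cid.queue_id, cid.completion_id);
            return cid;
    }

    static void wc_receive(const struct cid *cid, int status)
    {
            /* Same pair as the post, so the two events correlate. */
            printf("wc_receive: cq.id=%u cid=%d status=%d\n",
                   cid->queue_id, cid->completion_id, status);
    }

    int main(void)
    {
            struct cid cid = post_recv(4);
            wc_receive(&cid, 0);
            return 0;
    }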
Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 4 +++ include/trace/events/rpcrdma.h | 51 +++++++++++++-------------------- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 15 ++++++++-- 3 files changed, 36 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index d28ca1b6f2eb..c3c1e46f510f 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -109,6 +110,8 @@ struct svcxprt_rdma { struct work_struct sc_work; struct llist_head sc_recv_ctxts; + + atomic_t sc_completion_ids; }; /* sc_flags */ #define RDMAXPRT_CONN_PENDING 3 @@ -129,6 +132,7 @@ struct svc_rdma_recv_ctxt { struct list_head rc_list; struct ib_recv_wr rc_recv_wr; struct ib_cqe rc_cqe; + struct rpc_rdma_cid rc_cid; struct ib_sge rc_recv_sge; void *rc_recv_buf; struct xdr_buf rc_arg; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 70ab989aa3b7..a0330a557e34 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -59,8 +59,6 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) -DEFINE_COMPLETION_EVENT(dummy); - DECLARE_EVENT_CLASS(xprtrdma_reply_event, TP_PROTO( const struct rpcrdma_rep *rep @@ -1849,57 +1847,48 @@ DEFINE_SENDCOMP_EVENT(send); TRACE_EVENT(svcrdma_post_recv, TP_PROTO( - const struct ib_recv_wr *wr, - int status + const struct svc_rdma_recv_ctxt *ctxt ), - TP_ARGS(wr, status), + TP_ARGS(ctxt), TP_STRUCT__entry( - __field(const void *, cqe) - __field(int, status) + __field(u32, cq_id) + __field(int, completion_id) ), TP_fast_assign( - __entry->cqe = wr->wr_cqe; - __entry->status = status; + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; ), - TP_printk("cqe=%p status=%d", - __entry->cqe, __entry->status + TP_printk("cq.id=%d cid=%d", + __entry->cq_id, __entry->completion_id ) ); -TRACE_EVENT(svcrdma_wc_receive, +DEFINE_COMPLETION_EVENT(svcrdma_wc_receive); + +TRACE_EVENT(svcrdma_rq_post_err, TP_PROTO( - const struct ib_wc *wc + const struct svcxprt_rdma *rdma, + int status ), - TP_ARGS(wc), + TP_ARGS(rdma, status), TP_STRUCT__entry( - __field(const void *, cqe) - __field(u32, byte_len) - __field(unsigned int, status) - __field(u32, vendor_err) + __field(int, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( - __entry->cqe = wc->wr_cqe; - __entry->status = wc->status; - if (wc->status) { - __entry->byte_len = 0; - __entry->vendor_err = wc->vendor_err; - } else { - __entry->byte_len = wc->byte_len; - __entry->vendor_err = 0; - } + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)", - __entry->cqe, __entry->byte_len, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err + TP_printk("addr=%s status=%d", + __get_str(addr), __entry->status ) ); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index c0587d3cd389..e6d7401232d2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -117,6 +117,13 @@ svc_rdma_next_recv_ctxt(struct list_head *list) rc_list); } +static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma, + struct rpc_rdma_cid *cid) +{ + cid->ci_queue_id = rdma->sc_rq_cq->res.id; + cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); +} + static 
struct svc_rdma_recv_ctxt * svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma) { @@ -135,6 +142,8 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma) if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) goto fail2; + svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid); + ctxt->rc_recv_wr.next = NULL; ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge; @@ -249,13 +258,14 @@ static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma, int ret; svc_xprt_get(&rdma->sc_xprt); + trace_svcrdma_post_recv(ctxt); ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL); - trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret); if (ret) goto err_post; return 0; err_post: + trace_svcrdma_rq_post_err(rdma, ret); svc_rdma_recv_ctxt_put(rdma, ctxt); svc_xprt_put(&rdma->sc_xprt); return ret; @@ -309,11 +319,10 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) struct ib_cqe *cqe = wc->wr_cqe; struct svc_rdma_recv_ctxt *ctxt; - trace_svcrdma_wc_receive(wc); - /* WARNING: Only wc->wr_cqe and wc->status are reliable */ ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe); + trace_svcrdma_wc_receive(wc, &ctxt->rc_cid); if (wc->status != IB_WC_SUCCESS) goto flushed; -- cgit v1.2.3 From 3ac56c2fb166fea25974d8c48bb4a72ee298361b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 30 Apr 2020 13:47:07 -0400 Subject: svcrdma: Introduce Send completion IDs Set up a completion ID in each svc_rdma_send_ctxt. The ID is used to match an incoming Send completion to a transport and to a previous ib_post_send(). Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 2 ++ include/trace/events/rpcrdma.h | 2 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 15 ++++++++++++--- 3 files changed, 15 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index c3c1e46f510f..c91e00bc937e 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -151,6 +151,8 @@ struct svc_rdma_recv_ctxt { struct svc_rdma_send_ctxt { struct list_head sc_list; + struct rpc_rdma_cid sc_cid; + struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; struct xdr_buf sc_hdrbuf; diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index df49ae5d447b..782a4d826a4b 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1863,7 +1863,7 @@ TRACE_EVENT(svcrdma_post_send, ) ); -DEFINE_SENDCOMP_EVENT(send); +DEFINE_COMPLETION_EVENT(svcrdma_wc_send); TRACE_EVENT(svcrdma_post_recv, TP_PROTO( diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 38d8f0ee35ec..c720dcf56231 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -122,6 +122,13 @@ svc_rdma_next_send_ctxt(struct list_head *list) sc_list); } +static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma, + struct rpc_rdma_cid *cid) +{ + cid->ci_queue_id = rdma->sc_sq_cq->res.id; + cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); +} + static struct svc_rdma_send_ctxt * svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) { @@ -144,6 +151,8 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) if (ib_dma_mapping_error(rdma->sc_pd->device, addr)) goto fail2; + svc_rdma_send_cid_init(rdma, &ctxt->sc_cid); + ctxt->sc_send_wr.next = NULL; ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; ctxt->sc_send_wr.sg_list = ctxt->sc_sges; @@ -268,14 +277,14 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct 
ib_wc *wc) { struct svcxprt_rdma *rdma = cq->cq_context; struct ib_cqe *cqe = wc->wr_cqe; - struct svc_rdma_send_ctxt *ctxt; + struct svc_rdma_send_ctxt *ctxt = + container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe); - trace_svcrdma_wc_send(wc); + trace_svcrdma_wc_send(wc, &ctxt->sc_cid); atomic_inc(&rdma->sc_sq_avail); wake_up(&rdma->sc_send_wait); - ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe); svc_rdma_send_ctxt_put(rdma, ctxt); if (unlikely(wc->status != IB_WC_SUCCESS)) { -- cgit v1.2.3 From 17f70f8dd52be3723250d21093403bb3a9f2162f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 29 Apr 2020 11:05:33 -0400 Subject: svcrdma: Record send_ctxt completion ID in trace_svcrdma_post_send() First, refactor: Dereference the svc_rdma_send_ctxt inside svc_rdma_send() instead of at every call site. Then, it can be passed into trace_svcrdma_post_send() to get the proper completion ID. Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 3 ++- include/trace/events/rpcrdma.h | 18 +++++++++++------- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 2 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 11 ++++++----- 4 files changed, 20 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index c91e00bc937e..9dc3a3b88391 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -196,7 +196,8 @@ extern struct svc_rdma_send_ctxt * svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma); extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); -extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr); +extern int svc_rdma_send(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 782a4d826a4b..aeeba9188ed5 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -1839,27 +1839,31 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event, TRACE_EVENT(svcrdma_post_send, TP_PROTO( - const struct ib_send_wr *wr + const struct svc_rdma_send_ctxt *ctxt ), - TP_ARGS(wr), + TP_ARGS(ctxt), TP_STRUCT__entry( - __field(const void *, cqe) + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, num_sge) __field(u32, inv_rkey) ), TP_fast_assign( - __entry->cqe = wr->wr_cqe; + const struct ib_send_wr *wr = &ctxt->sc_send_wr; + + __entry->cq_id = ctxt->sc_cid.ci_queue_id; + __entry->completion_id = ctxt->sc_cid.ci_completion_id; __entry->num_sge = wr->num_sge; __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 
wr->ex.invalidate_rkey : 0; ), - TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x", - __entry->cqe, __entry->num_sge, - __entry->inv_rkey + TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", + __entry->cq_id, __entry->completion_id, + __entry->num_sge, __entry->inv_rkey ) ); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 1ee73f7cf931..5e7c4ba9e147 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -87,7 +87,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, */ get_page(virt_to_page(rqst->rq_buffer)); ctxt->sc_send_wr.opcode = IB_WR_SEND; - return svc_rdma_send(rdma, &ctxt->sc_send_wr); + return svc_rdma_send(rdma, ctxt); } /* Server-side transport endpoint wants a whole page for its send diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index c720dcf56231..73d46e8cdc16 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -298,13 +298,14 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) /** * svc_rdma_send - Post a single Send WR * @rdma: transport on which to post the WR - * @wr: prepared Send WR to post + * @ctxt: send ctxt with a Send WR ready to post * * Returns zero the Send WR was posted successfully. Otherwise, a * negative errno is returned. */ -int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) +int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt) { + struct ib_send_wr *wr = &ctxt->sc_send_wr; int ret; might_sleep(); @@ -330,7 +331,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) } svc_xprt_get(&rdma->sc_xprt); - trace_svcrdma_post_send(wr); + trace_svcrdma_post_send(ctxt); ret = ib_post_send(rdma->sc_qp, wr, NULL); if (ret) break; @@ -805,7 +806,7 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, } else { sctxt->sc_send_wr.opcode = IB_WR_SEND; } - return svc_rdma_send(rdma, &sctxt->sc_send_wr); + return svc_rdma_send(rdma, sctxt); } /** @@ -869,7 +870,7 @@ void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, sctxt->sc_send_wr.num_sge = 1; sctxt->sc_send_wr.opcode = IB_WR_SEND; sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len; - if (svc_rdma_send(rdma, &sctxt->sc_send_wr)) + if (svc_rdma_send(rdma, sctxt)) goto put_ctxt; return; -- cgit v1.2.3 From 9a67fcc8f3fd1e294922f28f20003c31d7f6cfeb Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:53 +0000 Subject: nfs: add client side only definitions for user xattrs Add client-side only definitions for user extended attributes (RFC8276). These are the access bits as used by the client code, and the CLNT procedure number definition. 
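A sketch of how these bits are expected to map onto the user-xattr operations (illustrative only; the enum and helper below are hypothetical, and the bit values mirror the nfs_fs.h hunk that follows):

    #define NFS_ACCESS_XAREAD  0x0040   /* may read user xattr values */
    #define NFS_ACCESS_XAWRITE 0x0080   /* may set/remove user xattrs */
    #define NFS_ACCESS_XALIST  0x0100   /* may list user xattr names */

    enum nfs_xattr_op {
            NFS_XATTR_OP_GET,
            NFS_XATTR_OP_SET,
            NFS_XATTR_OP_REMOVE,
            NFS_XATTR_OP_LIST,
    };

    /* Access bit the client needs before performing @op. */
    static unsigned int nfs_xattr_access_mask(enum nfs_xattr_op op)
    {
            switch (op) {
            case NFS_XATTR_OP_GET:
                    return NFS_ACCESS_XAREAD;
            case NFS_XATTR_OP_SET:
            case NFS_XATTR_OP_REMOVE:
                    return NFS_ACCESS_XAWRITE;
            case NFS_XATTR_OP_LIST:
                    return NFS_ACCESS_XALIST;
            }
            return 0;
    }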
Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- include/linux/nfs4.h | 5 +++++ include/linux/nfs_fs.h | 3 +++ 2 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index e6ca9d1d2e76..db13026ac7d1 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -553,6 +553,11 @@ enum { NFSPROC4_CLNT_LAYOUTERROR, NFSPROC4_CLNT_COPY_NOTIFY, + + NFSPROC4_CLNT_GETXATTR, + NFSPROC4_CLNT_SETXATTR, + NFSPROC4_CLNT_LISTXATTRS, + NFSPROC4_CLNT_REMOVEXATTR, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 6ee9119acc5d..b743988fcbd0 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -212,6 +212,9 @@ struct nfs4_copy_state { #define NFS_ACCESS_EXTEND 0x0008 #define NFS_ACCESS_DELETE 0x0010 #define NFS_ACCESS_EXECUTE 0x0020 +#define NFS_ACCESS_XAREAD 0x0040 +#define NFS_ACCESS_XAWRITE 0x0080 +#define NFS_ACCESS_XALIST 0x0100 /* * Cache validity bit flags -- cgit v1.2.3 From 04a5da690e8f2da23c2ac940f2921e3aa622db82 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:54 +0000 Subject: NFSv4.2: define limits and sizes for user xattr handling Set limits for extended attributes (attribute value size and listxattr buffer size), based on the fs-independent limits (XATTR_*_MAX). Define the maximum XDR sizes for the RFC 8276 XATTR operations. In the case of operations that carry a larger payload (SETXATTR, GETXATTR, LISTXATTR), these exclude that payload, which is added as separate pages, like other operations do. Define, much like for read and write operations, the maximum overhead sizes for get/set/listxattr, and use them to limit the maximum payload size for those operations, in combination with the channel attributes. Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 19 ++++++++++-- fs/nfs/nfs42.h | 16 ++++++++++ fs/nfs/nfs42xdr.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++ fs/nfs/nfs4_fs.h | 6 ++++ fs/nfs/nfs4client.c | 31 ++++++++++++++++++++ include/linux/nfs_fs_sb.h | 5 ++++ 6 files changed, 149 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index f1ff3076e4a4..055040bf1a8e 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -50,6 +50,7 @@ #include "nfs.h" #include "netns.h" #include "sysfs.h" +#include "nfs42.h" #define NFSDBG_FACILITY NFSDBG_CLIENT @@ -749,7 +750,7 @@ error: static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *fsinfo) { - unsigned long max_rpc_payload; + unsigned long max_rpc_payload, raw_max_rpc_payload; /* Work out a lot of parameters */ if (server->rsize == 0) @@ -762,7 +763,9 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax) server->wsize = nfs_block_size(fsinfo->wtmax, NULL); - max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL); + raw_max_rpc_payload = rpc_max_payload(server->client); + max_rpc_payload = nfs_block_size(raw_max_rpc_payload, NULL); + if (server->rsize > max_rpc_payload) server->rsize = max_rpc_payload; if (server->rsize > NFS_MAX_FILE_IO_SIZE) @@ -795,6 +798,18 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, server->clone_blksize = fsinfo->clone_blksize; /* We're airborne Set socket buffersize */ rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100); + +#ifdef CONFIG_NFS_V4_2 + /* + * Defaults until limited by the session parameters. 
+ */ + server->gxasize = min_t(unsigned int, raw_max_rpc_payload, + XATTR_SIZE_MAX); + server->sxasize = min_t(unsigned int, raw_max_rpc_payload, + XATTR_SIZE_MAX); + server->lxasize = min_t(unsigned int, raw_max_rpc_payload, + nfs42_listxattr_xdrsize(XATTR_LIST_MAX)); +#endif } /* diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h index c891af949886..51de8ddc7d88 100644 --- a/fs/nfs/nfs42.h +++ b/fs/nfs/nfs42.h @@ -6,6 +6,8 @@ #ifndef __LINUX_FS_NFS_NFS4_2_H #define __LINUX_FS_NFS_NFS4_2_H +#include + /* * FIXME: four LAYOUTSTATS calls per compound at most! Do we need to support * more? Need to consider not to pre-alloc too much for a compound. @@ -36,5 +38,19 @@ static inline bool nfs42_files_from_same_server(struct file *in, return nfs4_check_serverowner_major_id(c_in->cl_serverowner, c_out->cl_serverowner); } + +/* + * Maximum XDR buffer size needed for a listxattr buffer of buflen size. + * + * The upper boundary is a buffer with all 1-byte sized attribute names. + * They would be 7 bytes long in the eventual buffer ("user.x\0"), and + * 8 bytes long XDR-encoded. + * + * Include the trailing eof word as well. + */ +static inline u32 nfs42_listxattr_xdrsize(u32 buflen) +{ + return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4; +} #endif /* CONFIG_NFS_V4_2 */ #endif /* __LINUX_FS_NFS_NFS4_2_H */ diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index c03f3246d6c5..6712daa9d85b 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -169,6 +169,80 @@ decode_clone_maxsz + \ decode_getattr_maxsz) +#ifdef CONFIG_NFS_V4_2 +/* Not limited by NFS itself, limited by the generic xattr code */ +#define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX) + +#define encode_getxattr_maxsz (op_encode_hdr_maxsz + 1 + \ + nfs4_xattr_name_maxsz) +#define decode_getxattr_maxsz (op_decode_hdr_maxsz + 1 + 1) +#define encode_setxattr_maxsz (op_encode_hdr_maxsz + \ + 1 + nfs4_xattr_name_maxsz + 1) +#define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) +#define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1) +#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1) +#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \ + nfs4_xattr_name_maxsz) +#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \ + decode_change_info_maxsz) + +#define NFS4_enc_getxattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_getxattr_maxsz) +#define NFS4_dec_getxattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_getxattr_maxsz) +#define NFS4_enc_setxattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_setxattr_maxsz) +#define NFS4_dec_setxattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_setxattr_maxsz) +#define NFS4_enc_listxattrs_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_listxattrs_maxsz) +#define NFS4_dec_listxattrs_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_listxattrs_maxsz) +#define NFS4_enc_removexattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_removexattr_maxsz) +#define NFS4_dec_removexattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_removexattr_maxsz) + +/* + * These values specify the maximum amount of data that is not + * associated with the extended attribute name or 
extended + * attribute list in the SETXATTR, GETXATTR and LISTXATTR + * respectively. + */ +const u32 nfs42_maxsetxattr_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_encode_hdr_maxsz + + encode_sequence_maxsz + + encode_putfh_maxsz + 1 + + nfs4_xattr_name_maxsz) + * XDR_UNIT); + +const u32 nfs42_maxgetxattr_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_decode_hdr_maxsz + + decode_sequence_maxsz + + decode_putfh_maxsz + 1) * XDR_UNIT); + +const u32 nfs42_maxlistxattrs_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_decode_hdr_maxsz + + decode_sequence_maxsz + + decode_putfh_maxsz + 3) * XDR_UNIT); +#endif + static void encode_fallocate(struct xdr_stream *xdr, const struct nfs42_falloc_args *args) { diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 2b7f6dcd2eb8..526b3e70d57c 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -557,6 +557,12 @@ static inline void nfs4_unregister_sysctl(void) /* nfs4xdr.c */ extern const struct rpc_procinfo nfs4_procedures[]; +#ifdef CONFIG_NFS_V4_2 +extern const u32 nfs42_maxsetxattr_overhead; +extern const u32 nfs42_maxgetxattr_overhead; +extern const u32 nfs42_maxlistxattrs_overhead; +#endif + struct nfs4_mount_data; /* callback_xdr.c */ diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 0bd77cc1f639..c41cbd86612c 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -992,6 +992,36 @@ static void nfs4_session_limit_rwsize(struct nfs_server *server) #endif /* CONFIG_NFS_V4_1 */ } +/* + * Limit xattr sizes using the channel attributes. + */ +static void nfs4_session_limit_xasize(struct nfs_server *server) +{ +#ifdef CONFIG_NFS_V4_2 + struct nfs4_session *sess; + u32 server_gxa_sz; + u32 server_sxa_sz; + u32 server_lxa_sz; + + if (!nfs4_has_session(server->nfs_client)) + return; + + sess = server->nfs_client->cl_session; + + server_gxa_sz = sess->fc_attrs.max_resp_sz - nfs42_maxgetxattr_overhead; + server_sxa_sz = sess->fc_attrs.max_rqst_sz - nfs42_maxsetxattr_overhead; + server_lxa_sz = sess->fc_attrs.max_resp_sz - + nfs42_maxlistxattrs_overhead; + + if (server->gxasize > server_gxa_sz) + server->gxasize = server_gxa_sz; + if (server->sxasize > server_sxa_sz) + server->sxasize = server_sxa_sz; + if (server->lxasize > server_lxa_sz) + server->lxasize = server_lxa_sz; +#endif +} + static int nfs4_server_common_setup(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_probe) { @@ -1039,6 +1069,7 @@ static int nfs4_server_common_setup(struct nfs_server *server, goto out; nfs4_session_limit_rwsize(server); + nfs4_session_limit_xasize(server); if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) server->namelen = NFS4_MAXNAMLEN; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 465fa98258a3..128e01acb4ca 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -163,6 +163,11 @@ struct nfs_server { unsigned int dtsize; /* readdir size */ unsigned short port; /* "port=" setting */ unsigned int bsize; /* server block size */ +#ifdef CONFIG_NFS_V4_2 + unsigned int gxasize; /* getxattr size */ + unsigned int sxasize; /* setxattr size */ + unsigned int lxasize; /* listxattr size */ +#endif unsigned int acregmin; /* attr cache timeouts */ unsigned int acregmax; unsigned int acdirmin; -- cgit v1.2.3 From b78ef845c35dbae25e57b598901a65b13d940c81 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:55 +0000 Subject: NFSv4.2: query the server for extended attribute support Query the server for extended attribute support, and record it as the 
NFS_CAP_XATTR flag in the server capabilities. Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 3 +++ fs/nfs/nfs4proc.c | 3 ++- fs/nfs/nfs4xdr.c | 25 +++++++++++++++++++++++++ include/linux/nfs_fs_sb.h | 1 + include/linux/nfs_xdr.h | 1 + 5 files changed, 32 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 055040bf1a8e..4b8cc93913f7 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -809,6 +809,9 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, XATTR_SIZE_MAX); server->lxasize = min_t(unsigned int, raw_max_rpc_payload, nfs42_listxattr_xdrsize(XATTR_LIST_MAX)); + + if (fsinfo->xattr_support) + server->caps |= NFS_CAP_XATTR; #endif } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e32717fd1169..64e081459327 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -256,6 +256,7 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD1_FS_LAYOUT_TYPES, FATTR4_WORD2_LAYOUT_BLKSIZE | FATTR4_WORD2_CLONE_BLKSIZE + | FATTR4_WORD2_XATTR_SUPPORT }; const u32 nfs4_fs_locations_bitmap[3] = { @@ -3740,7 +3741,7 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) -#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL) +#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL) static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 47817ef0aadb..9e1b07640e9a 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4201,6 +4201,26 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str return status; } +static int decode_attr_xattrsupport(struct xdr_stream *xdr, uint32_t *bitmap, + uint32_t *res) +{ + __be32 *p; + + *res = 0; + if (unlikely(bitmap[2] & (FATTR4_WORD2_XATTR_SUPPORT - 1U))) + return -EIO; + if (likely(bitmap[2] & FATTR4_WORD2_XATTR_SUPPORT)) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + *res = be32_to_cpup(p); + bitmap[2] &= ~FATTR4_WORD2_XATTR_SUPPORT; + } + dprintk("%s: XATTR support=%s\n", __func__, + *res == 0 ? 
"false" : "true"); + return 0; +} + static int verify_attr_len(struct xdr_stream *xdr, unsigned int savep, uint32_t attrlen) { unsigned int attrwords = XDR_QUADLEN(attrlen); @@ -4855,6 +4875,11 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) if (status) goto xdr_error; + status = decode_attr_xattrsupport(xdr, bitmap, + &fsinfo->xattr_support); + if (status) + goto xdr_error; + status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 128e01acb4ca..7eae72a8762e 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -286,5 +286,6 @@ struct nfs_server { #define NFS_CAP_OFFLOAD_CANCEL (1U << 25) #define NFS_CAP_LAYOUTERROR (1U << 26) #define NFS_CAP_COPY_NOTIFY (1U << 27) +#define NFS_CAP_XATTR (1U << 28) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 5fd0a9ef425f..dee9b1cfa972 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -150,6 +150,7 @@ struct nfs_fsinfo { __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ __u32 clone_blksize; /* granularity of a CLONE operation */ + __u32 xattr_support; /* User xattrs supported */ }; struct nfs_fsstat { -- cgit v1.2.3 From 3e1f02123fba086d32dfd5729e6f4e2b54654acc Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:56 +0000 Subject: NFSv4.2: add client side XDR handling for extended attributes Define the argument and response structures that will be used for RFC 8276 extended attribute RPC calls, and implement the necessary functions to encode/decode the extended attribute operations. Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/nfs42xdr.c | 368 +++++++++++++++++++++++++++++++++++++++++++++++- fs/nfs/nfs4xdr.c | 6 + include/linux/nfs_xdr.h | 59 +++++++- 3 files changed, 430 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 6712daa9d85b..cc50085e151c 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -169,7 +169,6 @@ decode_clone_maxsz + \ decode_getattr_maxsz) -#ifdef CONFIG_NFS_V4_2 /* Not limited by NFS itself, limited by the generic xattr code */ #define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX) @@ -241,7 +240,6 @@ const u32 nfs42_maxlistxattrs_overhead = ((RPC_MAX_HEADER_WITH_AUTH + compound_decode_hdr_maxsz + decode_sequence_maxsz + decode_putfh_maxsz + 3) * XDR_UNIT); -#endif static void encode_fallocate(struct xdr_stream *xdr, const struct nfs42_falloc_args *args) @@ -407,6 +405,210 @@ static void encode_layouterror(struct xdr_stream *xdr, encode_device_error(xdr, &args->errors[0]); } +static void encode_setxattr(struct xdr_stream *xdr, + const struct nfs42_setxattrargs *arg, + struct compound_hdr *hdr) +{ + __be32 *p; + + BUILD_BUG_ON(XATTR_CREATE != SETXATTR4_CREATE); + BUILD_BUG_ON(XATTR_REPLACE != SETXATTR4_REPLACE); + + encode_op_hdr(xdr, OP_SETXATTR, decode_setxattr_maxsz, hdr); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(arg->xattr_flags); + encode_string(xdr, strlen(arg->xattr_name), arg->xattr_name); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(arg->xattr_len); + if (arg->xattr_len) + xdr_write_pages(xdr, arg->xattr_pages, 0, arg->xattr_len); +} + +static int decode_setxattr(struct xdr_stream *xdr, + struct nfs4_change_info *cinfo) +{ + int status; + + status = decode_op_hdr(xdr, OP_SETXATTR); + 
if (status) + goto out; + status = decode_change_info(xdr, cinfo); +out: + return status; +} + + +static void encode_getxattr(struct xdr_stream *xdr, const char *name, + struct compound_hdr *hdr) +{ + encode_op_hdr(xdr, OP_GETXATTR, decode_getxattr_maxsz, hdr); + encode_string(xdr, strlen(name), name); +} + +static int decode_getxattr(struct xdr_stream *xdr, + struct nfs42_getxattrres *res, + struct rpc_rqst *req) +{ + int status; + __be32 *p; + u32 len, rdlen; + + status = decode_op_hdr(xdr, OP_GETXATTR); + if (status) + return status; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + len = be32_to_cpup(p); + if (len > req->rq_rcv_buf.page_len) + return -ERANGE; + + res->xattr_len = len; + + if (len > 0) { + rdlen = xdr_read_pages(xdr, len); + if (rdlen < len) + return -EIO; + } + + return 0; +} + +static void encode_removexattr(struct xdr_stream *xdr, const char *name, + struct compound_hdr *hdr) +{ + encode_op_hdr(xdr, OP_REMOVEXATTR, decode_removexattr_maxsz, hdr); + encode_string(xdr, strlen(name), name); +} + + +static int decode_removexattr(struct xdr_stream *xdr, + struct nfs4_change_info *cinfo) +{ + int status; + + status = decode_op_hdr(xdr, OP_REMOVEXATTR); + if (status) + goto out; + + status = decode_change_info(xdr, cinfo); +out: + return status; +} + +static void encode_listxattrs(struct xdr_stream *xdr, + const struct nfs42_listxattrsargs *arg, + struct compound_hdr *hdr) +{ + __be32 *p; + + encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz + 1, hdr); + + p = reserve_space(xdr, 12); + if (unlikely(!p)) + return; + + p = xdr_encode_hyper(p, arg->cookie); + /* + * RFC 8276 says to specify the full max length of the LISTXATTRS + * XDR reply. Count is set to the XDR length of the names array + * plus the EOF marker. So, add the cookie and the names count. + */ + *p = cpu_to_be32(arg->count + 8 + 4); +} + +static int decode_listxattrs(struct xdr_stream *xdr, + struct nfs42_listxattrsres *res) +{ + int status; + __be32 *p; + u32 count, len, ulen; + size_t left, copied; + char *buf; + + status = decode_op_hdr(xdr, OP_LISTXATTRS); + if (status) { + /* + * Special case: for LISTXATTRS, NFS4ERR_TOOSMALL + * should be translated to ERANGE. + */ + if (status == -ETOOSMALL) + status = -ERANGE; + goto out; + } + + p = xdr_inline_decode(xdr, 8); + if (unlikely(!p)) + return -EIO; + + xdr_decode_hyper(p, &res->cookie); + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + left = res->xattr_len; + buf = res->xattr_buf; + + count = be32_to_cpup(p); + copied = 0; + + /* + * We have asked for enough room to encode the maximum number + * of possible attribute names, so everything should fit. + * + * But, don't rely on that assumption. Just decode entries + * until they don't fit anymore, just in case the server did + * something odd. 
+ */ + while (count--) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + len = be32_to_cpup(p); + if (len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) { + status = -ERANGE; + goto out; + } + + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -EIO; + + ulen = len + XATTR_USER_PREFIX_LEN + 1; + if (buf) { + if (ulen > left) { + status = -ERANGE; + goto out; + } + + memcpy(buf, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); + memcpy(buf + XATTR_USER_PREFIX_LEN, p, len); + + buf[ulen - 1] = 0; + buf += ulen; + left -= ulen; + } + copied += ulen; + } + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + res->eof = be32_to_cpup(p); + res->copied = copied; + +out: + if (status == -ERANGE && res->xattr_len == XATTR_LIST_MAX) + status = -E2BIG; + + return status; +} + /* * Encode ALLOCATE request */ @@ -1062,4 +1264,166 @@ out: return status; } +#ifdef CONFIG_NFS_V4_2 +static void nfs4_xdr_enc_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + const void *data) +{ + const struct nfs42_setxattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_setxattr(xdr, args, &hdr); + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + void *data) +{ + struct nfs42_setxattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, req); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + + status = decode_setxattr(xdr, &res->cinfo); +out: + return status; +} + +static void nfs4_xdr_enc_getxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + const void *data) +{ + const struct nfs42_getxattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + size_t plen; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_getxattr(xdr, args->xattr_name, &hdr); + + plen = args->xattr_len ? 
args->xattr_len : XATTR_SIZE_MAX; + + rpc_prepare_reply_pages(req, args->xattr_pages, 0, plen, + hdr.replen); + req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; + + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_getxattr(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_getxattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, rqstp); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + status = decode_getxattr(xdr, res, rqstp); +out: + return status; +} + +static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req, + struct xdr_stream *xdr, const void *data) +{ + const struct nfs42_listxattrsargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_listxattrs(xdr, args, &hdr); + + rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, + hdr.replen); + req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; + + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_listxattrs(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_listxattrsres *res = data; + struct compound_hdr hdr; + int status; + + xdr_set_scratch_buffer(xdr, page_address(res->scratch), PAGE_SIZE); + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, rqstp); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + status = decode_listxattrs(xdr, res); +out: + return status; +} + +static void nfs4_xdr_enc_removexattr(struct rpc_rqst *req, + struct xdr_stream *xdr, const void *data) +{ + const struct nfs42_removexattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_removexattr(xdr, args->xattr_name, &hdr); + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_removexattr(struct rpc_rqst *req, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_removexattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, req); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + + status = decode_removexattr(xdr, &res->cinfo); +out: + return status; +} +#endif #endif /* __LINUX_FS_NFS_NFS4_2XDR_H */ diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 9e1b07640e9a..388ac520b104 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -7481,6 +7481,8 @@ static struct { { NFS4ERR_SYMLINK, -ELOOP }, { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_DEADLOCK, -EDEADLK }, + { NFS4ERR_NOXATTR, -ENODATA }, + { NFS4ERR_XATTR2BIG, -E2BIG }, { -1, -EIO } }; @@ -7609,6 +7611,10 @@ const struct rpc_procinfo nfs4_procedures[] = { PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify), PROC(LOOKUPP, enc_lookupp, dec_lookupp), PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror), + PROC42(GETXATTR, enc_getxattr, dec_getxattr), + PROC42(SETXATTR, enc_setxattr, dec_setxattr), + PROC42(LISTXATTRS, enc_listxattrs, dec_listxattrs), + PROC42(REMOVEXATTR, enc_removexattr, dec_removexattr), }; static unsigned int 
nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)]; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index dee9b1cfa972..9408f3252c8e 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1498,7 +1498,64 @@ struct nfs42_seek_res { u32 sr_eof; u64 sr_offset; }; -#endif + +struct nfs42_setxattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; + u32 xattr_flags; + size_t xattr_len; + struct page **xattr_pages; +}; + +struct nfs42_setxattrres { + struct nfs4_sequence_res seq_res; + struct nfs4_change_info cinfo; +}; + +struct nfs42_getxattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; + size_t xattr_len; + struct page **xattr_pages; +}; + +struct nfs42_getxattrres { + struct nfs4_sequence_res seq_res; + size_t xattr_len; +}; + +struct nfs42_listxattrsargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + u32 count; + u64 cookie; + struct page **xattr_pages; +}; + +struct nfs42_listxattrsres { + struct nfs4_sequence_res seq_res; + struct page *scratch; + void *xattr_buf; + size_t xattr_len; + u64 cookie; + bool eof; + size_t copied; +}; + +struct nfs42_removexattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; +}; + +struct nfs42_removexattrres { + struct nfs4_sequence_res seq_res; + struct nfs4_change_info cinfo; +}; + +#endif /* CONFIG_NFS_V4_2 */ struct nfs_page; -- cgit v1.2.3 From d2ae4f8b21c111bb795c557588d89dccd005828d Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:38:57 +0000 Subject: nfs: define nfs_access_get_cached function The only consumer of nfs_access_get_cached_rcu and nfs_access_cached calls these static functions in order to first try RCU access, and then locked access. Combine them in to a single function, and call that. Make this function available to the rest of the NFS code. 
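A usage sketch of the newly exported helper (the wrapper below is hypothetical; the NFS_ACCESS_XAREAD check assumes the access bits defined earlier in this series):

    /* Hypothetical caller outside dir.c: answer "may this cred read
     * user xattrs?" purely from the access cache, without a new
     * over-the-wire ACCESS call. A nonzero return from
     * nfs_access_get_cached() means there was no usable cached entry. */
    static bool nfs_may_read_xattrs_cached(struct inode *inode,
                                           const struct cred *cred)
    {
            struct nfs_access_entry cache;

            if (nfs_access_get_cached(inode, cred, &cache, true) != 0)
                    return false;   /* not cached; ask the server instead */
            return (cache.mask & NFS_ACCESS_XAREAD) != 0;
    }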
Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 20 ++++++++++++++++---- include/linux/nfs_fs.h | 2 ++ 2 files changed, 18 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5a331da5f55a..f04fc0f7843b 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2460,7 +2460,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co return NULL; } -static int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) +static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_access_entry *cache; @@ -2533,6 +2533,20 @@ out: return err; } +int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct +nfs_access_entry *res, bool may_block) +{ + int status; + + status = nfs_access_get_cached_rcu(inode, cred, res); + if (status != 0) + status = nfs_access_get_cached_locked(inode, cred, res, + may_block); + + return status; +} +EXPORT_SYMBOL_GPL(nfs_access_get_cached); + static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set) { struct nfs_inode *nfsi = NFS_I(inode); @@ -2647,9 +2661,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) trace_nfs_access_enter(inode); - status = nfs_access_get_cached_rcu(inode, cred, &cache); - if (status != 0) - status = nfs_access_get_cached(inode, cred, &cache, may_block); + status = nfs_access_get_cached(inode, cred, &cache, may_block); if (status == 0) goto out_cached; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b743988fcbd0..714b577dce19 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -493,6 +493,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); +extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, + bool may_block); /* * linux/fs/nfs/symlink.c -- cgit v1.2.3 From 0f44da51aeef9c974ea744c0d9e24d54eec4e94c Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:00 +0000 Subject: nfs: define and use the NFS_INO_INVALID_XATTR flag Define the NFS_INO_INVALID_XATTR flag, to be used for the NFSv4.2 xattr cache, and use it where appropriate. No functional change as yet. 
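A sketch of the consumer side (hypothetical helper; the cache itself arrives later in this series): before serving xattrs from a local cache, test the validity bit that the hunks below set.

    /* Serve from a local xattr cache only while the inode's xattr
     * state is still considered valid. (A real implementation would
     * read cache_validity under the appropriate inode lock.) */
    static bool nfs_xattr_cache_usable(struct inode *inode)
    {
            return !(NFS_I(inode)->cache_validity & NFS_INO_INVALID_XATTR);
    }

When the bit is set -- for example by nfs4_update_changeattr_locked() after a change-attribute mismatch, as in the nfs4proc.c hunk below -- any cached names and values must be discarded and refetched.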
Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 7 ++++++- fs/nfs/nfs4proc.c | 3 ++- fs/nfs/nfstrace.h | 3 ++- include/linux/nfs_fs.h | 1 + 4 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0bf1f835de01..629af798dfc9 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -204,7 +204,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) flags &= ~NFS_INO_INVALID_OTHER; flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE - | NFS_INO_REVAL_PAGECACHE); + | NFS_INO_REVAL_PAGECACHE + | NFS_INO_INVALID_XATTR); } if (inode->i_mapping->nrpages == 0) @@ -542,6 +543,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st inode->i_gid = fattr->gid; else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); + if (nfs_server_capable(inode, NFS_CAP_XATTR)) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR); if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) inode->i_blocks = fattr->du.nfs2.blocks; if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { @@ -1375,6 +1378,8 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode_set_iversion_raw(inode, fattr->change_attr); if (S_ISDIR(inode->i_mode)) nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA); + else if (nfs_server_capable(inode, NFS_CAP_XATTR)) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR); } /* If we have atomic WCC data, we may update some attributes */ ts = inode->i_ctime; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6540071cb228..0d123fe0a423 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1183,7 +1183,8 @@ nfs4_update_changeattr_locked(struct inode *inode, if (cinfo->before != inode_peek_iversion_raw(inode)) nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | - NFS_INO_INVALID_ACL; + NFS_INO_INVALID_ACL | + NFS_INO_INVALID_XATTR; } inode_set_iversion_raw(inode, cinfo->after); nfsi->read_cache_jiffies = timestamp; diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 547cec79899f..5a59dcdce0b2 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -59,7 +59,8 @@ TRACE_DEFINE_ENUM(NFS_INO_INVALID_OTHER); { NFS_INO_INVALID_CTIME, "INVALID_CTIME" }, \ { NFS_INO_INVALID_MTIME, "INVALID_MTIME" }, \ { NFS_INO_INVALID_SIZE, "INVALID_SIZE" }, \ - { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }) + { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }, \ + { NFS_INO_INVALID_XATTR, "INVALID_XATTR" }) TRACE_DEFINE_ENUM(NFS_INO_ADVISE_RDPLUS); TRACE_DEFINE_ENUM(NFS_INO_STALE); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 714b577dce19..943ee750d68c 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -234,6 +234,7 @@ struct nfs4_copy_state { #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ #define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ +#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ -- cgit v1.2.3 From 95ad37f90c338e3fd4abf61cecfe02b6f3e080f0 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:04 +0000 Subject: NFSv4.2: add client side xattr caching. Implement client side caching for NFSv4.2 extended attributes. The cache is a per-inode hashtable, with name/value entries. There is one special entry for the listxattr cache. NFS inodes have a pointer to a cache structure. 
The cache structure is allocated on demand, freed when the cache is invalidated. Memory shrinkers keep the size in check. Large entries (> PAGE_SIZE) are collected by a separate shrinker, and freed more aggressively than others. Signed-off-by: Frank van der Linden Signed-off-by: Trond Myklebust --- fs/nfs/Makefile | 2 +- fs/nfs/inode.c | 9 +- fs/nfs/nfs42proc.c | 12 + fs/nfs/nfs42xattr.c | 1083 +++++++++++++++++++++++++++++++++++++++++++ fs/nfs/nfs4_fs.h | 22 + fs/nfs/nfs4proc.c | 42 +- fs/nfs/nfs4super.c | 10 + include/linux/nfs_fs.h | 6 + include/uapi/linux/nfs_fs.h | 1 + 9 files changed, 1179 insertions(+), 8 deletions(-) create mode 100644 fs/nfs/nfs42xattr.c (limited to 'include/linux') diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile index 2433c3e03cfa..22d11fdc6deb 100644 --- a/fs/nfs/Makefile +++ b/fs/nfs/Makefile @@ -30,7 +30,7 @@ nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o pnfs_nfs.o -nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o +nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o nfs42xattr.o obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/ obj-$(CONFIG_PNFS_BLOCK) += blocklayout/ diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 629af798dfc9..10048eed485d 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -193,6 +193,7 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags) return nfs_check_cache_invalid_not_delegated(inode, flags); } +EXPORT_SYMBOL_GPL(nfs_check_cache_invalid); static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) { @@ -234,11 +235,13 @@ static void nfs_zap_caches_locked(struct inode *inode) | NFS_INO_INVALID_DATA | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR | NFS_INO_REVAL_PAGECACHE); } else nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR | NFS_INO_REVAL_PAGECACHE); nfs_zap_label_cache_locked(nfsi); } @@ -1897,7 +1900,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (!(have_writers || have_delegation)) { invalid |= NFS_INO_INVALID_DATA | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL; + | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR; /* Force revalidate of all attributes */ save_cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME @@ -2100,6 +2104,9 @@ struct inode *nfs_alloc_inode(struct super_block *sb) #if IS_ENABLED(CONFIG_NFS_V4) nfsi->nfs4_acl = NULL; #endif /* CONFIG_NFS_V4 */ +#ifdef CONFIG_NFS_V4_2 + nfsi->xattr_cache = NULL; +#endif return &nfsi->vfs_inode; } EXPORT_SYMBOL_GPL(nfs_alloc_inode); diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 8c2e52bc986a..e200522469af 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1182,6 +1182,18 @@ static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name, if (ret < 0) return ret; + /* + * Normally, the caching is done one layer up, but for successful + * RPCS, always cache the result here, even if the caller was + * just querying the length, or if the reply was too big for + * the caller. This avoids a second RPC in the case of the + * common query-alloc-retrieve cycle for xattrs. + * + * Note that xattr_len is always capped to XATTR_SIZE_MAX. 
+ */ + + nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len); + if (buflen) { if (res.xattr_len > buflen) return -ERANGE; diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c new file mode 100644 index 000000000000..23fdab977a2a --- /dev/null +++ b/fs/nfs/nfs42xattr.c @@ -0,0 +1,1083 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved. + * + * User extended attribute client side cache functions. + * + * Author: Frank van der Linden + */ +#include +#include +#include +#include +#include + +#include "nfs4_fs.h" +#include "internal.h" + +/* + * User extended attributes client side caching is implemented by having + * a cache structure attached to NFS inodes. This structure is allocated + * when needed, and freed when the cache is zapped. + * + * The cache structure contains as hash table of entries, and a pointer + * to a special-cased entry for the listxattr cache. + * + * Accessing and allocating / freeing the caches is done via reference + * counting. The cache entries use a similar refcounting scheme. + * + * This makes freeing a cache, both from the shrinker and from the + * zap cache path, easy. It also means that, in current use cases, + * the large majority of inodes will not waste any memory, as they + * will never have any user extended attributes assigned to them. + * + * Attribute entries are hashed in to a simple hash table. They are + * also part of an LRU. + * + * There are three shrinkers. + * + * Two shrinkers deal with the cache entries themselves: one for + * large entries (> PAGE_SIZE), and one for smaller entries. The + * shrinker for the larger entries works more aggressively than + * those for the smaller entries. + * + * The other shrinker frees the cache structures themselves. + */ + +/* + * 64 buckets is a good default. There is likely no reasonable + * workload that uses more than even 64 user extended attributes. + * You can certainly add a lot more - but you get what you ask for + * in those circumstances. + */ +#define NFS4_XATTR_HASH_SIZE 64 + +#define NFSDBG_FACILITY NFSDBG_XATTRCACHE + +struct nfs4_xattr_cache; +struct nfs4_xattr_entry; + +struct nfs4_xattr_bucket { + spinlock_t lock; + struct hlist_head hlist; + struct nfs4_xattr_cache *cache; + bool draining; +}; + +struct nfs4_xattr_cache { + struct kref ref; + spinlock_t hash_lock; /* protects hashtable and lru */ + struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; + struct list_head lru; + struct list_head dispose; + atomic_long_t nent; + spinlock_t listxattr_lock; + struct inode *inode; + struct nfs4_xattr_entry *listxattr; + struct work_struct work; +}; + +struct nfs4_xattr_entry { + struct kref ref; + struct hlist_node hnode; + struct list_head lru; + struct list_head dispose; + char *xattr_name; + void *xattr_value; + size_t xattr_size; + struct nfs4_xattr_bucket *bucket; + uint32_t flags; +}; + +#define NFS4_XATTR_ENTRY_EXTVAL 0x0001 + +/* + * LRU list of NFS inodes that have xattr caches. + */ +static struct list_lru nfs4_xattr_cache_lru; +static struct list_lru nfs4_xattr_entry_lru; +static struct list_lru nfs4_xattr_large_entry_lru; + +static struct kmem_cache *nfs4_xattr_cache_cachep; + +static struct workqueue_struct *nfs4_xattr_cache_wq; + +/* + * Hashing helper functions. 
+ */ +static void +nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache) +{ + unsigned int i; + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + INIT_HLIST_HEAD(&cache->buckets[i].hlist); + spin_lock_init(&cache->buckets[i].lock); + cache->buckets[i].cache = cache; + cache->buckets[i].draining = false; + } +} + +/* + * Locking order: + * 1. inode i_lock or bucket lock + * 2. list_lru lock (taken by list_lru_* functions) + */ + +/* + * Wrapper functions to add a cache entry to the right LRU. + */ +static bool +nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry) +{ + struct list_lru *lru; + + lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + return list_lru_add(lru, &entry->lru); +} + +static bool +nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry) +{ + struct list_lru *lru; + + lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + return list_lru_del(lru, &entry->lru); +} + +/* + * This function allocates cache entries. They are the normal + * extended attribute name/value pairs, but may also be a listxattr + * cache. Those allocations use the same entry so that they can be + * treated as one by the memory shrinker. + * + * xattr cache entries are allocated together with names. If the + * value fits in to one page with the entry structure and the name, + * it will also be part of the same allocation (kmalloc). This is + * expected to be the vast majority of cases. Larger allocations + * have a value pointer that is allocated separately by kvmalloc. + * + * Parameters: + * + * @name: Name of the extended attribute. NULL for listxattr cache + * entry. + * @value: Value of attribute, or listxattr cache. NULL if the + * value is to be copied from pages instead. + * @pages: Pages to copy the value from, if not NULL. Passed in to + * make it easier to copy the value after an RPC, even if + * the value will not be passed up to application (e.g. + * for a 'query' getxattr with NULL buffer). + * @len: Length of the value. Can be 0 for zero-length attribues. + * @value and @pages will be NULL if @len is 0. 
+ */ +static struct nfs4_xattr_entry * +nfs4_xattr_alloc_entry(const char *name, const void *value, + struct page **pages, size_t len) +{ + struct nfs4_xattr_entry *entry; + void *valp; + char *namep; + size_t alloclen, slen; + char *buf; + uint32_t flags; + + BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) + + XATTR_NAME_MAX + 1 > PAGE_SIZE); + + alloclen = sizeof(struct nfs4_xattr_entry); + if (name != NULL) { + slen = strlen(name) + 1; + alloclen += slen; + } else + slen = 0; + + if (alloclen + len <= PAGE_SIZE) { + alloclen += len; + flags = 0; + } else { + flags = NFS4_XATTR_ENTRY_EXTVAL; + } + + buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (buf == NULL) + return NULL; + entry = (struct nfs4_xattr_entry *)buf; + + if (name != NULL) { + namep = buf + sizeof(struct nfs4_xattr_entry); + memcpy(namep, name, slen); + } else { + namep = NULL; + } + + + if (flags & NFS4_XATTR_ENTRY_EXTVAL) { + valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (valp == NULL) { + kfree(buf); + return NULL; + } + } else if (len != 0) { + valp = buf + sizeof(struct nfs4_xattr_entry) + slen; + } else + valp = NULL; + + if (valp != NULL) { + if (value != NULL) + memcpy(valp, value, len); + else + _copy_from_pages(valp, pages, 0, len); + } + + entry->flags = flags; + entry->xattr_value = valp; + kref_init(&entry->ref); + entry->xattr_name = namep; + entry->xattr_size = len; + entry->bucket = NULL; + INIT_LIST_HEAD(&entry->lru); + INIT_LIST_HEAD(&entry->dispose); + INIT_HLIST_NODE(&entry->hnode); + + return entry; +} + +static void +nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry) +{ + if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) + kvfree(entry->xattr_value); + kfree(entry); +} + +static void +nfs4_xattr_free_entry_cb(struct kref *kref) +{ + struct nfs4_xattr_entry *entry; + + entry = container_of(kref, struct nfs4_xattr_entry, ref); + + if (WARN_ON(!list_empty(&entry->lru))) + return; + + nfs4_xattr_free_entry(entry); +} + +static void +nfs4_xattr_free_cache_cb(struct kref *kref) +{ + struct nfs4_xattr_cache *cache; + int i; + + cache = container_of(kref, struct nfs4_xattr_cache, ref); + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) + return; + cache->buckets[i].draining = false; + } + + cache->listxattr = NULL; + + kmem_cache_free(nfs4_xattr_cache_cachep, cache); + +} + +static struct nfs4_xattr_cache * +nfs4_xattr_alloc_cache(void) +{ + struct nfs4_xattr_cache *cache; + + cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, + GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (cache == NULL) + return NULL; + + kref_init(&cache->ref); + atomic_long_set(&cache->nent, 0); + + return cache; +} + +/* + * Set the listxattr cache, which is a special-cased cache entry. + * The special value ERR_PTR(-ESTALE) is used to indicate that + * the cache is being drained - this prevents a new listxattr + * cache from being added to what is now a stale cache. 
+ */ +static int +nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache, + struct nfs4_xattr_entry *new) +{ + struct nfs4_xattr_entry *old; + int ret = 1; + + spin_lock(&cache->listxattr_lock); + + old = cache->listxattr; + + if (old == ERR_PTR(-ESTALE)) { + ret = 0; + goto out; + } + + cache->listxattr = new; + if (new != NULL && new != ERR_PTR(-ESTALE)) + nfs4_xattr_entry_lru_add(new); + + if (old != NULL) { + nfs4_xattr_entry_lru_del(old); + kref_put(&old->ref, nfs4_xattr_free_entry_cb); + } +out: + spin_unlock(&cache->listxattr_lock); + + return ret; +} + +/* + * Unlink a cache from its parent inode, clearing out an invalid + * cache. Must be called with i_lock held. + */ +static struct nfs4_xattr_cache * +nfs4_xattr_cache_unlink(struct inode *inode) +{ + struct nfs_inode *nfsi; + struct nfs4_xattr_cache *oldcache; + + nfsi = NFS_I(inode); + + oldcache = nfsi->xattr_cache; + if (oldcache != NULL) { + list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru); + oldcache->inode = NULL; + } + nfsi->xattr_cache = NULL; + nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR; + + return oldcache; + +} + +/* + * Discard a cache. Usually called by a worker, since walking all + * the entries can take up some cycles that we don't want to waste + * in the I/O path. Can also be called from the shrinker callback. + * + * The cache is dead, it has already been unlinked from its inode, + * and no longer appears on the cache LRU list. + * + * Mark all buckets as draining, so that no new entries are added. This + * could still happen in the unlikely, but possible case that another + * thread had grabbed a reference before it was unlinked from the inode, + * and is still holding it for an add operation. + * + * Remove all entries from the LRU lists, so that there is no longer + * any way to 'find' this cache. Then, remove the entries from the hash + * table. + * + * At that point, the cache will remain empty and can be freed when the final + * reference drops, which is very likely the kref_put at the end of + * this function, or the one called immediately afterwards in the + * shrinker callback. + */ +static void +nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache) +{ + unsigned int i; + struct nfs4_xattr_entry *entry; + struct nfs4_xattr_bucket *bucket; + struct hlist_node *n; + + nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE)); + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + bucket = &cache->buckets[i]; + + spin_lock(&bucket->lock); + bucket->draining = true; + hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { + nfs4_xattr_entry_lru_del(entry); + hlist_del_init(&entry->hnode); + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } + spin_unlock(&bucket->lock); + } + + atomic_long_set(&cache->nent, 0); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +static void +nfs4_xattr_discard_cache_worker(struct work_struct *work) +{ + struct nfs4_xattr_cache *cache = container_of(work, + struct nfs4_xattr_cache, work); + + nfs4_xattr_discard_cache(cache); +} + +static void +nfs4_xattr_reap_cache(struct nfs4_xattr_cache *cache) +{ + queue_work(nfs4_xattr_cache_wq, &cache->work); +} + +/* + * Get a referenced copy of the cache structure. Avoid doing allocs + * while holding i_lock. Which means that we do some optimistic allocation, + * and might have to free the result in rare cases. + * + * This function only checks the NFS_INO_INVALID_XATTR cache validity bit + * and acts accordingly, replacing the cache when needed. 
For the read case + * (!add), this means that the caller must make sure that the cache + * is valid before caling this function. getxattr and listxattr call + * revalidate_inode to do this. The attribute cache timeout (for the + * non-delegated case) is expected to be dealt with in the revalidate + * call. + */ + +static struct nfs4_xattr_cache * +nfs4_xattr_get_cache(struct inode *inode, int add) +{ + struct nfs_inode *nfsi; + struct nfs4_xattr_cache *cache, *oldcache, *newcache; + + nfsi = NFS_I(inode); + + cache = oldcache = NULL; + + spin_lock(&inode->i_lock); + + if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) + oldcache = nfs4_xattr_cache_unlink(inode); + else + cache = nfsi->xattr_cache; + + if (cache != NULL) + kref_get(&cache->ref); + + spin_unlock(&inode->i_lock); + + if (add && cache == NULL) { + newcache = NULL; + + cache = nfs4_xattr_alloc_cache(); + if (cache == NULL) + goto out; + + spin_lock(&inode->i_lock); + if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) { + /* + * The cache was invalidated again. Give up, + * since what we want to enter is now likely + * outdated anyway. + */ + spin_unlock(&inode->i_lock); + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + cache = NULL; + goto out; + } + + /* + * Check if someone beat us to it. + */ + if (nfsi->xattr_cache != NULL) { + newcache = nfsi->xattr_cache; + kref_get(&newcache->ref); + } else { + kref_get(&cache->ref); + nfsi->xattr_cache = cache; + cache->inode = inode; + list_lru_add(&nfs4_xattr_cache_lru, &cache->lru); + } + + spin_unlock(&inode->i_lock); + + /* + * If there was a race, throw away the cache we just + * allocated, and use the new one allocated by someone + * else. + */ + if (newcache != NULL) { + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + cache = newcache; + } + } + +out: + /* + * Discarding an old cache is done via a workqueue. 
+ */ + if (oldcache != NULL) + nfs4_xattr_reap_cache(oldcache); + + return cache; +} + +static inline struct nfs4_xattr_bucket * +nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name) +{ + return &cache->buckets[jhash(name, strlen(name), 0) & + (ARRAY_SIZE(cache->buckets) - 1)]; +} + +static struct nfs4_xattr_entry * +nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) +{ + struct nfs4_xattr_entry *entry; + + entry = NULL; + + hlist_for_each_entry(entry, &bucket->hlist, hnode) { + if (!strcmp(entry->xattr_name, name)) + break; + } + + return entry; +} + +static int +nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache, + struct nfs4_xattr_entry *entry) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *oldentry = NULL; + int ret = 1; + + bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name); + entry->bucket = bucket; + + spin_lock(&bucket->lock); + + if (bucket->draining) { + ret = 0; + goto out; + } + + oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name); + if (oldentry != NULL) { + hlist_del_init(&oldentry->hnode); + nfs4_xattr_entry_lru_del(oldentry); + } else { + atomic_long_inc(&cache->nent); + } + + hlist_add_head(&entry->hnode, &bucket->hlist); + nfs4_xattr_entry_lru_add(entry); + +out: + spin_unlock(&bucket->lock); + + if (oldentry != NULL) + kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb); + + return ret; +} + +static void +nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *entry; + + bucket = nfs4_xattr_hash_bucket(cache, name); + + spin_lock(&bucket->lock); + + entry = nfs4_xattr_get_entry(bucket, name); + if (entry != NULL) { + hlist_del_init(&entry->hnode); + nfs4_xattr_entry_lru_del(entry); + atomic_long_dec(&cache->nent); + } + + spin_unlock(&bucket->lock); + + if (entry != NULL) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); +} + +static struct nfs4_xattr_entry * +nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *entry; + + bucket = nfs4_xattr_hash_bucket(cache, name); + + spin_lock(&bucket->lock); + + entry = nfs4_xattr_get_entry(bucket, name); + if (entry != NULL) + kref_get(&entry->ref); + + spin_unlock(&bucket->lock); + + return entry; +} + +/* + * Entry point to retrieve an entry from the cache. + */ +ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf, + ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + ssize_t ret; + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return -ENOENT; + + ret = 0; + entry = nfs4_xattr_hash_find(cache, name); + + if (entry != NULL) { + dprintk("%s: cache hit '%s', len %lu\n", __func__, + entry->xattr_name, (unsigned long)entry->xattr_size); + if (buflen == 0) { + /* Length probe only */ + ret = entry->xattr_size; + } else if (buflen < entry->xattr_size) + ret = -ERANGE; + else { + memcpy(buf, entry->xattr_value, entry->xattr_size); + ret = entry->xattr_size; + } + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } else { + dprintk("%s: cache miss '%s'\n", __func__, name); + ret = -ENOENT; + } + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + + return ret; +} + +/* + * Retrieve a cached list of xattrs from the cache. 
+ */ +ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + ssize_t ret; + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return -ENOENT; + + spin_lock(&cache->listxattr_lock); + + entry = cache->listxattr; + + if (entry != NULL && entry != ERR_PTR(-ESTALE)) { + if (buflen == 0) { + /* Length probe only */ + ret = entry->xattr_size; + } else if (entry->xattr_size > buflen) + ret = -ERANGE; + else { + memcpy(buf, entry->xattr_value, entry->xattr_size); + ret = entry->xattr_size; + } + } else { + ret = -ENOENT; + } + + spin_unlock(&cache->listxattr_lock); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + + return ret; +} + +/* + * Add an xattr to the cache. + * + * This also invalidates the xattr list cache. + */ +void nfs4_xattr_cache_add(struct inode *inode, const char *name, + const char *buf, struct page **pages, ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + + dprintk("%s: add '%s' len %lu\n", __func__, + name, (unsigned long)buflen); + + cache = nfs4_xattr_get_cache(inode, 1); + if (cache == NULL) + return; + + entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen); + if (entry == NULL) + goto out; + + (void)nfs4_xattr_set_listcache(cache, NULL); + + if (!nfs4_xattr_hash_add(cache, entry)) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + +out: + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + + +/* + * Remove an xattr from the cache. + * + * This also invalidates the xattr list cache. + */ +void nfs4_xattr_cache_remove(struct inode *inode, const char *name) +{ + struct nfs4_xattr_cache *cache; + + dprintk("%s: remove '%s'\n", __func__, name); + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return; + + (void)nfs4_xattr_set_listcache(cache, NULL); + nfs4_xattr_hash_remove(cache, name); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +/* + * Cache listxattr output, replacing any possible old one. + */ +void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf, + ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + + cache = nfs4_xattr_get_cache(inode, 1); + if (cache == NULL) + return; + + entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen); + if (entry == NULL) + goto out; + + /* + * This is just there to be able to get to bucket->cache, + * which is obviously the same for all buckets, so just + * use bucket 0. + */ + entry->bucket = &cache->buckets[0]; + + if (!nfs4_xattr_set_listcache(cache, entry)) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + +out: + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +/* + * Zap the entire cache. Called when an inode is evicted. + */ +void nfs4_xattr_cache_zap(struct inode *inode) +{ + struct nfs4_xattr_cache *oldcache; + + spin_lock(&inode->i_lock); + oldcache = nfs4_xattr_cache_unlink(inode); + spin_unlock(&inode->i_lock); + + if (oldcache) + nfs4_xattr_discard_cache(oldcache); +} + +/* + * The entry LRU is shrunk more aggressively than the cache LRU, + * by settings @seeks to 1. + * + * Cache structures are freed only when they've become empty, after + * pruning all but one entry. 
+ */ + +static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink, + struct shrink_control *sc); + +static struct shrinker nfs4_xattr_cache_shrinker = { + .count_objects = nfs4_xattr_cache_count, + .scan_objects = nfs4_xattr_cache_scan, + .seeks = DEFAULT_SEEKS, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static struct shrinker nfs4_xattr_entry_shrinker = { + .count_objects = nfs4_xattr_entry_count, + .scan_objects = nfs4_xattr_entry_scan, + .seeks = DEFAULT_SEEKS, + .batch = 512, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static struct shrinker nfs4_xattr_large_entry_shrinker = { + .count_objects = nfs4_xattr_entry_count, + .scan_objects = nfs4_xattr_entry_scan, + .seeks = 1, + .batch = 512, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static enum lru_status +cache_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) +{ + struct list_head *dispose = arg; + struct inode *inode; + struct nfs4_xattr_cache *cache = container_of(item, + struct nfs4_xattr_cache, lru); + + if (atomic_long_read(&cache->nent) > 1) + return LRU_SKIP; + + /* + * If a cache structure is on the LRU list, we know that + * its inode is valid. Try to lock it to break the link. + * Since we're inverting the lock order here, only try. + */ + inode = cache->inode; + + if (!spin_trylock(&inode->i_lock)) + return LRU_SKIP; + + kref_get(&cache->ref); + + cache->inode = NULL; + NFS_I(inode)->xattr_cache = NULL; + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR; + list_lru_isolate(lru, &cache->lru); + + spin_unlock(&inode->i_lock); + + list_add_tail(&cache->dispose, dispose); + return LRU_REMOVED; +} + +static unsigned long +nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + LIST_HEAD(dispose); + unsigned long freed; + struct nfs4_xattr_cache *cache; + + freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc, + cache_lru_isolate, &dispose); + while (!list_empty(&dispose)) { + cache = list_first_entry(&dispose, struct nfs4_xattr_cache, + dispose); + list_del_init(&cache->dispose); + nfs4_xattr_discard_cache(cache); + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + } + + return freed; +} + + +static unsigned long +nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long count; + + count = list_lru_count(&nfs4_xattr_cache_lru); + return vfs_pressure_ratio(count); +} + +static enum lru_status +entry_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) +{ + struct list_head *dispose = arg; + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry = container_of(item, + struct nfs4_xattr_entry, lru); + + bucket = entry->bucket; + cache = bucket->cache; + + /* + * Unhook the entry from its parent (either a cache bucket + * or a cache structure if it's a listxattr buf), so that + * it's no longer found. Then add it to the isolate list, + * to be freed later. + * + * In both cases, we're reverting lock order, so use + * trylock and skip the entry if we can't get the lock. 
+ */ + if (entry->xattr_name != NULL) { + /* Regular cache entry */ + if (!spin_trylock(&bucket->lock)) + return LRU_SKIP; + + kref_get(&entry->ref); + + hlist_del_init(&entry->hnode); + atomic_long_dec(&cache->nent); + list_lru_isolate(lru, &entry->lru); + + spin_unlock(&bucket->lock); + } else { + /* Listxattr cache entry */ + if (!spin_trylock(&cache->listxattr_lock)) + return LRU_SKIP; + + kref_get(&entry->ref); + + cache->listxattr = NULL; + list_lru_isolate(lru, &entry->lru); + + spin_unlock(&cache->listxattr_lock); + } + + list_add_tail(&entry->dispose, dispose); + return LRU_REMOVED; +} + +static unsigned long +nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + LIST_HEAD(dispose); + unsigned long freed; + struct nfs4_xattr_entry *entry; + struct list_lru *lru; + + lru = (shrink == &nfs4_xattr_large_entry_shrinker) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose); + + while (!list_empty(&dispose)) { + entry = list_first_entry(&dispose, struct nfs4_xattr_entry, + dispose); + list_del_init(&entry->dispose); + + /* + * Drop two references: the one that we just grabbed + * in entry_lru_isolate, and the one that was set + * when the entry was first allocated. + */ + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } + + return freed; +} + +static unsigned long +nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long count; + struct list_lru *lru; + + lru = (shrink == &nfs4_xattr_large_entry_shrinker) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + count = list_lru_count(lru); + return vfs_pressure_ratio(count); +} + + +static void nfs4_xattr_cache_init_once(void *p) +{ + struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p; + + spin_lock_init(&cache->listxattr_lock); + atomic_long_set(&cache->nent, 0); + nfs4_xattr_hash_init(cache); + cache->listxattr = NULL; + INIT_WORK(&cache->work, nfs4_xattr_discard_cache_worker); + INIT_LIST_HEAD(&cache->lru); + INIT_LIST_HEAD(&cache->dispose); +} + +int __init nfs4_xattr_cache_init(void) +{ + int ret = 0; + + nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache", + sizeof(struct nfs4_xattr_cache), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT), + nfs4_xattr_cache_init_once); + if (nfs4_xattr_cache_cachep == NULL) + return -ENOMEM; + + ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru, + &nfs4_xattr_large_entry_shrinker); + if (ret) + goto out4; + + ret = list_lru_init_memcg(&nfs4_xattr_entry_lru, + &nfs4_xattr_entry_shrinker); + if (ret) + goto out3; + + ret = list_lru_init_memcg(&nfs4_xattr_cache_lru, + &nfs4_xattr_cache_shrinker); + if (ret) + goto out2; + + nfs4_xattr_cache_wq = alloc_workqueue("nfs4_xattr", WQ_MEM_RECLAIM, 0); + if (nfs4_xattr_cache_wq == NULL) + goto out1; + + ret = register_shrinker(&nfs4_xattr_cache_shrinker); + if (ret) + goto out0; + + ret = register_shrinker(&nfs4_xattr_entry_shrinker); + if (ret) + goto out; + + ret = register_shrinker(&nfs4_xattr_large_entry_shrinker); + if (!ret) + return 0; + + unregister_shrinker(&nfs4_xattr_entry_shrinker); +out: + unregister_shrinker(&nfs4_xattr_cache_shrinker); +out0: + destroy_workqueue(nfs4_xattr_cache_wq); +out1: + list_lru_destroy(&nfs4_xattr_cache_lru); +out2: + list_lru_destroy(&nfs4_xattr_entry_lru); +out3: + list_lru_destroy(&nfs4_xattr_large_entry_lru); +out4: + kmem_cache_destroy(nfs4_xattr_cache_cachep); + + return ret; +} + 
+void nfs4_xattr_cache_exit(void) +{ + unregister_shrinker(&nfs4_xattr_entry_shrinker); + unregister_shrinker(&nfs4_xattr_cache_shrinker); + list_lru_destroy(&nfs4_xattr_entry_lru); + list_lru_destroy(&nfs4_xattr_cache_lru); + kmem_cache_destroy(nfs4_xattr_cache_cachep); + destroy_workqueue(nfs4_xattr_cache_wq); +} diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 2fa9e4ea98d2..7e16a586f3fc 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -626,12 +626,34 @@ static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state * nfs4_stateid_match_other(&state->open_stateid, stateid); } +/* nfs42xattr.c */ +#ifdef CONFIG_NFS_V4_2 +extern int __init nfs4_xattr_cache_init(void); +extern void nfs4_xattr_cache_exit(void); +extern void nfs4_xattr_cache_add(struct inode *inode, const char *name, + const char *buf, struct page **pages, + ssize_t buflen); +extern void nfs4_xattr_cache_remove(struct inode *inode, const char *name); +extern ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, + char *buf, ssize_t buflen); +extern void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf, + ssize_t buflen); +extern ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, + ssize_t buflen); +extern void nfs4_xattr_cache_zap(struct inode *inode); #else +static inline void nfs4_xattr_cache_zap(struct inode *inode) +{ +} +#endif /* CONFIG_NFS_V4_2 */ + +#else /* CONFIG_NFS_V4 */ #define nfs4_close_state(a, b) do { } while (0) #define nfs4_close_sync(a, b) do { } while (0) #define nfs4_state_protect(a, b, c, d) do { } while (0) #define nfs4_state_protect_write(a, b, c, d) do { } while (0) + #endif /* CONFIG_NFS_V4 */ #endif /* __LINUX_FS_NFS_NFS4_FS.H */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 92a07956f07b..f670ff64b31e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7448,6 +7448,7 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, size_t buflen, int flags) { struct nfs_access_entry cache; + int ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return -EOPNOTSUPP; @@ -7466,10 +7467,17 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, return -EACCES; } - if (buf == NULL) - return nfs42_proc_removexattr(inode, key); - else - return nfs42_proc_setxattr(inode, key, buf, buflen, flags); + if (buf == NULL) { + ret = nfs42_proc_removexattr(inode, key); + if (!ret) + nfs4_xattr_cache_remove(inode, key); + } else { + ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); + if (!ret) + nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); + } + + return ret; } static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, @@ -7477,6 +7485,7 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, const char *key, void *buf, size_t buflen) { struct nfs_access_entry cache; + ssize_t ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return -EOPNOTSUPP; @@ -7486,7 +7495,17 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, return -EACCES; } - return nfs42_proc_getxattr(inode, key, buf, buflen); + ret = nfs_revalidate_inode(NFS_SERVER(inode), inode); + if (ret) + return ret; + + ret = nfs4_xattr_cache_get(inode, key, buf, buflen); + if (ret >= 0 || (ret < 0 && ret != -ENOENT)) + return ret; + + ret = nfs42_proc_getxattr(inode, key, buf, buflen); + + return ret; } static ssize_t @@ -7494,7 +7513,7 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) { u64 cookie; bool eof; - int ret, size; + ssize_t ret, size; 
char *buf; size_t buflen; struct nfs_access_entry cache; @@ -7507,6 +7526,14 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) return 0; } + ret = nfs_revalidate_inode(NFS_SERVER(inode), inode); + if (ret) + return ret; + + ret = nfs4_xattr_cache_list(inode, list, list_len); + if (ret >= 0 || (ret < 0 && ret != -ENOENT)) + return ret; + cookie = 0; eof = false; buflen = list_len ? list_len : XATTR_LIST_MAX; @@ -7526,6 +7553,9 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) size += ret; } + if (list_len) + nfs4_xattr_cache_set_list(inode, list, size); + return size; } diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 1475f932d7da..0c1ab846b83d 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -69,6 +69,7 @@ static void nfs4_evict_inode(struct inode *inode) pnfs_destroy_layout(NFS_I(inode)); /* First call standard NFS clear_inode() code */ nfs_clear_inode(inode); + nfs4_xattr_cache_zap(inode); } struct nfs_referral_count { @@ -268,6 +269,12 @@ static int __init init_nfs_v4(void) if (err) goto out1; +#ifdef CONFIG_NFS_V4_2 + err = nfs4_xattr_cache_init(); + if (err) + goto out2; +#endif + err = nfs4_register_sysctl(); if (err) goto out2; @@ -288,6 +295,9 @@ static void __exit exit_nfs_v4(void) nfs4_pnfs_v3_ds_connect_unload(); unregister_nfs_version(&nfs_v4); +#ifdef CONFIG_NFS_V4_2 + nfs4_xattr_cache_exit(); +#endif nfs4_unregister_sysctl(); nfs_idmap_quit(); nfs_dns_resolver_destroy(); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 943ee750d68c..a2c6455ea3fa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -102,6 +102,8 @@ struct nfs_delegation; struct posix_acl; +struct nfs4_xattr_cache; + /* * nfs fs inode data in memory */ @@ -188,6 +190,10 @@ struct nfs_inode { struct fscache_cookie *fscache; #endif struct inode vfs_inode; + +#ifdef CONFIG_NFS_V4_2 + struct nfs4_xattr_cache *xattr_cache; +#endif }; struct nfs4_copy_state { diff --git a/include/uapi/linux/nfs_fs.h b/include/uapi/linux/nfs_fs.h index 7bcc8cd6831d..3afe3767c55d 100644 --- a/include/uapi/linux/nfs_fs.h +++ b/include/uapi/linux/nfs_fs.h @@ -56,6 +56,7 @@ #define NFSDBG_PNFS 0x1000 #define NFSDBG_PNFS_LD 0x2000 #define NFSDBG_STATE 0x4000 +#define NFSDBG_XATTRCACHE 0x8000 #define NFSDBG_ALL 0xFFFF -- cgit v1.2.3 From a5e6f964bb2c613933de58a35ddfa306128ba004 Mon Sep 17 00:00:00 2001 From: Misono Tomohiro Date: Fri, 10 Jul 2020 17:00:03 +0900 Subject: rtc: cleanup obsolete comment about struct rtc_class_ops Commit ea369ea6d828 ("rtc: remove .open() and .release()") removes open/release callback from struct rtc_class_ops. Also commit 80d4bb515b78 ("RTC: Cleanup rtc_class_ops->irq_set_state") and commit 696160fec162 ("RTC: Cleanup rtc_class_ops->irq_set_freq()") removes irq callbacks. So, just remove related comments so that readers will not be confused. 
Signed-off-by: Misono Tomohiro Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20200710080003.7986-1-misono.tomohiro@jp.fujitsu.com --- include/linux/rtc.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rtc.h b/include/linux/rtc.h index bba3db3f7efa..22d1575e4991 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -55,10 +55,6 @@ extern struct class *rtc_class; * * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs - * - non-ioctl() chardev hooks: open(), release() - * - * REVISIT those periodic irq calls *do* have ops_lock when they're - * issued through ioctl() ... */ struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, unsigned long); -- cgit v1.2.3 From e2e5c55eed8023ecfbf4c9b623ef7dec343d1845 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 14 Jul 2020 13:50:27 -0600 Subject: remoteproc: Add new RPROC_DETACHED state Add a new RPROC_DETACHED state to take into account scenarios where the remoteproc core needs to attach to a remote processor that is booted by another entity. Signed-off-by: Mathieu Poirier Reviewed-by: Bjorn Andersson Tested-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20200714195035.1426873-2-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_sysfs.c | 1 + include/linux/remoteproc.h | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 52b871327b55..264759713934 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -72,6 +72,7 @@ static const char * const rproc_state_string[] = { [RPROC_RUNNING] = "running", [RPROC_CRASHED] = "crashed", [RPROC_DELETED] = "deleted", + [RPROC_DETACHED] = "detached", [RPROC_LAST] = "invalid", }; diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index e7b7bab8b235..21182ad2d059 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -400,6 +400,8 @@ struct rproc_ops { * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery * @RPROC_DELETED: device is deleted + * @RPROC_DETACHED: device has been booted by another entity and is waiting + * for the core to attach to it * @RPROC_LAST: just keep this one at the end * * Please note that the values of these states are used as indices @@ -414,7 +416,8 @@ enum rproc_state { RPROC_OFFLINE = 0, RPROC_SUSPENDED = 1, RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, - RPROC_LAST = 5, + RPROC_DETACHED = 5, + RPROC_LAST = 6, }; /** -- cgit v1.2.3 From a6a4f2857524007848f7957af432cddb4d43b593 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 14 Jul 2020 13:50:28 -0600 Subject: remoteproc: Add new attach() remoteproc operation Add a new attach() operation in order to properly deal with scenarios where the remoteproc core needs to attach to a remote processor that has been booted by another entity.
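As a sketch of how a platform driver is expected to use the new operation: a driver that knows its coprocessor was started by the boot loader publishes attach() alongside start(), and only sets up its communication channel there instead of loading and booting firmware. All my_* names below are hypothetical, not part of this patch:

static int my_rproc_attach(struct rproc *rproc)
{
	struct my_rproc *priv = rproc->priv;

	/* The remote side is already running: only bring up the
	 * communication channel, do not load or boot firmware. */
	return my_mbox_enable(priv);
}

static const struct rproc_ops my_rproc_ops = {
	.attach = my_rproc_attach,
	.start  = my_rproc_start,	/* still used for cold boots */
	.stop   = my_rproc_stop,
	.kick   = my_rproc_kick,
};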
Signed-off-by: Mathieu Poirier Reviewed-by: Bjorn Andersson Tested-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20200714195035.1426873-3-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_internal.h | 8 ++++++++ include/linux/remoteproc.h | 2 ++ 2 files changed, 10 insertions(+) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 4ba7cb59d3e8..fc710866f8ce 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -79,6 +79,14 @@ static inline int rproc_unprepare_device(struct rproc *rproc) return 0; } +static inline int rproc_attach_device(struct rproc *rproc) +{ + if (rproc->ops->attach) + return rproc->ops->attach(rproc); + + return 0; +} + static inline int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 21182ad2d059..bf6a310ba870 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -359,6 +359,7 @@ enum rsc_handling_status { * @unprepare: unprepare device after stop * @start: power on the device and boot it * @stop: power off the device + * @attach: attach to a device that is already powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) @@ -379,6 +380,7 @@ struct rproc_ops { int (*unprepare)(struct rproc *rproc); int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); + int (*attach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); -- cgit v1.2.3 From 4a4dca1941fedc1b02635ff0b4ed51b9857d0382 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 14 Jul 2020 13:50:35 -0600 Subject: remoteproc: Properly handle firmware name when attaching This patch prevents the firmware image name from being displayed when the remoteproc core is attaching to a remote processor. This is needed since there is no guarantee about the nature of the firmware image that is loaded by the external entity. Signed-off-by: Mathieu Poirier Reviewed-by: Arnaud Pouliquen Reviewed-by: Bjorn Andersson Tested-by: Arnaud Pouliquen Link: https://lore.kernel.org/r/20200714195035.1426873-10-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 18 ++++++++++++++++++ drivers/remoteproc/remoteproc_sysfs.c | 16 ++++++++++++++-- include/linux/remoteproc.h | 2 ++ 3 files changed, 34 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 565778ccb39c..1b52004655ab 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1627,6 +1627,14 @@ static int rproc_stop(struct rproc *rproc, bool crashed) rproc->state = RPROC_OFFLINE; + /* + * The remote processor has been stopped and is now offline, which means + * that the next time it is brought back online the remoteproc core will + * be responsible for loading its firmware. As such it is no longer + * autonomous.
+ */ + rproc->autonomous = false; + dev_info(dev, "stopped remote processor %s\n", rproc->name); return 0; @@ -2145,6 +2153,16 @@ int rproc_add(struct rproc *rproc) /* create debugfs entries */ rproc_create_debug_dir(rproc); + /* + * Remind ourselves the remote processor has been attached to rather + * than booted by the remoteproc core. This is important because the + * RPROC_DETACHED state will be lost as soon as the remote processor + * has been attached to. Used in firmware_show() and reset in + * rproc_stop(). + */ + if (rproc->state == RPROC_DETACHED) + rproc->autonomous = true; + /* if rproc is marked always-on, request it to boot */ if (rproc->auto_boot) { ret = rproc_trigger_auto_boot(rproc); diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 264759713934..eea514cec50e 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -15,8 +15,20 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rproc *rproc = to_rproc(dev); - - return sprintf(buf, "%s\n", rproc->firmware); + const char *firmware = rproc->firmware; + + /* + * If the remote processor has been started by an external + * entity we have no idea of what image it is running. As such + * simply display a generic string rather then rproc->firmware. + * + * Here we rely on the autonomous flag because a remote processor + * may have been attached to and currently in a running state. + */ + if (rproc->autonomous) + firmware = "unknown"; + + return sprintf(buf, "%s\n", firmware); } /* Change firmware name via sysfs */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index bf6a310ba870..cf5e31556780 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -491,6 +491,7 @@ struct rproc_dump_segment { * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started + * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc */ @@ -524,6 +525,7 @@ struct rproc { size_t table_sz; bool has_iommu; bool auto_boot; + bool autonomous; struct list_head dump_segments; int nb_vdev; u8 elf_class; -- cgit v1.2.3 From ab91e7a6da7eeb8aa54843748652c186daee43eb Mon Sep 17 00:00:00 2001 From: He Zhe Date: Mon, 6 Jul 2020 17:52:24 +0800 Subject: freezer: Add unsafe versions of freezable_schedule_timeout_interruptible for NFS commit 0688e64bc600 ("NFS: Allow signal interruption of NFS4ERR_DELAYed operations") introduces nfs4_delay_interruptible which also needs an _unsafe version to avoid the following call trace for the same reason explained in commit 416ad3c9c006 ("freezer: add unsafe versions of freezable helpers for NFS") CPU: 4 PID: 3968 Comm: rm Tainted: G W 5.8.0-rc4 #1 Hardware name: Marvell OcteonTX CN96XX board (DT) Call trace: dump_backtrace+0x0/0x1dc show_stack+0x20/0x30 dump_stack+0xdc/0x150 debug_check_no_locks_held+0x98/0xa0 nfs4_delay_interruptible+0xd8/0x120 nfs4_handle_exception+0x130/0x170 nfs4_proc_rmdir+0x8c/0x220 nfs_rmdir+0xa4/0x360 vfs_rmdir.part.0+0x6c/0x1b0 do_rmdir+0x18c/0x210 __arm64_sys_unlinkat+0x64/0x7c el0_svc_common.constprop.0+0x7c/0x110 do_el0_svc+0x24/0xa0 el0_sync_handler+0x13c/0x1b8 el0_sync+0x158/0x180 Signed-off-by: He Zhe Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 2 +- include/linux/freezer.h | 14 
++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f670ff64b31e..113e0d6dd3d3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -416,7 +416,7 @@ static int nfs4_delay_interruptible(long *timeout) { might_sleep(); - freezable_schedule_timeout_interruptible(nfs4_update_delay(timeout)); + freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout)); if (!signal_pending(current)) return 0; return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 21f5aa0b217f..27828145ca09 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -207,6 +207,17 @@ static inline long freezable_schedule_timeout_interruptible(long timeout) return __retval; } +/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ +static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout) +{ + long __retval; + + freezer_do_not_count(); + __retval = schedule_timeout_interruptible(timeout); + freezer_count_unsafe(); + return __retval; +} + /* Like schedule_timeout_killable(), but should not block the freezer. */ static inline long freezable_schedule_timeout_killable(long timeout) { @@ -285,6 +296,9 @@ static inline void set_freezable(void) {} #define freezable_schedule_timeout_interruptible(timeout) \ schedule_timeout_interruptible(timeout) +#define freezable_schedule_timeout_interruptible_unsafe(timeout) \ + schedule_timeout_interruptible(timeout) + #define freezable_schedule_timeout_killable(timeout) \ schedule_timeout_killable(timeout) -- cgit v1.2.3 From d9473cbfb0c5cbb279dfdeaec780934729537d27 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 14 Jul 2020 14:04:41 -0600 Subject: remoteproc: Make function rproc_resource_cleanup() public Make function rproc_resource_cleanup() public so that it can be used by platform drivers when allocating resources to be used by a detached remote processor. Acked-by: Arnaud Pouliquen Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200714200445.1427257-8-mathieu.poirier@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_core.c | 3 ++- include/linux/remoteproc.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 1b52004655ab..6fa9f75754b3 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1274,7 +1274,7 @@ static void rproc_coredump_cleanup(struct rproc *rproc) * This function will free all resources acquired for @rproc, and it * is called whenever @rproc either shuts down or fails to boot. 
*/ -static void rproc_resource_cleanup(struct rproc *rproc) +void rproc_resource_cleanup(struct rproc *rproc) { struct rproc_mem_entry *entry, *tmp; struct rproc_debug_trace *trace, *ttmp; @@ -1318,6 +1318,7 @@ static void rproc_resource_cleanup(struct rproc *rproc) rproc_coredump_cleanup(rproc); } +EXPORT_SYMBOL(rproc_resource_cleanup); static int rproc_start(struct rproc *rproc, const struct firmware *fw) { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index cf5e31556780..7c0567029f7c 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -610,6 +610,7 @@ void rproc_put(struct rproc *rproc); int rproc_add(struct rproc *rproc); int rproc_del(struct rproc *rproc); void rproc_free(struct rproc *rproc); +void rproc_resource_cleanup(struct rproc *rproc); struct rproc *devm_rproc_alloc(struct device *dev, const char *name, const struct rproc_ops *ops, -- cgit v1.2.3 From c6a8b84da4c28bda61b842a089651c3ec9d89a48 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:36:50 -0700 Subject: modules: linux/moduleparam.h: drop duplicated word in a comment Drop the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Cc: Jessica Yu Signed-off-by: Jessica Yu --- include/linux/moduleparam.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 3ef917ff0964..1ad5aa3b86d9 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -108,7 +108,7 @@ struct kparam_array * ".") the kernel commandline parameter. Note that - is changed to _, so * the user can use "foo-bar=1" even for variable "foo_bar". * - * @perm is 0 if the the variable is not to appear in sysfs, or 0444 + * @perm is 0 if the variable is not to appear in sysfs, or 0444 * for world-readable, 0644 for root-writable, etc. Note that if it * is writable, you may need to use kernel_param_lock() around * accesses (esp. charp, which can be kfreed when it changes). -- cgit v1.2.3 From 1c91b46555aa1dd86331025e62d55d92459e0faf Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:26 +0200 Subject: backlight: backlight: Add backlight_is_blank() The backlight support has three properties that express the state: - power - state - fb_blank It is undocumented and easy to get wrong. Add a backlight_is_blank() helper to make it simpler for drivers to get the check of the state correct. A lot of drivers also include checks for fb_blank. This check is redundant when the state is checked and thus not needed in this helper function, but it is included anyway to avoid introducing subtle bugs due to the creative use of fb_blank in some drivers. Introducing this helper will, for some drivers, result in added support for fb_blank. This is a change in functionality, but one that improves the backlight driver. Rolling out this helper to all relevant backlight drivers will eliminate almost all accesses to fb_blank.
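In driver terms, the helper collapses the separate power, fb_blank and state tests into a single call inside update_status(). A minimal sketch, where my_bl_update_status() and my_chip_set_brightness() are hypothetical stand-ins for a driver's device-specific code:

static int my_bl_update_status(struct backlight_device *bd)
{
	int brightness = bd->props.brightness;

	/* One call replaces the three-way power/fb_blank/state check */
	if (backlight_is_blank(bd))
		brightness = 0;

	return my_chip_set_brightness(bd, brightness);
}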
Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Peter Ujfalusi Reviewed-by: Emil Velikov Reviewed-by: Daniel Vetter Signed-off-by: Lee Jones --- include/linux/backlight.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include/linux') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 56e4580d4f55..56e51ebab740 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -175,6 +175,25 @@ static inline void backlight_put(struct backlight_device *bd) put_device(&bd->dev); } +/** + * backlight_is_blank - Return true if display is expected to be blank + * @bd: the backlight device + * + * Display is expected to be blank if any of these is true:: + * + * 1) if power is not UNBLANK + * 2) if fb_blank is not UNBLANK + * 3) if state indicates BLANK or SUSPENDED + * + * Returns true if display is expected to be blank, false otherwise. + */ +static inline bool backlight_is_blank(const struct backlight_device *bd) +{ + return bd->props.power != FB_BLANK_UNBLANK || + bd->props.fb_blank != FB_BLANK_UNBLANK || + bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); +} + extern struct backlight_device *backlight_device_register(const char *name, struct device *dev, void *devdata, const struct backlight_ops *ops, const struct backlight_properties *props); -- cgit v1.2.3 From ca7c20b2132d228ec76df3c96f5d0b5ae3d6f218 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:27 +0200 Subject: backlight: backlight: Improve backlight_ops documentation Improve the documentation for backlight_ops and adapt it to kernel-doc style. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 59 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 56e51ebab740..4ff60774ae22 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -55,19 +55,66 @@ enum backlight_scale { struct backlight_device; struct fb_info; +/** + * struct backlight_ops - backlight operations + * + * The backlight operations are specified when the backlight device is registered. + */ struct backlight_ops { + /** + * @options: Configure how operations are called from the core. + * + * The options parameter is used to adjust the behaviour of the core. + * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called + * upon suspend and resume. + */ unsigned int options; #define BL_CORE_SUSPENDRESUME (1 << 0) - /* Notify the backlight driver some property has changed */ + /** + * @update_status: Operation called when properties have changed. + * + * Notify the backlight driver some property has changed. + * The update_status operation is protected by the update_lock. + * + * The backlight driver is expected to use backlight_is_blank() + * to check if the display is blanked and set brightness accordingly. + * update_status() is called when any of the properties has changed. + * + * RETURNS: + * + * 0 on success, negative error code if any failure occurred. + */ int (*update_status)(struct backlight_device *); + + /** + * @get_brightness: Return the current backlight brightness. + * + * The driver may implement this as a readback from the HW.
+ * This operation is optional and if not present then the current + * brightness property value is used. + * + * RETURNS: + * + * A brightness value which is 0 or a positive number. + * On failure a negative error code is returned. + */ int (*get_brightness)(struct backlight_device *); - - /* Check if given framebuffer device is the one bound to this backlight; - return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ - int (*check_fb)(struct backlight_device *, struct fb_info *); + + /** + * @check_fb: Check the framebuffer device. + * + * Check if the given framebuffer device is the one bound to this backlight. + * This operation is optional and if not implemented it is assumed that the + * fbdev is always the one bound to the backlight. + * + * RETURNS: + * + * If info is NULL or the info matches the fbdev bound to the backlight return true. + * If info does not match the fbdev bound to the backlight return false. + */ + int (*check_fb)(struct backlight_device *bd, struct fb_info *info); }; /* This structure defines all the properties of a backlight */ -- cgit v1.2.3 From cabf161335ca9fe695bb87469ed99be881cd08f5 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:28 +0200 Subject: backlight: backlight: Improve backlight_properties documentation Improve the documentation for backlight_properties and adapt it to kernel-doc style. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 96 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 85 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 4ff60774ae22..3554aef008ea 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -117,28 +117,102 @@ struct backlight_ops { int (*check_fb)(struct backlight_device *bd, struct fb_info *info); }; -/* This structure defines all the properties of a backlight */ +/** + * struct backlight_properties - backlight properties + * + * This structure defines all the properties of a backlight. + */ struct backlight_properties { - /* Current User requested brightness (0 - max_brightness) */ + /** + * @brightness: The current brightness requested by the user. + * + * The backlight core makes sure the range is (0 to max_brightness) + * when the brightness is set via the sysfs attribute: + * /sys/class/backlight/<backlight>/brightness. + * + * This value can be set in the backlight_properties passed + * to devm_backlight_device_register() to set a default brightness + * value. + */ int brightness; - /* Maximal value for brightness (read-only) */ + + /** + * @max_brightness: The maximum brightness value. + * + * This value must be set in the backlight_properties passed to + * devm_backlight_device_register() and shall not be modified by the + * driver after registration. + */ int max_brightness; - /* Current FB Power mode (0: full on, 1..3: power saving - modes; 4: full off), see FB_BLANK_XXX */ + + /** + * @power: The current power mode. + * + * User space can configure the power mode using the sysfs + * attribute: /sys/class/backlight/<backlight>/bl_power + * When the power property is updated, update_status() is called. + * + * The possible values are: (0: full on, 1 to 3: power saving + * modes; 4: full off), see FB_BLANK_XXX. + * + * When the backlight device is enabled @power is set + * to FB_BLANK_UNBLANK. When the backlight device is disabled + * @power is set to FB_BLANK_POWERDOWN.
+ */ int power; + + /** + * @fb_blank: The power state from the FBIOBLANK ioctl. + * + * When the FBIOBLANK ioctl is called @fb_blank is set to the + * blank parameter and the update_status() operation is called. + * + * When the backlight device is enabled @fb_blank is set + * to FB_BLANK_UNBLANK. When the backlight device is disabled + * @fb_blank is set to FB_BLANK_POWERDOWN. + * + * Backlight drivers should avoid using this property. It has been + * replaced by state & BL_CORE_FBBLANK (although most drivers should + * use backlight_is_blank() as the preferred means to get the blank + * state). + * + * fb_blank is deprecated and will be removed. + */ int fb_blank; - /* Backlight type */ + + /** + * @type: The type of backlight supported. + * + * The backlight type allows userspace to make appropriate + * policy decisions based on the backlight type. + * + * This value must be set in the backlight_properties + * passed to devm_backlight_device_register(). + */ enum backlight_type type; - /* Flags used to signal drivers of state changes */ + + /** + * @state: The state of the backlight core. + * + * The state is a bitmask. BL_CORE_FBBLANK is set when the display + * is expected to be blank. BL_CORE_SUSPENDED is set when the + * driver is suspended. + * + * Backlight drivers are expected to use backlight_is_blank() + * in their update_status() operation rather than reading the + * state property. + * + * The state is maintained by the core and drivers may not modify it. + */ unsigned int state; - /* Type of the brightness scale (linear, non-linear, ...) */ - enum backlight_scale scale; #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ + /** + * @scale: The type of the brightness scale. + */ + enum backlight_scale scale; }; struct backlight_device { -- cgit v1.2.3 From 6f10cd124c44eea018672d5852708ca5dca4d06d Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:29 +0200 Subject: backlight: backlight: Improve backlight_device documentation Improve the documentation for backlight_device and adapt it to kernel-doc style. The updated documentation is stricter about how locking is used. With the update neither update_lock nor ops_lock may be used outside the backlight core. This restriction was introduced to keep the locking simple by keeping it in the core. It was verified that this documents the current state by renaming update_lock => bl_update_lock and ops_lock => bl_ops_lock. The rename did not reveal any uses outside the backlight core. The rename is NOT part of this patch. Signed-off-by: Sam Ravnborg Reviewed-by: Emil Velikov Reviewed-by: Daniel Thompson Reviewed-by: Jingoo Han Signed-off-by: Lee Jones --- include/linux/backlight.h | 72 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 3554aef008ea..cf9977169fe8 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -14,21 +14,6 @@ #include #include -/* Notes on locking: - * - * backlight_device->ops_lock is an internal backlight lock protecting the - * ops pointer and no code outside the core should need to touch it.
- * - * Access to update_status() is serialised by the update_lock mutex since - * most drivers seem to need this and historically get it wrong. - * - * Most drivers don't need locking on their get_brightness() method. - * If yours does, you need to implement it in the driver. You can use the - * update_lock mutex if appropriate. - * - * Any other use of the locks below is probably wrong. - */ - enum backlight_update_reason { BACKLIGHT_UPDATE_HOTKEY, BACKLIGHT_UPDATE_SYSFS, @@ -215,30 +200,71 @@ struct backlight_properties { enum backlight_scale scale; }; +/** + * struct backlight_device - backlight device data + * + * This structure holds all data required by a backlight device. + */ struct backlight_device { - /* Backlight properties */ + /** + * @props: Backlight properties + */ struct backlight_properties props; - /* Serialise access to update_status method */ + /** + * @update_lock: The lock used when calling the update_status() operation. + * + * update_lock is an internal backlight lock that serialise access + * to the update_status() operation. The backlight core holds the update_lock + * when calling the update_status() operation. The update_lock shall not + * be used by backlight drivers. + */ struct mutex update_lock; - /* This protects the 'ops' field. If 'ops' is NULL, the driver that - registered this device has been unloaded, and if class_get_devdata() - points to something in the body of that driver, it is also invalid. */ + /** + * @ops_lock: The lock used around everything related to backlight_ops. + * + * ops_lock is an internal backlight lock that protects the ops pointer + * and is used around all accesses to ops and when the operations are + * invoked. The ops_lock shall not be used by backlight drivers. + */ struct mutex ops_lock; + + /** + * @ops: Pointer to the backlight operations. + * + * If ops is NULL, the driver that registered this device has been unloaded, + * and if class_get_devdata() points to something in the body of that driver, + * it is also invalid. + */ const struct backlight_ops *ops; - /* The framebuffer notifier block */ + /** + * @fb_notif: The framebuffer notifier block + */ struct notifier_block fb_notif; - /* list entry of all registered backlight devices */ + /** + * @entry: List entry of all registered backlight devices + */ struct list_head entry; + /** + * @dev: Parent device. + */ struct device dev; - /* Multiple framebuffers may share one backlight device */ + /** + * @fb_bl_on: The state of individual fbdev's. + * + * Multiple fbdev's may share one backlight device. The fb_bl_on + * records the state of the individual fbdev. + */ bool fb_bl_on[FB_MAX]; + /** + * @use_count: The number of uses of fb_bl_on. 
+ */ int use_count; }; -- cgit v1.2.3 From d160fd4e918da33f5ffbcf005cd95888dbbe4f76 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:30 +0200 Subject: backlight: backlight: Document inline functions in backlight.h Add documentation for the inline functions in backlight.h Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index cf9977169fe8..99381b5a289d 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -268,6 +268,10 @@ struct backlight_device { int use_count; }; +/** + * backlight_update_status - force an update of the backlight device status + * @bd: the backlight device + */ static inline int backlight_update_status(struct backlight_device *bd) { int ret = -ENOENT; @@ -361,6 +365,18 @@ extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) +/** + * bl_get_data - access devdata + * @bl_dev: pointer to backlight device + * + * When a backlight device is registered the driver has the possibility + * to supply a void * devdata. bl_get_data() return a pointer to the + * devdata. + * + * RETURNS: + * + * pointer to devdata stored while registering the backlight device. + */ static inline void * bl_get_data(struct backlight_device *bl_dev) { return dev_get_drvdata(&bl_dev->dev); -- cgit v1.2.3 From 2d15bb47f3331db979bd0a1987812ca1a3ff40c6 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:31 +0200 Subject: backlight: backlight: Document enums in backlight.h Add kernel-doc documentation for the backlight enums Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 72 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) (limited to 'include/linux') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 99381b5a289d..1d56b34ff33c 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -14,26 +14,98 @@ #include #include +/** + * enum backlight_update_reason - what method was used to update backlight + * + * A driver indicates the method (reason) used for updating the backlight + * when calling backlight_force_update(). + */ enum backlight_update_reason { + /** + * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key. + */ BACKLIGHT_UPDATE_HOTKEY, + + /** + * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs. + */ BACKLIGHT_UPDATE_SYSFS, }; +/** + * enum backlight_type - the type of backlight control + * + * The type of interface used to control the backlight. + */ enum backlight_type { + /** + * @BACKLIGHT_RAW: + * + * The backlight is controlled using hardware registers. + */ BACKLIGHT_RAW = 1, + + /** + * @BACKLIGHT_PLATFORM: + * + * The backlight is controlled using a platform-specific interface. + */ BACKLIGHT_PLATFORM, + + /** + * @BACKLIGHT_FIRMWARE: + * + * The backlight is controlled using a standard firmware interface. + */ BACKLIGHT_FIRMWARE, + + /** + * @BACKLIGHT_TYPE_MAX: Number of entries. 
+ */ BACKLIGHT_TYPE_MAX, }; +/** + * enum backlight_notification - the type of notification + * + * The type of notification sent to the receivers + * that registered for notifications using backlight_register_notifier(). + */ enum backlight_notification { + /** + * @BACKLIGHT_REGISTERED: The backlight device is registered. + */ BACKLIGHT_REGISTERED, + + /** + * @BACKLIGHT_UNREGISTERED: The backlight device is unregistered. + */ BACKLIGHT_UNREGISTERED, }; +/** + * enum backlight_scale - the type of scale used for brightness values + * + * The type of scale used for brightness values. + */ enum backlight_scale { + /** + * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown. + */ BACKLIGHT_SCALE_UNKNOWN = 0, + + /** + * @BACKLIGHT_SCALE_LINEAR: The scale is linear. + * + * The linear scale will increase brightness by the same amount for each step. + */ BACKLIGHT_SCALE_LINEAR, + + /** + * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear. + * + * This is often used when the brightness values try to adjust to + * the relative perception of the eye, demanding a non-linear scale. + */ BACKLIGHT_SCALE_NON_LINEAR, }; -- cgit v1.2.3 From 7ecdea4a0226f6c5cd0e86859d1b38cf17bc8529 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:32 +0200 Subject: backlight: generic_bl: Remove this driver as it is unused The backlight_bl driver required initialization using struct generic_bl_info. As there are no more references to this struct, there are no users left. So it is safe to delete the driver. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- drivers/video/backlight/Kconfig | 8 --- drivers/video/backlight/Makefile | 1 - drivers/video/backlight/generic_bl.c | 110 ----------------------------------- include/linux/backlight.h | 9 --- 4 files changed, 128 deletions(-) delete mode 100644 drivers/video/backlight/generic_bl.c (limited to 'include/linux') diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 95c546cc8774..87f9fc238d28 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -173,14 +173,6 @@ config BACKLIGHT_EP93XX To compile this driver as a module, choose M here: the module will be called ep93xx_bl. -config BACKLIGHT_GENERIC - tristate "Generic (aka Sharp Corgi) Backlight Driver" - default y - help - Say y to enable the generic platform backlight driver previously - known as the Corgi backlight driver. If you have a Sharp Zaurus - SL-C7xx, SL-Cxx00 or SL-6000x say y.
- config BACKLIGHT_IPAQ_MICRO tristate "iPAQ microcontroller backlight driver" depends on MFD_IPAQ_MICRO diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 2072d21b60f7..13463b99f1f9 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -31,7 +31,6 @@ obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o -obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c deleted file mode 100644 index 8fe63dbc8590..000000000000 --- a/drivers/video/backlight/generic_bl.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic Backlight Driver - * - * Copyright (c) 2004-2008 Richard Purdie - */ - -#include -#include -#include -#include -#include -#include -#include - -static int genericbl_intensity; -static struct backlight_device *generic_backlight_device; -static struct generic_bl_info *bl_machinfo; - -static int genericbl_send_intensity(struct backlight_device *bd) -{ - int intensity = bd->props.brightness; - - if (bd->props.power != FB_BLANK_UNBLANK) - intensity = 0; - if (bd->props.state & BL_CORE_FBBLANK) - intensity = 0; - if (bd->props.state & BL_CORE_SUSPENDED) - intensity = 0; - - bl_machinfo->set_bl_intensity(intensity); - - genericbl_intensity = intensity; - - if (bl_machinfo->kick_battery) - bl_machinfo->kick_battery(); - - return 0; -} - -static int genericbl_get_intensity(struct backlight_device *bd) -{ - return genericbl_intensity; -} - -static const struct backlight_ops genericbl_ops = { - .options = BL_CORE_SUSPENDRESUME, - .get_brightness = genericbl_get_intensity, - .update_status = genericbl_send_intensity, -}; - -static int genericbl_probe(struct platform_device *pdev) -{ - struct backlight_properties props; - struct generic_bl_info *machinfo = dev_get_platdata(&pdev->dev); - const char *name = "generic-bl"; - struct backlight_device *bd; - - bl_machinfo = machinfo; - if (!machinfo->limit_mask) - machinfo->limit_mask = -1; - - if (machinfo->name) - name = machinfo->name; - - memset(&props, 0, sizeof(struct backlight_properties)); - props.type = BACKLIGHT_RAW; - props.max_brightness = machinfo->max_intensity; - bd = devm_backlight_device_register(&pdev->dev, name, &pdev->dev, - NULL, &genericbl_ops, &props); - if (IS_ERR(bd)) - return PTR_ERR(bd); - - platform_set_drvdata(pdev, bd); - - bd->props.power = FB_BLANK_UNBLANK; - bd->props.brightness = machinfo->default_intensity; - backlight_update_status(bd); - - generic_backlight_device = bd; - - dev_info(&pdev->dev, "Generic Backlight Driver Initialized.\n"); - return 0; -} - -static int genericbl_remove(struct platform_device *pdev) -{ - struct backlight_device *bd = platform_get_drvdata(pdev); - - bd->props.power = 0; - bd->props.brightness = 0; - backlight_update_status(bd); - - dev_info(&pdev->dev, "Generic Backlight Driver Unloaded\n"); - return 0; -} - -static struct platform_driver genericbl_driver = { - .probe = genericbl_probe, - .remove = genericbl_remove, - .driver = { - .name = "generic-bl", - }, -}; - -module_platform_driver(genericbl_driver); - -MODULE_AUTHOR("Richard Purdie "); -MODULE_DESCRIPTION("Generic Backlight Driver"); -MODULE_LICENSE("GPL"); diff --git 
a/include/linux/backlight.h b/include/linux/backlight.h index 1d56b34ff33c..d0f01dc3b98d 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -454,15 +454,6 @@ static inline void * bl_get_data(struct backlight_device *bl_dev) return dev_get_drvdata(&bl_dev->dev); } -struct generic_bl_info { - const char *name; - int max_intensity; - int default_intensity; - int limit_mask; - void (*set_bl_intensity)(int intensity); - void (*kick_battery)(void); -}; - #ifdef CONFIG_OF struct backlight_device *of_find_backlight_by_node(struct device_node *node); #else -- cgit v1.2.3 From 9c4aa3118bab6f10904b7bc9bc34b501a083751b Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:33 +0200 Subject: backlight: backlight: Drop extern from prototypes No need to put "extern" in front of prototypes. While touching the prototypes adjust indent to follow the kernel style. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Emil Velikov Signed-off-by: Lee Jones --- include/linux/backlight.h | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index d0f01dc3b98d..c1824426fc9e 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -417,23 +417,26 @@ static inline bool backlight_is_blank(const struct backlight_device *bd) bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } -extern struct backlight_device *backlight_device_register(const char *name, - struct device *dev, void *devdata, const struct backlight_ops *ops, - const struct backlight_properties *props); -extern struct backlight_device *devm_backlight_device_register( - struct device *dev, const char *name, struct device *parent, - void *devdata, const struct backlight_ops *ops, - const struct backlight_properties *props); -extern void backlight_device_unregister(struct backlight_device *bd); -extern void devm_backlight_device_unregister(struct device *dev, - struct backlight_device *bd); -extern void backlight_force_update(struct backlight_device *bd, - enum backlight_update_reason reason); -extern int backlight_register_notifier(struct notifier_block *nb); -extern int backlight_unregister_notifier(struct notifier_block *nb); -extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +struct backlight_device * +backlight_device_register(const char *name, struct device *dev, void *devdata, + const struct backlight_ops *ops, + const struct backlight_properties *props); +struct backlight_device * +devm_backlight_device_register(struct device *dev, const char *name, + struct device *parent, void *devdata, + const struct backlight_ops *ops, + const struct backlight_properties *props); +void backlight_device_unregister(struct backlight_device *bd); +void devm_backlight_device_unregister(struct device *dev, + struct backlight_device *bd); +void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); +int backlight_register_notifier(struct notifier_block *nb); +int backlight_unregister_notifier(struct notifier_block *nb); struct backlight_device *backlight_device_get_by_name(const char *name); -extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); +struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +int backlight_device_set_brightness(struct backlight_device *bd, + unsigned long brightness); #define 
to_backlight_device(obj) container_of(obj, struct backlight_device, dev) -- cgit v1.2.3 From 2144d00ed0db28c764513080f95e4c49ea9133b0 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:36 +0200 Subject: backlight: backlight: Introduce backlight_get_brightness() Based on an idea from Emil Velikov, add a helper that checks backlight_is_blank() and return 0 as brightness if display is blank or the property value if not. This allows us to simplify the update_status() implementation in most of the backlight drivers. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- include/linux/backlight.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include/linux') diff --git a/include/linux/backlight.h b/include/linux/backlight.h index c1824426fc9e..26e89a8033f5 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -417,6 +417,25 @@ static inline bool backlight_is_blank(const struct backlight_device *bd) bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } +/** + * backlight_get_brightness - Returns the current brightness value + * @bd: the backlight device + * + * Returns the current brightness value, taking in consideration the current + * state. If backlight_is_blank() returns true then return 0 as brightness + * otherwise return the current brightness property value. + * + * Backlight drivers are expected to use this function in their update_status() + * operation to get the brightness value. + */ +static inline int backlight_get_brightness(const struct backlight_device *bd) +{ + if (backlight_is_blank(bd)) + return 0; + else + return bd->props.brightness; +} + struct backlight_device * backlight_device_register(const char *name, struct device *dev, void *devdata, const struct backlight_ops *ops, -- cgit v1.2.3 From 0f6a3256fd810eeca9c56cccafee46359d995138 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:42 +0200 Subject: backlight: backlight: Drop backlight_put() There are no external users of backlight_put(). Drop it and open code the two users in backlight.c. 
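For reference, the open-coded replacement used in backlight.c (as visible in the diff below) is simply:

	if (bd)
		put_device(&bd->dev);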
Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- drivers/video/backlight/backlight.c | 7 +++++-- include/linux/backlight.h | 10 ---------- 2 files changed, 5 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 82dc93ca859a..cba8505fef5a 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -718,7 +718,10 @@ EXPORT_SYMBOL(of_find_backlight); static void devm_backlight_release(void *data) { - backlight_put(data); + struct backlight_device *bd = data; + + if (bd) + put_device(&bd->dev); } /** @@ -746,7 +749,7 @@ struct backlight_device *devm_of_find_backlight(struct device *dev) return bd; ret = devm_add_action(dev, devm_backlight_release, bd); if (ret) { - backlight_put(bd); + put_device(&bd->dev); return ERR_PTR(ret); } return bd; diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 26e89a8033f5..64f91324c911 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -388,16 +388,6 @@ static inline int backlight_disable(struct backlight_device *bd) return backlight_update_status(bd); } -/** - * backlight_put - Drop backlight reference - * @bd: the backlight device to put - */ -static inline void backlight_put(struct backlight_device *bd) -{ - if (bd) - put_device(&bd->dev); -} - /** * backlight_is_blank - Return true if display is expected to be blank * @bd: the backlight device -- cgit v1.2.3 From b6539a11e807c531e51ff7ad9a25c3a1b6ff7340 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Jul 2020 10:07:43 +0200 Subject: backlight: backlight: Make of_find_backlight static There are no external users of of_find_backlight, as they have all changed to use the managed version. Make of_find_backlight static to prevent new external users. Signed-off-by: Sam Ravnborg Reviewed-by: Daniel Thompson Signed-off-by: Lee Jones --- drivers/video/backlight/backlight.c | 18 +----------------- include/linux/backlight.h | 6 ------ 2 files changed, 1 insertion(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index cba8505fef5a..537fe1b376ad 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -673,22 +673,7 @@ struct backlight_device *of_find_backlight_by_node(struct device_node *node) EXPORT_SYMBOL(of_find_backlight_by_node); #endif -/** - * of_find_backlight - Get backlight device - * @dev: Device - * - * This function looks for a property named 'backlight' on the DT node - * connected to @dev and looks up the backlight device. - * - * Call backlight_put() to drop the reference on the backlight device. - * - * Returns: - * A pointer to the backlight device if found. - * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight - * device is found. - * NULL if there's no backlight property. 
- */ -struct backlight_device *of_find_backlight(struct device *dev) +static struct backlight_device *of_find_backlight(struct device *dev) { struct backlight_device *bd = NULL; struct device_node *np; @@ -714,7 +699,6 @@ struct backlight_device *of_find_backlight(struct device *dev) return bd; } -EXPORT_SYMBOL(of_find_backlight); static void devm_backlight_release(void *data) { diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 64f91324c911..614653e07e3a 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -477,14 +477,8 @@ of_find_backlight_by_node(struct device_node *node) #endif #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) -struct backlight_device *of_find_backlight(struct device *dev); struct backlight_device *devm_of_find_backlight(struct device *dev); #else -static inline struct backlight_device *of_find_backlight(struct device *dev) -{ - return NULL; -} - static inline struct backlight_device * devm_of_find_backlight(struct device *dev) { -- cgit v1.2.3 From 76abf9cea6c8215ea45b9359f11f0e30127c544a Mon Sep 17 00:00:00 2001 From: Rishabh Bhatnagar Date: Thu, 16 Jul 2020 15:20:33 -0700 Subject: remoteproc: Pass size and offset as arguments to segment dump function Change the segment dump API signature to include size and offset arguments. Refactor the qcom_q6v5_mss driver to use these arguments while copying the segment. Doing this lays the ground work for "inline" coredump functionality being added in the next patch. Tested-by: Sibi Sankar Reviewed-by: Bjorn Andersson Reviewed-by: Sibi Sankar Reviewed-by: Mathieu Poirier Signed-off-by: Rishabh Bhatnagar Link: https://lore.kernel.org/r/1594938035-7327-4-git-send-email-rishabhb@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/qcom_q6v5_mss.c | 10 +++++----- drivers/remoteproc/remoteproc_coredump.c | 5 +++-- include/linux/remoteproc.h | 5 +++-- 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 1ee809b772ff..a3c412a3f143 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1202,7 +1202,7 @@ out: static void qcom_q6v5_dump_segment(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest) + void *dest, size_t cp_offset, size_t size) { int ret = 0; struct q6v5 *qproc = rproc->priv; @@ -1222,16 +1222,16 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, } if (!ret) - ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size); + ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size); if (ptr) { - memcpy(dest, ptr, segment->size); + memcpy(dest, ptr, size); iounmap(ptr); } else { - memset(dest, 0xff, segment->size); + memset(dest, 0xff, size); } - qproc->current_dump_size += segment->size; + qproc->current_dump_size += size; /* Reclaim mba after copying segments */ if (qproc->current_dump_size == qproc->total_dump_size) { diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c index ded02442374a..390f563ec019 100644 --- a/drivers/remoteproc/remoteproc_coredump.c +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -72,7 +72,8 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, dma_addr_t da, size_t size, void (*dumpfn)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest), + void *dest, size_t offset, + size_t size), void *priv) { struct rproc_dump_segment *segment; @@ -183,7 +184,7 @@ void rproc_coredump(struct rproc *rproc) 
elf_phdr_set_p_align(class, phdr, 0); if (segment->dump) { - segment->dump(rproc, segment, data + offset); + segment->dump(rproc, segment, data + offset, 0, segment->size); } else { ptr = rproc_da_to_va(rproc, segment->da, segment->size); if (!ptr) { diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 7c0567029f7c..5dab13b6baae 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -456,7 +456,7 @@ struct rproc_dump_segment { void *priv; void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest); + void *dest, size_t offset, size_t size); loff_t offset; }; @@ -638,7 +638,8 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, dma_addr_t da, size_t size, void (*dumpfn)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest), + void *dest, size_t offset, + size_t size), void *priv); int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine); -- cgit v1.2.3 From c97319881c9116dc7c56dd30115567b4078c4ba6 Mon Sep 17 00:00:00 2001 From: Rishabh Bhatnagar Date: Thu, 16 Jul 2020 15:20:34 -0700 Subject: remoteproc: Add inline coredump functionality The current coredump implementation uses vmalloc area to copy all the segments. But this might put strain on low memory targets as the firmware size sometimes is in tens of MBs. The situation becomes worse if there are multiple remote processors undergoing recovery at the same time. This patch adds inline coredump functionality that avoids extra memory usage. This requires recovery to be halted until data is read by userspace and free function is called. Reviewed-by: Bjorn Andersson Reviewed-by: Sibi Sankar Reviewed-by: Mathieu Poirier Signed-off-by: Rishabh Bhatnagar Tested-by: Sibi Sankar Link: https://lore.kernel.org/r/1594938035-7327-5-git-send-email-rishabhb@codeaurora.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/remoteproc_coredump.c | 156 +++++++++++++++++++++++++++---- include/linux/remoteproc.h | 16 ++++ 2 files changed, 154 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c index 390f563ec019..bb15a29038e8 100644 --- a/drivers/remoteproc/remoteproc_coredump.c +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -5,6 +5,7 @@ * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
*/ +#include #include #include #include @@ -12,6 +13,12 @@ #include "remoteproc_internal.h" #include "remoteproc_elf_helpers.h" +struct rproc_coredump_state { + struct rproc *rproc; + void *header; + struct completion dump_done; +}; + /** * rproc_coredump_cleanup() - clean up dump_segments list * @rproc: the remote processor handle @@ -115,12 +122,110 @@ int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine) } EXPORT_SYMBOL(rproc_coredump_set_elf_info); +static void rproc_coredump_free(void *data) +{ + struct rproc_coredump_state *dump_state = data; + + vfree(dump_state->header); + complete(&dump_state->dump_done); +} + +static void *rproc_coredump_find_segment(loff_t user_offset, + struct list_head *segments, + size_t *data_left) +{ + struct rproc_dump_segment *segment; + + list_for_each_entry(segment, segments, node) { + if (user_offset < segment->size) { + *data_left = segment->size - user_offset; + return segment; + } + user_offset -= segment->size; + } + + *data_left = 0; + return NULL; +} + +static void rproc_copy_segment(struct rproc *rproc, void *dest, + struct rproc_dump_segment *segment, + size_t offset, size_t size) +{ + void *ptr; + + if (segment->dump) { + segment->dump(rproc, segment, dest, offset, size); + } else { + ptr = rproc_da_to_va(rproc, segment->da + offset, size); + if (!ptr) { + dev_err(&rproc->dev, + "invalid copy request for segment %pad with offset %zu and size %zu)\n", + &segment->da, offset, size); + memset(dest, 0xff, size); + } else { + memcpy(dest, ptr, size); + } + } +} + +static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count, + void *data, size_t header_sz) +{ + size_t seg_data, bytes_left = count; + ssize_t copy_sz; + struct rproc_dump_segment *seg; + struct rproc_coredump_state *dump_state = data; + struct rproc *rproc = dump_state->rproc; + void *elfcore = dump_state->header; + + /* Copy the vmalloc'ed header first. */ + if (offset < header_sz) { + copy_sz = memory_read_from_buffer(buffer, count, &offset, + elfcore, header_sz); + + return copy_sz; + } + + /* + * Find out the segment memory chunk to be copied based on offset. + * Keep copying data until count bytes are read. + */ + while (bytes_left) { + seg = rproc_coredump_find_segment(offset - header_sz, + &rproc->dump_segments, + &seg_data); + /* EOF check */ + if (!seg) { + dev_info(&rproc->dev, "Ramdump done, %lld bytes read", + offset); + break; + } + + copy_sz = min_t(size_t, bytes_left, seg_data); + + rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data, + copy_sz); + + offset += copy_sz; + buffer += copy_sz; + bytes_left -= copy_sz; + } + + return count - bytes_left; +} + /** * rproc_coredump() - perform coredump * @rproc: rproc handle * * This function will generate an ELF header for the registered segments - * and create a devcoredump device associated with rproc. + * and create a devcoredump device associated with rproc. Based on the + * coredump configuration this function will directly copy the segments + * from device memory to userspace or copy segments from device memory to + * a separate buffer, which can then be read by userspace. + * The first approach avoids using extra vmalloc memory. But it will stall + * recovery flow until dump is read by userspace. 
*/ void rproc_coredump(struct rproc *rproc) { @@ -130,11 +235,13 @@ void rproc_coredump(struct rproc *rproc) size_t data_size; size_t offset; void *data; - void *ptr; u8 class = rproc->elf_class; int phnum = 0; + struct rproc_coredump_state dump_state; + enum rproc_dump_mechanism dump_conf = rproc->dump_conf; - if (list_empty(&rproc->dump_segments)) + if (list_empty(&rproc->dump_segments) || + dump_conf == RPROC_COREDUMP_DISABLED) return; if (class == ELFCLASSNONE) { @@ -144,7 +251,14 @@ void rproc_coredump(struct rproc *rproc) data_size = elf_size_of_hdr(class); list_for_each_entry(segment, &rproc->dump_segments, node) { - data_size += elf_size_of_phdr(class) + segment->size; + /* + * For default configuration buffer includes headers & segments. + * For inline dump buffer just includes headers as segments are + * directly read from device memory. + */ + data_size += elf_size_of_phdr(class); + if (dump_conf == RPROC_COREDUMP_DEFAULT) + data_size += segment->size; phnum++; } @@ -183,23 +297,29 @@ void rproc_coredump(struct rproc *rproc) elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X); elf_phdr_set_p_align(class, phdr, 0); - if (segment->dump) { - segment->dump(rproc, segment, data + offset, 0, segment->size); - } else { - ptr = rproc_da_to_va(rproc, segment->da, segment->size); - if (!ptr) { - dev_err(&rproc->dev, - "invalid coredump segment (%pad, %zu)\n", - &segment->da, segment->size); - memset(data + offset, 0xff, segment->size); - } else { - memcpy(data + offset, ptr, segment->size); - } - } + if (dump_conf == RPROC_COREDUMP_DEFAULT) + rproc_copy_segment(rproc, data + offset, segment, 0, + segment->size); offset += elf_phdr_get_p_filesz(class, phdr); phdr += elf_size_of_phdr(class); } + if (dump_conf == RPROC_COREDUMP_DEFAULT) { + dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); + return; + } + + /* Initialize the dump state struct to be used by rproc_coredump_read */ + dump_state.rproc = rproc; + dump_state.header = data; + init_completion(&dump_state.dump_done); + + dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL, + rproc_coredump_read, rproc_coredump_free); - dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); + /* + * Wait until the dump is read and free is called. Data is freed + * by devcoredump framework automatically after 5 minutes. + */ + wait_for_completion(&dump_state.dump_done); } diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 5dab13b6baae..0e8d2ff575b4 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -439,6 +439,20 @@ enum rproc_crash_type { RPROC_FATAL_ERROR, }; +/** + * enum rproc_dump_mechanism - Coredump options for core + * @RPROC_COREDUMP_DEFAULT: Copy dump to separate buffer and carry on with + recovery + * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. 
Stall + recovery until all segments are read + * @RPROC_COREDUMP_DISABLED: Don't perform any dump + */ +enum rproc_dump_mechanism { + RPROC_COREDUMP_DEFAULT, + RPROC_COREDUMP_INLINE, + RPROC_COREDUMP_DISABLED, +}; + /** * struct rproc_dump_segment - segment info from ELF header * @node: list node related to the rproc segment list @@ -471,6 +485,7 @@ struct rproc_dump_segment { * @dev: virtual device for refcounting and common remoteproc behavior * @power: refcount of users who need this rproc powered up * @state: state of the device + * @dump_conf: Currently selected coredump configuration * @lock: lock which protects concurrent manipulations of the rproc * @dbg_dir: debugfs directory of this rproc device * @traces: list of trace buffers @@ -505,6 +520,7 @@ struct rproc { struct device dev; atomic_t power; unsigned int state; + enum rproc_dump_mechanism dump_conf; struct mutex lock; struct dentry *dbg_dir; struct list_head traces; -- cgit v1.2.3 From 53aab92dec447f93489e07924e310d605a389dea Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 21 Jul 2020 10:17:16 -0700 Subject: Input: synaptics-rmi4 - drop a duplicated word Drop the repeated word "to" in a comment. Signed-off-by: Randy Dunlap Link: https://lore.kernel.org/r/20200719003131.21050-1-rdunlap@infradead.org Signed-off-by: Dmitry Torokhov --- include/linux/rmi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rmi.h b/include/linux/rmi.h index 7b22366d0065..8ed37f93f3c8 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -206,7 +206,7 @@ struct rmi_device_platform_data_spi { * * @reset_delay_ms - after issuing a reset command to the touch sensor, the * driver waits a few milliseconds to give the firmware a chance to - * to re-initialize. You can override the default wait period here. + * re-initialize. You can override the default wait period here. * @irq: irq associated with the attn gpio line, or negative */ struct rmi_device_platform_data { -- cgit v1.2.3 From c1e18d4fb9590268105e724444792656fcb3927d Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Mon, 15 Jun 2020 22:35:21 +0200 Subject: platform/chrome: cros_ec_proto: Do not export cros_ec_cmd_xfer() Now that all the remaining users of cros_ec_cmd_xfer() has been removed, make this function private to the cros_ec_proto module. Signed-off-by: Enric Balletbo i Serra --- drivers/platform/chrome/cros_ec_proto.c | 5 ++--- include/linux/platform_data/cros_ec_proto.h | 3 --- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 3e745e0fe092..11a2db7cd0f7 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -496,8 +496,8 @@ EXPORT_SYMBOL(cros_ec_query_all); * * Return: 0 on success or negative error code. */ -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg) +static int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg) { int ret; @@ -541,7 +541,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, return ret; } -EXPORT_SYMBOL(cros_ec_cmd_xfer); /** * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. 
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 383243326676..4a415ae851ef 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -216,9 +216,6 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); -- cgit v1.2.3 From 5f77d6ca5ca74e4b4a5e2e010f7ff50c45dea326 Mon Sep 17 00:00:00 2001 From: Liu Yi L Date: Fri, 24 Jul 2020 09:49:14 +0800 Subject: iommu/vt-d: Enforce PASID devTLB field mask Set proper masks to avoid invalid input spillover to reserved bits. Signed-off-by: Liu Yi L Signed-off-by: Jacob Pan Signed-off-by: Lu Baolu Reviewed-by: Eric Auger Link: https://lore.kernel.org/r/20200724014925.15523-2-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- include/linux/intel-iommu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 3e8fa1c7a1e6..311117b50e93 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -381,8 +381,8 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ -- cgit v1.2.3 From 78df6c86f0691f5b6e325006aeb470de443351ea Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Fri, 24 Jul 2020 09:49:15 +0800 Subject: iommu/vt-d: Remove global page support in devTLB flush Global page support was removed from VT-d spec 3.0 for dev TLB invalidation. This patch removes the bits for vSVA. A similar change was already made for the native SVA. See the link below.
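For illustration, a caller-side sketch of the updated flush helper (assuming iommu, sid, pfsid, pasid, qdep, addr and size_order are already computed, as in the diff below):

	/* PASID-based device IOTLB flush; the granu argument is gone */
	qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep,
				 addr, size_order);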
Signed-off-by: Jacob Pan Signed-off-by: Lu Baolu Reviewed-by: Eric Auger Link: https://lore.kernel.org/linux-iommu/20190830142919.GE11578@8bytes.org/T/ Link: https://lore.kernel.org/r/20200724014925.15523-3-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/dmar.c | 4 +--- drivers/iommu/intel/iommu.c | 4 ++-- include/linux/intel-iommu.h | 3 +-- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 683b812c5c47..9be08b9400ee 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1438,8 +1438,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, /* PASID-based device IOTLB Invalidate */ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, - u32 pasid, u16 qdep, u64 addr, - unsigned int size_order, u64 granu) + u32 pasid, u16 qdep, u64 addr, unsigned int size_order) { unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1); struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0}; @@ -1447,7 +1446,6 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) | QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid); - desc.qw1 = QI_DEV_EIOTLB_GLOB(granu); /* * If S bit is 0, we only flush a single page. If S bit is set, diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index d759e7234e98..bdd1e7d81178 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5474,7 +5474,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, info->pfsid, pasid, info->ats_qdep, inv_info->addr_info.addr, - size, granu); + size); break; case IOMMU_CACHE_INV_TYPE_DEV_IOTLB: if (info->ats_enabled) @@ -5482,7 +5482,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, info->pfsid, pasid, info->ats_qdep, inv_info->addr_info.addr, - size, granu); + size); else pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n"); break; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 311117b50e93..c7a8aae36771 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -381,7 +381,6 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) #define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) @@ -705,7 +704,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, u32 pasid, u16 qdep, u64 addr, - unsigned int size_order, u64 granu); + unsigned int size_order); void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid); -- cgit v1.2.3 From dd6692f1b883bac46036000a1e3a0b3785f89e87 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 24 Jul 2020 09:49:21 +0800 Subject: iommu/vt-d: Refactor device_to_iommu() helper It is refactored in two ways: - Make it global so that it can be used in other files. - Make bus/devfn optional so that callers can ignore these two returned values when they only want to get the corresponding iommu pointer.
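For illustration (a sketch, with dev assumed to be a valid struct device pointer), callers can now choose either form:

	struct intel_iommu *iommu;
	u8 bus, devfn;

	/* caller also needs the source-id components */
	iommu = device_to_iommu(dev, &bus, &devfn);

	/* caller only needs the iommu pointer */
	iommu = device_to_iommu(dev, NULL, NULL);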
Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20200724014925.15523-9-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 55 +++++++++++++-------------------------------- drivers/iommu/intel/svm.c | 8 +++---- include/linux/intel-iommu.h | 3 +-- 3 files changed, 21 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 0a4831532442..ce8458e8119c 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -778,16 +778,16 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge) return false; } -static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) +struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; + struct pci_dev *pdev = NULL; struct intel_iommu *iommu; struct device *tmp; - struct pci_dev *pdev = NULL; u16 segment = 0; int i; - if (iommu_dummy(dev)) + if (!dev || iommu_dummy(dev)) return NULL; if (dev_is_pci(dev)) { @@ -818,8 +818,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf if (pdev && pdev->is_virtfn) goto got_pdev; - *bus = drhd->devices[i].bus; - *devfn = drhd->devices[i].devfn; + if (bus && devfn) { + *bus = drhd->devices[i].bus; + *devfn = drhd->devices[i].devfn; + } goto out; } @@ -829,8 +831,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf if (pdev && drhd->include_all) { got_pdev: - *bus = pdev->bus->number; - *devfn = pdev->devfn; + if (bus && devfn) { + *bus = pdev->bus->number; + *devfn = pdev->devfn; + } goto out; } } @@ -5146,11 +5150,10 @@ static int aux_domain_add_dev(struct dmar_domain *domain, struct device *dev) { int ret; - u8 bus, devfn; unsigned long flags; struct intel_iommu *iommu; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return -ENODEV; @@ -5236,9 +5239,8 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct intel_iommu *iommu; int addr_width; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return -ENODEV; @@ -5668,9 +5670,8 @@ static bool intel_iommu_capable(enum iommu_cap cap) static struct iommu_device *intel_iommu_probe_device(struct device *dev) { struct intel_iommu *iommu; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return ERR_PTR(-ENODEV); @@ -5683,9 +5684,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) static void intel_iommu_release_device(struct device *dev) { struct intel_iommu *iommu; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return; @@ -5835,37 +5835,14 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev) return generic_device_group(dev); } -#ifdef CONFIG_INTEL_IOMMU_SVM -struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) -{ - struct intel_iommu *iommu; - u8 bus, devfn; - - if (iommu_dummy(dev)) { - dev_warn(dev, - "No IOMMU translation for device; cannot enable SVM\n"); - return NULL; - } - - iommu = device_to_iommu(dev, &bus, &devfn); - if ((!iommu)) { - dev_err(dev, "No IOMMU for device; cannot enable SVM\n"); - return NULL; - } - - return iommu; -} -#endif /* 
CONFIG_INTEL_IOMMU_SVM */ - static int intel_iommu_enable_auxd(struct device *dev) { struct device_domain_info *info; struct intel_iommu *iommu; unsigned long flags; - u8 bus, devfn; int ret; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu || dmar_disabled) return -EINVAL; diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index d386853121a2..65d2327dcd0d 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -231,7 +231,7 @@ static LIST_HEAD(global_svm_list); int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, struct iommu_gpasid_bind_data *data) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct dmar_domain *dmar_domain; struct intel_svm_dev *sdev; struct intel_svm *svm; @@ -369,7 +369,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, int intel_svm_unbind_gpasid(struct device *dev, int pasid) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct intel_svm_dev *sdev; struct intel_svm *svm; int ret = -EINVAL; @@ -426,7 +426,7 @@ static int intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops, struct mm_struct *mm, struct intel_svm_dev **sd) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct device_domain_info *info; struct intel_svm_dev *sdev; struct intel_svm *svm = NULL; @@ -604,7 +604,7 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid) struct intel_svm *svm; int ret = -EINVAL; - iommu = intel_svm_device_to_iommu(dev); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) goto out; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index c7a8aae36771..a57ffbcc84c7 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -727,6 +727,7 @@ void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); struct dmar_domain *find_domain(struct device *dev); struct device_domain_info *get_domain_info(struct device *dev); +struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); #ifdef CONFIG_INTEL_IOMMU_SVM extern void intel_svm_check(struct intel_iommu *iommu); @@ -765,8 +766,6 @@ struct intel_svm { struct list_head devs; struct list_head list; }; - -extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); #else static inline void intel_svm_check(struct intel_iommu *iommu) {} #endif -- cgit v1.2.3 From 8b73712115ebd603b75876f7d96d59e41d9107ad Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 24 Jul 2020 09:49:24 +0800 Subject: iommu/vt-d: Add page response ops support After page requests are handled, software must respond to the device which raised the page request with the result. This is done through the iommu ops.page_response if the request was reported to outside of vendor iommu driver through iommu_report_device_fault(). This adds the VT-d implementation of page_response ops. 
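As a rough consumer-side sketch (not part of this patch; it assumes the generic fault reporting API from include/linux/iommu.h and include/uapi/linux/iommu.h, with evt and dev coming from a previously reported fault), completing a page request could look like:

	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.flags	 = IOMMU_PAGE_RESP_PASID_VALID,
		.pasid	 = evt->fault.prm.pasid,
		.grpid	 = evt->fault.prm.grpid,
		.code	 = IOMMU_PAGE_RESP_SUCCESS,
	};

	/* routed through iommu_ops.page_response, here intel_svm_page_response() */
	ret = iommu_page_response(dev, &resp);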
Co-developed-by: Jacob Pan Co-developed-by: Liu Yi L Signed-off-by: Jacob Pan Signed-off-by: Liu Yi L Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20200724014925.15523-12-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 1 + drivers/iommu/intel/svm.c | 99 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/intel-iommu.h | 3 ++ 3 files changed, 103 insertions(+) (limited to 'include/linux') diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index ce8458e8119c..1cb3a2a050c3 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -6067,6 +6067,7 @@ const struct iommu_ops intel_iommu_ops = { .sva_bind = intel_svm_bind, .sva_unbind = intel_svm_unbind, .sva_get_pasid = intel_svm_get_pasid, + .page_response = intel_svm_page_response, #endif }; diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 140114ab1375..85ce8daa3177 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -1078,3 +1078,102 @@ int intel_svm_get_pasid(struct iommu_sva *sva) return pasid; } + +int intel_svm_page_response(struct device *dev, + struct iommu_fault_event *evt, + struct iommu_page_response *msg) +{ + struct iommu_fault_page_request *prm; + struct intel_svm_dev *sdev = NULL; + struct intel_svm *svm = NULL; + struct intel_iommu *iommu; + bool private_present; + bool pasid_present; + bool last_page; + u8 bus, devfn; + int ret = 0; + u16 sid; + + if (!dev || !dev_is_pci(dev)) + return -ENODEV; + + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu) + return -ENODEV; + + if (!msg || !evt) + return -EINVAL; + + mutex_lock(&pasid_mutex); + + prm = &evt->fault.prm; + sid = PCI_DEVID(bus, devfn); + pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; + last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + + if (!pasid_present) { + ret = -EINVAL; + goto out; + } + + if (prm->pasid == 0 || prm->pasid >= PASID_MAX) { + ret = -EINVAL; + goto out; + } + + ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev); + if (ret || !sdev) { + ret = -ENODEV; + goto out; + } + + /* + * For responses from userspace, need to make sure that the + * pasid has been bound to its mm. + */ + if (svm->flags & SVM_FLAG_GUEST_MODE) { + struct mm_struct *mm; + + mm = get_task_mm(current); + if (!mm) { + ret = -EINVAL; + goto out; + } + + if (mm != svm->mm) { + ret = -ENODEV; + mmput(mm); + goto out; + } + + mmput(mm); + } + + /* + * Per VT-d spec. v3.0 ch7.7, system software must respond + * with page group response if private data is present (PDP) + * or last page in group (LPIG) bit is set. This is an + * additional VT-d requirement beyond PCI ATS spec. 
+ */ + if (last_page || private_present) { + struct qi_desc desc; + + desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | + QI_PGRP_PASID_P(pasid_present) | + QI_PGRP_PDP(private_present) | + QI_PGRP_RESP_CODE(msg->code) | + QI_PGRP_RESP_TYPE; + desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); + desc.qw2 = 0; + desc.qw3 = 0; + if (private_present) + memcpy(&desc.qw2, prm->private_data, + sizeof(prm->private_data)); + + qi_submit_sync(iommu, &desc, 1, 0); + } +out: + mutex_unlock(&pasid_mutex); + return ret; +} diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index a57ffbcc84c7..9ff5e340948b 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -740,6 +740,9 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata); void intel_svm_unbind(struct iommu_sva *handle); int intel_svm_get_pasid(struct iommu_sva *handle); +int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, + struct iommu_page_response *msg); + struct svm_dev_ops; struct intel_svm_dev { -- cgit v1.2.3 From 75c88143f3b879664cc5bf68b91854c1a98f5e5b Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Wed, 22 Jul 2020 10:38:20 +0300 Subject: clk: at91: clk-master: add master clock support for SAMA7G5 Add master clock support (MCK1..4) for SAMA7G5. SAMA7G5's PMC has multiple master clocks feeding different subsystems. One of them feeds image subsystem and is changeable based on image subsystem needs. Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/1595403506-8209-13-git-send-email-claudiu.beznea@microchip.com Signed-off-by: Stephen Boyd --- drivers/clk/at91/clk-master.c | 310 +++++++++++++++++++++++++++++++++++++++++- drivers/clk/at91/pmc.h | 7 + include/linux/clk/at91_pmc.h | 1 + 3 files changed, 313 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index 88d545b1698c..bd0d8a69a2cf 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -17,30 +17,49 @@ #define MASTER_DIV_SHIFT 8 #define MASTER_DIV_MASK 0x3 +#define PMC_MCR 0x30 +#define PMC_MCR_ID_MSK GENMASK(3, 0) +#define PMC_MCR_CMD BIT(7) +#define PMC_MCR_DIV GENMASK(10, 8) +#define PMC_MCR_CSS GENMASK(20, 16) +#define PMC_MCR_CSS_SHIFT (16) +#define PMC_MCR_EN BIT(28) + +#define PMC_MCR_ID(x) ((x) & PMC_MCR_ID_MSK) + +#define MASTER_MAX_ID 4 + #define to_clk_master(hw) container_of(hw, struct clk_master, hw) struct clk_master { struct clk_hw hw; struct regmap *regmap; + spinlock_t *lock; const struct clk_master_layout *layout; const struct clk_master_characteristics *characteristics; + u32 *mux_table; u32 mckr; + int chg_pid; + u8 id; + u8 parent; + u8 div; }; -static inline bool clk_master_ready(struct regmap *regmap) +static inline bool clk_master_ready(struct clk_master *master) { + unsigned int bit = master->id ? 
AT91_PMC_MCKXRDY : AT91_PMC_MCKRDY; unsigned int status; - regmap_read(regmap, AT91_PMC_SR, &status); + regmap_read(master->regmap, AT91_PMC_SR, &status); - return !!(status & AT91_PMC_MCKRDY); + return !!(status & bit); } static int clk_master_prepare(struct clk_hw *hw) { struct clk_master *master = to_clk_master(hw); - while (!clk_master_ready(master->regmap)) + while (!clk_master_ready(master)) cpu_relax(); return 0; @@ -50,7 +69,7 @@ static int clk_master_is_prepared(struct clk_hw *hw) { struct clk_master *master = to_clk_master(hw); - return clk_master_ready(master->regmap); + return clk_master_ready(master); } static unsigned long clk_master_recalc_rate(struct clk_hw *hw, @@ -143,6 +162,287 @@ at91_clk_register_master(struct regmap *regmap, return hw; } +static unsigned long +clk_sama7g5_master_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_master *master = to_clk_master(hw); + + return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div)); +} + +static void clk_sama7g5_master_best_diff(struct clk_rate_request *req, + struct clk_hw *parent, + unsigned long parent_rate, + long *best_rate, + long *best_diff, + u32 div) +{ + unsigned long tmp_rate, tmp_diff; + + if (div == MASTER_PRES_MAX) + tmp_rate = parent_rate / 3; + else + tmp_rate = parent_rate >> div; + + tmp_diff = abs(req->rate - tmp_rate); + + if (*best_diff < 0 || *best_diff >= tmp_diff) { + *best_rate = tmp_rate; + *best_diff = tmp_diff; + req->best_parent_rate = parent_rate; + req->best_parent_hw = parent; + } +} + +static int clk_sama7g5_master_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_master *master = to_clk_master(hw); + struct clk_rate_request req_parent = *req; + struct clk_hw *parent; + long best_rate = LONG_MIN, best_diff = LONG_MIN; + unsigned long parent_rate; + unsigned int div, i; + + /* First: check the dividers of MCR. */ + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + parent = clk_hw_get_parent_by_index(hw, i); + if (!parent) + continue; + + parent_rate = clk_hw_get_rate(parent); + if (!parent_rate) + continue; + + for (div = 0; div < MASTER_PRES_MAX + 1; div++) { + clk_sama7g5_master_best_diff(req, parent, parent_rate, + &best_rate, &best_diff, + div); + if (!best_diff) + break; + } + + if (!best_diff) + break; + } + + /* Second: try to request rate form changeable parent. 
*/ + if (master->chg_pid < 0) + goto end; + + parent = clk_hw_get_parent_by_index(hw, master->chg_pid); + if (!parent) + goto end; + + for (div = 0; div < MASTER_PRES_MAX + 1; div++) { + if (div == MASTER_PRES_MAX) + req_parent.rate = req->rate * 3; + else + req_parent.rate = req->rate << div; + + if (__clk_determine_rate(parent, &req_parent)) + continue; + + clk_sama7g5_master_best_diff(req, parent, req_parent.rate, + &best_rate, &best_diff, div); + + if (!best_diff) + break; + } + +end: + pr_debug("MCK: %s, best_rate = %ld, parent clk: %s @ %ld\n", + __func__, best_rate, + __clk_get_name((req->best_parent_hw)->clk), + req->best_parent_rate); + + if (best_rate < 0) + return -EINVAL; + + req->rate = best_rate; + + return 0; +} + +static u8 clk_sama7g5_master_get_parent(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + u8 index; + + spin_lock_irqsave(master->lock, flags); + index = clk_mux_val_to_index(&master->hw, master->mux_table, 0, + master->parent); + spin_unlock_irqrestore(master->lock, flags); + + return index; +} + +static int clk_sama7g5_master_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + + if (index >= clk_hw_get_num_parents(hw)) + return -EINVAL; + + spin_lock_irqsave(master->lock, flags); + master->parent = clk_mux_index_to_val(master->mux_table, 0, index); + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static int clk_sama7g5_master_enable(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + unsigned int val, cparent; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id)); + regmap_read(master->regmap, PMC_MCR, &val); + regmap_update_bits(master->regmap, PMC_MCR, + PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV | + PMC_MCR_CMD | PMC_MCR_ID_MSK, + PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) | + (master->div << MASTER_DIV_SHIFT) | + PMC_MCR_CMD | PMC_MCR_ID(master->id)); + + cparent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT; + + /* Wait here only if parent is being changed. 
*/ + while ((cparent != master->parent) && !clk_master_ready(master)) + cpu_relax(); + + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static void clk_sama7g5_master_disable(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_update_bits(master->regmap, PMC_MCR, + PMC_MCR_EN | PMC_MCR_CMD | PMC_MCR_ID_MSK, + PMC_MCR_CMD | PMC_MCR_ID(master->id)); + + spin_unlock_irqrestore(master->lock, flags); +} + +static int clk_sama7g5_master_is_enabled(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_read(master->regmap, PMC_MCR, &val); + + spin_unlock_irqrestore(master->lock, flags); + + return !!(val & PMC_MCR_EN); +} + +static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long div, flags; + + div = DIV_ROUND_CLOSEST(parent_rate, rate); + if ((div > (1 << (MASTER_PRES_MAX - 1))) || (div & (div - 1))) + return -EINVAL; + + if (div == 3) + div = MASTER_PRES_MAX; + else + div = ffs(div) - 1; + + spin_lock_irqsave(master->lock, flags); + master->div = div; + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static const struct clk_ops sama7g5_master_ops = { + .enable = clk_sama7g5_master_enable, + .disable = clk_sama7g5_master_disable, + .is_enabled = clk_sama7g5_master_is_enabled, + .recalc_rate = clk_sama7g5_master_recalc_rate, + .determine_rate = clk_sama7g5_master_determine_rate, + .set_rate = clk_sama7g5_master_set_rate, + .get_parent = clk_sama7g5_master_get_parent, + .set_parent = clk_sama7g5_master_set_parent, +}; + +struct clk_hw * __init +at91_clk_sama7g5_register_master(struct regmap *regmap, + const char *name, int num_parents, + const char **parent_names, + u32 *mux_table, + spinlock_t *lock, u8 id, + bool critical, int chg_pid) +{ + struct clk_master *master; + struct clk_hw *hw; + struct clk_init_data init; + unsigned long flags; + unsigned int val; + int ret; + + if (!name || !num_parents || !parent_names || !mux_table || + !lock || id > MASTER_MAX_ID) + return ERR_PTR(-EINVAL); + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &sama7g5_master_ops; + init.parent_names = parent_names; + init.num_parents = num_parents; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; + if (chg_pid >= 0) + init.flags |= CLK_SET_RATE_PARENT; + if (critical) + init.flags |= CLK_IS_CRITICAL; + + master->hw.init = &init; + master->regmap = regmap; + master->id = id; + master->chg_pid = chg_pid; + master->lock = lock; + master->mux_table = mux_table; + + spin_lock_irqsave(master->lock, flags); + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_read(master->regmap, PMC_MCR, &val); + master->parent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT; + master->div = (val & PMC_MCR_DIV) >> MASTER_DIV_SHIFT; + spin_unlock_irqrestore(master->lock, flags); + + hw = &master->hw; + ret = clk_hw_register(NULL, &master->hw); + if (ret) { + kfree(master); + hw = ERR_PTR(ret); + } + + return hw; +} + const struct clk_master_layout at91rm9200_master_layout = { .mask = 0x31F, .pres_shift = 2, diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 
2bfe1405f9f8..29d150feaa46 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -154,6 +154,13 @@ at91_clk_register_master(struct regmap *regmap, const char *name, const struct clk_master_layout *layout, const struct clk_master_characteristics *characteristics); +struct clk_hw * __init +at91_clk_sama7g5_register_master(struct regmap *regmap, + const char *name, int num_parents, + const char **parent_names, u32 *mux_table, + spinlock_t *lock, u8 id, bool critical, + int chg_pid); + struct clk_hw * __init at91_clk_register_peripheral(struct regmap *regmap, const char *name, const char *parent_name, u32 id); diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 49a53a137610..77d6dabc4c3c 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -174,6 +174,7 @@ #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ #define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ +#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */ #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ #define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */ -- cgit v1.2.3 From 0416824edca1cdcb6e00e6f909423bf0fc529eef Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Wed, 22 Jul 2020 10:38:23 +0300 Subject: clk: at91: add macro for pll ids mask Add a macro for the PLL IDs mask. Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/1595403506-8209-16-git-send-email-claudiu.beznea@microchip.com Signed-off-by: Stephen Boyd --- include/linux/clk/at91_pmc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 77d6dabc4c3c..dc5e85f124e0 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -59,6 +59,7 @@ #define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */ #define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */ #define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */ +#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */ #define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */ #define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ -- cgit v1.2.3 From ef396df99251b848596c717b63ff4fe74a941193 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Wed, 22 Jul 2020 10:38:25 +0300 Subject: clk: at91: clk-utmi: add utmi support for sama7g5 Add UTMI support for SAMA7G5. SAMA7G5's UTMI control is done via the XTALF register. Values written to bits 2..0 of this register correspond to the on-board crystal oscillator frequency.
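For reference, the XTALF[2:0] encoding added here maps the supported main_xtal rates as summarized by the small helper below (an illustrative sketch only, not code from this patch; the function name is made up, while the values are those used by clk_utmi_sama7g5_prepare() in the diff that follows):

#include <linux/errno.h>

/* Sketch of the XTALF[2:0] encoding per this patch. */
static int example_xtalf_encode(unsigned long main_xtal_rate)
{
        switch (main_xtal_rate) {
        case 16000000: return 0; /* 16 MHz */
        case 20000000: return 2; /* 20 MHz */
        case 24000000: return 3; /* 24 MHz */
        case 32000000: return 5; /* 32 MHz */
        default:       return -EINVAL; /* unsupported main_xtal rate */
        }
}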
Signed-off-by: Claudiu Beznea Link: https://lore.kernel.org/r/1595403506-8209-18-git-send-email-claudiu.beznea@microchip.com Signed-off-by: Stephen Boyd --- drivers/clk/at91/clk-utmi.c | 103 ++++++++++++++++++++++++++++++++++++++++--- drivers/clk/at91/pmc.h | 4 ++ include/linux/clk/at91_pmc.h | 2 + 3 files changed, 104 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c index f1ef4e1f41a9..df9f3fc3b6a6 100644 --- a/drivers/clk/at91/clk-utmi.c +++ b/drivers/clk/at91/clk-utmi.c @@ -120,9 +120,11 @@ static const struct clk_ops utmi_ops = { .recalc_rate = clk_utmi_recalc_rate, }; -struct clk_hw * __init -at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, - const char *name, const char *parent_name) +static struct clk_hw * __init +at91_clk_register_utmi_internal(struct regmap *regmap_pmc, + struct regmap *regmap_sfr, + const char *name, const char *parent_name, + const struct clk_ops *ops, unsigned long flags) { struct clk_utmi *utmi; struct clk_hw *hw; @@ -134,10 +136,10 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, return ERR_PTR(-ENOMEM); init.name = name; - init.ops = &utmi_ops; + init.ops = ops; init.parent_names = parent_name ? &parent_name : NULL; init.num_parents = parent_name ? 1 : 0; - init.flags = CLK_SET_RATE_GATE; + init.flags = flags; utmi->hw.init = &init; utmi->regmap_pmc = regmap_pmc; @@ -152,3 +154,94 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, return hw; } + +struct clk_hw * __init +at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, + const char *name, const char *parent_name) +{ + return at91_clk_register_utmi_internal(regmap_pmc, regmap_sfr, name, + parent_name, &utmi_ops, CLK_SET_RATE_GATE); +} + +static int clk_utmi_sama7g5_prepare(struct clk_hw *hw) +{ + struct clk_utmi *utmi = to_clk_utmi(hw); + struct clk_hw *hw_parent; + unsigned long parent_rate; + unsigned int val; + + hw_parent = clk_hw_get_parent(hw); + parent_rate = clk_hw_get_rate(hw_parent); + + switch (parent_rate) { + case 16000000: + val = 0; + break; + case 20000000: + val = 2; + break; + case 24000000: + val = 3; + break; + case 32000000: + val = 5; + break; + default: + pr_err("UTMICK: unsupported main_xtal rate\n"); + return -EINVAL; + } + + regmap_write(utmi->regmap_pmc, AT91_PMC_XTALF, val); + + return 0; + +} + +static int clk_utmi_sama7g5_is_prepared(struct clk_hw *hw) +{ + struct clk_utmi *utmi = to_clk_utmi(hw); + struct clk_hw *hw_parent; + unsigned long parent_rate; + unsigned int val; + + hw_parent = clk_hw_get_parent(hw); + parent_rate = clk_hw_get_rate(hw_parent); + + regmap_read(utmi->regmap_pmc, AT91_PMC_XTALF, &val); + switch (val & 0x7) { + case 0: + if (parent_rate == 16000000) + return 1; + break; + case 2: + if (parent_rate == 20000000) + return 1; + break; + case 3: + if (parent_rate == 24000000) + return 1; + break; + case 5: + if (parent_rate == 32000000) + return 1; + break; + default: + break; + } + + return 0; +} + +static const struct clk_ops sama7g5_utmi_ops = { + .prepare = clk_utmi_sama7g5_prepare, + .is_prepared = clk_utmi_sama7g5_is_prepared, + .recalc_rate = clk_utmi_recalc_rate, +}; + +struct clk_hw * __init +at91_clk_sama7g5_register_utmi(struct regmap *regmap_pmc, const char *name, + const char *parent_name) +{ + return at91_clk_register_utmi_internal(regmap_pmc, NULL, name, + parent_name, &sama7g5_utmi_ops, 0); +} diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h 
index 6340b9be8205..7b86affc6d7c 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -236,6 +236,10 @@ struct clk_hw * __init at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, const char *name, const char *parent_name); +struct clk_hw * __init +at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name, + const char *parent_name); + #ifdef CONFIG_PM void pmc_register_id(u8 id); void pmc_register_pck(u8 pck); diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index dc5e85f124e0..a4f82e836a7c 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -137,6 +137,8 @@ #define AT91_PMC_PLLADIV2_ON (1 << 12) #define AT91_PMC_H32MXDIV BIT(24) +#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */ + #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ #define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ #define AT91_PMC_USBS_PLLA (0 << 0) -- cgit v1.2.3 From f34ce7a7018c2f71d78fc7f512f6daf01e487114 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 12 Jun 2020 11:39:55 +0800 Subject: iommu: Add gfp parameter to io_pgtable_ops->map() Currently the ARM page tables are always allocated with GFP_ATOMIC, but iommu_ops->map() gained a gfp_t parameter in commit 781ca2de89ba ("iommu: Add gfp parameter to iommu_ops::map"), so io_pgtable_ops->map() should use the gfp parameter passed down from iommu_ops->map() to allocate page-table pages. This avoids needlessly draining the memory allocator's atomic pools in non-atomic contexts. Signed-off-by: Baolin Wang Acked-by: Will Deacon Link: https://lore.kernel.org/r/3093df4cb95497aaf713fca623ce4ecebb197c2e.1591930156.git.baolin.wang@linux.alibaba.com Signed-off-by: Joerg Roedel --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +- drivers/iommu/arm-smmu-v3.c | 2 +- drivers/iommu/arm-smmu.c | 2 +- drivers/iommu/io-pgtable-arm-v7s.c | 18 +++++++++--------- drivers/iommu/io-pgtable-arm.c | 18 +++++++++--------- drivers/iommu/ipmmu-vmsa.c | 2 +- drivers/iommu/msm_iommu.c | 2 +- drivers/iommu/mtk_iommu.c | 2 +- drivers/iommu/qcom_iommu.c | 2 +- include/linux/io-pgtable.h | 2 +- 10 files changed, 26 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index ed28aeba6d59..5a39eee8cf83 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, while (len) { size_t pgsize = get_pgsize(iova | paddr, len); - ops->map(ops, iova, paddr, pgsize, prot); + ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL); iova += pgsize; paddr += pgsize; len -= pgsize; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index f578677a5c41..7b59f06b3913 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2850,7 +2850,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, if (!ops) return -ENODEV; - return ops->map(ops, iova, paddr, size, prot); + return ops->map(ops, iova, paddr, size, prot, gfp); } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 243bc4cb2705..dc1d2535798a 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1227,7 +1227,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long
iova, return -ENODEV; arm_smmu_rpm_get(smmu); - ret = ops->map(ops, iova, paddr, size, prot); + ret = ops->map(ops, iova, paddr, size, prot, gfp); arm_smmu_rpm_put(smmu); return ret; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 4272fe4e17f4..a688f22cbe3b 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table, static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, int prot, - int lvl, arm_v7s_iopte *ptep) + int lvl, arm_v7s_iopte *ptep, gfp_t gfp) { struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_v7s_iopte pte, *cptep; @@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, /* Grab a pointer to the next level */ pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data); + cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data); if (!cptep) return -ENOMEM; @@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, } /* Rinse, repeat */ - return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep); + return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); } static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable *iop = &data->iop; @@ -530,7 +530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, paddr >= (1ULL << data->iop.cfg.oas))) return -ERANGE; - ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); + ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp); /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. 
@@ -922,12 +922,12 @@ static int __init arm_v7s_do_selftests(void) if (ops->map(ops, iova, iova, size, IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | - IOMMU_CACHE)) + IOMMU_CACHE, GFP_KERNEL)) return __FAIL(ops); /* Overlapping mappings */ if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC)) + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) @@ -946,7 +946,7 @@ static int __init arm_v7s_do_selftests(void) return __FAIL(ops); /* Remap of partial unmap */ - if (ops->map(ops, iova_start + size, size, size, IOMMU_READ)) + if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova_start + size + 42) @@ -967,7 +967,7 @@ static int __init arm_v7s_do_selftests(void) return __FAIL(ops); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) + if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 04fbd4bf0ff9..4a5a7b083a9b 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, arm_lpae_iopte prot, - int lvl, arm_lpae_iopte *ptep) + int lvl, arm_lpae_iopte *ptep, gfp_t gfp) { arm_lpae_iopte *cptep, pte; size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); @@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, /* Grab a pointer to the next level */ pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg); + cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg); if (!cptep) return -ENOMEM; @@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, } /* Rinse, repeat */ - return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); + return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); } static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, @@ -461,7 +461,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, } static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int iommu_prot) + phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable_cfg *cfg = &data->iop.cfg; @@ -483,7 +483,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, return -ERANGE; prot = arm_lpae_prot_to_pte(data, iommu_prot); - ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); + ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp); /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. 
@@ -1178,12 +1178,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) if (ops->map(ops, iova, iova, size, IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | - IOMMU_CACHE)) + IOMMU_CACHE, GFP_KERNEL)) return __FAIL(ops, i); /* Overlapping mappings */ if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC)) + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) @@ -1198,7 +1198,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) return __FAIL(ops, i); /* Remap of partial unmap */ - if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) + if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) @@ -1216,7 +1216,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) return __FAIL(ops, i); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) + if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 4c2972f3153b..87475b2f7ef1 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -687,7 +687,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, if (!domain) return -ENODEV; - return domain->iop->map(domain->iop, iova, paddr, size, prot); + return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp); } static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index f773cc85f311..3615cd6241c4 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -491,7 +491,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, int ret; spin_lock_irqsave(&priv->pgtlock, flags); - ret = priv->iop->map(priv->iop, iova, pa, len, prot); + ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC); spin_unlock_irqrestore(&priv->pgtlock, flags); return ret; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 2be96f1cdbd2..b7b16414a217 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -397,7 +397,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, paddr |= BIT_ULL(32); /* Synchronize with the tlb_lock */ - return dom->iop->map(dom->iop, iova, paddr, size, prot); + return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp); } static size_t mtk_iommu_unmap(struct iommu_domain *domain, diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index c3e1fbd1988c..cfcfd7553b30 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -441,7 +441,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); + ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC); spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); return ret; } diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 53d53c6c2be9..23285ba645db 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -155,7 +155,7 @@ struct io_pgtable_cfg { */ struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); + phys_addr_t paddr, size_t size, 
int prot, gfp_t gfp); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, -- cgit v1.2.3 From b1012ca8dc4f9b1a1fe8e2cb1590dd6d43ea3849 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Thu, 23 Jul 2020 09:34:37 +0800 Subject: iommu/vt-d: Skip TE disabling on quirky gfx dedicated iommu The VT-d spec requires (10.4.4 Global Command Register, TE field) that: Hardware implementations supporting DMA draining must drain any in-flight DMA read/write requests queued within the Root-Complex before completing the translation enable command and reflecting the status of the command through the TES field in the Global Status register. Unfortunately, some integrated graphics devices fail to do so after some kind of power state transition. As a result, the system might get stuck in iommu_disable_translation(), waiting for the completion of the TE transition. This provides a quirk list for those devices and skips TE disabling if the quirk hits. Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=208363 Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=206571 Signed-off-by: Lu Baolu Tested-by: Koba Ko Tested-by: Jun Miao Cc: Ashok Raj Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200723013437.2268-1-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/dmar.c | 1 + drivers/iommu/intel/iommu.c | 27 +++++++++++++++++++++++++++ include/linux/dmar.h | 1 + include/linux/intel-iommu.h | 2 ++ 4 files changed, 31 insertions(+) (limited to 'include/linux') diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index f6cbe3f95c8d..93e6345f3414 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1102,6 +1102,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) } drhd->iommu = iommu; + iommu->drhd = drhd; return 0; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b3faf99457a6..0c2d582ff8cd 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -356,6 +356,7 @@ static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; static int intel_no_bounce; +static int iommu_skip_te_disable; #define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 @@ -1633,6 +1634,10 @@ static void iommu_disable_translation(struct intel_iommu *iommu) u32 sts; unsigned long flag; + if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && + (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) + return; + raw_spin_lock_irqsave(&iommu->register_lock, flag); iommu->gcmd &= ~DMA_GCMD_TE; writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); @@ -4043,6 +4048,7 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices.
Either bypass it or set the gfx_mapped flag, as appropriate */ + drhd->gfx_dedicated = 1; if (!dmar_map_gfx) { drhd->ignored = 1; for_each_active_dev_scope(drhd->devices, @@ -6170,6 +6176,27 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_g DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); +static void quirk_igfx_skip_te_disable(struct pci_dev *dev) +{ + unsigned short ver; + + if (!IS_GFX_DEVICE(dev)) + return; + + ver = (dev->device >> 8) & 0xff; + if (ver != 0x45 && ver != 0x46 && ver != 0x4c && + ver != 0x4e && ver != 0x8a && ver != 0x98 && + ver != 0x9a) + return; + + if (risky_device(dev)) + return; + + pci_info(dev, "Skip IOMMU disabling for graphics\n"); + iommu_skip_te_disable = 1; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable); + /* On Tylersburg chipsets, some BIOSes have been known to enable the ISOCH DMAR unit for the Azalia sound device, but not give it any TLB entries, which causes it to deadlock. Check for that. We do diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7bf029df737..65565820328a 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -48,6 +48,7 @@ struct dmar_drhd_unit { u16 segment; /* PCI domain */ u8 ignored:1; /* ignore drhd */ u8 include_all:1; + u8 gfx_dedicated:1; /* graphic dedicated */ struct intel_iommu *iommu; }; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 9ff5e340948b..b1ed2f25f7c0 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -599,6 +599,8 @@ struct intel_iommu { struct iommu_device iommu; /* IOMMU core code handle */ int node; u32 flags; /* Software defined flags */ + + struct dmar_drhd_unit *drhd; }; /* PCI domain-device relationship */ -- cgit v1.2.3 From 0c2a34937f7e4c4776bb261114c475392da2355c Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 30 Jun 2020 18:24:40 +0200 Subject: i2c: revert "i2c: core: Allow drivers to disable i2c-core irq mapping" This manually reverts commit d1d84bb95364ed604015c2b788caaf3dbca0262f. The only user went away two years ago with commit 589edb56b424 ("ACPI / scan: Create platform device for INT33FE ACPI nodes") and no new user has shown up. Remove it and hope we will never need it again. Signed-off-by: Wolfram Sang Acked-by: Hans de Goede Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 6 +++--- include/linux/i2c.h | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 26f03a14a478..dc43242a85ba 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -319,11 +319,9 @@ static int i2c_device_probe(struct device *dev) if (!client) return 0; - driver = to_i2c_driver(dev->driver); - client->irq = client->init_irq; - if (!client->irq && !driver->disable_i2c_core_irq_mapping) { + if (!client->irq) { int irq = -ENOENT; if (client->flags & I2C_CLIENT_HOST_NOTIFY) { @@ -349,6 +347,8 @@ static int i2c_device_probe(struct device *dev) client->irq = irq; } + driver = to_i2c_driver(dev->driver); + /* * An I2C ID table is not mandatory, if and only if, a suitable OF * or ACPI ID table is supplied for the probing device.
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b8b8963f8bb9..098405df431f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -231,7 +231,6 @@ enum i2c_alert_protocol { * @detect: Callback for device detection * @address_list: The I2C addresses to probe (for detect) * @clients: List of detected clients we created (for i2c-core use only) - * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping * * The driver.owner field should be set to the module owner of this driver. * The driver.name field should be set to the name of this driver. @@ -290,8 +289,6 @@ struct i2c_driver { int (*detect)(struct i2c_client *client, struct i2c_board_info *info); const unsigned short *address_list; struct list_head clients; - - bool disable_i2c_core_irq_mapping; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) -- cgit v1.2.3 From 92fe2aa859f52ce6aa595ca97fec110dc7100e63 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:07:30 -0700 Subject: libnvdimm: Validate command family indices The ND_CMD_CALL format allows for a general passthrough of passlisted commands targeting a given command set. However there is no validation of the family index relative to what the bus supports. - Update the NFIT bus implementation (the only one that supports ND_CMD_CALL passthrough) to also passlist the valid set of command family indices. - Update the generic __nd_ioctl() path to validate that field on behalf of all implementations. Fixes: 31eca76ba2fc ("nfit, libnvdimm: limited/whitelisted dimm command marshaling mechanism") Cc: Vishal Verma Cc: Dave Jiang Cc: Ira Weiny Cc: "Rafael J. Wysocki" Cc: Len Brown Cc: Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- drivers/acpi/nfit/core.c | 11 +++++++++-- drivers/acpi/nfit/nfit.h | 1 - drivers/nvdimm/bus.c | 16 ++++++++++++++++ include/linux/libnvdimm.h | 2 ++ include/uapi/linux/ndctl.h | 4 ++++ 5 files changed, 31 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 7c138a4edc03..1f72ce1a782b 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1823,6 +1823,7 @@ static void populate_shutdown_status(struct nfit_mem *nfit_mem) static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, u32 device_handle) { + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct acpi_device *adev, *adev_dimm; struct device *dev = acpi_desc->dev; unsigned long dsm_mask, label_mask; @@ -1834,6 +1835,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, /* nfit test assumes 1:1 relationship between commands and dsms */ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; nfit_mem->family = NVDIMM_FAMILY_INTEL; + set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x", @@ -1886,10 +1888,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, * Note, that checking for function0 (bit0) tells us if any commands * are reachable through this GUID. 
*/ + clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) - if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) + if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) { + set_bit(i, &nd_desc->dimm_family_mask); if (family < 0 || i == default_dsm_family) family = i; + } /* limit the supported commands to those that are publicly documented */ nfit_mem->family = family; @@ -2153,6 +2158,9 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; + set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); + set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); + adev = to_acpi_dev(acpi_desc); if (!adev) return; @@ -2160,7 +2168,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, &nd_desc->cmd_mask); - set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); dsm_mask = (1 << ND_CMD_ARS_CAP) | diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index f5525f8bb770..5c5e7ebba8dc 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -33,7 +33,6 @@ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) -#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV #define NVDIMM_CMD_MAX 31 #define NVDIMM_STANDARD_CMDMASK \ diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 09087c38fabd..955265656b96 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -1037,9 +1037,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, dimm_name = "bus"; } + /* Validate command family support against bus declared support */ if (cmd == ND_CMD_CALL) { + unsigned long *mask; + if (copy_from_user(&pkg, p, sizeof(pkg))) return -EFAULT; + + if (nvdimm) { + if (pkg.nd_family > NVDIMM_FAMILY_MAX) + return -EINVAL; + mask = &nd_desc->dimm_family_mask; + } else { + if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX) + return -EINVAL; + mask = &nd_desc->bus_family_mask; + } + + if (!test_bit(pkg.nd_family, mask)) + return -EINVAL; } if (!desc || diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 18da4059be09..bd39a2cf7972 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -78,6 +78,8 @@ struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long bus_dsm_mask; unsigned long cmd_mask; + unsigned long dimm_family_mask; + unsigned long bus_family_mask; struct module *module; char *provider_name; struct device_node *of_node; diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 0e09dc5cec19..e9468b9332bd 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -245,6 +245,10 @@ struct nd_cmd_pkg { #define NVDIMM_FAMILY_MSFT 3 #define NVDIMM_FAMILY_HYPERV 4 #define NVDIMM_FAMILY_PAPR 5 +#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_PAPR + +#define NVDIMM_BUS_FAMILY_NFIT 0 +#define NVDIMM_BUS_FAMILY_MAX NVDIMM_BUS_FAMILY_NFIT #define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ struct nd_cmd_pkg) -- cgit v1.2.3 From d46e6a2176f8edf7030db34aeb54a4f016fabe0a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:07:35 -0700 Subject: ACPI: NFIT: Move bus_dsm_mask out of generic nvdimm_bus_descriptor DSMs are strictly an ACPI mechanism, evict the bus_dsm_mask concept from the generic 'struct 
nvdimm_bus_descriptor' object. As a side effect the test facility ->bus_nfit_cmd_force_en is no longer necessary. The test infrastructure can communicate that information directly in ->bus_dsm_mask. Cc: Vishal Verma Cc: Dave Jiang Cc: Ira Weiny Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- drivers/acpi/nfit/core.c | 8 ++++---- drivers/acpi/nfit/nfit.h | 2 +- include/linux/libnvdimm.h | 1 - tools/testing/nvdimm/test/nfit.c | 16 ++++++++-------- 4 files changed, 13 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 1f72ce1a782b..9fdd655bdf0e 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -478,7 +478,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; - dsm_mask = nd_desc->bus_dsm_mask; + dsm_mask = acpi_desc->bus_dsm_mask; desc = nd_cmd_bus_desc(cmd); guid = to_nfit_uuid(NFIT_DEV_BUS); handle = adev->handle; @@ -1238,8 +1238,9 @@ static ssize_t bus_dsm_mask_show(struct device *dev, { struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask); + return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask); } static struct device_attribute dev_attr_bus_dsm_mask = __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); @@ -2157,7 +2158,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) int i; nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; - nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); @@ -2180,7 +2180,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) (1 << NFIT_CMD_ARS_INJECT_GET); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) - set_bit(i, &nd_desc->bus_dsm_mask); + set_bit(i, &acpi_desc->bus_dsm_mask); } static ssize_t range_index_show(struct device *dev, diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 5c5e7ebba8dc..da097149d94d 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -237,7 +237,7 @@ struct acpi_nfit_desc { unsigned long scrub_flags; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; - unsigned long bus_nfit_cmd_force_en; + unsigned long bus_dsm_mask; unsigned int platform_cap; unsigned int scrub_tmo; int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index bd39a2cf7972..ad9898ece7d3 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -76,7 +76,6 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct device_node; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; - unsigned long bus_dsm_mask; unsigned long cmd_mask; unsigned long dimm_family_mask; unsigned long bus_family_mask; diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index a8ee5c4d41eb..a59174ba1d2a 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -2507,10 +2507,10 @@ static void nfit_test0_setup(struct nfit_test *t) set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); set_bit(ND_CMD_CALL, 
&acpi_desc->bus_cmd_force_en); - set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en); - set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en); - set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en); - set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en); + set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_dsm_mask); + set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_dsm_mask); + set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_dsm_mask); + set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_dsm_mask); set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en); set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en); set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en); @@ -2731,11 +2731,11 @@ static int nfit_ctl_test(struct device *dev) .module = THIS_MODULE, .provider_name = "ACPI.NFIT", .ndctl = acpi_nfit_ctl, - .bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA - | 1UL << NFIT_CMD_ARS_INJECT_SET - | 1UL << NFIT_CMD_ARS_INJECT_CLEAR - | 1UL << NFIT_CMD_ARS_INJECT_GET, }, + .bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA + | 1UL << NFIT_CMD_ARS_INJECT_SET + | 1UL << NFIT_CMD_ARS_INJECT_CLEAR + | 1UL << NFIT_CMD_ARS_INJECT_GET, .dev = &adev->dev, }; -- cgit v1.2.3 From 60d360acddc54344409a710af07c561e025f13f5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:08:07 -0700 Subject: driver-core: Introduce DEVICE_ATTR_ADMIN_{RO,RW} A common pattern for using plain DEVICE_ATTR() instead of DEVICE_ATTR_RO() and DEVICE_ATTR_RW() is for attributes that want to limit read to only root. I.e. many users of DEVICE_ATTR() are specifying 0400 or 0600 for permissions. Given the expectation that CAP_SYS_ADMIN is needed to access these sensitive attributes add an explicit helper with the _ADMIN_ identifier for DEVICE_ATTR_ADMIN_{RO,RW}. Cc: "Rafael J. 
Wysocki" Reviewed-by: Greg Kroah-Hartman Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- include/linux/device.h | 4 ++++ include/linux/sysfs.h | 7 +++++++ 2 files changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024..d7c2570368fa 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -128,8 +128,12 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, __ATTR_PREALLOC(_name, _mode, _show, _store) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) +#define DEVICE_ATTR_ADMIN_RW(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600) #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RO(_name) +#define DEVICE_ATTR_ADMIN_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400) #define DEVICE_ATTR_WO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_WO(_name) #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 86067dbe7745..34e84122f635 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -123,6 +123,13 @@ struct attribute_group { .show = _name##_show, \ } +#define __ATTR_RW_MODE(_name, _mode) { \ + .attr = { .name = __stringify(_name), \ + .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ + .show = _name##_show, \ + .store = _name##_store, \ +} + #define __ATTR_WO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0200 }, \ .store = _name##_store, \ -- cgit v1.2.3 From 8b7beaf9f185249f29912b5e2d7bc4147c5c2a6a Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 27 Jul 2020 13:43:39 -0600 Subject: PCI: Add Intel QuickAssist device IDs Add device IDs for the following Intel QuickAssist devices: DH895XCC, C3XXX and C62X. The defines in this patch are going to be referenced in two independent drivers, qat and vfio-pci. 
Signed-off-by: Giovanni Cabiddu Acked-by: Bjorn Helgaas Reviewed-by: Fiona Trahe Reviewed-by: Andy Shevchenko Signed-off-by: Alex Williamson --- include/linux/pci_ids.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0ad57693f392..f3166b1425ca 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2659,6 +2659,8 @@ #define PCI_DEVICE_ID_INTEL_80332_1 0x0332 #define PCI_DEVICE_ID_INTEL_80333_0 0x0370 #define PCI_DEVICE_ID_INTEL_80333_1 0x0372 +#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435 +#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 @@ -2708,6 +2710,8 @@ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 +#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2 +#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 @@ -2924,6 +2928,8 @@ #define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 +#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8 +#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 -- cgit v1.2.3 From 2f3ee5e481ce850b5b51a306b01a5e9187b206ae Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 24 Jul 2020 13:11:42 -0500 Subject: remoteproc: kill IPA notify code The IPA code now uses the generic remoteproc SSR notification mechanism. This makes the original IPA notification code unused and unnecessary, so get rid of it. This is effectively a revert of commit d7f5f3c89c1a ("remoteproc: add IPA notification to q6v5 driver"). Reviewed-by: Bjorn Andersson Signed-off-by: Alex Elder Link: https://lore.kernel.org/r/20200724181142.13581-3-elder@linaro.org Signed-off-by: Bjorn Andersson --- drivers/remoteproc/Kconfig | 4 -- drivers/remoteproc/Makefile | 1 - drivers/remoteproc/qcom_q6v5_ipa_notify.c | 85 ------------------------- drivers/remoteproc/qcom_q6v5_mss.c | 38 ----------- include/linux/remoteproc/qcom_q6v5_ipa_notify.h | 82 ------------------------ 5 files changed, 210 deletions(-) delete mode 100644 drivers/remoteproc/qcom_q6v5_ipa_notify.c delete mode 100644 include/linux/remoteproc/qcom_q6v5_ipa_notify.h (limited to 'include/linux') diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 3e70d972a186..48315dc4a30c 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -154,7 +154,6 @@ config QCOM_Q6V5_MSS select QCOM_MDT_LOADER select QCOM_PIL_INFO select QCOM_Q6V5_COMMON - select QCOM_Q6V5_IPA_NOTIFY select QCOM_RPROC_COMMON select QCOM_SCM help @@ -196,9 +195,6 @@ config QCOM_Q6V5_WCSS Say y here to support the Qualcomm Peripheral Image Loader for the Hexagon V5 based WCSS remote processors. 
-config QCOM_Q6V5_IPA_NOTIFY - tristate - config QCOM_SYSMON tristate "Qualcomm sysmon driver" depends on RPMSG diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 7ce1b8ef7677..4d4307dc8fa9 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -24,7 +24,6 @@ obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o -obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY) += qcom_q6v5_ipa_notify.o obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o qcom_wcnss_pil-y += qcom_wcnss.o diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c deleted file mode 100644 index e1c10a128bfd..000000000000 --- a/drivers/remoteproc/qcom_q6v5_ipa_notify.c +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * Qualcomm IPA notification subdev support - * - * Copyright (C) 2019 Linaro Ltd. - */ - -#include -#include -#include -#include - -static void -ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event) -{ - struct qcom_rproc_ipa_notify *ipa_notify; - qcom_ipa_notify_t notify; - - ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev); - notify = ipa_notify->notify; - if (notify) - notify(ipa_notify->data, event); -} - -static int ipa_notify_prepare(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_STARTING); - - return 0; -} - -static int ipa_notify_start(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_RUNNING); - - return 0; -} - -static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed) - -{ - ipa_notify_common(subdev, crashed ? 
MODEM_CRASHED : MODEM_STOPPING); -} - -static void ipa_notify_unprepare(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_OFFLINE); -} - -static void ipa_notify_removing(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_REMOVING); -} - -/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */ -void qcom_add_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify) -{ - ipa_notify->notify = NULL; - ipa_notify->data = NULL; - ipa_notify->subdev.prepare = ipa_notify_prepare; - ipa_notify->subdev.start = ipa_notify_start; - ipa_notify->subdev.stop = ipa_notify_stop; - ipa_notify->subdev.unprepare = ipa_notify_unprepare; - - rproc_add_subdev(rproc, &ipa_notify->subdev); -} -EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev); - -/* Remove the IPA notification subdevice */ -void qcom_remove_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify) -{ - struct rproc_subdev *subdev = &ipa_notify->subdev; - - ipa_notify_removing(subdev); - - rproc_remove_subdev(rproc, subdev); - ipa_notify->notify = NULL; /* Make it obvious */ -} -EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev"); diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index ae3739082fdd..68f158931010 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -23,7 +23,6 @@ #include #include #include -#include "linux/remoteproc/qcom_q6v5_ipa_notify.h" #include #include #include @@ -197,7 +196,6 @@ struct q6v5 { struct qcom_rproc_glink glink_subdev; struct qcom_rproc_subdev smd_subdev; struct qcom_rproc_ssr ssr_subdev; - struct qcom_rproc_ipa_notify ipa_notify_subdev; struct qcom_sysmon *sysmon; bool need_mem_protection; bool has_alt_reset; @@ -1607,39 +1605,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) return 0; } -#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) - -/* Register IPA notification function */ -int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, - void *data) -{ - struct qcom_rproc_ipa_notify *ipa_notify; - struct q6v5 *qproc = rproc->priv; - - if (!notify) - return -EINVAL; - - ipa_notify = &qproc->ipa_notify_subdev; - if (ipa_notify->notify) - return -EBUSY; - - ipa_notify->notify = notify; - ipa_notify->data = data; - - return 0; -} -EXPORT_SYMBOL_GPL(qcom_register_ipa_notify); - -/* Deregister IPA notification function */ -void qcom_deregister_ipa_notify(struct rproc *rproc) -{ - struct q6v5 *qproc = rproc->priv; - - qproc->ipa_notify_subdev.notify = NULL; -} -EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify); -#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - static int q6v5_probe(struct platform_device *pdev) { const struct rproc_hexagon_res *desc; @@ -1766,7 +1731,6 @@ static int q6v5_probe(struct platform_device *pdev) qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss"); qcom_add_smd_subdev(rproc, &qproc->smd_subdev); qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); - qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev); qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); if (IS_ERR(qproc->sysmon)) { ret = PTR_ERR(qproc->sysmon); @@ -1782,7 +1746,6 @@ static int q6v5_probe(struct platform_device *pdev) remove_sysmon_subdev: qcom_remove_sysmon_subdev(qproc->sysmon); remove_subdevs: - qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev); qcom_remove_ssr_subdev(rproc, 
&qproc->ssr_subdev); qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); @@ -1804,7 +1767,6 @@ static int q6v5_remove(struct platform_device *pdev) rproc_del(rproc); qcom_remove_sysmon_subdev(qproc->sysmon); - qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev); qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h deleted file mode 100644 index 0820edc0ab7d..000000000000 --- a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* Copyright (C) 2019 Linaro Ltd. */ - -#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__ -#define __QCOM_Q6V5_IPA_NOTIFY_H__ - -#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) - -#include - -enum qcom_rproc_event { - MODEM_STARTING = 0, /* Modem is about to be started */ - MODEM_RUNNING = 1, /* Startup complete; modem is operational */ - MODEM_STOPPING = 2, /* Modem is about to shut down */ - MODEM_CRASHED = 3, /* Modem has crashed (implies stopping) */ - MODEM_OFFLINE = 4, /* Modem is now offline */ - MODEM_REMOVING = 5, /* Modem is about to be removed */ -}; - -typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event); - -struct qcom_rproc_ipa_notify { - struct rproc_subdev subdev; - - qcom_ipa_notify_t notify; - void *data; -}; - -/** - * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice - * @rproc: rproc handle - * @ipa_notify: IPA notification subdevice handle - * - * Register the @ipa_notify subdevice with the @rproc so modem events - * can be sent to IPA when they occur. - * - * This is defined in "qcom_q6v5_ipa_notify.c". - */ -void qcom_add_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify); - -/** - * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice - * @rproc: rproc handle - * @ipa_notify: IPA notification subdevice handle - * - * This is defined in "qcom_q6v5_ipa_notify.c". - */ -void qcom_remove_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify); - -/** - * qcom_register_ipa_notify() - Register IPA notification function - * @rproc: Remote processor handle - * @notify: Non-null IPA notification callback function pointer - * @data: Data supplied to IPA notification callback function - * - * @Return: 0 if successful, or a negative error code otherwise - * - * This is defined in "qcom_q6v5_mss.c". - */ -int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, - void *data); -/** - * qcom_deregister_ipa_notify() - Deregister IPA notification function - * @rproc: Remote processor handle - * - * This is defined in "qcom_q6v5_mss.c". 
- */ -void qcom_deregister_ipa_notify(struct rproc *rproc); - -#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - -struct qcom_rproc_ipa_notify { /* empty */ }; - -#define qcom_add_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ -#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ - -#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - -#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */ -- cgit v1.2.3 From 48001ea50d17f3eb06a552e9ecf21f7fc01b25da Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:08:18 -0700 Subject: PM, libnvdimm: Add runtime firmware activation support Abstract platform specific mechanics for nvdimm firmware activation behind a handful of generic ops. At the bus level ->activate_state() indicates the unified state (idle, busy, armed) of all DIMMs on the bus, and ->capability() indicates the system state expectations for activate. At the DIMM level ->activate_state() indicates the per-DIMM state, ->activate_result() indicates the outcome of the last activation attempt, and ->arm() attempts to transition the DIMM from 'idle' to 'armed'. A new hibernate_quiet_exec() facility is added to support firmware activation in an OS defined system quiesce state. It leverages the fact that the hibernate-freeze state wants to assert that a memory hibernation snapshot can be taken. This is in contrast to a platform firmware defined quiesce state that may forcefully quiet the memory controller independent of whether an individual device-driver properly supports hibernate-freeze. The libnvdimm sysfs interface is extended to support detection of a firmware activate capability. The mechanism supports enumeration and triggering of firmware activate, optionally in the hibernate_quiet_exec() context. [rafael: hibernate_quiet_exec() proposal] [vishal: fix up sparse warning, grammar in Documentation/] Cc: Pavel Machek Cc: Ira Weiny Cc: Len Brown Cc: Jonathan Corbet Cc: Dave Jiang Cc: Vishal Verma Reported-by: kernel test robot Co-developed-by: "Rafael J. Wysocki" Signed-off-by: "Rafael J. Wysocki" Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- Documentation/ABI/testing/sysfs-bus-nvdimm | 2 + .../driver-api/nvdimm/firmware-activate.rst | 86 ++++++++++++ drivers/nvdimm/core.c | 149 +++++++++++++++++++++ drivers/nvdimm/dimm_devs.c | 115 ++++++++++++++++ drivers/nvdimm/nd-core.h | 1 + include/linux/libnvdimm.h | 44 ++++++ include/linux/suspend.h | 6 + kernel/power/hibernate.c | 97 ++++++++++++++ 8 files changed, 500 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-bus-nvdimm create mode 100644 Documentation/driver-api/nvdimm/firmware-activate.rst (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-bus-nvdimm b/Documentation/ABI/testing/sysfs-bus-nvdimm new file mode 100644 index 000000000000..d64380262be8 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-nvdimm @@ -0,0 +1,2 @@ +The libnvdimm sub-system implements a common sysfs interface for +platform nvdimm resources. See Documentation/driver-api/nvdimm/. diff --git a/Documentation/driver-api/nvdimm/firmware-activate.rst b/Documentation/driver-api/nvdimm/firmware-activate.rst new file mode 100644 index 000000000000..7ee7decbbdc3 --- /dev/null +++ b/Documentation/driver-api/nvdimm/firmware-activate.rst @@ -0,0 +1,86 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +================================== +NVDIMM Runtime Firmware Activation +================================== + +Some persistent memory devices run firmware locally on the device / +"DIMM" to perform tasks like media management, capacity provisioning, +and health monitoring. The process of updating that firmware typically +involves a reboot because it has implications for in-flight memory +transactions. However, reboots are disruptive and at least the Intel +persistent memory platform implementation, described by the Intel ACPI +DSM specification [1], has added support for activating firmware at +runtime. + +A native sysfs interface is implemented in libnvdimm to allow platforms +to advertise and control their local runtime firmware activation +capability. + +The libnvdimm bus object, ndbusX, implements an ndbusX/firmware/activate +attribute that shows the state of the firmware activation as one of 'idle', +'armed', 'overflow', and 'busy'. + +- idle: + No devices are set / armed to activate firmware + +- armed: + At least one device is armed + +- busy: + In the busy state armed devices are in the process of transitioning + back to idle and completing an activation cycle. + +- overflow: + If the platform has a concept of incremental work needed to perform + the activation it could be the case that too many DIMMs are armed for + activation. In that scenario the potential for firmware activation to + time out is indicated by the 'overflow' state. + +The 'ndbusX/firmware/activate' property can be written with a value of +either 'live' or 'quiesce'. A value of 'quiesce' triggers the kernel to +run firmware activation from within the equivalent of the hibernation +'freeze' state where drivers and applications are notified to stop their +modifications of system memory. A value of 'live' attempts +firmware activation without this hibernation cycle. The +'ndbusX/firmware/activate' property will be elided completely if no +firmware activation capability is detected. + +Another property 'ndbusX/firmware/capability' indicates a value of +'live' or 'quiesce', where 'live' indicates that the firmware +does not require or inflict any quiesce period on the system to update +firmware. A capability value of 'quiesce' indicates that firmware does +expect and injects a quiet period for the memory controller, but 'live' +may still be written to 'ndbusX/firmware/activate' as an override to +assume the risk of racing firmware update with in-flight device and +application activity. The 'ndbusX/firmware/capability' property will be +elided completely if no firmware activation capability is detected. + +The libnvdimm memory-device / DIMM object, nmemX, implements +'nmemX/firmware/activate' and 'nmemX/firmware/result' attributes to +communicate the per-device firmware activation state. Similar to the +'ndbusX/firmware/activate' attribute, the 'nmemX/firmware/activate' +attribute indicates 'idle', 'armed', or 'busy'. The state transitions +from 'armed' to 'idle' when the system is prepared to activate firmware, +i.e. the firmware image has been staged, the state set to 'armed', and +'ndbusX/firmware/activate' is triggered. After that activation event the +nmemX/firmware/result +attribute reflects the state of the last activation as one of: + +- none: + No runtime activation triggered since the last time the device was reset + +- success: + The last runtime activation completed successfully. + +- fail: + The last runtime activation failed for device-specific reasons. 
+ +- not_staged: + The last runtime activation failed due to a sequencing error of the + firmware image not being staged. + +- need_reset: + Runtime firmware activation failed, but the firmware can still be + activated via the legacy method of power-cycling the system. + +[1]: https://docs.pmem.io/persistent-memory/ diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index fe9bd6febdd2..c21ba0602029 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -4,6 +4,7 @@ */ #include #include +#include #include #include #include @@ -389,8 +390,156 @@ static const struct attribute_group nvdimm_bus_attribute_group = { .attrs = nvdimm_bus_attributes, }; +static ssize_t capability_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_capability cap; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + nvdimm_bus_unlock(dev); + + switch (cap) { + case NVDIMM_FWA_CAP_QUIESCE: + return sprintf(buf, "quiesce\n"); + case NVDIMM_FWA_CAP_LIVE: + return sprintf(buf, "live\n"); + default: + return -EOPNOTSUPP; + } +} + +static DEVICE_ATTR_RO(capability); + +static ssize_t activate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_capability cap; + enum nvdimm_fwa_state state; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + state = nd_desc->fw_ops->activate_state(nd_desc); + nvdimm_bus_unlock(dev); + + if (cap < NVDIMM_FWA_CAP_QUIESCE) + return -EOPNOTSUPP; + + switch (state) { + case NVDIMM_FWA_IDLE: + return sprintf(buf, "idle\n"); + case NVDIMM_FWA_BUSY: + return sprintf(buf, "busy\n"); + case NVDIMM_FWA_ARMED: + return sprintf(buf, "armed\n"); + case NVDIMM_FWA_ARM_OVERFLOW: + return sprintf(buf, "overflow\n"); + default: + return -ENXIO; + } +} + +static int exec_firmware_activate(void *data) +{ + struct nvdimm_bus_descriptor *nd_desc = data; + + return nd_desc->fw_ops->activate(nd_desc); +} + +static ssize_t activate_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_state state; + bool quiesce; + ssize_t rc; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + if (sysfs_streq(buf, "live")) + quiesce = false; + else if (sysfs_streq(buf, "quiesce")) + quiesce = true; + else + return -EINVAL; + + nvdimm_bus_lock(dev); + state = nd_desc->fw_ops->activate_state(nd_desc); + + switch (state) { + case NVDIMM_FWA_BUSY: + rc = -EBUSY; + break; + case NVDIMM_FWA_ARMED: + case NVDIMM_FWA_ARM_OVERFLOW: + if (quiesce) + rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc); + else + rc = nd_desc->fw_ops->activate(nd_desc); + break; + case NVDIMM_FWA_IDLE: + default: + rc = -ENXIO; + } + nvdimm_bus_unlock(dev); + + if (rc == 0) + rc = len; + return rc; +} + +static DEVICE_ATTR_ADMIN_RW(activate); + +static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, typeof(*dev), kobj); + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum 
nvdimm_fwa_capability cap;
+
+ /*
+ * Both 'activate' and 'capability' disappear when no ops are
+ * detected, or a negative capability is indicated.
+ */
+ if (!nd_desc->fw_ops)
+ return 0;
+
+ nvdimm_bus_lock(dev);
+ cap = nd_desc->fw_ops->capability(nd_desc);
+ nvdimm_bus_unlock(dev);
+
+ if (cap < NVDIMM_FWA_CAP_QUIESCE)
+ return 0;
+
+ return a->mode;
+}
+static struct attribute *nvdimm_bus_firmware_attributes[] = {
+ &dev_attr_activate.attr,
+ &dev_attr_capability.attr,
+ NULL,
+};
+
+static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
+ .name = "firmware",
+ .attrs = nvdimm_bus_firmware_attributes,
+ .is_visible = nvdimm_bus_firmware_visible,
+};
+ const struct attribute_group *nvdimm_bus_attribute_groups[] = { &nvdimm_bus_attribute_group,
+ &nvdimm_bus_firmware_attribute_group,
NULL, }; diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index b7b77e8d9027..85b53a7f44f2 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -446,9 +446,124 @@ static const struct attribute_group nvdimm_attribute_group = { .is_visible = nvdimm_visible, };
+static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_result result;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ result = nvdimm->fw_ops->activate_result(nvdimm);
+ nvdimm_bus_unlock(dev);
+
+ switch (result) {
+ case NVDIMM_FWA_RESULT_NONE:
+ return sprintf(buf, "none\n");
+ case NVDIMM_FWA_RESULT_SUCCESS:
+ return sprintf(buf, "success\n");
+ case NVDIMM_FWA_RESULT_FAIL:
+ return sprintf(buf, "fail\n");
+ case NVDIMM_FWA_RESULT_NOTSTAGED:
+ return sprintf(buf, "not_staged\n");
+ case NVDIMM_FWA_RESULT_NEEDRESET:
+ return sprintf(buf, "need_reset\n");
+ default:
+ return -ENXIO;
+ }
+}
+static DEVICE_ATTR_ADMIN_RO(result);
+
+static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_state state;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ nvdimm_bus_lock(dev);
+ state = nvdimm->fw_ops->activate_state(nvdimm);
+ nvdimm_bus_unlock(dev);
+
+ switch (state) {
+ case NVDIMM_FWA_IDLE:
+ return sprintf(buf, "idle\n");
+ case NVDIMM_FWA_BUSY:
+ return sprintf(buf, "busy\n");
+ case NVDIMM_FWA_ARMED:
+ return sprintf(buf, "armed\n");
+ default:
+ return -ENXIO;
+ }
+}
+
+static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_trigger arg;
+ int rc;
+
+ if (!nvdimm->fw_ops)
+ return -EOPNOTSUPP;
+
+ if (sysfs_streq(buf, "arm"))
+ arg = NVDIMM_FWA_ARM;
+ else if (sysfs_streq(buf, "disarm"))
+ arg = NVDIMM_FWA_DISARM;
+ else
+ return -EINVAL;
+
+ nvdimm_bus_lock(dev);
+ rc = nvdimm->fw_ops->arm(nvdimm, arg);
+ nvdimm_bus_unlock(dev);
+
+ if (rc < 0)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_ADMIN_RW(activate);
+
+static struct attribute *nvdimm_firmware_attributes[] = {
+ &dev_attr_activate.attr,
+ &dev_attr_result.attr,
+ NULL,
+};
+
+static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ enum nvdimm_fwa_capability cap;
+
+ if (!nd_desc->fw_ops)
+ return 0;
+ if (!nvdimm->fw_ops)
+ return 0;
+
+ 
nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + nvdimm_bus_unlock(dev); + + if (cap < NVDIMM_FWA_CAP_QUIESCE) + return 0; + + return a->mode; +} + +static const struct attribute_group nvdimm_firmware_attribute_group = { + .name = "firmware", + .attrs = nvdimm_firmware_attributes, + .is_visible = nvdimm_firmware_visible, +}; + static const struct attribute_group *nvdimm_attribute_groups[] = { &nd_device_attribute_group, &nvdimm_attribute_group, + &nvdimm_firmware_attribute_group, NULL, }; diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index ddb9d97d9129..564faa36a3ca 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -45,6 +45,7 @@ struct nvdimm { struct kernfs_node *overwrite_state; } sec; struct delayed_work dwork; + const struct nvdimm_fw_ops *fw_ops; }; static inline unsigned long nvdimm_security_flags( diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index ad9898ece7d3..15dbcb718316 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -86,6 +86,7 @@ struct nvdimm_bus_descriptor { int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *data); + const struct nvdimm_bus_fw_ops *fw_ops; }; struct nd_cmd_desc { @@ -200,6 +201,49 @@ struct nvdimm_security_ops { int (*query_overwrite)(struct nvdimm *nvdimm); }; +enum nvdimm_fwa_state { + NVDIMM_FWA_INVALID, + NVDIMM_FWA_IDLE, + NVDIMM_FWA_ARMED, + NVDIMM_FWA_BUSY, + NVDIMM_FWA_ARM_OVERFLOW, +}; + +enum nvdimm_fwa_trigger { + NVDIMM_FWA_ARM, + NVDIMM_FWA_DISARM, +}; + +enum nvdimm_fwa_capability { + NVDIMM_FWA_CAP_INVALID, + NVDIMM_FWA_CAP_NONE, + NVDIMM_FWA_CAP_QUIESCE, + NVDIMM_FWA_CAP_LIVE, +}; + +enum nvdimm_fwa_result { + NVDIMM_FWA_RESULT_INVALID, + NVDIMM_FWA_RESULT_NONE, + NVDIMM_FWA_RESULT_SUCCESS, + NVDIMM_FWA_RESULT_NOTSTAGED, + NVDIMM_FWA_RESULT_NEEDRESET, + NVDIMM_FWA_RESULT_FAIL, +}; + +struct nvdimm_bus_fw_ops { + enum nvdimm_fwa_state (*activate_state) + (struct nvdimm_bus_descriptor *nd_desc); + enum nvdimm_fwa_capability (*capability) + (struct nvdimm_bus_descriptor *nd_desc); + int (*activate)(struct nvdimm_bus_descriptor *nd_desc); +}; + +struct nvdimm_fw_ops { + enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm); + enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm); + int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg); +}; + void badrange_init(struct badrange *badrange); int badrange_add(struct badrange *badrange, u64 addr, u64 length); void badrange_forget(struct badrange *badrange, phys_addr_t start, diff --git a/include/linux/suspend.h b/include/linux/suspend.h index b960098acfb0..cb9afad82a90 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -453,6 +453,8 @@ extern bool hibernation_available(void); asmlinkage int swsusp_save(void); extern struct pbe *restore_pblist; int pfn_is_nosave(unsigned long pfn); + +int hibernate_quiet_exec(int (*func)(void *data), void *data); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} @@ -464,6 +466,10 @@ static inline void hibernation_set_ops(const struct platform_hibernation_ops *op static inline int hibernate(void) { return -ENOSYS; } static inline bool system_entering_hibernation(void) { return false; } static inline bool hibernation_available(void) { return false; } + +static 
inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
+ return -ENOTSUPP;
+}
#endif /* CONFIG_HIBERNATION */ #ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 02ec716a4927..e6fab3f09c98 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -795,6 +795,103 @@ int hibernate(void) return error; }
+/**
+ * hibernate_quiet_exec - Execute a function with all devices frozen.
+ * @func: Function to execute.
+ * @data: Data pointer to pass to @func.
+ *
+ * Return the @func return value or an error code if it cannot be executed.
+ */
+int hibernate_quiet_exec(int (*func)(void *data), void *data)
+{
+ int error, nr_calls = 0;
+
+ lock_system_sleep();
+
+ if (!hibernate_acquire()) {
+ error = -EBUSY;
+ goto unlock;
+ }
+
+ pm_prepare_console();
+
+ error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
+ if (error) {
+ nr_calls--;
+ goto exit;
+ }
+
+ error = freeze_processes();
+ if (error)
+ goto exit;
+
+ lock_device_hotplug();
+
+ pm_suspend_clear_flags();
+
+ error = platform_begin(true);
+ if (error)
+ goto thaw;
+
+ error = freeze_kernel_threads();
+ if (error)
+ goto thaw;
+
+ error = dpm_prepare(PMSG_FREEZE);
+ if (error)
+ goto dpm_complete;
+
+ suspend_console();
+
+ error = dpm_suspend(PMSG_FREEZE);
+ if (error)
+ goto dpm_resume;
+
+ error = dpm_suspend_end(PMSG_FREEZE);
+ if (error)
+ goto dpm_resume;
+
+ error = platform_pre_snapshot(true);
+ if (error)
+ goto skip;
+
+ error = func(data);
+
+skip:
+ platform_finish(true);
+
+ dpm_resume_start(PMSG_THAW);
+
+dpm_resume:
+ dpm_resume(PMSG_THAW);
+
+ resume_console();
+
+dpm_complete:
+ dpm_complete(PMSG_THAW);
+
+ thaw_kernel_threads();
+
+thaw:
+ platform_end(true);
+
+ unlock_device_hotplug();
+
+ thaw_processes();
+
+exit:
+ __pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
+
+ pm_restore_console();
+
+ hibernate_release();
+
+unlock:
+ unlock_system_sleep();
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(hibernate_quiet_exec); /** * software_resume - Resume from a saved hibernation image. -- cgit v1.2.3 From a1facc1fffc17a65e2c12a8de7434b9325ec0324 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 20 Jul 2020 15:08:24 -0700 Subject: ACPI: NFIT: Add runtime firmware activate support Plumb the platform specific backend for the generic libnvdimm firmware activate interface. Register dimm level operations to arm/disarm activation, and register bus level operations to report the dynamic platform-quiesce time relative to the number of dimms armed for firmware activation. A new nfit-specific bus attribute "firmware_activate_noidle" is added to allow the activation to switch between platform-enforced and OS-opportunistic device quiesce. In other words, let the hibernate cycle handle in-flight device-dma rather than the platform attempting to increase PCI-E timeouts and the like.
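As an illustrative sketch only (the ndbus0/nmem0 device names are hypothetical, and a firmware image is assumed to already be staged on the DIMM), the new attribute composes with the generic libnvdimm interface roughly like so:

    # arm the DIMM for activation of its staged firmware image
    echo arm > /sys/bus/nd/devices/nmem0/firmware/activate

    # optionally trust OS-side quiesce instead of platform force-idle
    echo Y > /sys/bus/nd/devices/ndbus0/nfit/firmware_activate_noidle

    # trigger activation from the hibernate-freeze equivalent context
    echo quiesce > /sys/bus/nd/devices/ndbus0/firmware/activate

    # read back the per-DIMM outcome: none/success/fail/not_staged/need_reset
    cat /sys/bus/nd/devices/nmem0/firmware/result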
Cc: Dave Jiang Cc: Ira Weiny Cc: Vishal Verma Signed-off-by: Dan Williams Signed-off-by: Vishal Verma --- Documentation/ABI/testing/sysfs-bus-nfit | 19 ++ drivers/acpi/nfit/core.c | 41 +++- drivers/acpi/nfit/intel.c | 386 +++++++++++++++++++++++++++++++ drivers/acpi/nfit/intel.h | 3 + drivers/acpi/nfit/nfit.h | 10 + drivers/nvdimm/dimm_devs.c | 4 +- include/linux/libnvdimm.h | 5 +- 7 files changed, 461 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-bus-nfit b/Documentation/ABI/testing/sysfs-bus-nfit index a1cb44dcb908..e4f76e7eab93 100644 --- a/Documentation/ABI/testing/sysfs-bus-nfit +++ b/Documentation/ABI/testing/sysfs-bus-nfit @@ -202,6 +202,25 @@ Description: functions. See the section named 'NVDIMM Root Device _DSMs' in the ACPI specification.
+What: /sys/bus/nd/devices/ndbusX/nfit/firmware_activate_noidle
+Date: Apr, 2020
+KernelVersion: v5.8
+Contact: linux-nvdimm@lists.01.org
+Description:
+ (RW) The Intel platform implementation of firmware activate
+ support exposes an option to let the platform force idle devices in
+ the system over the activation event, or trust that the OS will
+ do it. The safe default is to let the platform force idle
+ devices since the kernel is already in a suspend state, and on
+ the chance that a driver does not properly quiesce bus-mastering
+ after a suspend callback, the platform will handle it. However,
+ the activation might abort if, for example, platform firmware
+ determines that the activation time exceeds the max PCI-E
+ completion timeout. Since the platform does not know whether the
+ OS is running the activation from a suspend context it aborts,
+ but if the system owner trusts the driver suspend callbacks to be
+ sufficient then 'firmware_activate_noidle' can be
+ enabled to bypass the activation abort. What: /sys/bus/nd/devices/regionX/nfit/range_index Date: Jun, 2015 diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 78cc9e2d2aa3..fb775b967c52 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1392,8 +1392,12 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) struct device *dev = container_of(kobj, struct device, kobj); struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
- if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
- return 0;
+ if (a == &dev_attr_scrub.attr)
+ return ars_supported(nvdimm_bus) ? a->mode : 0;
+
+ if (a == &dev_attr_firmware_activate_noidle.attr)
+ return intel_fwa_supported(nvdimm_bus) ? 
a->mode : 0; + return a->mode; } @@ -1402,6 +1406,7 @@ static struct attribute *acpi_nfit_attributes[] = { &dev_attr_scrub.attr, &dev_attr_hw_error_scrub.attr, &dev_attr_bus_dsm_mask.attr, + &dev_attr_firmware_activate_noidle.attr, NULL, }; @@ -2019,6 +2024,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family) } } +static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops( + struct nfit_mem *nfit_mem) +{ + unsigned long mask; + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + + if (!nd_desc->fw_ops) + return NULL; + + if (nfit_mem->family != NVDIMM_FAMILY_INTEL) + return NULL; + + mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK; + if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) + return NULL; + + return intel_fw_ops; +} + static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) { struct nfit_mem *nfit_mem; @@ -2095,7 +2120,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) acpi_nfit_dimm_attribute_groups, flags, cmd_mask, flush ? flush->hint_count : 0, nfit_mem->flush_wpq, &nfit_mem->id[0], - acpi_nfit_get_security_ops(nfit_mem->family)); + acpi_nfit_get_security_ops(nfit_mem->family), + acpi_nfit_get_fw_ops(nfit_mem)); if (!nvdimm) return -ENOMEM; @@ -2170,8 +2196,10 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) if (acpi_desc->bus_cmd_force_en) { nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; mask = &nd_desc->bus_family_mask; - if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) + if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) { set_bit(NVDIMM_BUS_FAMILY_INTEL, mask); + nd_desc->fw_ops = intel_bus_fw_ops; + } } adev = to_acpi_dev(acpi_desc); @@ -2202,6 +2230,11 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, mask); + + if (*mask == dsm_mask) { + set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask); + nd_desc->fw_ops = intel_bus_fw_ops; + } } static ssize_t range_index_show(struct device *dev, diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 1113b679cd7b..8dd792a55730 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -7,6 +7,48 @@ #include "intel.h" #include "nfit.h" +static ssize_t firmware_activate_noidle_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? 
"Y" : "N"); +} + +static ssize_t firmware_activate_noidle_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + ssize_t rc; + bool val; + + rc = kstrtobool(buf, &val); + if (rc) + return rc; + if (val != acpi_desc->fwa_noidle) + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID; + acpi_desc->fwa_noidle = val; + return size; +} +DEVICE_ATTR_RW(firmware_activate_noidle); + +bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + unsigned long *mask; + + if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask)) + return false; + + mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; + return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; +} + static unsigned long intel_security_flags(struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) { @@ -389,3 +431,347 @@ static const struct nvdimm_security_ops __intel_security_ops = { }; const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops; + +static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc, + struct nd_intel_bus_fw_activate_businfo *info) +{ + struct { + struct nd_cmd_pkg pkg; + struct nd_intel_bus_fw_activate_businfo cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, + .nd_family = NVDIMM_BUS_FAMILY_INTEL, + .nd_size_out = + sizeof(struct nd_intel_bus_fw_activate_businfo), + .nd_fw_size = + sizeof(struct nd_intel_bus_fw_activate_businfo), + }, + }; + int rc; + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), + NULL); + *info = nd_cmd.cmd; + return rc; +} + +/* The fw_ops expect to be called with the nvdimm_bus_lock() held */ +static enum nvdimm_fwa_state intel_bus_fwa_state( + struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + struct nd_intel_bus_fw_activate_businfo info; + struct device *dev = acpi_desc->dev; + enum nvdimm_fwa_state state; + int rc; + + /* + * It should not be possible for platform firmware to return + * busy because activate is a synchronous operation. Treat it + * similar to invalid, i.e. always refresh / poll the status. + */ + switch (acpi_desc->fwa_state) { + case NVDIMM_FWA_INVALID: + case NVDIMM_FWA_BUSY: + break; + default: + /* check if capability needs to be refreshed */ + if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) + break; + return acpi_desc->fwa_state; + } + + /* Refresh with platform firmware */ + rc = intel_bus_fwa_businfo(nd_desc, &info); + if (rc) + return NVDIMM_FWA_INVALID; + + switch (info.state) { + case ND_INTEL_FWA_IDLE: + state = NVDIMM_FWA_IDLE; + break; + case ND_INTEL_FWA_BUSY: + state = NVDIMM_FWA_BUSY; + break; + case ND_INTEL_FWA_ARMED: + if (info.activate_tmo > info.max_quiesce_tmo) + state = NVDIMM_FWA_ARM_OVERFLOW; + else + state = NVDIMM_FWA_ARMED; + break; + default: + dev_err_once(dev, "invalid firmware activate state %d\n", + info.state); + return NVDIMM_FWA_INVALID; + } + + /* + * Capability data is available in the same payload as state. It + * is expected to be static. 
+ */
+ if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
+ if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
+ else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
+ /*
+ * Skip hibernate cycle by default if platform
+ * indicates that it does not need devices to be
+ * quiesced.
+ */
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
+ } else
+ acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
+ }
+
+ acpi_desc->fwa_state = state;
+
+ return state;
+}
+
+static enum nvdimm_fwa_capability intel_bus_fwa_capability(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
+ return acpi_desc->fwa_cap;
+
+ if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
+ return acpi_desc->fwa_cap;
+
+ return NVDIMM_FWA_CAP_INVALID;
+}
+
+static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_bus_fw_activate cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate),
+ },
+ /*
+ * Even though activate is run from a suspended context,
+ * for safety, still ask platform firmware to force
+ * quiesce devices by default. Let the
+ * 'firmware_activate_noidle' sysfs attribute override
+ * that policy.
+ */
+ .cmd = {
+ .iodev_state = acpi_desc->fwa_noidle
+ ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+ : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
+ },
+ };
+ int rc;
+
+ switch (intel_bus_fwa_state(nd_desc)) {
+ case NVDIMM_FWA_ARMED:
+ case NVDIMM_FWA_ARM_OVERFLOW:
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
+ NULL);
+
+ /*
+ * Whether the command succeeded or failed, the agent checking
+ * for the result needs to query the DIMMs individually. 
+ * Increment the activation count to invalidate all the DIMM
+ * states at once (it's otherwise not possible to take
+ * acpi_desc->init_mutex in this context)
+ */
+ acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
+ acpi_desc->fwa_count++;
+
+ dev_dbg(acpi_desc->dev, "result: %d\n", rc);
+
+ return rc;
+}
+
+static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
+ .activate_state = intel_bus_fwa_state,
+ .capability = intel_bus_fwa_capability,
+ .activate = intel_bus_fwa_activate,
+};
+
+const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
+
+static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
+ struct nd_intel_fw_activate_dimminfo *info)
+{
+ struct {
+ struct nd_cmd_pkg pkg;
+ struct nd_intel_fw_activate_dimminfo cmd;
+ } nd_cmd = {
+ .pkg = {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_out =
+ sizeof(struct nd_intel_fw_activate_dimminfo),
+ .nd_fw_size =
+ sizeof(struct nd_intel_fw_activate_dimminfo),
+ },
+ };
+ int rc;
+
+ rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+ *info = nd_cmd.cmd;
+ return rc;
+}
+
+static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct nd_intel_fw_activate_dimminfo info;
+ int rc;
+
+ /*
+ * Similar to the bus state, since activate is synchronous the
+ * busy state should resolve within the context of 'activate'.
+ */
+ switch (nfit_mem->fwa_state) {
+ case NVDIMM_FWA_INVALID:
+ case NVDIMM_FWA_BUSY:
+ break;
+ default:
+ /* If no activations occurred the old state is still valid */
+ if (nfit_mem->fwa_count == acpi_desc->fwa_count)
+ return nfit_mem->fwa_state;
+ }
+
+ rc = intel_fwa_dimminfo(nvdimm, &info);
+ if (rc)
+ return NVDIMM_FWA_INVALID;
+
+ switch (info.state) {
+ case ND_INTEL_FWA_IDLE:
+ nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
+ break;
+ case ND_INTEL_FWA_BUSY:
+ nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
+ break;
+ case ND_INTEL_FWA_ARMED:
+ nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
+ break;
+ default:
+ nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
+ break;
+ }
+
+ switch (info.result) {
+ case ND_INTEL_DIMM_FWA_NONE:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
+ break;
+ case ND_INTEL_DIMM_FWA_SUCCESS:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
+ break;
+ case ND_INTEL_DIMM_FWA_NOTSTAGED:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
+ break;
+ case ND_INTEL_DIMM_FWA_NEEDRESET:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
+ break;
+ case ND_INTEL_DIMM_FWA_MEDIAFAILED:
+ case ND_INTEL_DIMM_FWA_ABORT:
+ case ND_INTEL_DIMM_FWA_NOTSUPP:
+ case ND_INTEL_DIMM_FWA_ERROR:
+ default:
+ nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
+ break;
+ }
+
+ nfit_mem->fwa_count = acpi_desc->fwa_count;
+
+ return nfit_mem->fwa_state;
+}
+
+static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+
+ if (nfit_mem->fwa_count == acpi_desc->fwa_count
+ && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
+ return nfit_mem->fwa_result;
+
+ if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
+ return nfit_mem->fwa_result;
+
+ return NVDIMM_FWA_RESULT_INVALID;
+}
+
+static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
+ struct 
nd_cmd_pkg pkg; + struct nd_intel_fw_activate_arm cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM, + .nd_family = NVDIMM_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.activate_arm), + .nd_size_out = + sizeof(struct nd_intel_fw_activate_arm), + .nd_fw_size = + sizeof(struct nd_intel_fw_activate_arm), + }, + .cmd = { + .activate_arm = arm == NVDIMM_FWA_ARM + ? ND_INTEL_DIMM_FWA_ARM + : ND_INTEL_DIMM_FWA_DISARM, + }, + }; + int rc; + + switch (intel_fwa_state(nvdimm)) { + case NVDIMM_FWA_INVALID: + return -ENXIO; + case NVDIMM_FWA_BUSY: + return -EBUSY; + case NVDIMM_FWA_IDLE: + if (arm == NVDIMM_FWA_DISARM) + return 0; + break; + case NVDIMM_FWA_ARMED: + if (arm == NVDIMM_FWA_ARM) + return 0; + break; + default: + return -ENXIO; + } + + /* + * Invalidate the bus-level state, now that we're committed to + * changing the 'arm' state. + */ + acpi_desc->fwa_state = NVDIMM_FWA_INVALID; + nfit_mem->fwa_state = NVDIMM_FWA_INVALID; + + rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); + + dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM + ? "arm" : "disarm", rc); + return rc; +} + +static const struct nvdimm_fw_ops __intel_fw_ops = { + .activate_state = intel_fwa_state, + .activate_result = intel_fwa_result, + .arm = intel_fwa_arm, +}; + +const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops; diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h index 49a598623024..b768234ccebc 100644 --- a/drivers/acpi/nfit/intel.h +++ b/drivers/acpi/nfit/intel.h @@ -169,4 +169,7 @@ struct nd_intel_bus_fw_activate { u8 iodev_state; u32 status; } __packed; + +extern const struct nvdimm_fw_ops *intel_fw_ops; +extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops; #endif diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 97c122628975..67b7807ed200 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -220,6 +220,9 @@ struct nfit_mem { struct list_head list; struct acpi_device *adev; struct acpi_nfit_desc *acpi_desc; + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_result fwa_result; + int fwa_count; char id[NFIT_DIMM_ID_LEN+1]; struct resource *flush_wpq; unsigned long dsm_mask; @@ -265,6 +268,11 @@ struct acpi_nfit_desc { unsigned int scrub_tmo; int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw); + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_capability fwa_cap; + int fwa_count; + bool fwa_noidle; + bool fwa_nosuspend; }; enum scrub_mode { @@ -367,4 +375,6 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event); int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev); +bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus); +extern struct device_attribute dev_attr_firmware_activate_noidle; #endif /* __NFIT_H__ */ diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 85b53a7f44f2..2f0815e15986 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -582,7 +582,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq, const char *dimm_id, - const struct nvdimm_security_ops *sec_ops) + const struct nvdimm_security_ops *sec_ops, + const struct nvdimm_fw_ops *fw_ops) { struct nvdimm 
*nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL); struct device *dev; @@ -612,6 +613,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, dev->devt = MKDEV(nvdimm_major, nvdimm->id); dev->groups = groups; nvdimm->sec.ops = sec_ops;
+ nvdimm->fw_ops = fw_ops;
nvdimm->sec.overwrite_tmo = 0; INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query); /* diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 15dbcb718316..01f251b6e36c 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -269,14 +269,15 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq, const char *dimm_id,
- const struct nvdimm_security_ops *sec_ops);
+ const struct nvdimm_security_ops *sec_ops,
+ const struct nvdimm_fw_ops *fw_ops);
static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq) { return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
- cmd_mask, num_flush, flush_wpq, NULL, NULL);
+ cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
} const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); -- cgit v1.2.3 From 55f3560df975f557c48aa6afc636808f31ecb87a Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:15 +0200 Subject: seqlock: Extend seqcount API with associated locks A sequence counter write side critical section must be protected by some form of locking to serialize writers. If the serialization primitive does not implicitly disable preemption, preemption has to be explicitly disabled before entering the write side critical section. There is no built-in debugging mechanism to verify that the lock used for writer serialization is held and preemption is disabled. Some usage sites like dma-buf have explicit lockdep checks for the writer-side lock, but this covers only a small portion of the sequence counter usage in the kernel. Add new sequence counter types that allow a lock to be associated with the sequence counter at initialization time. The seqcount API functions are extended to provide appropriate lockdep assertions depending on the seqcount/lock type. For sequence counters with associated locks that do not implicitly disable preemption, preemption protection is enforced in the sequence counter write side functions. This removes the need to explicitly add preempt_disable/enable() around the write side critical sections: the write_begin/end() functions for these new sequence counter types automatically do this. Introduce the following seqcount types with associated locks: seqcount_spinlock_t seqcount_raw_spinlock_t seqcount_rwlock_t seqcount_mutex_t seqcount_ww_mutex_t Extend the seqcount read and write functions to branch out to the specific seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel API explosion for each new seqcount_LOCKTYPE_t added. Add such compile-time type detection logic into a new, internal, seqlock header. Document the proper seqcount_LOCKTYPE_t usage, and rationale, at Documentation/locking/seqlock.rst. If lockdep is disabled, this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-10-a.darwish@linutronix.de --- Documentation/locking/seqlock.rst | 52 +++++ include/linux/seqlock.h | 464 ++++++++++++++++++++++++++++++++------ 2 files changed, 447 insertions(+), 69 deletions(-) (limited to 'include/linux') diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst index 366dd368d90a..62c5ad98c11c 100644 --- a/Documentation/locking/seqlock.rst +++ b/Documentation/locking/seqlock.rst @@ -87,6 +87,58 @@ Read path:: } while (read_seqcount_retry(&foo_seqcount, seq));
+.. _seqcount_locktype_t:
+
+Sequence counters with associated locks (``seqcount_LOCKTYPE_t``)
+-----------------------------------------------------------------
+
+As discussed at :ref:`seqcount_t`, sequence count write side critical
+sections must be serialized and non-preemptible. This variant of
+sequence counters associates the lock used for writer serialization at
+initialization time, which enables lockdep to validate that the write
+side critical sections are properly serialized.
+
+This lock association is a NOOP if lockdep is disabled and has neither
+storage nor runtime overhead. If lockdep is enabled, the lock pointer is
+stored in struct seqcount and lockdep's "lock is held" assertions are
+injected at the beginning of the write side critical section to validate
+that it is properly protected.
+
+For lock types which do not implicitly disable preemption, preemption
+protection is enforced in the write side function.
+
+The following sequence counters with associated locks are defined:
+
+ - ``seqcount_spinlock_t``
+ - ``seqcount_raw_spinlock_t``
+ - ``seqcount_rwlock_t``
+ - ``seqcount_mutex_t``
+ - ``seqcount_ww_mutex_t``
+
+The plain seqcount read and write APIs branch out to the specific
+seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel
+API explosion for each new seqcount LOCKTYPE.
+
+Initialization (replace "LOCKTYPE" with one of the supported locks)::
+
+ /* dynamic */
+ seqcount_LOCKTYPE_t foo_seqcount;
+ seqcount_LOCKTYPE_init(&foo_seqcount, &lock);
+
+ /* static */
+ static seqcount_LOCKTYPE_t foo_seqcount =
+ SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock);
+
+ /* C99 struct init */
+ struct {
+ .seq = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock),
+ } foo;
+
+Write path: same as in :ref:`seqcount_t`, while running from a context
+with the associated LOCKTYPE lock acquired.
+
+Read path: same as in :ref:`seqcount_t`.
+ .. _seqlock_t: Sequential locks (``seqlock_t``) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 54bc20496392..8c16a494c968 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -10,13 +10,17 @@ * * Copyrights: * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
+ * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
*/ -#include -#include -#include #include #include +#include +#include +#include +#include +#include + #include /* @@ -48,6 +52,10 @@ * This mechanism can't be used if the protected data contains pointers, * as the writer can invalidate a pointer that a reader is following. *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKTYPE_t) instead.
+ *
* If it's desired to automatically handle the sequence counter writer * serialization and non-preemptibility requirements, use a sequential * lock (seqlock_t) instead. 
@@ -108,9 +116,267 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) */ #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
+/*
+ * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * For associated locks which do not implicitly disable preemption,
+ * preemption protection is enforced in the write side function.
+ *
+ * Lockdep is never used in any of the raw write variants.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+
+#ifdef CONFIG_LOCKDEP
+#define __SEQ_LOCKDEP(expr) expr
+#else
+#define __SEQ_LOCKDEP(expr)
+#endif
+
+#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
+ __SEQ_LOCKDEP(.lock = (assoc_lock)) \
+}
+
+#define seqcount_locktype_init(s, assoc_lock) \
+do { \
+ seqcount_init(&(s)->seqcount); \
+ __SEQ_LOCKDEP((s)->lock = (assoc_lock)); \
+} while (0)
+
+/**
+ * typedef seqcount_spinlock_t - sequence counter with spinlock associated
+ * @seqcount: The real sequence counter
+ * @lock: Pointer to the associated spinlock
+ *
+ * A plain sequence counter with external writer synchronization by a
+ * spinlock. The spinlock is associated to the sequence count in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ */
+typedef struct seqcount_spinlock {
+ seqcount_t seqcount;
+ __SEQ_LOCKDEP(spinlock_t *lock);
+} seqcount_spinlock_t;
+
+/**
+ * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t
+ * @name: Name of the seqcount_spinlock_t instance
+ * @lock: Pointer to the associated spinlock
+ */
+#define SEQCNT_SPINLOCK_ZERO(name, lock) \
+ SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+
+/**
+ * seqcount_spinlock_init - runtime initializer for seqcount_spinlock_t
+ * @s: Pointer to the seqcount_spinlock_t instance
+ * @lock: Pointer to the associated spinlock
+ */
+#define seqcount_spinlock_init(s, lock) \
+ seqcount_locktype_init(s, lock)
+
+/**
+ * typedef seqcount_raw_spinlock_t - sequence count with raw spinlock associated
+ * @seqcount: The real sequence counter
+ * @lock: Pointer to the associated raw spinlock
+ *
+ * A plain sequence counter with external writer synchronization by a
+ * raw spinlock. The raw spinlock is associated to the sequence count in
+ * the static initializer or init function. This enables lockdep to
+ * validate that the write side critical section is properly serialized. 
+ */ +typedef struct seqcount_raw_spinlock { + seqcount_t seqcount; + __SEQ_LOCKDEP(raw_spinlock_t *lock); +} seqcount_raw_spinlock_t; + +/** + * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t + * @name: Name of the seqcount_raw_spinlock_t instance + * @lock: Pointer to the associated raw_spinlock + */ +#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) \ + SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +/** + * seqcount_raw_spinlock_init - runtime initializer for seqcount_raw_spinlock_t + * @s: Pointer to the seqcount_raw_spinlock_t instance + * @lock: Pointer to the associated raw_spinlock + */ +#define seqcount_raw_spinlock_init(s, lock) \ + seqcount_locktype_init(s, lock) + +/** + * typedef seqcount_rwlock_t - sequence count with rwlock associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated rwlock + * + * A plain sequence counter with external writer synchronization by a + * rwlock. The rwlock is associated to the sequence count in the static + * initializer or init function. This enables lockdep to validate that + * the write side critical section is properly serialized. + */ +typedef struct seqcount_rwlock { + seqcount_t seqcount; + __SEQ_LOCKDEP(rwlock_t *lock); +} seqcount_rwlock_t; + +/** + * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t + * @name: Name of the seqcount_rwlock_t instance + * @lock: Pointer to the associated rwlock + */ +#define SEQCNT_RWLOCK_ZERO(name, lock) \ + SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +/** + * seqcount_rwlock_init - runtime initializer for seqcount_rwlock_t + * @s: Pointer to the seqcount_rwlock_t instance + * @lock: Pointer to the associated rwlock + */ +#define seqcount_rwlock_init(s, lock) \ + seqcount_locktype_init(s, lock) + +/** + * typedef seqcount_mutex_t - sequence count with mutex associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated mutex + * + * A plain sequence counter with external writer synchronization by a + * mutex. The mutex is associated to the sequence counter in the static + * initializer or init function. This enables lockdep to validate that + * the write side critical section is properly serialized. + * + * The write side API functions write_seqcount_begin()/end() automatically + * disable and enable preemption when used with seqcount_mutex_t. + */ +typedef struct seqcount_mutex { + seqcount_t seqcount; + __SEQ_LOCKDEP(struct mutex *lock); +} seqcount_mutex_t; + +/** + * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t + * @name: Name of the seqcount_mutex_t instance + * @lock: Pointer to the associated mutex + */ +#define SEQCNT_MUTEX_ZERO(name, lock) \ + SEQCOUNT_LOCKTYPE_ZERO(name, lock) + +/** + * seqcount_mutex_init - runtime initializer for seqcount_mutex_t + * @s: Pointer to the seqcount_mutex_t instance + * @lock: Pointer to the associated mutex + */ +#define seqcount_mutex_init(s, lock) \ + seqcount_locktype_init(s, lock) + +/** + * typedef seqcount_ww_mutex_t - sequence count with ww_mutex associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated ww_mutex + * + * A plain sequence counter with external writer synchronization by a + * ww_mutex. The ww_mutex is associated to the sequence counter in the static + * initializer or init function. This enables lockdep to validate that + * the write side critical section is properly serialized. 
+ *
+ * The write side API functions write_seqcount_begin()/end() automatically
+ * disable and enable preemption when used with seqcount_ww_mutex_t.
+ */
+typedef struct seqcount_ww_mutex {
+ seqcount_t seqcount;
+ __SEQ_LOCKDEP(struct ww_mutex *lock);
+} seqcount_ww_mutex_t;
+
+/**
+ * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t
+ * @name: Name of the seqcount_ww_mutex_t instance
+ * @lock: Pointer to the associated ww_mutex
+ */
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) \
+ SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+
+/**
+ * seqcount_ww_mutex_init - runtime initializer for seqcount_ww_mutex_t
+ * @s: Pointer to the seqcount_ww_mutex_t instance
+ * @lock: Pointer to the associated ww_mutex
+ */
+#define seqcount_ww_mutex_init(s, lock) \
+ seqcount_locktype_init(s, lock)
+
+/*
+ * @preempt: Is the associated write serialization lock preemptible?
+ */
+#define SEQCOUNT_LOCKTYPE(locktype, preempt, lockmember) \
+static inline seqcount_t * \
+__seqcount_##locktype##_ptr(seqcount_##locktype##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
+static inline bool \
+__seqcount_##locktype##_preemptible(seqcount_##locktype##_t *s) \
+{ \
+ return preempt; \
+} \
+ \
+static inline void \
+__seqcount_##locktype##_assert(seqcount_##locktype##_t *s) \
+{ \
+ __SEQ_LOCKDEP(lockdep_assert_held(lockmember)); \
+}
+
+/*
+ * Similar hooks, but for plain seqcount_t
+ */
+
+static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+{
+ return s;
+}
+
+static inline bool __seqcount_preemptible(seqcount_t *s)
+{
+ return false;
+}
+
+static inline void __seqcount_assert(seqcount_t *s)
+{
+ lockdep_assert_preemption_disabled();
+}
+
+/*
+ * @s: Pointer to seqcount_locktype_t, generated hooks first parameter.
+ */
+SEQCOUNT_LOCKTYPE(raw_spinlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(spinlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(rwlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(mutex, true, s->lock)
+SEQCOUNT_LOCKTYPE(ww_mutex, true, &s->lock->base)
+
+#define __seqprop_case(s, locktype, prop) \
+ seqcount_##locktype##_t: __seqcount_##locktype##_##prop((void *)(s))
+
+#define __seqprop(s, prop) _Generic(*(s), \
+ seqcount_t: __seqcount_##prop((void *)(s)), \
+ __seqprop_case((s), raw_spinlock, prop), \
+ __seqprop_case((s), spinlock, prop), \
+ __seqprop_case((s), rwlock, prop), \
+ __seqprop_case((s), mutex, prop), \
+ __seqprop_case((s), ww_mutex, prop))
+
+#define __to_seqcount_t(s) __seqprop(s, ptr)
+#define __associated_lock_exists_and_is_preemptible(s) __seqprop(s, preemptible)
+#define __assert_write_section_is_protected(s) __seqprop(s, assert)
+ /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() * barrier. 
Callers should ensure that smp_rmb() or equivalent ordering is @@ -122,7 +388,10 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned __read_seqcount_begin(const seqcount_t *s) +#define __read_seqcount_begin(s) \ + __read_seqcount_t_begin(__to_seqcount_t(s)) + +static inline unsigned __read_seqcount_t_begin(const seqcount_t *s) { unsigned ret; @@ -138,32 +407,38 @@ repeat: /** * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) +#define raw_read_seqcount_begin(s) \ + raw_read_seqcount_t_begin(__to_seqcount_t(s)) + +static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s) { - unsigned ret = __read_seqcount_begin(s); + unsigned ret = __read_seqcount_t_begin(s); smp_rmb(); return ret; } /** * read_seqcount_begin() - begin a seqcount_t read critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned read_seqcount_begin(const seqcount_t *s) +#define read_seqcount_begin(s) \ + read_seqcount_t_begin(__to_seqcount_t(s)) + +static inline unsigned read_seqcount_t_begin(const seqcount_t *s) { seqcount_lockdep_reader_access(s); - return raw_read_seqcount_begin(s); + return raw_read_seqcount_t_begin(s); } /** * raw_read_seqcount() - read the raw seqcount_t counter value - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * raw_read_seqcount opens a read critical section of the given * seqcount_t, without any lockdep checking, and without checking or @@ -172,7 +447,10 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_read_seqcount(const seqcount_t *s) +#define raw_read_seqcount(s) \ + raw_read_seqcount_t(__to_seqcount_t(s)) + +static inline unsigned raw_read_seqcount_t(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); @@ -183,7 +461,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) /** * raw_seqcount_begin() - begin a seqcount_t read critical section w/o * lockdep and w/o counter stabilization - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * raw_seqcount_begin opens a read critical section of the given * seqcount_t. Unlike read_seqcount_begin(), this function will not wait @@ -197,18 +475,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_seqcount_begin(const seqcount_t *s) +#define raw_seqcount_begin(s) \ + raw_seqcount_t_begin(__to_seqcount_t(s)) + +static inline unsigned raw_seqcount_t_begin(const seqcount_t *s) { /* * If the counter is odd, let read_seqcount_retry() fail * by decrementing the counter. 
*/ - return raw_read_seqcount(s) & ~1; + return raw_read_seqcount_t(s) & ~1; } /** * __read_seqcount_retry() - end a seqcount_t read section w/o barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @start: count, from read_seqcount_begin() * * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() @@ -221,7 +502,10 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) * * Return: true if a read section retry is required, else false */ -static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) +#define __read_seqcount_retry(s, start) \ + __read_seqcount_t_retry(__to_seqcount_t(s), start) + +static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) { kcsan_atomic_next(0); return unlikely(READ_ONCE(s->sequence) != start); @@ -229,7 +513,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) /** * read_seqcount_retry() - end a seqcount_t read critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @start: count, from read_seqcount_begin() * * read_seqcount_retry closes the read critical section of given @@ -238,17 +522,28 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) * * Return: true if a read section retry is required, else false */ -static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) +#define read_seqcount_retry(s, start) \ + read_seqcount_t_retry(__to_seqcount_t(s), start) + +static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) { smp_rmb(); - return __read_seqcount_retry(s, start); + return __read_seqcount_t_retry(s, start); } /** * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants */ -static inline void raw_write_seqcount_begin(seqcount_t *s) +#define raw_write_seqcount_begin(s) \ +do { \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_disable(); \ + \ + raw_write_seqcount_t_begin(__to_seqcount_t(s)); \ +} while (0) + +static inline void raw_write_seqcount_t_begin(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -257,49 +552,50 @@ static inline void raw_write_seqcount_begin(seqcount_t *s) /** * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants */ -static inline void raw_write_seqcount_end(seqcount_t *s) +#define raw_write_seqcount_end(s) \ +do { \ + raw_write_seqcount_t_end(__to_seqcount_t(s)); \ + \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_enable(); \ +} while (0) + +static inline void raw_write_seqcount_t_end(seqcount_t *s) { smp_wmb(); s->sequence++; kcsan_nestable_atomic_end(); } -static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass) -{ - raw_write_seqcount_begin(s); - seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); -} - /** * write_seqcount_begin_nested() - start a seqcount_t write section with * custom lockdep nesting level - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @subclass: lockdep nesting level * * See Documentation/locking/lockdep-design.rst */ -static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) -{ - lockdep_assert_preemption_disabled(); - 
__write_seqcount_begin_nested(s, subclass); -} - -/* - * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks. - * - * Use for internal seqlock.h code where it's known that preemption is - * already disabled. For example, seqlock_t write side functions. - */ -static inline void __write_seqcount_begin(seqcount_t *s) +#define write_seqcount_begin_nested(s, subclass) \ +do { \ + __assert_write_section_is_protected(s); \ + \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_disable(); \ + \ + write_seqcount_t_begin_nested(__to_seqcount_t(s), subclass); \ +} while (0) + +static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) { - __write_seqcount_begin_nested(s, 0); + raw_write_seqcount_t_begin(s); + seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); } /** * write_seqcount_begin() - start a seqcount_t write side critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * write_seqcount_begin opens a write side critical section of the given * seqcount_t. @@ -308,26 +604,44 @@ static inline void __write_seqcount_begin(seqcount_t *s) * non-preemptible. If readers can be invoked from hardirq or softirq * context, interrupts or bottom halves must be respectively disabled. */ -static inline void write_seqcount_begin(seqcount_t *s) +#define write_seqcount_begin(s) \ +do { \ + __assert_write_section_is_protected(s); \ + \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_disable(); \ + \ + write_seqcount_t_begin(__to_seqcount_t(s)); \ +} while (0) + +static inline void write_seqcount_t_begin(seqcount_t *s) { - write_seqcount_begin_nested(s, 0); + write_seqcount_t_begin_nested(s, 0); } /** * write_seqcount_end() - end a seqcount_t write side critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * The write section must've been opened with write_seqcount_begin(). */ -static inline void write_seqcount_end(seqcount_t *s) +#define write_seqcount_end(s) \ +do { \ + write_seqcount_t_end(__to_seqcount_t(s)); \ + \ + if (__associated_lock_exists_and_is_preemptible(s)) \ + preempt_enable(); \ +} while (0) + +static inline void write_seqcount_t_end(seqcount_t *s) { seqcount_release(&s->dep_map, _RET_IP_); - raw_write_seqcount_end(s); + raw_write_seqcount_t_end(s); } /** * raw_write_seqcount_barrier() - do a seqcount_t write barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * This can be used to provide an ordering guarantee instead of the usual * consistency guarantee. It is one wmb cheaper, because it can collapse @@ -366,7 +680,10 @@ static inline void write_seqcount_end(seqcount_t *s) * WRITE_ONCE(X, false); * } */ -static inline void raw_write_seqcount_barrier(seqcount_t *s) +#define raw_write_seqcount_barrier(s) \ + raw_write_seqcount_t_barrier(__to_seqcount_t(s)) + +static inline void raw_write_seqcount_t_barrier(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -378,12 +695,15 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) /** * write_seqcount_invalidate() - invalidate in-progress seqcount_t read * side operations - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * After write_seqcount_invalidate, no seqcount_t read side operations * will complete successfully and see data older than this. 
*/ -static inline void write_seqcount_invalidate(seqcount_t *s) +#define write_seqcount_invalidate(s) \ + write_seqcount_t_invalidate(__to_seqcount_t(s)) + +static inline void write_seqcount_t_invalidate(seqcount_t *s) { smp_wmb(); kcsan_nestable_atomic_begin(); @@ -393,7 +713,7 @@ static inline void write_seqcount_invalidate(seqcount_t *s) /** * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Use seqcount_t latching to switch between two storage places protected * by a sequence counter. Doing so allows having interruptible, preemptible, @@ -406,7 +726,10 @@ static inline void write_seqcount_invalidate(seqcount_t *s) * picking which data copy to read. The full counter value must then be * checked with read_seqcount_retry(). */ -static inline int raw_read_seqcount_latch(seqcount_t *s) +#define raw_read_seqcount_latch(s) \ + raw_read_seqcount_t_latch(__to_seqcount_t(s)) + +static inline int raw_read_seqcount_t_latch(seqcount_t *s) { /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ int seq = READ_ONCE(s->sequence); /* ^^^ */ @@ -415,7 +738,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) /** * raw_write_seqcount_latch() - redirect readers to even/odd copy - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -494,7 +817,10 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) * When data is a dynamic data structure; one should use regular RCU * patterns to manage the lifetimes of the objects within. 
*/ -static inline void raw_write_seqcount_latch(seqcount_t *s) +#define raw_write_seqcount_latch(s) \ + raw_write_seqcount_t_latch(__to_seqcount_t(s)) + +static inline void raw_write_seqcount_t_latch(seqcount_t *s) { smp_wmb(); /* prior stores before incrementing "sequence" */ s->sequence++; @@ -592,7 +918,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -604,7 +930,7 @@ static inline void write_seqlock(seqlock_t *sl) */ static inline void write_sequnlock(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock(&sl->lock); } @@ -618,7 +944,7 @@ static inline void write_sequnlock(seqlock_t *sl) static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -631,7 +957,7 @@ static inline void write_seqlock_bh(seqlock_t *sl) */ static inline void write_sequnlock_bh(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } @@ -645,7 +971,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl) static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -657,7 +983,7 @@ static inline void write_seqlock_irq(seqlock_t *sl) */ static inline void write_sequnlock_irq(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_irq(&sl->lock); } @@ -666,7 +992,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); return flags; } @@ -695,13 +1021,13 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_irqrestore(&sl->lock, flags); } /** * read_seqlock_excl() - begin a seqlock_t locking reader section - * @sl: Pointer to seqlock_t + * @sl: Pointer to seqlock_t * * read_seqlock_excl opens a seqlock_t locking reader critical section. A * locking reader exclusively locks out *both* other writers *and* other -- cgit v1.2.3 From ec8702da570ebb59f38471007bf71359c51b027b Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:16 +0200 Subject: seqlock: Align multi-line macros newline escapes at 72 columns Parent commit, "seqlock: Extend seqcount API with associated locks", introduced a big number of multi-line macros that are newline-escaped at 72 columns. For overall cohesion, align the earlier-existing macros similarly. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-11-a.darwish@linutronix.de --- include/linux/seqlock.h | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 8c16a494c968..b48729988325 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -80,17 +80,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name, } #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SEQCOUNT_DEP_MAP_INIT(lockname) \ - .dep_map = { .name = #lockname } \ + +# define SEQCOUNT_DEP_MAP_INIT(lockname) \ + .dep_map = { .name = #lockname } /** * seqcount_init() - runtime initializer for seqcount_t * @s: Pointer to the seqcount_t instance */ -# define seqcount_init(s) \ - do { \ - static struct lock_class_key __key; \ - __seqcount_init((s), #s, &__key); \ +# define seqcount_init(s) \ + do { \ + static struct lock_class_key __key; \ + __seqcount_init((s), #s, &__key); \ } while (0) static inline void seqcount_lockdep_reader_access(const seqcount_t *s) @@ -842,20 +843,20 @@ typedef struct { spinlock_t lock; } seqlock_t; -#define __SEQLOCK_UNLOCKED(lockname) \ - { \ - .seqcount = SEQCNT_ZERO(lockname), \ - .lock = __SPIN_LOCK_UNLOCKED(lockname) \ +#define __SEQLOCK_UNLOCKED(lockname) \ + { \ + .seqcount = SEQCNT_ZERO(lockname), \ + .lock = __SPIN_LOCK_UNLOCKED(lockname) \ } /** * seqlock_init() - dynamic initializer for seqlock_t * @sl: Pointer to the seqlock_t instance */ -#define seqlock_init(sl) \ - do { \ - seqcount_init(&(sl)->seqcount); \ - spin_lock_init(&(sl)->lock); \ +#define seqlock_init(sl) \ + do { \ + seqcount_init(&(sl)->seqcount); \ + spin_lock_init(&(sl)->lock); \ } while (0) /** -- cgit v1.2.3 From 318ce71f3e3ae4108c1665f3860afa8a2a4c9f02 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:17 +0200 Subject: dma-buf: Remove custom seqcount lockdep class key Commit 3c3b177a9369 ("reservation: add support for read-only access using rcu") introduced a sequence counter to manage updates to reservations. Back then, the reservation object initializer reservation_object_init() was always inlined. Having the sequence counter initialization inlined meant that each of the call sites would have a different lockdep class key, which would've broken lockdep's deadlock detection. The aforementioned commit thus introduced, and exported, a custom seqcount lockdep class key and name. The commit 8735f16803f00 ("dma-buf: cleanup reservation_object_init...") transformed the reservation object initializer to a normal non-inlined C function. seqcount_init(), which automatically defines the seqcount lockdep class key and must be called non-inlined, can now be safely used. Remove the seqcount custom lockdep class key, name, and export. Use seqcount_init() inside the dma reservation object initializer. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Sebastian Andrzej Siewior Acked-by: Daniel Vetter Link: https://lkml.kernel.org/r/20200720155530.1173732-12-a.darwish@linutronix.de --- drivers/dma-buf/dma-resv.c | 9 +-------- include/linux/dma-resv.h | 2 -- 2 files changed, 1 insertion(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index b45f8514dc82..15efa0c2dacb 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -51,12 +51,6 @@ DEFINE_WD_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); -struct lock_class_key reservation_seqcount_class; -EXPORT_SYMBOL(reservation_seqcount_class); - -const char reservation_seqcount_string[] = "reservation_seqcount"; -EXPORT_SYMBOL(reservation_seqcount_string); - /** * dma_resv_list_alloc - allocate fence list * @shared_max: number of fences we need space for @@ -135,9 +129,8 @@ subsys_initcall(dma_resv_lockdep); void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); + seqcount_init(&obj->seq); - __seqcount_init(&obj->seq, reservation_seqcount_string, - &reservation_seqcount_class); RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); } diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index ee50d10f052b..a6538ae7d93f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -46,8 +46,6 @@ #include extern struct ww_class reservation_ww_class; -extern struct lock_class_key reservation_seqcount_class; -extern const char reservation_seqcount_string[]; /** * struct dma_resv_list - a list of shared fences -- cgit v1.2.3 From cd29f22019ec4ab998d2e1e8c831c7c42db4aa7d Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:18 +0200 Subject: dma-buf: Use sequence counter with associated wound/wait mutex A sequence counter write side critical section must be protected by some form of locking to serialize writers. If the serialization primitive is not disabling preemption implicitly, preemption has to be explicitly disabled before entering the sequence counter write side critical section. The dma-buf reservation subsystem uses plain sequence counters to manage updates to reservations. Writer serialization is accomplished through a wound/wait mutex. Acquiring a wound/wait mutex does not disable preemption, so this needs to be done manually before and after the write side critical section. Use the newly-added seqcount_ww_mutex_t instead: - It associates the ww_mutex with the sequence count, which enables lockdep to validate that the write side critical section is properly serialized. - It removes the need to explicitly add preempt_disable/enable() around the write side critical section because the write_begin/end() functions for this new data type automatically do this. If lockdep is disabled this ww_mutex lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Acked-by: Daniel Vetter Link: https://lkml.kernel.org/r/20200720155530.1173732-13-a.darwish@linutronix.de --- drivers/dma-buf/dma-resv.c | 8 +------- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 -- include/linux/dma-resv.h | 2 +- 3 files changed, 2 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 15efa0c2dacb..a7631352a486 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -129,7 +129,7 @@ subsys_initcall(dma_resv_lockdep); void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); - seqcount_init(&obj->seq); + seqcount_ww_mutex_init(&obj->seq, &obj->lock); RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); @@ -260,7 +260,6 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) fobj = dma_resv_get_list(obj); count = fobj->shared_count; - preempt_disable(); write_seqcount_begin(&obj->seq); for (i = 0; i < count; ++i) { @@ -282,7 +281,6 @@ replace: smp_store_mb(fobj->shared_count, count); write_seqcount_end(&obj->seq); - preempt_enable(); dma_fence_put(old); } EXPORT_SYMBOL(dma_resv_add_shared_fence); @@ -309,14 +307,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) if (fence) dma_fence_get(fence); - preempt_disable(); write_seqcount_begin(&obj->seq); /* write_seqcount_begin provides the necessary memory barrier */ RCU_INIT_POINTER(obj->fence_excl, fence); if (old) old->shared_count = 0; write_seqcount_end(&obj->seq); - preempt_enable(); /* inplace update, no shared fences */ while (i--) @@ -394,13 +390,11 @@ retry: src_list = dma_resv_get_list(dst); old = dma_resv_get_excl(dst); - preempt_disable(); write_seqcount_begin(&dst->seq); /* write_seqcount_begin provides the necessary memory barrier */ RCU_INIT_POINTER(dst->fence_excl, new); RCU_INIT_POINTER(dst->fence, dst_list); write_seqcount_end(&dst->seq); - preempt_enable(); dma_resv_list_free(src_list); dma_fence_put(old); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index b91b5171270f..ff4b583cb96a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -258,11 +258,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, new->shared_count = k; /* Install the new fence list, seqcount provides the barriers */ - preempt_disable(); write_seqcount_begin(&resv->seq); RCU_INIT_POINTER(resv->fence, new); write_seqcount_end(&resv->seq); - preempt_enable(); /* Drop the references to the removed fences or move them to ef_list */ for (i = j, k = 0; i < old->shared_count; ++i) { diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index a6538ae7d93f..d44a77e8a7e3 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -69,7 +69,7 @@ struct dma_resv_list { */ struct dma_resv { struct ww_mutex lock; - seqcount_t seq; + seqcount_ww_mutex_t seq; struct dma_fence __rcu *fence_excl; struct dma_resv_list __rcu *fence; -- cgit v1.2.3 From b75058614fdd3140074a640b514f6a0b4d485a2d Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:19 +0200 Subject: sched: tasks: Use sequence counter with associated spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. 
A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_spinlock_t data type, which allows to associate a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-14-a.darwish@linutronix.de --- include/linux/sched.h | 2 +- init/init_task.c | 3 ++- kernel/fork.c | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 8d1de021b315..9a9d8263962d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1050,7 +1050,7 @@ struct task_struct { /* Protected by ->alloc_lock: */ nodemask_t mems_allowed; /* Seqence number to catch updates: */ - seqcount_t mems_allowed_seq; + seqcount_spinlock_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; #endif diff --git a/init/init_task.c b/init/init_task.c index 15089d15010a..94fe3ba1bb60 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -154,7 +154,8 @@ struct task_struct init_task .trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list), #endif #ifdef CONFIG_CPUSETS - .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq), + .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq, + &init_task.alloc_lock), #endif #ifdef CONFIG_RT_MUTEXES .pi_waiters = RB_ROOT_CACHED, diff --git a/kernel/fork.c b/kernel/fork.c index 70d9d0a4de2a..fc72f09a61b2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2032,7 +2032,7 @@ static __latent_entropy struct task_struct *copy_process( #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; p->cpuset_slab_spread_rotor = NUMA_NO_NODE; - seqcount_init(&p->mems_allowed_seq); + seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; -- cgit v1.2.3 From 26475371976c69489d3a8e6c8bbf35afbbc25055 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:24 +0200 Subject: vfs: Use sequence counter with associated spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_spinlock_t data type, which allows to associate a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-19-a.darwish@linutronix.de --- fs/dcache.c | 2 +- fs/fs_struct.c | 4 ++-- include/linux/dcache.h | 2 +- include/linux/fs_struct.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/dcache.c b/fs/dcache.c index 361ea7ab30ea..ea0485861d93 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1746,7 +1746,7 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) dentry->d_lockref.count = 1; dentry->d_flags = 0; spin_lock_init(&dentry->d_lock); - seqcount_init(&dentry->d_seq); + seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); dentry->d_inode = NULL; dentry->d_parent = dentry; dentry->d_sb = sb; diff --git a/fs/fs_struct.c b/fs/fs_struct.c index ca639ed967b7..04b3f5b9c629 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -117,7 +117,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) fs->users = 1; fs->in_exec = 0; spin_lock_init(&fs->lock); - seqcount_init(&fs->seq); + seqcount_spinlock_init(&fs->seq, &fs->lock); fs->umask = old->umask; spin_lock(&old->lock); @@ -163,6 +163,6 @@ EXPORT_SYMBOL(current_umask); struct fs_struct init_fs = { .users = 1, .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), - .seq = SEQCNT_ZERO(init_fs.seq), + .seq = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock), .umask = 0022, }; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index a81f0c3cf352..65d975bf9390 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat; struct dentry { /* RCU lookup touched fields */ unsigned int d_flags; /* protected by d_lock */ - seqcount_t d_seq; /* per dentry seqlock */ + seqcount_spinlock_t d_seq; /* per dentry seqlock */ struct hlist_bl_node d_hash; /* lookup hash list */ struct dentry *d_parent; /* parent directory */ struct qstr d_name; diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index cf1015abfbf2..783b48dedb72 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -9,7 +9,7 @@ struct fs_struct { int users; spinlock_t lock; - seqcount_t seq; + seqcount_spinlock_t seq; int umask; int in_exec; struct path root, pwd; -- cgit v1.2.3 From 5c73b9a2b1b4ecc809a914aa64970157b3d8c936 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:29 +0200 Subject: kvm/eventfd: Use sequence counter with associated spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_spinlock_t data type, which allows to associate a spinlock with the sequence counter. This enables lockdep to verify that the spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Acked-by: Paolo Bonzini Link: https://lkml.kernel.org/r/20200720155530.1173732-24-a.darwish@linutronix.de --- include/linux/kvm_irqfd.h | 2 +- virt/kvm/eventfd.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index dc1da020305b..dac047abdba7 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h @@ -42,7 +42,7 @@ struct kvm_kernel_irqfd { wait_queue_entry_t wait; /* Update side is protected by irqfds.lock */ struct kvm_kernel_irq_routing_entry irq_entry; - seqcount_t irq_entry_sc; + seqcount_spinlock_t irq_entry_sc; /* Used for level IRQ fast-path */ int gsi; struct work_struct inject; diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index ef7ed916ad4a..d6408bb497dc 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -303,7 +303,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) INIT_LIST_HEAD(&irqfd->list); INIT_WORK(&irqfd->inject, irqfd_inject); INIT_WORK(&irqfd->shutdown, irqfd_shutdown); - seqcount_init(&irqfd->irq_entry_sc); + seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock); f = fdget(args->fd); if (!f.file) { -- cgit v1.2.3 From af5a06b582ec3d7b0160b4faaa65f73d8dcf989f Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 20 Jul 2020 17:55:30 +0200 Subject: hrtimer: Use sequence counter with associated raw spinlock A sequence counter write side critical section must be protected by some form of locking to serialize writers. A plain seqcount_t does not contain the information of which lock must be held when entering a write side critical section. Use the new seqcount_raw_spinlock_t data type, which allows to associate a raw spinlock with the sequence counter. This enables lockdep to verify that the raw spinlock used for writer serialization is held when the write side critical section is entered. If lockdep is disabled this lock association is compiled out and has neither storage size nor runtime overhead. Signed-off-by: Ahmed S. 
Darwish Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20200720155530.1173732-25-a.darwish@linutronix.de --- include/linux/hrtimer.h | 2 +- kernel/time/hrtimer.c | 13 ++++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 15c8ac313678..25993b86ac5c 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -159,7 +159,7 @@ struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; unsigned int index; clockid_t clockid; - seqcount_t seq; + seqcount_raw_spinlock_t seq; struct hrtimer *running; struct timerqueue_head active; ktime_t (*get_time)(void); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index d89da1c7e005..c4038511d5c9 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -135,7 +135,11 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { * timer->base->cpu_base */ static struct hrtimer_cpu_base migration_cpu_base = { - .clock_base = { { .cpu_base = &migration_cpu_base, }, }, + .clock_base = { { + .cpu_base = &migration_cpu_base, + .seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq, + &migration_cpu_base.lock), + }, }, }; #define migration_base migration_cpu_base.clock_base[0] @@ -1998,8 +2002,11 @@ int hrtimers_prepare_cpu(unsigned int cpu) int i; for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { - cpu_base->clock_base[i].cpu_base = cpu_base; - timerqueue_init_head(&cpu_base->clock_base[i].active); + struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i]; + + clock_b->cpu_base = cpu_base; + seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock); + timerqueue_init_head(&clock_b->active); } cpu_base->cpu = cpu; -- cgit v1.2.3 From e55687fe5c1e4849e5559a0a49199c9ca3fff36e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 11:56:22 +0200 Subject: seqlock: s/__SEQ_LOCKDEP/__SEQ_LOCK/g __SEQ_LOCKDEP() is an expression gate for the seqcount_LOCKNAME_t::lock member. Rename it to be about the member, not the gate condition. Later (PREEMPT_RT) patches will make the member available for !LOCKDEP configs. 
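For illustration, this gate pattern compiles the lock member and every use of it in or out as a unit; a minimal sketch, assuming the CONFIG_LOCKDEP semantics in effect at this point in the series:

#ifdef CONFIG_LOCKDEP
#define __SEQ_LOCK(expr)	expr	/* member and all its uses compiled in */
#else
#define __SEQ_LOCK(expr)		/* compiled out: no storage, no runtime cost */
#endif

/* With lockdep disabled, the struct degenerates to a bare seqcount_t: */
typedef struct seqcount_spinlock {
	seqcount_t	seqcount;
	__SEQ_LOCK(spinlock_t *lock);
} seqcount_spinlock_t;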
Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index b48729988325..c689abab06c8 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -133,20 +133,20 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) */ #ifdef CONFIG_LOCKDEP -#define __SEQ_LOCKDEP(expr) expr +#define __SEQ_LOCK(expr) expr #else -#define __SEQ_LOCKDEP(expr) +#define __SEQ_LOCK(expr) #endif #define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ - __SEQ_LOCKDEP(.lock = (assoc_lock)) \ + __SEQ_LOCK(.lock = (assoc_lock)) \ } #define seqcount_locktype_init(s, assoc_lock) \ do { \ seqcount_init(&(s)->seqcount); \ - __SEQ_LOCKDEP((s)->lock = (assoc_lock)); \ + __SEQ_LOCK((s)->lock = (assoc_lock)); \ } while (0) /** @@ -161,7 +161,7 @@ do { \ */ typedef struct seqcount_spinlock { seqcount_t seqcount; - __SEQ_LOCKDEP(spinlock_t *lock); + __SEQ_LOCK(spinlock_t *lock); } seqcount_spinlock_t; /** @@ -192,7 +192,7 @@ typedef struct seqcount_spinlock { */ typedef struct seqcount_raw_spinlock { seqcount_t seqcount; - __SEQ_LOCKDEP(raw_spinlock_t *lock); + __SEQ_LOCK(raw_spinlock_t *lock); } seqcount_raw_spinlock_t; /** @@ -223,7 +223,7 @@ typedef struct seqcount_raw_spinlock { */ typedef struct seqcount_rwlock { seqcount_t seqcount; - __SEQ_LOCKDEP(rwlock_t *lock); + __SEQ_LOCK(rwlock_t *lock); } seqcount_rwlock_t; /** @@ -257,7 +257,7 @@ typedef struct seqcount_rwlock { */ typedef struct seqcount_mutex { seqcount_t seqcount; - __SEQ_LOCKDEP(struct mutex *lock); + __SEQ_LOCK(struct mutex *lock); } seqcount_mutex_t; /** @@ -291,7 +291,7 @@ typedef struct seqcount_mutex { */ typedef struct seqcount_ww_mutex { seqcount_t seqcount; - __SEQ_LOCKDEP(struct ww_mutex *lock); + __SEQ_LOCK(struct ww_mutex *lock); } seqcount_ww_mutex_t; /** @@ -329,7 +329,7 @@ __seqcount_##locktype##_preemptible(seqcount_##locktype##_t *s) \ static inline void \ __seqcount_##locktype##_assert(seqcount_##locktype##_t *s) \ { \ - __SEQ_LOCKDEP(lockdep_assert_held(lockmember)); \ + __SEQ_LOCK(lockdep_assert_held(lockmember)); \ } /* -- cgit v1.2.3 From a8772dccb2ec7b139db1b3ba782ecb12ed92d7c3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 11:56:49 +0200 Subject: seqlock: Fold seqcount_LOCKNAME_t definition Manual repetition is boring and error prone. Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 142 +++++++++++++----------------------------------- 1 file changed, 39 insertions(+), 103 deletions(-) (limited to 'include/linux') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index c689abab06c8..4b259bb4d4b9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -149,21 +149,6 @@ do { \ __SEQ_LOCK((s)->lock = (assoc_lock)); \ } while (0) -/** - * typedef seqcount_spinlock_t - sequence counter with spinlock associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated spinlock - * - * A plain sequence counter with external writer synchronization by a - * spinlock. The spinlock is associated to the sequence count in the - * static initializer or init function. This enables lockdep to validate - * that the write side critical section is properly serialized. 
- */ -typedef struct seqcount_spinlock { - seqcount_t seqcount; - __SEQ_LOCK(spinlock_t *lock); -} seqcount_spinlock_t; - /** * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t * @name: Name of the seqcount_spinlock_t instance @@ -180,21 +165,6 @@ typedef struct seqcount_spinlock { #define seqcount_spinlock_init(s, lock) \ seqcount_locktype_init(s, lock) -/** - * typedef seqcount_raw_spinlock_t - sequence count with raw spinlock associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated raw spinlock - * - * A plain sequence counter with external writer synchronization by a - * raw spinlock. The raw spinlock is associated to the sequence count in - * the static initializer or init function. This enables lockdep to - * validate that the write side critical section is properly serialized. - */ -typedef struct seqcount_raw_spinlock { - seqcount_t seqcount; - __SEQ_LOCK(raw_spinlock_t *lock); -} seqcount_raw_spinlock_t; - /** * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t * @name: Name of the seqcount_raw_spinlock_t instance @@ -211,21 +181,6 @@ typedef struct seqcount_raw_spinlock { #define seqcount_raw_spinlock_init(s, lock) \ seqcount_locktype_init(s, lock) -/** - * typedef seqcount_rwlock_t - sequence count with rwlock associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated rwlock - * - * A plain sequence counter with external writer synchronization by a - * rwlock. The rwlock is associated to the sequence count in the static - * initializer or init function. This enables lockdep to validate that - * the write side critical section is properly serialized. - */ -typedef struct seqcount_rwlock { - seqcount_t seqcount; - __SEQ_LOCK(rwlock_t *lock); -} seqcount_rwlock_t; - /** * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t * @name: Name of the seqcount_rwlock_t instance @@ -242,24 +197,6 @@ typedef struct seqcount_rwlock { #define seqcount_rwlock_init(s, lock) \ seqcount_locktype_init(s, lock) -/** - * typedef seqcount_mutex_t - sequence count with mutex associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated mutex - * - * A plain sequence counter with external writer synchronization by a - * mutex. The mutex is associated to the sequence counter in the static - * initializer or init function. This enables lockdep to validate that - * the write side critical section is properly serialized. - * - * The write side API functions write_seqcount_begin()/end() automatically - * disable and enable preemption when used with seqcount_mutex_t. - */ -typedef struct seqcount_mutex { - seqcount_t seqcount; - __SEQ_LOCK(struct mutex *lock); -} seqcount_mutex_t; - /** * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t * @name: Name of the seqcount_mutex_t instance @@ -276,24 +213,6 @@ typedef struct seqcount_mutex { #define seqcount_mutex_init(s, lock) \ seqcount_locktype_init(s, lock) -/** - * typedef seqcount_ww_mutex_t - sequence count with ww_mutex associated - * @seqcount: The real sequence counter - * @lock: Pointer to the associated ww_mutex - * - * A plain sequence counter with external writer synchronization by a - * ww_mutex. The ww_mutex is associated to the sequence counter in the static - * initializer or init function. This enables lockdep to validate that - * the write side critical section is properly serialized. 
- * - * The write side API functions write_seqcount_begin()/end() automatically - * disable and enable preemption when used with seqcount_ww_mutex_t. - */ -typedef struct seqcount_ww_mutex { - seqcount_t seqcount; - __SEQ_LOCK(struct ww_mutex *lock); -} seqcount_ww_mutex_t; - /** * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t * @name: Name of the seqcount_ww_mutex_t instance @@ -310,30 +229,50 @@ typedef struct seqcount_ww_mutex { #define seqcount_ww_mutex_init(s, lock) \ seqcount_locktype_init(s, lock) -/* - * @preempt: Is the associated write serialization lock preemtpible? +/** + * typedef seqcount_LOCKNAME_t - sequence counter with spinlock associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated spinlock + * + * A plain sequence counter with external writer synchronization by a + * spinlock. The spinlock is associated to the sequence count in the + * static initializer or init function. This enables lockdep to validate + * that the write side critical section is properly serialized. */ -#define SEQCOUNT_LOCKTYPE(locktype, preempt, lockmember) \ -static inline seqcount_t * \ -__seqcount_##locktype##_ptr(seqcount_##locktype##_t *s) \ + +/* + * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers + * @locktype: actual typename + * @lockname: name + * @preemptible: preemptibility of above locktype + * @lockmember: argument for lockdep_assert_held() + */ +#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \ +typedef struct seqcount_##lockname { \ + seqcount_t seqcount; \ + __SEQ_LOCK(locktype *lock); \ +} seqcount_##lockname##_t; \ + \ +static __always_inline seqcount_t * \ +__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \ { \ return &s->seqcount; \ } \ \ -static inline bool \ -__seqcount_##locktype##_preemptible(seqcount_##locktype##_t *s) \ +static __always_inline bool \ +__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \ { \ - return preempt; \ + return preemptible; \ } \ \ -static inline void \ -__seqcount_##locktype##_assert(seqcount_##locktype##_t *s) \ +static __always_inline void \ +__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \ { \ __SEQ_LOCK(lockdep_assert_held(lockmember)); \ } /* - * Similar hooks, but for plain seqcount_t + * __seqprop() for seqcount_t */ static inline seqcount_t *__seqcount_ptr(seqcount_t *s) @@ -351,17 +290,14 @@ static inline void __seqcount_assert(seqcount_t *s) lockdep_assert_preemption_disabled(); } -/* - * @s: Pointer to seqcount_locktype_t, generated hooks first parameter. 
- */ -SEQCOUNT_LOCKTYPE(raw_spinlock, false, s->lock) -SEQCOUNT_LOCKTYPE(spinlock, false, s->lock) -SEQCOUNT_LOCKTYPE(rwlock, false, s->lock) -SEQCOUNT_LOCKTYPE(mutex, true, s->lock) -SEQCOUNT_LOCKTYPE(ww_mutex, true, &s->lock->base) - -#define __seqprop_case(s, locktype, prop) \ - seqcount_##locktype##_t: __seqcount_##locktype##_##prop((void *)(s)) +SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock) +SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock) +SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) + +#define __seqprop_case(s, lockname, prop) \ + seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s)) #define __seqprop(s, prop) _Generic(*(s), \ seqcount_t: __seqcount_##prop((void *)(s)), \ -- cgit v1.2.3 From e4e9ab3f9f91ad3b88d12363f890e8ad9b59b645 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 12:00:53 +0200 Subject: seqlock: Fold seqcount_LOCKNAME_init() definition Manual repetition is boring and error prone. Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 61 ++++++++++++------------------------------------- 1 file changed, 14 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 4b259bb4d4b9..501ff47d1e8e 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -143,12 +143,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) __SEQ_LOCK(.lock = (assoc_lock)) \ } -#define seqcount_locktype_init(s, assoc_lock) \ -do { \ - seqcount_init(&(s)->seqcount); \ - __SEQ_LOCK((s)->lock = (assoc_lock)); \ -} while (0) - /** * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t * @name: Name of the seqcount_spinlock_t instance @@ -157,14 +151,6 @@ do { \ #define SEQCNT_SPINLOCK_ZERO(name, lock) \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) -/** - * seqcount_spinlock_init - runtime initializer for seqcount_spinlock_t - * @s: Pointer to the seqcount_spinlock_t instance - * @lock: Pointer to the associated spinlock - */ -#define seqcount_spinlock_init(s, lock) \ - seqcount_locktype_init(s, lock) - /** * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t * @name: Name of the seqcount_raw_spinlock_t instance @@ -173,14 +159,6 @@ do { \ #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) -/** - * seqcount_raw_spinlock_init - runtime initializer for seqcount_raw_spinlock_t - * @s: Pointer to the seqcount_raw_spinlock_t instance - * @lock: Pointer to the associated raw_spinlock - */ -#define seqcount_raw_spinlock_init(s, lock) \ - seqcount_locktype_init(s, lock) - /** * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t * @name: Name of the seqcount_rwlock_t instance @@ -189,14 +167,6 @@ do { \ #define SEQCNT_RWLOCK_ZERO(name, lock) \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) -/** - * seqcount_rwlock_init - runtime initializer for seqcount_rwlock_t - * @s: Pointer to the seqcount_rwlock_t instance - * @lock: Pointer to the associated rwlock - */ -#define seqcount_rwlock_init(s, lock) \ - seqcount_locktype_init(s, lock) - /** * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t * @name: Name of the seqcount_mutex_t instance @@ -205,14 +175,6 @@ do { \ #define SEQCNT_MUTEX_ZERO(name, lock) \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) -/** - * seqcount_mutex_init - runtime initializer for seqcount_mutex_t - * @s: Pointer to the 
seqcount_mutex_t instance - * @lock: Pointer to the associated mutex - */ -#define seqcount_mutex_init(s, lock) \ - seqcount_locktype_init(s, lock) - /** * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t * @name: Name of the seqcount_ww_mutex_t instance @@ -222,15 +184,7 @@ do { \ SEQCOUNT_LOCKTYPE_ZERO(name, lock) /** - * seqcount_ww_mutex_init - runtime initializer for seqcount_ww_mutex_t - * @s: Pointer to the seqcount_ww_mutex_t instance - * @lock: Pointer to the associated ww_mutex - */ -#define seqcount_ww_mutex_init(s, lock) \ - seqcount_locktype_init(s, lock) - -/** - * typedef seqcount_LOCKNAME_t - sequence counter with spinlock associated + * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated * @seqcount: The real sequence counter * @lock: Pointer to the associated spinlock * @@ -240,6 +194,12 @@ do { \ * that the write side critical section is properly serialized. */ +/** + * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t + * @s: Pointer to the seqcount_LOCKNAME_t instance + * @lock: Pointer to the associated LOCKTYPE + */ + /* * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers * @locktype: actual typename * @lockname: name * @preemptible: preemptibility of above locktype * @lockmember: argument for lockdep_assert_held() */ #define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \ typedef struct seqcount_##lockname { \ seqcount_t seqcount; \ __SEQ_LOCK(locktype *lock); \ } seqcount_##lockname##_t; \ \ +static __always_inline void \ +seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \ +{ \ + seqcount_init(&s->seqcount); \ + __SEQ_LOCK(s->lock = lock); \ +} \ + \ static __always_inline seqcount_t * \ __seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \ { \ -- cgit v1.2.3 From 0efc94c5d15c3da0a69543d86ad2180f39256ed6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 12:03:13 +0200 Subject: seqcount: Compress SEQCNT_LOCKNAME_ZERO() Less is more.
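At a use site, the two initialization paths then look like the following sketch (the struct and all names in it are hypothetical, for illustration only; they appear in no patch here):

struct my_stats {
	spinlock_t		lock;	/* serializes writers */
	seqcount_spinlock_t	seq;	/* associated with ->lock */
	u64			a, b;
};

/* Static initialization via the ZERO initializer family: */
static struct my_stats stats = {
	.lock = __SPIN_LOCK_UNLOCKED(stats.lock),
	.seq  = SEQCNT_SPINLOCK_ZERO(stats.seq, &stats.lock),
};

/* Runtime initialization via the generated init function: */
static void my_stats_init(struct my_stats *s)
{
	spin_lock_init(&s->lock);
	seqcount_spinlock_init(&s->seq, &s->lock);
}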
Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 63 ++++++++++++++----------------------------------- 1 file changed, 18 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 501ff47d1e8e..251dcd6f5cd8 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -138,51 +138,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) #define __SEQ_LOCK(expr) #endif -#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ - .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ - __SEQ_LOCK(.lock = (assoc_lock)) \ -} - -/** - * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t - * @name: Name of the seqcount_spinlock_t instance - * @lock: Pointer to the associated spinlock - */ -#define SEQCNT_SPINLOCK_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - -/** - * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t - * @name: Name of the seqcount_raw_spinlock_t instance - * @lock: Pointer to the associated raw_spinlock - */ -#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - -/** - * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t - * @name: Name of the seqcount_rwlock_t instance - * @lock: Pointer to the associated rwlock - */ -#define SEQCNT_RWLOCK_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - -/** - * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t - * @name: Name of the seqcount_mutex_t instance - * @lock: Pointer to the associated mutex - */ -#define SEQCNT_MUTEX_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - -/** - * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t - * @name: Name of the seqcount_ww_mutex_t instance - * @lock: Pointer to the associated ww_mutex - */ -#define SEQCNT_WW_MUTEX_ZERO(name, lock) \ - SEQCOUNT_LOCKTYPE_ZERO(name, lock) - /** * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated * @seqcount: The real sequence counter @@ -263,6 +218,24 @@ SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock) SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock) SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) +/** + * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t + * @name: Name of the seqcount_LOCKNAME_t instance + * @lock: Pointer to the associated LOCKTYPE + */ + +#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ + .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ + __SEQ_LOCK(.lock = (assoc_lock)) \ +} + +#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + + #define __seqprop_case(s, lockname, prop) \ seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s)) -- cgit v1.2.3 From b5e6a027bd327daa679ca55182a920659e2cbb90 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 Jul 2020 12:11:49 +0200 Subject: seqcount: More consistent seqprop names Attempt uniformity and brevity.
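These seqprop helpers are what the public read/write macros expand through; in use, the lock association pays off as in this writer/reader sketch (continuing the hypothetical my_stats example above, illustration only):

static void my_stats_update(struct my_stats *s, u64 a, u64 b)
{
	spin_lock(&s->lock);			/* writer serialization */
	write_seqcount_begin(&s->seq);		/* lockdep asserts ->lock is held */
	s->a = a;
	s->b = b;
	write_seqcount_end(&s->seq);
	spin_unlock(&s->lock);
}

static u64 my_stats_sum(struct my_stats *s)
{
	unsigned int seq;
	u64 sum;

	do {
		/* _Generic dispatch picks the spinlock variant of the hooks */
		seq = read_seqcount_begin(&s->seq);
		sum = s->a + s->b;
	} while (read_seqcount_retry(&s->seq, seq));

	return sum;
}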
Signed-off-by: Peter Zijlstra (Intel) --- include/linux/seqlock.h | 52 ++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 251dcd6f5cd8..a076f783aa36 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -247,9 +247,9 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) __seqprop_case((s), mutex, prop), \ __seqprop_case((s), ww_mutex, prop)) -#define __to_seqcount_t(s) __seqprop(s, ptr) -#define __associated_lock_exists_and_is_preemptible(s) __seqprop(s, preemptible) -#define __assert_write_section_is_protected(s) __seqprop(s, assert) +#define __seqcount_ptr(s) __seqprop(s, ptr) +#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible) +#define __seqcount_assert_lock_held(s) __seqprop(s, assert) /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier @@ -266,7 +266,7 @@ SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) * Return: count to be passed to read_seqcount_retry() */ #define __read_seqcount_begin(s) \ - __read_seqcount_t_begin(__to_seqcount_t(s)) + __read_seqcount_t_begin(__seqcount_ptr(s)) static inline unsigned __read_seqcount_t_begin(const seqcount_t *s) { @@ -289,7 +289,7 @@ repeat: * Return: count to be passed to read_seqcount_retry() */ #define raw_read_seqcount_begin(s) \ - raw_read_seqcount_t_begin(__to_seqcount_t(s)) + raw_read_seqcount_t_begin(__seqcount_ptr(s)) static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s) { @@ -305,7 +305,7 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s) * Return: count to be passed to read_seqcount_retry() */ #define read_seqcount_begin(s) \ - read_seqcount_t_begin(__to_seqcount_t(s)) + read_seqcount_t_begin(__seqcount_ptr(s)) static inline unsigned read_seqcount_t_begin(const seqcount_t *s) { @@ -325,7 +325,7 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s) * Return: count to be passed to read_seqcount_retry() */ #define raw_read_seqcount(s) \ - raw_read_seqcount_t(__to_seqcount_t(s)) + raw_read_seqcount_t(__seqcount_ptr(s)) static inline unsigned raw_read_seqcount_t(const seqcount_t *s) { @@ -353,7 +353,7 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s) * Return: count to be passed to read_seqcount_retry() */ #define raw_seqcount_begin(s) \ - raw_seqcount_t_begin(__to_seqcount_t(s)) + raw_seqcount_t_begin(__seqcount_ptr(s)) static inline unsigned raw_seqcount_t_begin(const seqcount_t *s) { @@ -380,7 +380,7 @@ static inline unsigned raw_seqcount_t_begin(const seqcount_t *s) * Return: true if a read section retry is required, else false */ #define __read_seqcount_retry(s, start) \ - __read_seqcount_t_retry(__to_seqcount_t(s), start) + __read_seqcount_t_retry(__seqcount_ptr(s), start) static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) { @@ -400,7 +400,7 @@ static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) * Return: true if a read section retry is required, else false */ #define read_seqcount_retry(s, start) \ - read_seqcount_t_retry(__to_seqcount_t(s), start) + read_seqcount_t_retry(__seqcount_ptr(s), start) static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) { @@ -414,10 +414,10 @@ static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) */ #define raw_write_seqcount_begin(s) \ do { \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if 
(__seqcount_lock_preemptible(s)) \ preempt_disable(); \ \ - raw_write_seqcount_t_begin(__to_seqcount_t(s)); \ + raw_write_seqcount_t_begin(__seqcount_ptr(s)); \ } while (0) static inline void raw_write_seqcount_t_begin(seqcount_t *s) @@ -433,9 +433,9 @@ static inline void raw_write_seqcount_t_begin(seqcount_t *s) */ #define raw_write_seqcount_end(s) \ do { \ - raw_write_seqcount_t_end(__to_seqcount_t(s)); \ + raw_write_seqcount_t_end(__seqcount_ptr(s)); \ \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if (__seqcount_lock_preemptible(s)) \ preempt_enable(); \ } while (0) @@ -456,12 +456,12 @@ static inline void raw_write_seqcount_t_end(seqcount_t *s) */ #define write_seqcount_begin_nested(s, subclass) \ do { \ - __assert_write_section_is_protected(s); \ + __seqcount_assert_lock_held(s); \ \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if (__seqcount_lock_preemptible(s)) \ preempt_disable(); \ \ - write_seqcount_t_begin_nested(__to_seqcount_t(s), subclass); \ + write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \ } while (0) static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) @@ -483,12 +483,12 @@ static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) */ #define write_seqcount_begin(s) \ do { \ - __assert_write_section_is_protected(s); \ + __seqcount_assert_lock_held(s); \ \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if (__seqcount_lock_preemptible(s)) \ preempt_disable(); \ \ - write_seqcount_t_begin(__to_seqcount_t(s)); \ + write_seqcount_t_begin(__seqcount_ptr(s)); \ } while (0) static inline void write_seqcount_t_begin(seqcount_t *s) @@ -504,9 +504,9 @@ static inline void write_seqcount_t_begin(seqcount_t *s) */ #define write_seqcount_end(s) \ do { \ - write_seqcount_t_end(__to_seqcount_t(s)); \ + write_seqcount_t_end(__seqcount_ptr(s)); \ \ - if (__associated_lock_exists_and_is_preemptible(s)) \ + if (__seqcount_lock_preemptible(s)) \ preempt_enable(); \ } while (0) @@ -558,7 +558,7 @@ static inline void write_seqcount_t_end(seqcount_t *s) * } */ #define raw_write_seqcount_barrier(s) \ - raw_write_seqcount_t_barrier(__to_seqcount_t(s)) + raw_write_seqcount_t_barrier(__seqcount_ptr(s)) static inline void raw_write_seqcount_t_barrier(seqcount_t *s) { @@ -578,7 +578,7 @@ static inline void raw_write_seqcount_t_barrier(seqcount_t *s) * will complete successfully and see data older than this. */ #define write_seqcount_invalidate(s) \ - write_seqcount_t_invalidate(__to_seqcount_t(s)) + write_seqcount_t_invalidate(__seqcount_ptr(s)) static inline void write_seqcount_t_invalidate(seqcount_t *s) { @@ -604,7 +604,7 @@ static inline void write_seqcount_t_invalidate(seqcount_t *s) * checked with read_seqcount_retry(). */ #define raw_read_seqcount_latch(s) \ - raw_read_seqcount_t_latch(__to_seqcount_t(s)) + raw_read_seqcount_t_latch(__seqcount_ptr(s)) static inline int raw_read_seqcount_t_latch(seqcount_t *s) { @@ -695,7 +695,7 @@ static inline int raw_read_seqcount_t_latch(seqcount_t *s) * patterns to manage the lifetimes of the objects within. */ #define raw_write_seqcount_latch(s) \ - raw_write_seqcount_t_latch(__to_seqcount_t(s)) + raw_write_seqcount_t_latch(__seqcount_ptr(s)) static inline void raw_write_seqcount_t_latch(seqcount_t *s) { -- cgit v1.2.3 From f369bc3f9096f5d355e8b80540bc30ac9a602912 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 1 Aug 2020 08:17:13 +0200 Subject: vgaarb: mark vga_tryget static This symbol isn't used anywhere outside of vgaarb.c.
Signed-off-by: Christoph Hellwig Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20200801061713.307434-1-hch@lst.de --- drivers/gpu/vga/vgaarb.c | 3 +-- include/linux/vgaarb.h | 6 ------ 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index f2f3ef8af271..5180c5687ee5 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -529,7 +529,7 @@ EXPORT_SYMBOL(vga_get); * * 0 on success, negative error code on failure. */ -int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) +static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { struct vga_device *vgadev; unsigned long flags; @@ -554,7 +554,6 @@ bail: spin_unlock_irqrestore(&vga_lock, flags); return rc; } -EXPORT_SYMBOL(vga_tryget); /** * vga_put - release lock on legacy VGA resources diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 553b34c8b5f7..977caf96c8d2 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -109,12 +109,6 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev, return vga_get(pdev, rsrc, 0); } -#if defined(CONFIG_VGA_ARB) -extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); -#else -static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } -#endif - #if defined(CONFIG_VGA_ARB) extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); #else -- cgit v1.2.3 From 7ef5264de773279b9f23b6cc8afb5addb30e970b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:20 +0200 Subject: modules: mark ref_module static ref_module isn't used anywhere outside of module.c. Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 1 - kernel/module.c | 6 ++---- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index 2e6670860d27..f1fdbeef2153 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -657,7 +657,6 @@ static inline void __module_get(struct module *module) #define symbol_put_addr(p) do { } while (0) #endif /* CONFIG_MODULE_UNLOAD */ -int ref_module(struct module *a, struct module *b); /* This is a #define so the string doesn't get put in every .o file */ #define module_name(mod) \ diff --git a/kernel/module.c b/kernel/module.c index e8a198588f26..baae0e83d630 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -869,7 +869,7 @@ static int add_module_usage(struct module *a, struct module *b) } /* Module a uses b: caller needs module_mutex() */ -int ref_module(struct module *a, struct module *b) +static int ref_module(struct module *a, struct module *b) { int err; @@ -888,7 +888,6 @@ int ref_module(struct module *a, struct module *b) } return 0; } -EXPORT_SYMBOL_GPL(ref_module); /* Clear the unload stuff of the module. */ static void module_unload_free(struct module *mod) @@ -1169,11 +1168,10 @@ static inline void module_unload_free(struct module *mod) { } -int ref_module(struct module *a, struct module *b) +static int ref_module(struct module *a, struct module *b) { return strong_try_module_get(b); } -EXPORT_SYMBOL_GPL(ref_module); static inline int module_unload_init(struct module *mod) { -- cgit v1.2.3 From 773110470e2fa3839523384ae014f8a723c4d178 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:21 +0200 Subject: modules: mark find_symbol static find_symbol is only used in module.c. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 11 ----------- kernel/module.c | 3 +-- 2 files changed, 1 insertion(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index f1fdbeef2153..90bdc362be36 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -590,17 +590,6 @@ struct symsearch { bool unused; }; -/* - * Search for an exported symbol by name. - * - * Must be called with module_mutex held or preemption disabled. - */ -const struct kernel_symbol *find_symbol(const char *name, - struct module **owner, - const s32 **crc, - bool gplok, - bool warn); - /* * Walk the exported symbol table * diff --git a/kernel/module.c b/kernel/module.c index baae0e83d630..0f95fb4b3e37 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -585,7 +585,7 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms, /* Find an exported symbol and return it, along with, (optional) crc and * (optional) module which owns it. Needs preempt disabled or module_mutex. */ -const struct kernel_symbol *find_symbol(const char *name, +static const struct kernel_symbol *find_symbol(const char *name, struct module **owner, const s32 **crc, bool gplok, @@ -608,7 +608,6 @@ const struct kernel_symbol *find_symbol(const char *name, pr_debug("Failed to find symbol %s\n", name); return NULL; } -EXPORT_SYMBOL_GPL(find_symbol); /* * Search for module by name: must hold module_mutex (or preempt disabled -- cgit v1.2.3 From a54e04914c211b5678602a46b3ede5d82ec1327d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:22 +0200 Subject: modules: mark each_symbol_section static each_symbol_section is only used inside of module.c. Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 9 --------- kernel/module.c | 3 +-- 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index 90bdc362be36..b79219eed83c 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -590,15 +590,6 @@ struct symsearch { bool unused; }; -/* - * Walk the exported symbol table - * - * Must be called with module_mutex held or preemption disabled. - */ -bool each_symbol_section(bool (*fn)(const struct symsearch *arr, - struct module *owner, - void *data), void *data); - /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, diff --git a/kernel/module.c b/kernel/module.c index 0f95fb4b3e37..c2a099a27b68 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -422,7 +422,7 @@ static bool each_symbol_in_section(const struct symsearch *arr, } /* Returns true as soon as fn returns true, otherwise false. */ -bool each_symbol_section(bool (*fn)(const struct symsearch *arr, +static bool each_symbol_section(bool (*fn)(const struct symsearch *arr, struct module *owner, void *data), void *data) @@ -484,7 +484,6 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, } return false; } -EXPORT_SYMBOL_GPL(each_symbol_section); struct find_symbol_arg { /* Input */ -- cgit v1.2.3 From cd8732cdcc37d7077c4fa2c966b748c0662b607e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:25 +0200 Subject: modules: rename the licence field in struct symsearch to license Use the same spelling variant as the rest of the file. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 2 +- kernel/module.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index b79219eed83c..be04ba2f881d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -586,7 +586,7 @@ struct symsearch { NOT_GPL_ONLY, GPL_ONLY, WILL_BE_GPL_ONLY, - } licence; + } license; bool unused; }; diff --git a/kernel/module.c b/kernel/module.c index e85d06158fbc..62d817a0dca8 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -504,9 +504,9 @@ static bool check_exported_symbol(const struct symsearch *syms, struct find_symbol_arg *fsa = data; if (!fsa->gplok) { - if (syms->licence == GPL_ONLY) + if (syms->license == GPL_ONLY) return false; - if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) { + if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) { pr_warn("Symbol %s is being used by a non-GPL module, " "which will not be allowed in the future\n", fsa->name); -- cgit v1.2.3 From ef1dac6021cc8ec5de02ce31722bf26ac4ed5523 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jul 2020 08:10:26 +0200 Subject: modules: return licensing information from find_symbol Report the GPLONLY status through a new argument. Signed-off-by: Christoph Hellwig Signed-off-by: Jessica Yu --- include/linux/module.h | 2 +- kernel/module.c | 16 +++++++++++----- 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index be04ba2f881d..30b0f5fcdb3c 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -582,7 +582,7 @@ struct module *find_module(const char *name); struct symsearch { const struct kernel_symbol *start, *stop; const s32 *crcs; - enum { + enum mod_license { NOT_GPL_ONLY, GPL_ONLY, WILL_BE_GPL_ONLY, diff --git a/kernel/module.c b/kernel/module.c index 62d817a0dca8..656f5ff27088 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -495,6 +495,7 @@ struct find_symbol_arg { struct module *owner; const s32 *crc; const struct kernel_symbol *sym; + enum mod_license license; }; static bool check_exported_symbol(const struct symsearch *syms, @@ -528,6 +529,7 @@ static bool check_exported_symbol(const struct symsearch *syms, fsa->owner = owner; fsa->crc = symversion(syms->crcs, symnum); fsa->sym = &syms->start[symnum]; + fsa->license = syms->license; return true; } @@ -587,6 +589,7 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms, static const struct kernel_symbol *find_symbol(const char *name, struct module **owner, const s32 **crc, + enum mod_license *license, bool gplok, bool warn) { @@ -601,6 +604,8 @@ static const struct kernel_symbol *find_symbol(const char *name, *owner = fsa.owner; if (crc) *crc = fsa.crc; + if (license) + *license = fsa.license; return fsa.sym; } @@ -1074,7 +1079,7 @@ void __symbol_put(const char *symbol) struct module *owner; preempt_disable(); - if (!find_symbol(symbol, &owner, NULL, true, false)) + if (!find_symbol(symbol, &owner, NULL, NULL, true, false)) BUG(); module_put(owner); preempt_enable(); @@ -1352,7 +1357,7 @@ static inline int check_modstruct_version(const struct load_info *info, * locking is necessary -- use preempt_disable() to placate lockdep. 
*/ preempt_disable(); - if (!find_symbol("module_layout", NULL, &crc, true, false)) { + if (!find_symbol("module_layout", NULL, &crc, NULL, true, false)) { preempt_enable(); BUG(); } @@ -1436,6 +1441,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, struct module *owner; const struct kernel_symbol *sym; const s32 *crc; + enum mod_license license; int err; /* @@ -1445,7 +1451,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, */ sched_annotate_sleep(); mutex_lock(&module_mutex); - sym = find_symbol(name, &owner, &crc, + sym = find_symbol(name, &owner, &crc, &license, !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true); if (!sym) goto unlock; @@ -2213,7 +2219,7 @@ void *__symbol_get(const char *symbol) const struct kernel_symbol *sym; preempt_disable(); - sym = find_symbol(symbol, &owner, NULL, true, true); + sym = find_symbol(symbol, &owner, NULL, NULL, true, true); if (sym && strong_try_module_get(owner)) sym = NULL; preempt_enable(); @@ -2249,7 +2255,7 @@ static int verify_exported_symbols(struct module *mod) for (i = 0; i < ARRAY_SIZE(arr); i++) { for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { if (find_symbol(kernel_symbol_name(s), &owner, NULL, - true, false)) { + NULL, true, false)) { pr_err("%s: exports duplicate symbol %s" " (owned by %s)\n", mod->name, kernel_symbol_name(s), -- cgit v1.2.3 From 614a895fc69497d99cc02c076fa712c75194eab3 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Thu, 9 Jul 2020 20:07:33 +0200 Subject: mtd: hyperbus: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Richard Weinberger --- drivers/mtd/hyperbus/hbmc-am654.c | 2 +- drivers/mtd/hyperbus/hyperbus-core.c | 2 +- include/linux/mtd/hyperbus.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c index f350a0809f88..e0e33f6bf513 100644 --- a/drivers/mtd/hyperbus/hbmc-am654.c +++ b/drivers/mtd/hyperbus/hbmc-am654.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // -// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ // Author: Vignesh Raghavendra #include diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c index 32685e8dd278..2f9fc4e17d53 100644 --- a/drivers/mtd/hyperbus/hyperbus-core.c +++ b/drivers/mtd/hyperbus/hyperbus-core.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // -// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ // Author: Vignesh Raghavendra #include diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h index 2dfe65964f6e..2129f7d3b6eb 100644 --- a/include/linux/mtd/hyperbus.h +++ b/include/linux/mtd/hyperbus.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __LINUX_MTD_HYPERBUS_H__ -- cgit v1.2.3 From 0c84b7fc973f9220ef8732c430ccc7c92d083184 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:29:54 -0700 Subject: MTD: pfow.h: drop a duplicated word Drop the repeated word "can" in a comment. Signed-off-by: Randy Dunlap Cc: Miquel Raynal Cc: Richard Weinberger Cc: Vignesh Raghavendra Cc: linux-mtd@lists.infradead.org Signed-off-by: Richard Weinberger --- include/linux/mtd/pfow.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h index 122f3439e1af..6166e7c60869 100644 --- a/include/linux/mtd/pfow.h +++ b/include/linux/mtd/pfow.h @@ -19,7 +19,7 @@ /* Identification info for LPDDR chip */ #define PFOW_MANUFACTURER_ID 0x0020 #define PFOW_DEVICE_ID 0x0022 -/* Address in PFOW where prog buffer can can be found */ +/* Address in PFOW where prog buffer can be found */ #define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 /* Size of program buffer in words */ #define PFOW_PROGRAM_BUFFER_SIZE 0x0042 -- cgit v1.2.3 From 042f649810f61c4a834f3d6d866c567f7f6b3f8c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 1 Jul 2020 11:54:43 -0400 Subject: libceph: just have osd_req_op_init() return a pointer The caller can just ignore the return. No need for this wrapper that just casts the other function to void. 
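A sketch of the calling convention this enables (req, which and cookie are placeholders; CEPH_OSD_OP_WATCH and the watch.cookie field appear in the diff below, while CEPH_OSD_OP_STAT stands in for any opcode with no extra fields):

    struct ceph_osd_req_op *op;

    /* callers that fill op-specific fields use the returned pointer */
    op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
    op->watch.cookie = cookie;

    /* callers with nothing further to set keep ignoring the return */
    osd_req_op_init(req, which, CEPH_OSD_OP_STAT, 0);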
[ idryomov: argument alignment ] Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- include/linux/ceph/osd_client.h | 2 +- net/ceph/osd_client.c | 39 ++++++++++++++++----------------------- 2 files changed, 17 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index c60b59e9291b..83fa08a06507 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -404,7 +404,7 @@ void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc); &__oreq->r_ops[__whch].typ.fld; \ }) -extern void osd_req_op_init(struct ceph_osd_request *osd_req, +struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags); extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index db6abb5a5511..e4fbcad6e7d8 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -525,7 +525,7 @@ EXPORT_SYMBOL(ceph_osdc_put_request); static void request_init(struct ceph_osd_request *req) { - /* req only, each op is zeroed in _osd_req_op_init() */ + /* req only, each op is zeroed in osd_req_op_init() */ memset(req, 0, sizeof(*req)); kref_init(&req->r_kref); @@ -746,8 +746,8 @@ EXPORT_SYMBOL(ceph_osdc_alloc_messages); * other information associated with them. It also serves as a * common init routine for all the other init functions, below. */ -static struct ceph_osd_req_op * -_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, +struct ceph_osd_req_op * +osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags) { struct ceph_osd_req_op *op; @@ -762,12 +762,6 @@ _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, return op; } - -void osd_req_op_init(struct ceph_osd_request *osd_req, - unsigned int which, u16 opcode, u32 flags) -{ - (void)_osd_req_op_init(osd_req, which, opcode, flags); -} EXPORT_SYMBOL(osd_req_op_init); void osd_req_op_extent_init(struct ceph_osd_request *osd_req, @@ -775,8 +769,8 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, - opcode, 0); + struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, + opcode, 0); size_t payload_len = 0; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && @@ -822,7 +816,7 @@ void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req, BUG_ON(which + 1 >= osd_req->r_num_ops); prev_op = &osd_req->r_ops[which]; - op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags); + op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags); /* dup previous one */ op->indata_len = prev_op->indata_len; op->outdata_len = prev_op->outdata_len; @@ -845,7 +839,7 @@ int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, size_t size; int ret; - op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0); + op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0); pagelist = ceph_pagelist_alloc(GFP_NOFS); if (!pagelist) @@ -883,8 +877,8 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *name, const void *value, size_t size, u8 cmp_op, u8 cmp_mode) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, - opcode, 0); + struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, + 
opcode, 0); struct ceph_pagelist *pagelist; size_t payload_len; int ret; @@ -928,7 +922,7 @@ static void osd_req_op_watch_init(struct ceph_osd_request *req, int which, { struct ceph_osd_req_op *op; - op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0); + op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0); op->watch.cookie = cookie; op->watch.op = watch_opcode; op->watch.gen = 0; @@ -943,10 +937,9 @@ void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, u64 expected_write_size, u32 flags) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, - CEPH_OSD_OP_SETALLOCHINT, - 0); + struct ceph_osd_req_op *op; + op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_SETALLOCHINT, 0); op->alloc_hint.expected_object_size = expected_object_size; op->alloc_hint.expected_write_size = expected_write_size; op->alloc_hint.flags = flags; @@ -4799,7 +4792,7 @@ static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which, struct ceph_pagelist *pl; int ret; - op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0); + op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0); pl = ceph_pagelist_alloc(GFP_NOIO); if (!pl) @@ -4868,7 +4861,7 @@ static int osd_req_op_notify_init(struct ceph_osd_request *req, int which, struct ceph_pagelist *pl; int ret; - op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); + op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0); op->notify.cookie = cookie; pl = ceph_pagelist_alloc(GFP_NOIO); @@ -5332,8 +5325,8 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req, if (IS_ERR(pages)) return PTR_ERR(pages); - op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2, - dst_fadvise_flags); + op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2, + dst_fadvise_flags); op->copy_from.snapid = src_snapid; op->copy_from.src_version = src_version; op->copy_from.flags = copy_from_flags; -- cgit v1.2.3 From 94f17c00d6687993101372f996cf6690ec9adf83 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Wed, 8 Jul 2020 08:53:28 +0200 Subject: libceph: replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. [ idryomov: Do the same for the CRUSH paper and replace ceph.newdream.net with ceph.io. ] Signed-off-by: Alexander A. Klimov Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- fs/ceph/Kconfig | 2 +- include/linux/crush/crush.h | 2 +- net/ceph/Kconfig | 2 +- net/ceph/ceph_hash.c | 2 +- net/ceph/crush/hash.c | 2 +- net/ceph/crush/mapper.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index cf235f6eacf9..471e40156065 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig @@ -13,7 +13,7 @@ config CEPH_FS scalable file system designed to provide high performance, reliable access to petabytes of storage. - More information at http://ceph.newdream.net/. + More information at https://ceph.io/. If unsure, say N. 
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 33c16f2de7f6..2f811baf78d2 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -17,7 +17,7 @@ * The algorithm was originally described in detail in this paper * (although the algorithm has evolved somewhat since then): * - * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf + * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf * * LGPL2 */ diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig index d7bec7adc267..f36f9a3a4e20 100644 --- a/net/ceph/Kconfig +++ b/net/ceph/Kconfig @@ -13,7 +13,7 @@ config CEPH_LIB common functionality to both the Ceph filesystem and to the rados block device (rbd). - More information at http://ceph.newdream.net/. + More information at https://ceph.io/. If unsure, say N. diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 9a5850f264ed..81e1e006c540 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -4,7 +4,7 @@ /* * Robert Jenkin's hash function. - * http://burtleburtle.net/bob/hash/evahash.html + * https://burtleburtle.net/bob/hash/evahash.html * This is in the public domain. */ #define mix(a, b, c) \ diff --git a/net/ceph/crush/hash.c b/net/ceph/crush/hash.c index e5cc603cdb17..fe79f6d2d0db 100644 --- a/net/ceph/crush/hash.c +++ b/net/ceph/crush/hash.c @@ -7,7 +7,7 @@ /* * Robert Jenkins' function for mixing 32-bit values - * http://burtleburtle.net/bob/hash/evahash.html + * https://burtleburtle.net/bob/hash/evahash.html * a, b = random bits, c = input and output */ #define crush_hashmix(a, b, c) do { \ diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 3f323ed9df52..07e5614eb3f1 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -298,7 +298,7 @@ static __u64 crush_ln(unsigned int xin) * * for reference, see: * - * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables + * https://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables * */ -- cgit v1.2.3 From 18f473b384a64cef69f166a3e2b73d3d2eca82c6 Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Thu, 16 Jul 2020 10:05:57 -0400 Subject: ceph: periodically send perf metrics to MDSes This will send the caps/read/write/metadata metrics to any available MDS once per second, which will be the same as the userland client. It will skip the MDS sessions which don't support the metric collection, as the MDSs will close socket connections when they get an unknown type message. We can disable the metric sending via the disable_send_metrics module parameter. 
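The once-per-second cadence is the usual self-rearming delayed-work pattern; a generic sketch with hypothetical names (only round_jiffies_relative(HZ) and the message type are taken from this patch):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void metric_work_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(metric_work, metric_work_fn);

    static void metric_work_fn(struct work_struct *work)
    {
            /* ... encode and send one CEPH_MSG_CLIENT_METRICS message ... */

            /* re-arm: fire again roughly one second from now */
            schedule_delayed_work(&metric_work, round_jiffies_relative(HZ));
    }

Since the parameter is registered with 0644 permissions (see the diff below), it should also be togglable at runtime via /sys/module/ceph/parameters/disable_send_metrics.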
[ jlayton: fix up endianness bug in ceph_mdsc_send_metrics() ] URL: https://tracker.ceph.com/issues/43215 Signed-off-by: Xiubo Li Signed-off-by: Jeff Layton Signed-off-by: Ilya Dryomov --- fs/ceph/mds_client.c | 3 + fs/ceph/mds_client.h | 4 +- fs/ceph/metric.c | 148 +++++++++++++++++++++++++++++++++++++++++++ fs/ceph/metric.h | 77 ++++++++++++++++++++++ fs/ceph/super.c | 42 ++++++++++++ fs/ceph/super.h | 2 + include/linux/ceph/ceph_fs.h | 1 + 7 files changed, 276 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index ef8a1179171b..d6cd2e4f0bc8 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3334,6 +3334,8 @@ static void handle_session(struct ceph_mds_session *session, session->s_state = CEPH_MDS_SESSION_OPEN; session->s_features = features; renewed_caps(mdsc, session, 0); + if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features)) + metric_schedule_delayed(&mdsc->metric); wake = 1; if (mdsc->stopping) __close_session(mdsc, session); @@ -4725,6 +4727,7 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc) ceph_metric_destroy(&mdsc->metric); + flush_delayed_work(&mdsc->metric.delayed_work); fsc->mdsc = NULL; kfree(mdsc); dout("mdsc_destroy %p done\n", mdsc); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 6147ff0a1cdf..bc9e95937d7c 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -28,8 +28,9 @@ enum ceph_feature_type { CEPHFS_FEATURE_LAZY_CAP_WANTED, CEPHFS_FEATURE_MULTI_RECONNECT, CEPHFS_FEATURE_DELEG_INO, + CEPHFS_FEATURE_METRIC_COLLECT, - CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_DELEG_INO, + CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_METRIC_COLLECT, }; /* @@ -43,6 +44,7 @@ enum ceph_feature_type { CEPHFS_FEATURE_LAZY_CAP_WANTED, \ CEPHFS_FEATURE_MULTI_RECONNECT, \ CEPHFS_FEATURE_DELEG_INO, \ + CEPHFS_FEATURE_METRIC_COLLECT, \ \ CEPHFS_FEATURE_MAX, \ } diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c index 269eacbd2a15..2466b261fba2 100644 --- a/fs/ceph/metric.c +++ b/fs/ceph/metric.c @@ -1,10 +1,150 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include #include #include #include #include "metric.h" +#include "mds_client.h" + +static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc, + struct ceph_mds_session *s) +{ + struct ceph_metric_head *head; + struct ceph_metric_cap *cap; + struct ceph_metric_read_latency *read; + struct ceph_metric_write_latency *write; + struct ceph_metric_metadata_latency *meta; + struct ceph_client_metric *m = &mdsc->metric; + u64 nr_caps = atomic64_read(&m->total_caps); + struct ceph_msg *msg; + struct timespec64 ts; + s64 sum; + s32 items = 0; + s32 len; + + len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write) + + sizeof(*meta); + + msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true); + if (!msg) { + pr_err("send metrics to mds%d, failed to allocate message\n", + s->s_mds); + return false; + } + + head = msg->front.iov_base; + + /* encode the cap metric */ + cap = (struct ceph_metric_cap *)(head + 1); + cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO); + cap->ver = 1; + cap->compat = 1; + cap->data_len = cpu_to_le32(sizeof(*cap) - 10); + cap->hit = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_hit)); + cap->mis = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_mis)); + cap->total = cpu_to_le64(nr_caps); + items++; + + /* encode the read latency metric */ + read = (struct ceph_metric_read_latency *)(cap + 1); + read->type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY); + read->ver = 1; + read->compat = 1; + 
read->data_len = cpu_to_le32(sizeof(*read) - 10); + sum = m->read_latency_sum; + jiffies_to_timespec64(sum, &ts); + read->sec = cpu_to_le32(ts.tv_sec); + read->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + /* encode the write latency metric */ + write = (struct ceph_metric_write_latency *)(read + 1); + write->type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY); + write->ver = 1; + write->compat = 1; + write->data_len = cpu_to_le32(sizeof(*write) - 10); + sum = m->write_latency_sum; + jiffies_to_timespec64(sum, &ts); + write->sec = cpu_to_le32(ts.tv_sec); + write->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + /* encode the metadata latency metric */ + meta = (struct ceph_metric_metadata_latency *)(write + 1); + meta->type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY); + meta->ver = 1; + meta->compat = 1; + meta->data_len = cpu_to_le32(sizeof(*meta) - 10); + sum = m->metadata_latency_sum; + jiffies_to_timespec64(sum, &ts); + meta->sec = cpu_to_le32(ts.tv_sec); + meta->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + put_unaligned_le32(items, &head->num); + msg->front.iov_len = len; + msg->hdr.version = cpu_to_le16(1); + msg->hdr.compat_version = cpu_to_le16(1); + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + dout("client%llu send metrics to mds%d\n", + ceph_client_gid(mdsc->fsc->client), s->s_mds); + ceph_con_send(&s->s_con, msg); + + return true; +} + + +static void metric_get_session(struct ceph_mds_client *mdsc) +{ + struct ceph_mds_session *s; + int i; + + mutex_lock(&mdsc->mutex); + for (i = 0; i < mdsc->max_sessions; i++) { + s = __ceph_lookup_mds_session(mdsc, i); + if (!s) + continue; + + /* + * Skip it if MDS doesn't support the metric collection, + * or the MDS will close the session's socket connection + * directly when it get this message. 
+ */ + if (check_session_state(s) && + test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) { + mdsc->metric.session = s; + break; + } + + ceph_put_mds_session(s); + } + mutex_unlock(&mdsc->mutex); +} + +static void metric_delayed_work(struct work_struct *work) +{ + struct ceph_client_metric *m = + container_of(work, struct ceph_client_metric, delayed_work.work); + struct ceph_mds_client *mdsc = + container_of(m, struct ceph_mds_client, metric); + + if (mdsc->stopping) + return; + + if (!m->session || !check_session_state(m->session)) { + if (m->session) { + ceph_put_mds_session(m->session); + m->session = NULL; + } + metric_get_session(mdsc); + } + if (m->session) { + ceph_mdsc_send_metrics(mdsc, m->session); + metric_schedule_delayed(m); + } +} int ceph_metric_init(struct ceph_client_metric *m) { @@ -52,6 +192,9 @@ int ceph_metric_init(struct ceph_client_metric *m) m->total_metadatas = 0; m->metadata_latency_sum = 0; + m->session = NULL; + INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work); + return 0; err_i_caps_mis: @@ -73,6 +216,11 @@ void ceph_metric_destroy(struct ceph_client_metric *m) percpu_counter_destroy(&m->i_caps_hit); percpu_counter_destroy(&m->d_lease_mis); percpu_counter_destroy(&m->d_lease_hit); + + cancel_delayed_work_sync(&m->delayed_work); + + if (m->session) + ceph_put_mds_session(m->session); } static inline void __update_latency(ktime_t *totalp, ktime_t *lsump, diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h index 23a3373d5a3d..fe5d07d2e63a 100644 --- a/fs/ceph/metric.h +++ b/fs/ceph/metric.h @@ -6,6 +6,71 @@ #include #include +extern bool disable_send_metrics; + +enum ceph_metric_type { + CLIENT_METRIC_TYPE_CAP_INFO, + CLIENT_METRIC_TYPE_READ_LATENCY, + CLIENT_METRIC_TYPE_WRITE_LATENCY, + CLIENT_METRIC_TYPE_METADATA_LATENCY, + CLIENT_METRIC_TYPE_DENTRY_LEASE, + + CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_DENTRY_LEASE, +}; + +/* metric caps header */ +struct ceph_metric_cap { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(hit + mis + total) */ + __le64 hit; + __le64 mis; + __le64 total; +} __packed; + +/* metric read latency header */ +struct ceph_metric_read_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +/* metric write latency header */ +struct ceph_metric_write_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +/* metric metadata latency header */ +struct ceph_metric_metadata_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +struct ceph_metric_head { + __le32 num; /* the number of metrics that will be sent */ +} __packed; + /* This is the global metrics */ struct ceph_client_metric { atomic64_t total_dentries; @@ -36,8 +101,20 @@ struct ceph_client_metric { ktime_t metadata_latency_sq_sum; ktime_t metadata_latency_min; ktime_t metadata_latency_max; + + struct ceph_mds_session *session; + struct delayed_work delayed_work; /* delayed work */ }; +static inline void metric_schedule_delayed(struct ceph_client_metric *m) +{ + if (disable_send_metrics) + return; + + /* per second */ + schedule_delayed_work(&m->delayed_work, round_jiffies_relative(HZ)); +} + extern int ceph_metric_init(struct ceph_client_metric *m); 
extern void ceph_metric_destroy(struct ceph_client_metric *m); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index c9784eb1159a..933f5df5da7d 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -27,6 +27,9 @@ #include #include +static DEFINE_SPINLOCK(ceph_fsc_lock); +static LIST_HEAD(ceph_fsc_list); + /* * Ceph superblock operations * @@ -691,6 +694,10 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, if (!fsc->wb_pagevec_pool) goto fail_cap_wq; + spin_lock(&ceph_fsc_lock); + list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list); + spin_unlock(&ceph_fsc_lock); + return fsc; fail_cap_wq: @@ -717,6 +724,10 @@ static void destroy_fs_client(struct ceph_fs_client *fsc) { dout("destroy_fs_client %p\n", fsc); + spin_lock(&ceph_fsc_lock); + list_del(&fsc->metric_wakeup); + spin_unlock(&ceph_fsc_lock); + ceph_mdsc_destroy(fsc); destroy_workqueue(fsc->inode_wq); destroy_workqueue(fsc->cap_wq); @@ -1282,6 +1293,37 @@ static void __exit exit_ceph(void) destroy_caches(); } +static int param_set_metrics(const char *val, const struct kernel_param *kp) +{ + struct ceph_fs_client *fsc; + int ret; + + ret = param_set_bool(val, kp); + if (ret) { + pr_err("Failed to parse sending metrics switch value '%s'\n", + val); + return ret; + } else if (!disable_send_metrics) { + // wake up all the mds clients + spin_lock(&ceph_fsc_lock); + list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) { + metric_schedule_delayed(&fsc->mdsc->metric); + } + spin_unlock(&ceph_fsc_lock); + } + + return 0; +} + +static const struct kernel_param_ops param_ops_metrics = { + .set = param_set_metrics, + .get = param_get_bool, +}; + +bool disable_send_metrics = false; +module_param_cb(disable_send_metrics, ¶m_ops_metrics, &disable_send_metrics, 0644); +MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)"); + module_init(init_ceph); module_exit(exit_ceph); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 5a6cdd39bc10..2dcb6a90c636 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -101,6 +101,8 @@ struct ceph_mount_options { struct ceph_fs_client { struct super_block *sb; + struct list_head metric_wakeup; + struct ceph_mount_options *mount_options; struct ceph_client *client; diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index ebf5ba62b772..455e9b9e2adf 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -130,6 +130,7 @@ struct ceph_dir_layout { #define CEPH_MSG_CLIENT_REQUEST 24 #define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 #define CEPH_MSG_CLIENT_REPLY 26 +#define CEPH_MSG_CLIENT_METRICS 29 #define CEPH_MSG_CLIENT_CAPS 0x310 #define CEPH_MSG_CLIENT_LEASE 0x311 #define CEPH_MSG_CLIENT_SNAP 0x312 -- cgit v1.2.3 From f1f565a26976612121f97464f9245307422d0ce8 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:36:04 -0700 Subject: ceph: delete repeated words in fs/ceph/ Drop duplicated words "down" and "the" in fs/ceph/. [ idryomov: merge into a single patch ] Signed-off-by: Randy Dunlap Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov --- fs/ceph/super.c | 2 +- fs/ceph/super.h | 2 +- include/linux/ceph/ceph_features.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 933f5df5da7d..585aecea5cad 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -839,7 +839,7 @@ static void destroy_caches(void) } /* - * ceph_umount_begin - initiate forced umount. 
Tear down down the + * ceph_umount_begin - initiate forced umount. Tear down the * mount, skipping steps that may hang while waiting for server(s). */ static void ceph_umount_begin(struct super_block *sb) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 2dcb6a90c636..9001a896ae8c 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -355,7 +355,7 @@ struct ceph_inode_info { unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */ /* - * Link to the the auth cap's session's s_cap_dirty list. s_cap_dirty + * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty * is protected by the mdsc->cap_dirty_lock, but each individual item * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty * requires the mdsc->cap_dirty_lock. List presence for an item can diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 39e6f4c57580..fcd84e8d88f4 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -58,7 +58,7 @@ * because 10.2.z (jewel) did not care if its peers advertised this * feature bit. * - * - In the second phase we stop advertising the the bit and call it + * - In the second phase we stop advertising the bit and call it * RETIRED. This can normally be done in the *next* major release * following the one in which we marked the feature DEPRECATED. In * the above example, for 12.0.z (luminous) we can say: -- cgit v1.2.3 From c9dff08485a6c11449307abde1d7bb404fcee481 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Thu, 9 Jul 2020 14:49:30 +0200 Subject: fs: Fix typo in comment The comment for function filemap_check_wb_err accidentally refers to it as filemap_check_wb_error. Signed-off-by: Andreas Gruenbacher --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index f5abba86107d..f9ae45795c1a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2798,7 +2798,7 @@ static inline void filemap_set_wb_err(struct address_space *mapping, int err) } /** - * filemap_check_wb_error - has an error occurred since the mark was sampled? + * filemap_check_wb_err - has an error occurred since the mark was sampled? * @mapping: mapping to check for writeback errors * @since: previously-sampled errseq_t * -- cgit v1.2.3 From 321bd212619a7269308696e4ddc446930ea73fad Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 24 Jun 2020 18:24:33 -0400 Subject: virtio: VIRTIO_F_IOMMU_PLATFORM -> VIRTIO_F_ACCESS_PLATFORM Rename the bit to match latest virtio spec. Add a compat macro to avoid breaking existing userspace. Signed-off-by: Michael S. 
Tsirkin Reviewed-by: David Hildenbrand --- arch/um/drivers/virtio_uml.c | 2 +- drivers/vdpa/ifcvf/ifcvf_base.h | 2 +- drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 ++-- drivers/vhost/net.c | 4 ++-- drivers/vhost/vdpa.c | 2 +- drivers/virtio/virtio_balloon.c | 2 +- drivers/virtio/virtio_ring.c | 2 +- include/linux/virtio_config.h | 2 +- include/uapi/linux/virtio_config.h | 10 +++++++--- tools/virtio/linux/virtio_config.h | 2 +- 10 files changed, 18 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index 351aee52aca6..a6c4bb6c2c01 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -385,7 +385,7 @@ static irqreturn_t vu_req_interrupt(int irq, void *data) } break; case VHOST_USER_SLAVE_IOTLB_MSG: - /* not supported - VIRTIO_F_IOMMU_PLATFORM */ + /* not supported - VIRTIO_F_ACCESS_PLATFORM */ case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */ default: diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index f4554412e607..24af422b5a3e 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -29,7 +29,7 @@ (1ULL << VIRTIO_F_VERSION_1) | \ (1ULL << VIRTIO_NET_F_STATUS) | \ (1ULL << VIRTIO_F_ORDER_PLATFORM) | \ - (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \ + (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \ (1ULL << VIRTIO_NET_F_MRG_RXBUF)) /* Only one queue pair for now. */ diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index c7334cc65bb2..a9bc5e0fb353 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -55,7 +55,7 @@ struct vdpasim_virtqueue { static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM); + (1ULL << VIRTIO_F_ACCESS_PLATFORM); /* State of each vdpasim device */ struct vdpasim { @@ -450,7 +450,7 @@ static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features) struct vdpasim *vdpasim = vdpa_to_sim(vdpa); /* DMA mapping must be done by driver */ - if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) + if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) return -EINVAL; vdpasim->features = features & vdpasim_features; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index e992decfec53..8e0921d3805d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -73,7 +73,7 @@ enum { VHOST_NET_FEATURES = VHOST_FEATURES | (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM) + (1ULL << VIRTIO_F_ACCESS_PLATFORM) }; enum { @@ -1653,7 +1653,7 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features) !vhost_log_access_ok(&n->dev)) goto out_unlock; - if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) { + if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) { if (vhost_init_device_iotlb(&n->dev, true)) goto out_unlock; } diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index a54b60d6623f..18869a35d408 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -31,7 +31,7 @@ enum { (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | (1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM) | + (1ULL << VIRTIO_F_ACCESS_PLATFORM) | (1ULL << VIRTIO_F_RING_PACKED) | (1ULL << VIRTIO_F_ORDER_PLATFORM) | (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 
8be02f333b7a..54fd989f9353 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -1129,7 +1129,7 @@ static int virtballoon_validate(struct virtio_device *vdev) else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING); - __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); + __virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM); return 0; } diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 58b96baa8d48..a1a5c2a91426 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2225,7 +2225,7 @@ void vring_transport_features(struct virtio_device *vdev) break; case VIRTIO_F_VERSION_1: break; - case VIRTIO_F_IOMMU_PLATFORM: + case VIRTIO_F_ACCESS_PLATFORM: break; case VIRTIO_F_RING_PACKED: break; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index bb4cc4910750..f2cc2a0df174 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -171,7 +171,7 @@ static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) * Note the reverse polarity of the quirk feature (compared to most * other features), this is for compatibility with legacy systems. */ - return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM); } static inline diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index ff8e7dc9d4dd..b5eda06f0d57 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -67,13 +67,17 @@ #define VIRTIO_F_VERSION_1 32 /* - * If clear - device has the IOMMU bypass quirk feature. - * If set - use platform tools to detect the IOMMU. + * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature. + * If set - use platform DMA tools to access the memory. * * Note the reverse polarity (compared to most other features), * this is for compatibility with legacy systems. */ -#define VIRTIO_F_IOMMU_PLATFORM 33 +#define VIRTIO_F_ACCESS_PLATFORM 33 +#ifndef __KERNEL__ +/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */ +#define VIRTIO_F_IOMMU_PLATFORM VIRTIO_F_ACCESS_PLATFORM +#endif /* __KERNEL__ */ /* This feature indicates support for the packed virtqueue layout. */ #define VIRTIO_F_RING_PACKED 34 diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h index dbf14c1e2188..f99ae42668e0 100644 --- a/tools/virtio/linux/virtio_config.h +++ b/tools/virtio/linux/virtio_config.h @@ -51,7 +51,7 @@ static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) * Note the reverse polarity of the quirk feature (compared to most * other features), this is for compatibility with legacy systems. */ - return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM); } static inline bool virtio_is_little_endian(struct virtio_device *vdev) -- cgit v1.2.3 From 24b6842ade6925199e182988259761504aacfbc0 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 24 Jun 2020 19:17:04 -0400 Subject: virtio: virtio_has_iommu_quirk -> virtio_has_dma_quirk Now that the corresponding feature bit has been renamed, rename the quirk too - it's about special ways to do DMA, not necessarily about the IOMMU. Signed-off-by: Michael S. 
Tsirkin --- drivers/gpu/drm/virtio/virtgpu_object.c | 2 +- drivers/gpu/drm/virtio/virtgpu_vq.c | 4 ++-- drivers/virtio/virtio_ring.c | 2 +- include/linux/virtio_config.h | 4 ++-- tools/virtio/linux/virtio_config.h | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 6ccbd01cd888..e8799ab0c753 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -141,7 +141,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, struct virtio_gpu_mem_entry **ents, unsigned int *nents) { - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); struct scatterlist *sg; int si, ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 9e663a5d9952..53af60d484a4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -599,7 +599,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_to_host_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) @@ -1015,7 +1015,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index a1a5c2a91426..34253cb69cb8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -240,7 +240,7 @@ static inline bool virtqueue_use_indirect(struct virtqueue *_vq, static bool vring_use_dma_api(struct virtio_device *vdev) { - if (!virtio_has_iommu_quirk(vdev)) + if (!virtio_has_dma_quirk(vdev)) return true; /* Otherwise, we are left to guess. 
*/ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index f2cc2a0df174..3b4eae5ac5e3 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -162,10 +162,10 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, } /** - * virtio_has_iommu_quirk - determine whether this device has the iommu quirk + * virtio_has_dma_quirk - determine whether this device has the DMA quirk * @vdev: the device */ -static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) +static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev) { /* * Note the reverse polarity of the quirk feature (compared to most diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h index f99ae42668e0..f2640e505c4e 100644 --- a/tools/virtio/linux/virtio_config.h +++ b/tools/virtio/linux/virtio_config.h @@ -42,10 +42,10 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev, (__virtio_test_bit((dev), feature)) /** - * virtio_has_iommu_quirk - determine whether this device has the iommu quirk + * virtio_has_dma_quirk - determine whether this device has the DMA quirk * @vdev: the device */ -static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) +static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev) { /* * Note the reverse polarity of the quirk feature (compared to most -- cgit v1.2.3 From 6cfde88418fe95240e32d2955b51988360ee0942 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 1 Aug 2020 10:53:42 +0200 Subject: clk: drop unused function __clk_get_flags The function __clk_get_flags has not been used since the April 2019 commit a348f05361c9 ("ARM: omap2+: hwmod: drop CLK_IS_BASIC flag usage"). Other uses were removed in June 2015, eg by commit 98d8a60eccee ("clk: Convert __clk_get_flags() to clk_hw_get_flags()"), which shows how clk_hw_get_flags can easily be used instead. Signed-off-by: Julia Lawall Link: https://lore.kernel.org/r/1596272022-14173-1-git-send-email-Julia.Lawall@inria.fr Signed-off-by: Stephen Boyd --- drivers/clk/clk.c | 6 ------ include/linux/clk-provider.h | 1 - 2 files changed, 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 3f588ed06ce3..d71456aad11d 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -500,12 +500,6 @@ static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core) return core->accuracy; } -unsigned long __clk_get_flags(struct clk *clk) -{ - return !clk ? 
0 : clk->core->flags; -} -EXPORT_SYMBOL_GPL(__clk_get_flags); - unsigned long clk_hw_get_flags(const struct clk_hw *hw) { return hw->core->flags; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index bd1ee9039558..93a78a5256d1 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -1096,7 +1096,6 @@ int clk_hw_get_parent_index(struct clk_hw *hw); int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent); unsigned int __clk_get_enable_count(struct clk *clk); unsigned long clk_hw_get_rate(const struct clk_hw *hw); -unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); #define clk_hw_can_set_rate_parent(hw) \ (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) -- cgit v1.2.3 From d88ca7e1a27eb2df056bbf37ddef62e1c73d37ea Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Thu, 30 Jul 2020 19:47:14 +0900 Subject: fbmem: pull fbcon_update_vcs() out of fb_set_var() syzbot is reporting OOB read bug in vc_do_resize() [1] caused by memcpy() based on outdated old_{rows,row_size} values, for resize_screen() can recurse into vc_do_resize() which changes vc->vc_{cols,rows} that outdates old_{rows,row_size} values which were saved before calling resize_screen(). Daniel Vetter explained that resize_screen() should not recurse into fbcon_update_vcs() path due to FBINFO_MISC_USEREVENT being still set when calling resize_screen(). Instead of masking FBINFO_MISC_USEREVENT before calling fbcon_update_vcs(), we can remove FBINFO_MISC_USEREVENT by calling fbcon_update_vcs() only if fb_set_var() returned 0. This change assumes that it is harmless to call fbcon_update_vcs() when fb_set_var() returned 0 without reaching fb_notifier_call_chain(). [1] https://syzkaller.appspot.com/bug?id=c70c88cfd16dcf6e1d3c7f0ab8648b3144b5b25e Reported-and-tested-by: syzbot Suggested-by: Daniel Vetter Signed-off-by: Tetsuo Handa Reported-by: kernel test robot for missing #include Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/075b7e37-3278-cd7d-31ab-c5073cfa8e92@i-love.sakura.ne.jp --- drivers/video/fbdev/core/fbmem.c | 8 ++------ drivers/video/fbdev/core/fbsysfs.c | 4 ++-- drivers/video/fbdev/ps3fb.c | 5 +++-- include/linux/fb.h | 2 -- 4 files changed, 7 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 30e73ec4ad5c..da7c88ffaa6a 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -957,7 +957,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var, int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) { - int flags = info->flags; int ret = 0; u32 activate; struct fb_var_screeninfo old_var; @@ -1052,9 +1051,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) event.data = &mode; fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event); - if (flags & FBINFO_MISC_USEREVENT) - fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL); - return 0; } EXPORT_SYMBOL(fb_set_var); @@ -1105,9 +1101,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, return -EFAULT; console_lock(); lock_fb_info(info); - info->flags |= FBINFO_MISC_USEREVENT; ret = fb_set_var(info, &var); - info->flags &= ~FBINFO_MISC_USEREVENT; + if (!ret) + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); unlock_fb_info(info); console_unlock(); if (!ret && copy_to_user(argp, &var, sizeof(var))) diff --git a/drivers/video/fbdev/core/fbsysfs.c 
b/drivers/video/fbdev/core/fbsysfs.c index d54c88f88991..65dae05fff8e 100644 --- a/drivers/video/fbdev/core/fbsysfs.c +++ b/drivers/video/fbdev/core/fbsysfs.c @@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var) var->activate |= FB_ACTIVATE_FORCE; console_lock(); - fb_info->flags |= FBINFO_MISC_USEREVENT; err = fb_set_var(fb_info, var); - fb_info->flags &= ~FBINFO_MISC_USEREVENT; + if (!err) + fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL); console_unlock(); if (err) return err; diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index 9df78fb77267..203c254f8f6c 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd, var = info->var; fb_videomode_to_var(&var, vmode); console_lock(); - info->flags |= FBINFO_MISC_USEREVENT; /* Force, in case only special bits changed */ var.activate |= FB_ACTIVATE_FORCE; par->new_mode_id = val; retval = fb_set_var(info, &var); - info->flags &= ~FBINFO_MISC_USEREVENT; + if (!retval) + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); console_unlock(); } break; diff --git a/include/linux/fb.h b/include/linux/fb.h index 2b530e6d86e4..850f79e9a7cb 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -400,8 +400,6 @@ struct fb_tile_ops { #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ -#define FBINFO_MISC_USEREVENT 0x10000 /* event request - from userspace */ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ /* A driver may set this flag to indicate that it does want a set_par to be -- cgit v1.2.3 From a0102bda5bc0991c5c8c7c07770b236894a810fd Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 30 Jul 2020 11:03:55 -0400 Subject: ceph: move sb->wb_pagevec_pool to be a global mempool When doing some testing recently, I hit some page allocation failures on mount, when creating the wb_pagevec_pool for the mount. That requires 128k (32 contiguous pages), and after thrashing the memory during an xfstests run, sometimes that would fail. 128k for each mount seems like a lot to hold in reserve for a rainy day, so let's change this to a global mempool that gets allocated when the module is plugged in. 
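The replacement is the standard module-global mempool pattern; a minimal self-contained sketch with hypothetical names (the patch sizes the real pool for the largest possible write rather than the per-mount wsize):

    #include <linux/module.h>
    #include <linux/mm.h>
    #include <linux/mempool.h>

    static mempool_t *wb_pool;      /* hypothetical module-scope pool */

    static int __init wb_pool_init(void)
    {
            /* ten-element reserve, created once for the whole module */
            wb_pool = mempool_create_kmalloc_pool(10, PAGE_SIZE);
            return wb_pool ? 0 : -ENOMEM;
    }

    static void __exit wb_pool_exit(void)
    {
            mempool_destroy(wb_pool);
    }

    module_init(wb_pool_init);
    module_exit(wb_pool_exit);
    MODULE_LICENSE("GPL");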
Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov --- fs/ceph/addr.c | 23 +++++++++++------------ fs/ceph/super.c | 22 ++++++++-------------- fs/ceph/super.h | 2 -- include/linux/ceph/libceph.h | 1 + 4 files changed, 20 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 01ad09733ac7..6ea761c84494 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -862,8 +862,7 @@ static void writepages_finish(struct ceph_osd_request *req) osd_data = osd_req_op_extent_osd_data(req, 0); if (osd_data->pages_from_pool) - mempool_free(osd_data->pages, - ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); + mempool_free(osd_data->pages, ceph_wb_pagevec_pool); else kfree(osd_data->pages); ceph_osdc_put_request(req); @@ -955,10 +954,10 @@ retry: int num_ops = 0, op_idx; unsigned i, pvec_pages, max_pages, locked_pages = 0; struct page **pages = NULL, **data_pages; - mempool_t *pool = NULL; /* Becomes non-null if mempool used */ struct page *page; pgoff_t strip_unit_end = 0; u64 offset = 0, len = 0; + bool from_pool = false; max_pages = wsize >> PAGE_SHIFT; @@ -1057,16 +1056,16 @@ get_more_pages: sizeof(*pages), GFP_NOFS); if (!pages) { - pool = fsc->wb_pagevec_pool; - pages = mempool_alloc(pool, GFP_NOFS); + from_pool = true; + pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); BUG_ON(!pages); } len = 0; } else if (page->index != (offset + len) >> PAGE_SHIFT) { - if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS : - CEPH_OSD_MAX_OPS)) { + if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS : + CEPH_OSD_MAX_OPS)) { redirty_page_for_writepage(wbc, page); unlock_page(page); break; @@ -1161,7 +1160,7 @@ new_request: offset, len); osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 0, - !!pool, false); + from_pool, false); osd_req_op_extent_update(req, op_idx, len); len = 0; @@ -1188,12 +1187,12 @@ new_request: dout("writepages got pages at %llu~%llu\n", offset, len); osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, - 0, !!pool, false); + 0, from_pool, false); osd_req_op_extent_update(req, op_idx, len); BUG_ON(op_idx + 1 != req->r_num_ops); - pool = NULL; + from_pool = false; if (i < locked_pages) { BUG_ON(num_ops <= req->r_num_ops); num_ops -= req->r_num_ops; @@ -1204,8 +1203,8 @@ new_request: pages = kmalloc_array(locked_pages, sizeof(*pages), GFP_NOFS); if (!pages) { - pool = fsc->wb_pagevec_pool; - pages = mempool_alloc(pool, GFP_NOFS); + from_pool = true; + pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); BUG_ON(!pages); } memcpy(pages, data_pages + i, diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 585aecea5cad..7ec0e6d03d10 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -637,8 +637,6 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, struct ceph_options *opt) { struct ceph_fs_client *fsc; - int page_count; - size_t size; int err; fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); @@ -686,22 +684,12 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, if (!fsc->cap_wq) goto fail_inode_wq; - /* set up mempools */ - err = -ENOMEM; - page_count = fsc->mount_options->wsize >> PAGE_SHIFT; - size = sizeof (struct page *) * (page_count ? 
page_count : 1); - fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); - if (!fsc->wb_pagevec_pool) - goto fail_cap_wq; - spin_lock(&ceph_fsc_lock); list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list); spin_unlock(&ceph_fsc_lock); return fsc; -fail_cap_wq: - destroy_workqueue(fsc->cap_wq); fail_inode_wq: destroy_workqueue(fsc->inode_wq); fail_client: @@ -732,8 +720,6 @@ static void destroy_fs_client(struct ceph_fs_client *fsc) destroy_workqueue(fsc->inode_wq); destroy_workqueue(fsc->cap_wq); - mempool_destroy(fsc->wb_pagevec_pool); - destroy_mount_options(fsc->mount_options); ceph_destroy_client(fsc->client); @@ -752,6 +738,7 @@ struct kmem_cache *ceph_dentry_cachep; struct kmem_cache *ceph_file_cachep; struct kmem_cache *ceph_dir_file_cachep; struct kmem_cache *ceph_mds_request_cachep; +mempool_t *ceph_wb_pagevec_pool; static void ceph_inode_init_once(void *foo) { @@ -796,6 +783,10 @@ static int __init init_caches(void) if (!ceph_mds_request_cachep) goto bad_mds_req; + ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT); + if (!ceph_wb_pagevec_pool) + goto bad_pagevec_pool; + error = ceph_fscache_register(); if (error) goto bad_fscache; @@ -804,6 +795,8 @@ static int __init init_caches(void) bad_fscache: kmem_cache_destroy(ceph_mds_request_cachep); +bad_pagevec_pool: + mempool_destroy(ceph_wb_pagevec_pool); bad_mds_req: kmem_cache_destroy(ceph_dir_file_cachep); bad_dir_file: @@ -834,6 +827,7 @@ static void destroy_caches(void) kmem_cache_destroy(ceph_file_cachep); kmem_cache_destroy(ceph_dir_file_cachep); kmem_cache_destroy(ceph_mds_request_cachep); + mempool_destroy(ceph_wb_pagevec_pool); ceph_fscache_unregister(); } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 9001a896ae8c..4c3c964b1c54 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -118,8 +118,6 @@ struct ceph_fs_client { struct ceph_mds_client *mdsc; - /* writeback */ - mempool_t *wb_pagevec_pool; atomic_long_t writeback_count; struct workqueue_struct *inode_wq; diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index e5ed1c541e7f..c8645f0b797d 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -282,6 +282,7 @@ extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; extern struct kmem_cache *ceph_dir_file_cachep; extern struct kmem_cache *ceph_mds_request_cachep; +extern mempool_t *ceph_wb_pagevec_pool; /* ceph_common.c */ extern bool libceph_compatible(void *data); -- cgit v1.2.3 From 4476770881d7ac647e3bcae0943f37e00b9c3f3c Mon Sep 17 00:00:00 2001 From: Siddharth Gupta Date: Wed, 29 Jul 2020 10:40:00 -0700 Subject: remoteproc: Add remoteproc character device interface Add the character device interface into remoteproc framework. This interface can be used in order to boot/shutdown remote subsystems and provides a basic ioctl based interface to implement supplementary functionality. An ioctl call is implemented to enable the shutdown on release feature which will allow remote processors to be shutdown when the controlling userspace application crashes or hangs. 
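For illustration, userspace could drive the new interface roughly as follows; this is a hedged sketch, and the /dev/remoteproc0 path is hypothetical since node naming depends on the system:

/* Sketch: boot a remote processor and arm shutdown-on-release. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/remoteproc_cdev.h>

int main(void)
{
	__s32 one = 1;
	int fd = open("/dev/remoteproc0", O_RDWR);	/* path is illustrative */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the kernel to shut the rproc down when this fd is released. */
	if (ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &one) < 0)
		perror("ioctl");

	/* Boot the remote processor. */
	if (write(fd, "start", 5) != 5)
		perror("write");

	/* ... talk to the remote processor ... */

	/* Exiting (or crashing) releases fd, which shuts the rproc down. */
	return 0;
}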
Reviewed-by: Bjorn Andersson Reviewed-by: Mathieu Poirier Signed-off-by: Rishabh Bhatnagar Signed-off-by: Siddharth Gupta Link: https://lore.kernel.org/r/1596044401-22083-2-git-send-email-sidgup@codeaurora.org [bjorn: s/int32_t/s32/ per checkpatch] Signed-off-by: Bjorn Andersson --- Documentation/userspace-api/ioctl/ioctl-number.rst | 1 + drivers/remoteproc/Kconfig | 9 ++ drivers/remoteproc/Makefile | 1 + drivers/remoteproc/remoteproc_cdev.c | 124 +++++++++++++++++++++ drivers/remoteproc/remoteproc_internal.h | 28 +++++ include/linux/remoteproc.h | 5 + include/uapi/linux/remoteproc_cdev.h | 37 ++++++ 7 files changed, 205 insertions(+) create mode 100644 drivers/remoteproc/remoteproc_cdev.c create mode 100644 include/uapi/linux/remoteproc_cdev.h (limited to 'include/linux') diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 59472cd6a11d..2a198838fca9 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -339,6 +339,7 @@ Code Seq# Include File Comments 0xB4 00-0F linux/gpio.h 0xB5 00-0F uapi/linux/rpmsg.h 0xB6 all linux/fpga-dfl.h +0xB7 all uapi/linux/remoteproc_cdev.h 0xC0 00-0F linux/usb/iowarrior.h 0xCA 00-0F uapi/misc/cxl.h 0xCA 10-2F uapi/misc/ocxl.h diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 48315dc4a30c..c6659dfea7c7 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -14,6 +14,15 @@ config REMOTEPROC if REMOTEPROC +config REMOTEPROC_CDEV + bool "Remoteproc character device interface" + help + Say y here to have a character device interface for the remoteproc + framework. Userspace can boot/shutdown remote processors through + this interface. + + It's safe to say N if you don't want to use this interface. + config IMX_REMOTEPROC tristate "IMX6/7 remoteproc support" depends on ARCH_MXC diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 4d4307dc8fa9..3dfa28e6c701 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -10,6 +10,7 @@ remoteproc-y += remoteproc_debugfs.o remoteproc-y += remoteproc_sysfs.o remoteproc-y += remoteproc_virtio.o remoteproc-y += remoteproc_elf_loader.o +obj-$(CONFIG_REMOTEPROC_CDEV) += remoteproc_cdev.o obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c new file mode 100644 index 000000000000..b19ea3057bde --- /dev/null +++ b/drivers/remoteproc/remoteproc_cdev.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Character device interface driver for Remoteproc framework. + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "remoteproc_internal.h" + +#define NUM_RPROC_DEVICES 64 +static dev_t rproc_major; + +static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) +{ + struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev); + int ret = 0; + char cmd[10]; + + if (!len || len > sizeof(cmd)) + return -EINVAL; + + ret = copy_from_user(cmd, buf, len); + if (ret) + return -EFAULT; + + if (!strncmp(cmd, "start", len)) { + if (rproc->state == RPROC_RUNNING) + return -EBUSY; + + ret = rproc_boot(rproc); + } else if (!strncmp(cmd, "stop", len)) { + if (rproc->state != RPROC_RUNNING) + return -EINVAL; + + rproc_shutdown(rproc); + } else { + dev_err(&rproc->dev, "Unrecognized option\n"); + ret = -EINVAL; + } + + return ret ? ret : len; +} + +static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev); + void __user *argp = (void __user *)arg; + s32 param; + + switch (ioctl) { + case RPROC_SET_SHUTDOWN_ON_RELEASE: + if (copy_from_user(¶m, argp, sizeof(s32))) + return -EFAULT; + + rproc->cdev_put_on_release = !!param; + break; + case RPROC_GET_SHUTDOWN_ON_RELEASE: + param = (s32)rproc->cdev_put_on_release; + if (copy_to_user(argp, ¶m, sizeof(s32))) + return -EFAULT; + + break; + default: + dev_err(&rproc->dev, "Unsupported ioctl\n"); + return -EINVAL; + } + + return 0; +} + +static int rproc_cdev_release(struct inode *inode, struct file *filp) +{ + struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev); + + if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING) + rproc_shutdown(rproc); + + return 0; +} + +static const struct file_operations rproc_fops = { + .write = rproc_cdev_write, + .unlocked_ioctl = rproc_device_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = rproc_cdev_release, +}; + +int rproc_char_device_add(struct rproc *rproc) +{ + int ret; + + cdev_init(&rproc->cdev, &rproc_fops); + rproc->cdev.owner = THIS_MODULE; + + rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index); + cdev_set_parent(&rproc->cdev, &rproc->dev.kobj); + ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1); + if (ret < 0) + dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name); + + return ret; +} + +void rproc_char_device_remove(struct rproc *rproc) +{ + __unregister_chrdev(MAJOR(rproc->dev.devt), rproc->index, 1, "remoteproc"); +} + +void __init rproc_init_cdev(void) +{ + int ret; + + ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc"); + if (ret < 0) + pr_err("Failed to alloc rproc_cdev region, err %d\n", ret); +} diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index cd4176b033a3..c34002888d2c 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -53,6 +53,34 @@ void rproc_exit_sysfs(void); void rproc_coredump_cleanup(struct rproc *rproc); void rproc_coredump(struct rproc *rproc); +#ifdef CONFIG_REMOTEPROC_CDEV +void rproc_init_cdev(void); +void rproc_exit_cdev(void); +int rproc_char_device_add(struct rproc *rproc); +void rproc_char_device_remove(struct rproc *rproc); +#else +static inline void rproc_init_cdev(void) +{ +} + +static inline void rproc_exit_cdev(void) +{ +} + +/* + * The character device interface is an optional feature, if it is not enabled + * the function should not return an error. 
+ */
+static inline int rproc_char_device_add(struct rproc *rproc)
+{
+ return 0;
+}
+
+static inline void rproc_char_device_remove(struct rproc *rproc)
+{
+}
+#endif
+
 void rproc_free_vring(struct rproc_vring *rvring);
 int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 0e8d2ff575b4..2fa68bf5aa4f 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include <linux/cdev.h>
 #include
 #include
 #include
@@ -509,6 +510,8 @@ struct rproc_dump_segment {
 * @autonomous: true if an external entity has booted the remote processor
 * @dump_segments: list of segments in the firmware
 * @nb_vdev: number of vdev currently handled by rproc
+ * @char_dev: character device of the rproc
+ * @cdev_put_on_release: flag to indicate if remoteproc should be shutdown on @char_dev release
 */
 struct rproc {
 struct list_head node;
@@ -546,6 +549,8 @@ struct rproc {
 int nb_vdev;
 u8 elf_class;
 u16 elf_machine;
+ struct cdev cdev;
+ bool cdev_put_on_release;
 };

 /**
diff --git a/include/uapi/linux/remoteproc_cdev.h b/include/uapi/linux/remoteproc_cdev.h
new file mode 100644
index 000000000000..c43768e4b0dc
--- /dev/null
+++ b/include/uapi/linux/remoteproc_cdev.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * IOCTLs for Remoteproc's character device interface.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_REMOTEPROC_CDEV_H_
+#define _UAPI_REMOTEPROC_CDEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define RPROC_MAGIC 0xB7
+
+/*
+ * The RPROC_SET_SHUTDOWN_ON_RELEASE ioctl allows enabling/disabling the shutdown of a remote
+ * processor automatically when the controlling userspace closes the char device interface.
+ *
+ * input parameter: integer
+ * 0 : disable automatic shutdown
+ * other : enable automatic shutdown
+ */
+#define RPROC_SET_SHUTDOWN_ON_RELEASE _IOW(RPROC_MAGIC, 1, __s32)
+
+/*
+ * The RPROC_GET_SHUTDOWN_ON_RELEASE ioctl gets information about whether the automatic shutdown of
+ * a remote processor is enabled or disabled when the controlling userspace closes the char device
+ * interface.
+ *
+ * output parameter: integer
+ * 0 : automatic shutdown disabled
+ * other : automatic shutdown enabled
+ */
+#define RPROC_GET_SHUTDOWN_ON_RELEASE _IOR(RPROC_MAGIC, 2, __s32)
+
+#endif
-- cgit v1.2.3


From 7de62bc09fe6d100ebd6c931c3f9a6fa7e6ed10f Mon Sep 17 00:00:00 2001
From: Olga Kornievskaia
Date: Wed, 15 Jul 2020 13:17:52 -0400
Subject: SUNRPC dont update timeout value on connection reset

Current behaviour: every time a v3 operation is re-sent to the server we
update (double) the timeout. There is no distinction between whether or not
the previous timer had expired before the resend happened.

Here's the scenario:
1. Client sends a v3 operation
2. Server RSTs the connection (prior to the timeout, e.g. the connection
   is immediately reset)
3. Client re-sends the v3 operation, but the timeout is now 120 sec.

As a result, an application sees a 2 min pause before a retry in case the
server again does not reply. Instead, this patch proposes to keep track of
when the minor timeout should happen and, if it has not yet expired, to
leave the timeout value unchanged. The value is updated based on the
previous value to make timeouts predictable.
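To make the behavioural difference concrete, here is a standalone illustration in plain C; the 60-second initial timeout is a hypothetical stand-in for to_initval and is not taken from this patch:

/* Standalone illustration (not kernel code) of the retry-timeout change. */
#include <stdio.h>

int main(void)
{
	unsigned int timeout = 60;	/* hypothetical to_initval, seconds */
	unsigned int elapsed = 1;	/* server RSTs the connection at ~1s */

	/* Old behaviour: every resend doubles the timeout, so the next
	 * retry waits 120s even though no timer ever expired. */
	unsigned int old_next = timeout << 1;

	/* New behaviour: the value is only doubled once the minor timeout
	 * has actually expired before the resend. */
	unsigned int new_next = (elapsed >= timeout) ? timeout << 1 : timeout;

	printf("old: %us, new: %us\n", old_next, new_next);	/* 120s vs 60s */
	return 0;
}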
Signed-off-by: Olga Kornievskaia
Signed-off-by: Trond Myklebust
---
 include/linux/sunrpc/xprt.h | 1 +
 net/sunrpc/xprt.c | 9 +++++++++
 2 files changed, 10 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index e64bd8222f55..a603d48d2b2c 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -101,6 +101,7 @@ struct rpc_rqst {
 * used in the softirq.
 */
 unsigned long rq_majortimeo; /* major timeout alarm */
+ unsigned long rq_minortimeo; /* minor timeout alarm */
 unsigned long rq_timeout; /* Current timeout value */
 ktime_t rq_rtt; /* round-trip time */
 unsigned int rq_retries; /* # of retries */
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index d5cc5db9dbf3..6ba9d5842629 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -607,6 +607,11 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req)
 req->rq_majortimeo += xprt_calc_majortimeo(req);
 }

+static void xprt_reset_minortimeo(struct rpc_rqst *req)
+{
+ req->rq_minortimeo += req->rq_timeout;
+}
+
 static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
 {
 unsigned long time_init;
@@ -618,6 +623,7 @@ static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
 time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
 req->rq_timeout = task->tk_client->cl_timeout->to_initval;
 req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
+ req->rq_minortimeo = time_init + req->rq_timeout;
 }

 /**
@@ -631,6 +637,8 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
 int status = 0;

+ if (time_before(jiffies, req->rq_minortimeo))
+ return status;
 if (time_before(jiffies, req->rq_majortimeo)) {
 if (to->to_exponential)
 req->rq_timeout <<= 1;
@@ -649,6 +657,7 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
 spin_unlock(&xprt->transport_lock);
 status = -ETIMEDOUT;
 }
+ xprt_reset_minortimeo(req);
 if (req->rq_timeout == 0) {
 printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
-- cgit v1.2.3


From 262e6ae7081df304fc625cf368d5c2cbba2bb991 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 28 Jul 2020 23:33:33 +0200
Subject: modules: inherit TAINT_PROPRIETARY_MODULE

If a TAINT_PROPRIETARY_MODULE exports a symbol, inherit the taint flag
for all modules importing these symbols, and don't allow loading symbols
from TAINT_PROPRIETARY_MODULE modules if the module previously imported
gplonly symbols. Add an anti-circumvention device so people don't
accidentally get themselves into trouble this way.

Comment from Greg: "Ah, the proven-to-be-illegal "GPL Condom" defense :)"

[jeyu: pr_info -> pr_err and pr_warn as per discussion]

Link: http://lore.kernel.org/r/20200730162957.GA22469@lst.de
Acked-by: Daniel Vetter
Reviewed-by: Greg Kroah-Hartman
Signed-off-by: Christoph Hellwig
Signed-off-by: Jessica Yu
---
 include/linux/module.h | 1 +
 kernel/module.c | 26 ++++++++++++++++++++++++++
 2 files changed, 27 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/module.h b/include/linux/module.h
index 30b0f5fcdb3c..e30ed5fa33a7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -389,6 +389,7 @@ struct module {
 unsigned int num_gpl_syms;
 const struct kernel_symbol *gpl_syms;
 const s32 *gpl_crcs;
+ bool using_gplonly_symbols;
 #ifdef CONFIG_UNUSED_SYMBOLS
 /* unused exported symbols.
*/ diff --git a/kernel/module.c b/kernel/module.c index 656f5ff27088..09bf5a652a47 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1431,6 +1431,24 @@ static int verify_namespace_is_imported(const struct load_info *info, return 0; } +static bool inherit_taint(struct module *mod, struct module *owner) +{ + if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) + return true; + + if (mod->using_gplonly_symbols) { + pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n", + mod->name, owner->name); + return false; + } + + if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { + pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n", + mod->name, owner->name); + set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); + } + return true; +} /* Resolve a symbol for this module. I.e. if we find one, record usage. */ static const struct kernel_symbol *resolve_symbol(struct module *mod, @@ -1456,6 +1474,14 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, if (!sym) goto unlock; + if (license == GPL_ONLY) + mod->using_gplonly_symbols = true; + + if (!inherit_taint(mod, owner)) { + sym = NULL; + goto getname; + } + if (!check_version(info, name, mod, crc)) { sym = ERR_PTR(-EINVAL); goto getname; -- cgit v1.2.3 From 75820314de26b00aaea0d0e79269c0d17914c5c4 Mon Sep 17 00:00:00 2001 From: Codrin Ciubotariu Date: Tue, 4 Aug 2020 12:59:24 +0300 Subject: i2c: core: add generic I2C GPIO recovery Multiple I2C bus drivers use similar bindings to obtain information needed for I2C recovery. For example, for platforms using device-tree, the properties look something like this: &i2c { ... pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c_default>; pinctrl-1 = <&pinctrl_i2c_gpio>; sda-gpios = <&pio 0 GPIO_ACTIVE_HIGH>; scl-gpios = <&pio 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; ... } For this reason, we can add this common initialization in the core. This way, other I2C bus drivers will be able to support GPIO recovery just by providing a pointer to platform's pinctrl and calling i2c_recover_bus() when SDA is stuck low. 
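A minimal sketch of what opting in might look like from a bus driver's probe path, assuming hypothetical example_* names; the core discovers the GPIOs and pinctrl states on its own:

#include <linux/i2c.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_i2c {
	struct i2c_adapter adap;
	struct i2c_bus_recovery_info rinfo;
};

static int example_i2c_probe(struct platform_device *pdev)
{
	struct example_i2c *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Hand the core the pinctrl handle; registration then looks up
	 * the "default"/"gpio" states and the scl/sda GPIOs, and selects
	 * i2c_generic_scl_recovery as the recover_bus() callback. */
	priv->rinfo.pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(priv->rinfo.pinctrl))
		return PTR_ERR(priv->rinfo.pinctrl);
	priv->adap.bus_recovery_info = &priv->rinfo;

	/* ... fill in priv->adap.algo, dev.parent, name, etc. ... */

	/* Later, in the transfer path, the driver calls
	 * i2c_recover_bus(&priv->adap) when SDA is observed stuck low. */
	return i2c_add_adapter(&priv->adap);
}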
Signed-off-by: Codrin Ciubotariu [wsa: inverted one logic for better readability, minor update to kdoc] Signed-off-by: Wolfram Sang --- drivers/i2c/i2c-core-base.c | 126 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/i2c.h | 11 ++++ 2 files changed, 137 insertions(+) (limited to 'include/linux') diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 69217d2193da..ece25560eae4 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -181,6 +182,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) if (bri->prepare_recovery) bri->prepare_recovery(adap); + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_gpio); /* * If we can set SDA, we will always create a STOP to ensure additional @@ -236,6 +239,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) if (bri->unprepare_recovery) bri->unprepare_recovery(adap); + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_default); return ret; } @@ -251,6 +256,125 @@ int i2c_recover_bus(struct i2c_adapter *adap) } EXPORT_SYMBOL_GPL(i2c_recover_bus); +static void i2c_gpio_init_pinctrl_recovery(struct i2c_adapter *adap) +{ + struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; + struct device *dev = &adap->dev; + struct pinctrl *p = bri->pinctrl; + + /* + * we can't change states without pinctrl, so remove the states if + * populated + */ + if (!p) { + bri->pins_default = NULL; + bri->pins_gpio = NULL; + return; + } + + if (!bri->pins_default) { + bri->pins_default = pinctrl_lookup_state(p, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(bri->pins_default)) { + dev_dbg(dev, PINCTRL_STATE_DEFAULT " state not found for GPIO recovery\n"); + bri->pins_default = NULL; + } + } + if (!bri->pins_gpio) { + bri->pins_gpio = pinctrl_lookup_state(p, "gpio"); + if (IS_ERR(bri->pins_gpio)) + bri->pins_gpio = pinctrl_lookup_state(p, "recovery"); + + if (IS_ERR(bri->pins_gpio)) { + dev_dbg(dev, "no gpio or recovery state found for GPIO recovery\n"); + bri->pins_gpio = NULL; + } + } + + /* for pinctrl state changes, we need all the information */ + if (bri->pins_default && bri->pins_gpio) { + dev_info(dev, "using pinctrl states for GPIO recovery"); + } else { + bri->pinctrl = NULL; + bri->pins_default = NULL; + bri->pins_gpio = NULL; + } +} + +static int i2c_gpio_init_generic_recovery(struct i2c_adapter *adap) +{ + struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; + struct device *dev = &adap->dev; + struct gpio_desc *gpiod; + int ret = 0; + + /* + * don't touch the recovery information if the driver is not using + * generic SCL recovery + */ + if (bri->recover_bus && bri->recover_bus != i2c_generic_scl_recovery) + return 0; + + /* + * pins might be taken as GPIO, so we should inform pinctrl about + * this and move the state to GPIO + */ + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_gpio); + + /* + * if there is incomplete or no recovery information, see if generic + * GPIO recovery is available + */ + if (!bri->scl_gpiod) { + gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); + if (PTR_ERR(gpiod) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto cleanup_pinctrl_state; + } + if (!IS_ERR(gpiod)) { + bri->scl_gpiod = gpiod; + bri->recover_bus = i2c_generic_scl_recovery; + dev_info(dev, "using generic GPIOs for recovery\n"); + } + } + + /* SDA GPIOD line is optional, so we care about DEFER only */ + if (!bri->sda_gpiod) { + /* + * We have SCL. 
Pull SCL low and wait a bit so that SDA glitches
+ * have no effect.
+ */
+ gpiod_direction_output(bri->scl_gpiod, 0);
+ udelay(10);
+ gpiod = devm_gpiod_get(dev, "sda", GPIOD_IN);
+
+ /* Wait a bit in case of an SDA glitch, and then release SCL. */
+ udelay(10);
+ gpiod_direction_output(bri->scl_gpiod, 1);
+
+ if (PTR_ERR(gpiod) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto cleanup_pinctrl_state;
+ }
+ if (!IS_ERR(gpiod))
+ bri->sda_gpiod = gpiod;
+ }
+
+cleanup_pinctrl_state:
+ /* change the state of the pins back to their default state */
+ if (bri->pinctrl)
+ pinctrl_select_state(bri->pinctrl, bri->pins_default);
+
+ return ret;
+}
+
+static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
+{
+ i2c_gpio_init_pinctrl_recovery(adap);
+ return i2c_gpio_init_generic_recovery(adap);
+}
+
 static void i2c_init_recovery(struct i2c_adapter *adap)
 {
 struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
@@ -259,6 +383,8 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
 if (!bri)
 return;

+ i2c_gpio_init_recovery(adap);
+
 if (!bri->recover_bus) {
 err_str = "no recover_bus() found";
 goto err;
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 8ea9c3f86dba..d387d3786429 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -606,6 +606,14 @@ struct i2c_timings {
 * may configure padmux here for SDA/SCL line or something else they want.
 * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery.
 * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery.
+ * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins.
+ * Optional.
+ * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned
+ * to the I2C bus. Optional. Populated internally for GPIO recovery, if
+ * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid.
+ * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as
+ * GPIOs. Optional. Populated internally for GPIO recovery, if this state
+ * is called "gpio" or "recovery" and pinctrl is valid.
 */
 struct i2c_bus_recovery_info {
 int (*recover_bus)(struct i2c_adapter *adap);
@@ -622,6 +630,9 @@ struct i2c_bus_recovery_info {
 /* gpio recovery */
 struct gpio_desc *scl_gpiod;
 struct gpio_desc *sda_gpiod;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_gpio;
 };

 int i2c_recover_bus(struct i2c_adapter *adap);
-- cgit v1.2.3


From 5487196878bc926a1ee15069c13aa48b9a894fab Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Fri, 10 Jul 2020 06:46:04 -0400
Subject: virtio_ring: sparse warning fixup

virtio_store_mb was built with split ring in mind so it accepts
__virtio16 arguments. Packed ring uses __le16 values, so sparse
complains. It's just a store with some barriers, so let's convert it to
a macro; we don't lose too much type safety by doing that.

Signed-off-by: Michael S.
Tsirkin Acked-by: Cornelia Huck --- include/linux/virtio_ring.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 3dc70adfe5f5..b485b13fa50b 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -46,16 +46,15 @@ static inline void virtio_wmb(bool weak_barriers) dma_wmb(); } -static inline void virtio_store_mb(bool weak_barriers, - __virtio16 *p, __virtio16 v) -{ - if (weak_barriers) { - virt_store_mb(*p, v); - } else { - WRITE_ONCE(*p, v); - mb(); - } -} +#define virtio_store_mb(weak_barriers, p, v) \ +do { \ + if (weak_barriers) { \ + virt_store_mb(*p, v); \ + } else { \ + WRITE_ONCE(*p, v); \ + mb(); \ + } \ +} while (0) \ struct virtio_device; struct virtqueue; -- cgit v1.2.3 From a4235ec06acf05c58081700cda02dcd480d9e9cb Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 03:20:21 -0400 Subject: virtio: allow __virtioXX, __leXX in config space Currently all config space fields are of the type __uXX. This confuses people and some drivers (notably vdpa) access them using CPU endian-ness - which only works well for legacy or LE platforms. Update virtio_cread/virtio_cwrite macros to allow __virtioXX and __leXX field types. Follow-up patches will convert config space to use these types. Signed-off-by: Michael S. Tsirkin Acked-by: Cornelia Huck --- include/linux/virtio_config.h | 50 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 3b4eae5ac5e3..64da491936f7 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -6,6 +6,7 @@ #include #include #include +#include #include struct irq_affinity; @@ -287,12 +288,57 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); } +/* + * Only the checker differentiates between __virtioXX and __uXX types. But we + * try to share as much code as we can with the regular GCC build. + */ +#if !defined(CONFIG_CC_IS_GCC) && !defined(__CHECKER__) + +/* Not a checker - we can keep things simple */ +#define __virtio_native_typeof(x) typeof(x) + +#else + +/* + * We build this out of a couple of helper macros in a vain attempt to + * help you keep your lunch down while reading it. + */ +#define __virtio_pick_value(x, type, then, otherwise) \ + __builtin_choose_expr(__same_type(x, type), then, otherwise) + +#define __virtio_pick_type(x, type, then, otherwise) \ + __virtio_pick_value(x, type, (then)0, otherwise) + +#define __virtio_pick_endian(x, x16, x32, x64, otherwise) \ + __virtio_pick_type(x, x16, __u16, \ + __virtio_pick_type(x, x32, __u32, \ + __virtio_pick_type(x, x64, __u64, \ + otherwise))) + +#define __virtio_native_typeof(x) typeof( \ + __virtio_pick_type(x, __u8, __u8, \ + __virtio_pick_endian(x, __virtio16, __virtio32, __virtio64, \ + __virtio_pick_endian(x, __le16, __le32, __le64, \ + __virtio_pick_endian(x, __u16, __u32, __u64, \ + /* No other type allowed */ \ + (void)0))))) + +#endif + +#define __virtio_native_type(structname, member) \ + __virtio_native_typeof(((structname*)0)->member) + +#define __virtio_typecheck(structname, member, val) \ + /* Must match the member's type, and be integer */ \ + typecheck(__virtio_native_type(structname, member), (val)) + + /* Config space accessors. 
*/ #define virtio_cread(vdev, structname, member, ptr) \ do { \ might_sleep(); \ /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ + if (!__virtio_typecheck(structname, member, *(ptr))) \ (*ptr) = 1; \ \ switch (sizeof(*ptr)) { \ @@ -322,7 +368,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) do { \ might_sleep(); \ /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ + if (!__virtio_typecheck(structname, member, *(ptr))) \ BUG_ON((*ptr) == 1); \ \ switch (sizeof(*ptr)) { \ -- cgit v1.2.3 From 4a04cfb0eb5e00432f9ff978f2b81bd1736e85db Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:55:52 -0400 Subject: virtio_config: disallow native type fields Transitional devices should all use __virtioXX types (and __leXX for fields not present in legacy devices). Modern ones should use __leXX. _uXX type would be a bug. Let's prevent that. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 64da491936f7..c68f58f3bf34 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -319,9 +319,8 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __virtio_pick_type(x, __u8, __u8, \ __virtio_pick_endian(x, __virtio16, __virtio32, __virtio64, \ __virtio_pick_endian(x, __le16, __le32, __le64, \ - __virtio_pick_endian(x, __u16, __u32, __u64, \ - /* No other type allowed */ \ - (void)0))))) + /* No other type allowed */ \ + (void)0)))) #endif -- cgit v1.2.3 From 452639a64ad880792652b6d20cc5c8dd4ecf27d9 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 27 Jul 2020 10:51:55 -0400 Subject: vdpa: make sure set_features is invoked for legacy Some legacy guests just assume features are 0 after reset. We detect that config space is accessed before features are set and set features to 0 automatically. Note: some legacy guests might not even access config space, if this is reported in the field we might need to catch a kick to handle these. Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/vdpa.c | 1 + include/linux/vdpa.h | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) (limited to 'include/linux') diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index de211ef3738c..7105265e4793 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -96,6 +96,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, vdev->dev.release = vdpa_release_dev; vdev->index = err; vdev->config = config; + vdev->features_valid = false; err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); if (err) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 239db794357c..29b8296f1414 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -33,12 +33,14 @@ struct vdpa_notification_area { * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. * @index: device index + * @features_valid: were features initialized? 
for legacy guests */ struct vdpa_device { struct device dev; struct device *dma_dev; const struct vdpa_config_ops *config; unsigned int index; + bool features_valid; }; /** @@ -266,4 +268,36 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) { return vdev->dma_dev; } + +static inline void vdpa_reset(struct vdpa_device *vdev) +{ + const struct vdpa_config_ops *ops = vdev->config; + + vdev->features_valid = false; + ops->set_status(vdev, 0); +} + +static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) +{ + const struct vdpa_config_ops *ops = vdev->config; + + vdev->features_valid = true; + return ops->set_features(vdev, features); +} + + +static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, + void *buf, unsigned int len) +{ + const struct vdpa_config_ops *ops = vdev->config; + + /* + * Config accesses aren't supposed to trigger before features are set. + * If it does happen we assume a legacy guest. + */ + if (!vdev->features_valid) + vdpa_set_features(vdev, 0); + ops->get_config(vdev, offset, buf, len); +} + #endif /* _LINUX_VDPA_H */ -- cgit v1.2.3 From cacaf775c699e9e8473491197587535f1c10ac8f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 30 Jul 2020 16:12:40 -0400 Subject: virtio_config: cread/write cleanup Use vars of the correct type instead of casting. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index c68f58f3bf34..5c3b02245ecd 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -444,53 +444,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev, static inline u16 virtio_cread16(struct virtio_device *vdev, unsigned int offset) { - u16 ret; + __virtio16 ret; might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio16_to_cpu(vdev, (__force __virtio16)ret); + return virtio16_to_cpu(vdev, ret); } static inline void virtio_cwrite16(struct virtio_device *vdev, unsigned int offset, u16 val) { + __virtio16 v; + might_sleep(); - val = (__force u16)cpu_to_virtio16(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio16(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u32 virtio_cread32(struct virtio_device *vdev, unsigned int offset) { - u32 ret; + __virtio32 ret; might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio32_to_cpu(vdev, (__force __virtio32)ret); + return virtio32_to_cpu(vdev, ret); } static inline void virtio_cwrite32(struct virtio_device *vdev, unsigned int offset, u32 val) { + __virtio32 v; + might_sleep(); - val = (__force u32)cpu_to_virtio32(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio32(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u64 virtio_cread64(struct virtio_device *vdev, unsigned int offset) { - u64 ret; + __virtio64 ret; + __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); - return virtio64_to_cpu(vdev, (__force __virtio64)ret); + return virtio64_to_cpu(vdev, ret); } static inline void virtio_cwrite64(struct virtio_device *vdev, unsigned int offset, u64 val) { + __virtio64 v; + might_sleep(); - val = (__force u64)cpu_to_virtio64(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio64(vdev, val); + 
vdev->config->set(vdev, offset, &v, sizeof(v)); } /* Conditional config space accessors. */ -- cgit v1.2.3 From a5b90f2db8e0ef6504695cbd36a65fd8296338ee Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 3 Aug 2020 16:08:11 -0400 Subject: virtio_config: rewrite using _Generic Min compiler version has been raised, so that's ok now. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 161 ++++++++++++++++++++---------------------- 1 file changed, 76 insertions(+), 85 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 5c3b02245ecd..7fa000f02721 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -288,112 +288,103 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); } -/* - * Only the checker differentiates between __virtioXX and __uXX types. But we - * try to share as much code as we can with the regular GCC build. - */ -#if !defined(CONFIG_CC_IS_GCC) && !defined(__CHECKER__) - -/* Not a checker - we can keep things simple */ -#define __virtio_native_typeof(x) typeof(x) - -#else - -/* - * We build this out of a couple of helper macros in a vain attempt to - * help you keep your lunch down while reading it. - */ -#define __virtio_pick_value(x, type, then, otherwise) \ - __builtin_choose_expr(__same_type(x, type), then, otherwise) - -#define __virtio_pick_type(x, type, then, otherwise) \ - __virtio_pick_value(x, type, (then)0, otherwise) - -#define __virtio_pick_endian(x, x16, x32, x64, otherwise) \ - __virtio_pick_type(x, x16, __u16, \ - __virtio_pick_type(x, x32, __u32, \ - __virtio_pick_type(x, x64, __u64, \ - otherwise))) - -#define __virtio_native_typeof(x) typeof( \ - __virtio_pick_type(x, __u8, __u8, \ - __virtio_pick_endian(x, __virtio16, __virtio32, __virtio64, \ - __virtio_pick_endian(x, __le16, __le32, __le64, \ - /* No other type allowed */ \ - (void)0)))) - -#endif +#define virtio_to_cpu(vdev, x) \ + _Generic((x), \ + __u8: (x), \ + __virtio16: virtio16_to_cpu((vdev), (x)), \ + __virtio32: virtio32_to_cpu((vdev), (x)), \ + __virtio64: virtio64_to_cpu((vdev), (x)), \ + /* + * Why define a default? checker can distinguish between + * e.g. __u16, __le16 and __virtio16, but GCC can't so + * attempts to define variants for both look like a duplicate + * variant to it. + */ \ + default: _Generic((x), \ + __u8: (x), \ + __le16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ + __le32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ + __le64: virtio64_to_cpu((vdev), (__force __virtio64)(x)), \ + default: _Generic((x), \ + __u8: (x), \ + __u16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ + __u32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ + __u64: virtio64_to_cpu((vdev), (__force __virtio64)(x)) \ + ) \ + ) \ + ) + +#define cpu_to_virtio(vdev, x, m) \ + _Generic((m), \ + __u8: (x), \ + __virtio16: cpu_to_virtio16((vdev), (x)), \ + __virtio32: cpu_to_virtio32((vdev), (x)), \ + __virtio64: cpu_to_virtio64((vdev), (x)), \ + /* + * Why define a default? checker can distinguish between + * e.g. __u16, __le16 and __virtio16, but GCC can't so + * attempts to define variants for both look like a duplicate + * variant to it. 
+ */ \ + default: _Generic((m), \ + __u8: (x), \ + __le16: (__force __le16)cpu_to_virtio16((vdev), (x)), \ + __le32: (__force __le32)cpu_to_virtio32((vdev), (x)), \ + __le64: (__force __le64)cpu_to_virtio64((vdev), (x)), \ + default: _Generic((m), \ + __u8: (x), \ + __u16: (__force __u16)cpu_to_virtio16((vdev), (x)), \ + __u32: (__force __u32)cpu_to_virtio32((vdev), (x)), \ + __u64: (__force __u64)cpu_to_virtio64((vdev), (x)) \ + ) \ + ) \ + ) #define __virtio_native_type(structname, member) \ - __virtio_native_typeof(((structname*)0)->member) - -#define __virtio_typecheck(structname, member, val) \ - /* Must match the member's type, and be integer */ \ - typecheck(__virtio_native_type(structname, member), (val)) - + typeof(virtio_to_cpu(NULL, ((structname*)0)->member)) /* Config space accessors. */ #define virtio_cread(vdev, structname, member, ptr) \ do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ might_sleep(); \ - /* Must match the member's type, and be integer */ \ - if (!__virtio_typecheck(structname, member, *(ptr))) \ - (*ptr) = 1; \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ + switch (sizeof(virtio_cread_v)) { \ case 1: \ - *(ptr) = virtio_cread8(vdev, \ - offsetof(structname, member)); \ - break; \ case 2: \ - *(ptr) = virtio_cread16(vdev, \ - offsetof(structname, member)); \ - break; \ case 4: \ - *(ptr) = virtio_cread32(vdev, \ - offsetof(structname, member)); \ - break; \ - case 8: \ - *(ptr) = virtio_cread64(vdev, \ - offsetof(structname, member)); \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ break; \ default: \ - BUG(); \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ } \ + *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \ } while(0) /* Config space accessors. */ #define virtio_cwrite(vdev, structname, member, ptr) \ do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \ + \ might_sleep(); \ - /* Must match the member's type, and be integer */ \ - if (!__virtio_typecheck(structname, member, *(ptr))) \ - BUG_ON((*ptr) == 1); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ - case 1: \ - virtio_cwrite8(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 2: \ - virtio_cwrite16(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 4: \ - virtio_cwrite32(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 8: \ - virtio_cwrite64(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - default: \ - BUG(); \ - } \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ } while(0) /* Read @count fields, @bytes each. */ -- cgit v1.2.3 From 14191c15ab9d87e60d2ebbfbf6df83d546152af1 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 10 Jul 2020 07:55:52 -0400 Subject: virtio_config: disallow native type fields (again) _Generic version allowed __uXX types but that is no longer necessary: Transitional devices should all use __virtioXX types (and __leXX for fields not present in the legacy devices). Modern ones should use __leXX. _uXX type would be a bug. Let's prevent that. Signed-off-by: Michael S. 
Tsirkin --- include/linux/virtio_config.h | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 7fa000f02721..441fd6dd42ab 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -304,13 +304,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __u8: (x), \ __le16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ __le32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ - __le64: virtio64_to_cpu((vdev), (__force __virtio64)(x)), \ - default: _Generic((x), \ - __u8: (x), \ - __u16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ - __u32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ - __u64: virtio64_to_cpu((vdev), (__force __virtio64)(x)) \ - ) \ + __le64: virtio64_to_cpu((vdev), (__force __virtio64)(x)) \ ) \ ) @@ -330,13 +324,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __u8: (x), \ __le16: (__force __le16)cpu_to_virtio16((vdev), (x)), \ __le32: (__force __le32)cpu_to_virtio32((vdev), (x)), \ - __le64: (__force __le64)cpu_to_virtio64((vdev), (x)), \ - default: _Generic((m), \ - __u8: (x), \ - __u16: (__force __u16)cpu_to_virtio16((vdev), (x)), \ - __u32: (__force __u32)cpu_to_virtio32((vdev), (x)), \ - __u64: (__force __u64)cpu_to_virtio64((vdev), (x)) \ - ) \ + __le64: (__force __le64)cpu_to_virtio64((vdev), (x)) \ ) \ ) -- cgit v1.2.3 From e598960ff5e511b76a0eb8dff25207d35c2442c8 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 4 Aug 2020 17:33:08 -0400 Subject: virtio_config: LE config space accessors To be used by modern code, as well as to handle LE only fields such as balloon. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 65 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 441fd6dd42ab..5b5196fec899 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -375,6 +375,71 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) sizeof(virtio_cwrite_v)); \ } while(0) +/* + * Nothing virtio-specific about these, but let's worry about generalizing + * these later. + */ +#define virtio_le_to_cpu(x) \ + _Generic((x), \ + __u8: (x), \ + __le16: le16_to_cpu(x), \ + __le32: le32_to_cpu(x), \ + __le64: le64_to_cpu(x) \ + ) + +#define virtio_cpu_to_le(x, m) \ + _Generic((m), \ + __u8: (x), \ + __le16: cpu_to_le16(x), \ + __le32: cpu_to_le32(x), \ + __le64: cpu_to_le64(x) \ + ) + +/* LE (e.g. modern) Config space accessors. */ +#define virtio_cread_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \ + \ + switch (sizeof(virtio_cread_v)) { \ + case 1: \ + case 2: \ + case 4: \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ + break; \ + default: \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ + } \ + *(ptr) = virtio_le_to_cpu(virtio_cread_v); \ + } while(0) + +/* Config space accessors. 
*/ +#define virtio_cwrite_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \ + \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ + } while(0) + + /* Read @count fields, @bytes each. */ static inline void __virtio_cread_many(struct virtio_device *vdev, unsigned int offset, -- cgit v1.2.3 From e3e7994d53082e48d2a6a248376683d3be3dff9d Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 4 Aug 2020 18:02:39 -0400 Subject: virtio_caif: correct tags for config space fields Tag config space fields as having virtio endian-ness. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_caif.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h index 5d2d3124ca3d..ea722479510c 100644 --- a/include/linux/virtio_caif.h +++ b/include/linux/virtio_caif.h @@ -11,9 +11,9 @@ #include struct virtio_caif_transf_config { - u16 headroom; - u16 tailroom; - u32 mtu; + __virtio16 headroom; + __virtio16 tailroom; + __virtio32 mtu; u8 reserved[4]; }; -- cgit v1.2.3 From 035ce4210be1257dd417785ff7818b5c0f2205fb Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 5 Aug 2020 09:17:38 -0400 Subject: virtio_config: add virtio_cread_le_feature Mirrors virtio_cread_feature but for LE fields. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 5b5196fec899..cc7a2b1fd7b2 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -555,4 +555,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, _r; \ }) +/* Conditional config space accessors. */ +#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \ + ({ \ + int _r = 0; \ + if (!virtio_has_feature(vdev, fbit)) \ + _r = -ENOENT; \ + else \ + virtio_cread_le((vdev), structname, member, ptr); \ + _r; \ + }) #endif /* _LINUX_VIRTIO_CONFIG_H */ -- cgit v1.2.3 From 83eb9db95eb453f1db651909ad4598c3d44ef1e1 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 5 Aug 2020 07:29:12 -0400 Subject: virtio_config: drop LE option from config space All drivers now use virtio_cread/write_le for LE config space fields. Drop LE option from virtio_cread/write, only leaving the option to access transitional fields. Signed-off-by: Michael S. Tsirkin --- include/linux/virtio_config.h | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index cc7a2b1fd7b2..ecb166c824bb 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -293,19 +293,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __u8: (x), \ __virtio16: virtio16_to_cpu((vdev), (x)), \ __virtio32: virtio32_to_cpu((vdev), (x)), \ - __virtio64: virtio64_to_cpu((vdev), (x)), \ - /* - * Why define a default? checker can distinguish between - * e.g. __u16, __le16 and __virtio16, but GCC can't so - * attempts to define variants for both look like a duplicate - * variant to it. 
- */ \ - default: _Generic((x), \ - __u8: (x), \ - __le16: virtio16_to_cpu((vdev), (__force __virtio16)(x)), \ - __le32: virtio32_to_cpu((vdev), (__force __virtio32)(x)), \ - __le64: virtio64_to_cpu((vdev), (__force __virtio64)(x)) \ - ) \ + __virtio64: virtio64_to_cpu((vdev), (x)) \ ) #define cpu_to_virtio(vdev, x, m) \ @@ -313,19 +301,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) __u8: (x), \ __virtio16: cpu_to_virtio16((vdev), (x)), \ __virtio32: cpu_to_virtio32((vdev), (x)), \ - __virtio64: cpu_to_virtio64((vdev), (x)), \ - /* - * Why define a default? checker can distinguish between - * e.g. __u16, __le16 and __virtio16, but GCC can't so - * attempts to define variants for both look like a duplicate - * variant to it. - */ \ - default: _Generic((m), \ - __u8: (x), \ - __le16: (__force __le16)cpu_to_virtio16((vdev), (x)), \ - __le32: (__force __le32)cpu_to_virtio32((vdev), (x)), \ - __le64: (__force __le64)cpu_to_virtio64((vdev), (x)) \ - ) \ + __virtio64: cpu_to_virtio64((vdev), (x)) \ ) #define __virtio_native_type(structname, member) \ -- cgit v1.2.3 From 7164675ab5caf46867d6a5448f4ff3af92d80a30 Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Fri, 31 Jul 2020 14:55:30 +0800 Subject: vDPA: add get_vq_irq() in vdpa_config_ops This commit adds a new function get_vq_irq() in struct vdpa_config_ops, which will return the irq number of a virtqueue. Signed-off-by: Zhu Lingshan Suggested-by: Jason Wang Link: https://lore.kernel.org/r/20200731065533.4144-4-lingshan.zhu@intel.com Signed-off-by: Michael S. Tsirkin --- include/linux/vdpa.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 29b8296f1414..5c530a64aa06 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -89,6 +89,12 @@ struct vdpa_device { * @vdev: vdpa device * @idx: virtqueue index * Returns the notifcation area + * @get_vq_irq: Get the irq number of a virtqueue (optional, + * but must implemented if require vq irq offloading) + * @vdev: vdpa device + * @idx: virtqueue index + * Returns int: irq number of a virtqueue, + * negative number if no irq assigned. * @get_vq_align: Get the virtqueue align requirement * for the device * @vdev: vdpa device @@ -180,6 +186,7 @@ struct vdpa_config_ops { u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); + int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx); /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); -- cgit v1.2.3 From 4c05433bc6fb4ae172270f0279be8ba89a3da64f Mon Sep 17 00:00:00 2001 From: Zhu Lingshan Date: Tue, 4 Aug 2020 18:21:23 +0800 Subject: vDPA: dont change vq irq after DRIVER_OK IRQ of a vq is not expected to be changed in a DRIVER_OK ~ !DRIVER_OK period for irq offloading purposes. Place this comment at the side of bus ops get_vq_irq than in set_status in vhost_vdpa. Signed-off-by: Zhu Lingshan Link: https://lore.kernel.org/r/20200804102123.69978-1-lingshan.zhu@intel.com Signed-off-by: Michael S. 
Tsirkin
---
 drivers/vhost/vdpa.c | 1 -
 include/linux/vdpa.h | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 7441b9803eae..f8f8c9cf05b0 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -172,7 +172,6 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
 ops->set_status(vdpa, status);

- /* vq irq is not expected to be changed once DRIVER_OK is set */
 if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
 for (i = 0; i < nvqs; i++)
 vhost_vdpa_setup_vq_irq(v, i);
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 5c530a64aa06..565298cb45d2 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -186,6 +186,7 @@ struct vdpa_config_ops {
 u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
 struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
+ /* vq irq is not expected to be changed once DRIVER_OK is set */
 int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx);

 /* Device ops */
-- cgit v1.2.3


From 923a3a863ae0c26876d704fb3453069e11ebdcb6 Mon Sep 17 00:00:00 2001
From: Michael Shych
Date: Mon, 4 May 2020 17:14:24 +0300
Subject: platform_data/mlxreg: support new watchdog type with longer timeout period

Add new watchdog type 3 with a longer timeout period. Extend the size of
the health_cntr field so that it can be used to init the watchdog timeout
period.

Signed-off-by: Michael Shych
Reviewed-by: Vadim Pasternak
Acked-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20200504141427.17685-2-michaelsh@mellanox.com
Signed-off-by: Guenter Roeck
Signed-off-by: Wim Van Sebroeck
---
 include/linux/platform_data/mlxreg.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index b8da8aef2446..2c5e58d1d77b 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -43,10 +43,13 @@
 *
 * TYPE1 HW watchdog implementation exist in old systems.
 * All new systems have TYPE2 HW watchdog.
+ * TYPE3 HW watchdog can exist on all systems with new CPLD.
+ * TYPE3 is selected by WD capability bit.
 */
 enum mlxreg_wdt_type {
 MLX_WDT_TYPE1,
 MLX_WDT_TYPE2,
+ MLX_WDT_TYPE3,
 };

 /**
@@ -90,7 +93,7 @@ struct mlxreg_core_data {
 umode_t mode;
 struct device_node *np;
 struct mlxreg_hotplug_device hpdev;
- u8 health_cntr;
+ u32 health_cntr;
 bool attached;
 };
-- cgit v1.2.3


From cef9572e9af373cefd1adc9e771e89670da5da3c Mon Sep 17 00:00:00 2001
From: Tero Kristo
Date: Fri, 17 Jul 2020 16:29:56 +0300
Subject: watchdog: add support for adjusting last known HW keepalive time

Certain watchdogs require the watchdog only to be pinged within a
specific time window; pinging too early or too late causes the watchdog
to fire. In cases where this sort of watchdog has been started before
the kernel comes up, we must adjust the watchdog keepalive window to
match the actually running timer, so add a new driver API for this
purpose.
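A hedged sketch of a consumer of the new call, with hypothetical example_* names; only watchdog_set_last_hw_keepalive() itself comes from this patch:

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>

/* Stand-in (hypothetical) for reading from hardware how many ms ago
 * the already-running watchdog was last serviced. */
static unsigned int example_ms_since_last_ping(struct platform_device *pdev)
{
	return 1000;
}

static int example_wdt_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd;
	int ret;

	wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	wdd->min_hw_heartbeat_ms = 500;	/* earliest allowed re-ping */
	/* ... wdd->ops, wdd->timeout, etc. ... */

	ret = devm_watchdog_register_device(&pdev->dev, wdd);
	if (ret)
		return ret;

	/* Must follow registration immediately, per the API contract. */
	return watchdog_set_last_hw_keepalive(wdd,
					      example_ms_since_last_ping(pdev));
}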
Signed-off-by: Tero Kristo Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20200717132958.14304-3-t-kristo@ti.com Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- Documentation/watchdog/watchdog-kernel-api.rst | 12 +++++++++++ drivers/watchdog/watchdog_dev.c | 30 ++++++++++++++++++++++++++ include/linux/watchdog.h | 2 ++ 3 files changed, 44 insertions(+) (limited to 'include/linux') diff --git a/Documentation/watchdog/watchdog-kernel-api.rst b/Documentation/watchdog/watchdog-kernel-api.rst index 068a55ee0d4a..baf44e986b07 100644 --- a/Documentation/watchdog/watchdog-kernel-api.rst +++ b/Documentation/watchdog/watchdog-kernel-api.rst @@ -336,3 +336,15 @@ an action is taken by a preconfigured pretimeout governor preassigned to the watchdog device. If watchdog pretimeout governor framework is not enabled, watchdog_notify_pretimeout() prints a notification message to the kernel log buffer. + +To set the last known HW keepalive time for a watchdog, the following function +should be used:: + + int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd, + unsigned int last_ping_ms) + +This function must be called immediately after watchdog registration. It +sets the last known hardware heartbeat to have happened last_ping_ms before +current time. Calling this is only needed if the watchdog is already running +when probe is called, and the watchdog can only be pinged after the +min_hw_heartbeat_ms time has passed from the last ping. diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 0ad1c393c00e..531e74994b61 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -1138,6 +1138,36 @@ void watchdog_dev_unregister(struct watchdog_device *wdd) watchdog_cdev_unregister(wdd); } +/* + * watchdog_set_last_hw_keepalive: set last HW keepalive time for watchdog + * @wdd: watchdog device + * @last_ping_ms: time since last HW heartbeat + * + * Adjusts the last known HW keepalive time for a watchdog timer. + * This is needed if the watchdog is already running when the probe + * function is called, and it can't be pinged immediately. This + * function must be called immediately after watchdog registration, + * and min_hw_heartbeat_ms must be set for this to be useful. 
+ */ +int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd, + unsigned int last_ping_ms) +{ + struct watchdog_core_data *wd_data; + ktime_t now; + + if (!wdd) + return -EINVAL; + + wd_data = wdd->wd_data; + + now = ktime_get(); + + wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms)); + + return __watchdog_ping(wdd); +} +EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive); + /* * watchdog_dev_init: init dev part of watchdog core * diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 1464ce6ffa31..9b19e6bb68b5 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -210,6 +210,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd, extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); +int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int); + /* devres register variant */ int devm_watchdog_register_device(struct device *dev, struct watchdog_device *); -- cgit v1.2.3 From a9974489b61c09c702c85c6cba3d1a3fd1be7a15 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Tue, 4 Aug 2020 19:20:42 +0300 Subject: vdpa: remove hard coded virtq num This will enable vdpa providers to add support for multi queue feature and publish it to upper layers (vhost and virtio). Signed-off-by: Max Gurtovoy Reviewed-by: Jason Wang Link: https://lore.kernel.org/r/20200804162048.22587-7-eli@mellanox.com Signed-off-by: Michael S. Tsirkin --- drivers/vdpa/ifcvf/ifcvf_main.c | 3 ++- drivers/vdpa/vdpa.c | 3 +++ drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 ++-- drivers/vhost/vdpa.c | 9 +++------ include/linux/vdpa.h | 6 ++++-- 5 files changed, 14 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index a902b29b0d29..7c93225367db 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -431,7 +431,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, - dev, &ifc_vdpa_ops); + dev, &ifc_vdpa_ops, + IFCVF_MAX_QUEUE_PAIRS * 2); if (adapter == NULL) { IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); return -ENOMEM; diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index 7105265e4793..a69ffc991e13 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -61,6 +61,7 @@ static void vdpa_release_dev(struct device *d) * initialized but before registered. 
* @parent: the parent device * @config: the bus operations that is supported by this device + * @nvqs: number of virtqueues supported by this device * @size: size of the parent structure that contains private data * * Driver should use vdpa_alloc_device() wrapper macro instead of @@ -71,6 +72,7 @@ static void vdpa_release_dev(struct device *d) */ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, + int nvqs, size_t size) { struct vdpa_device *vdev; @@ -97,6 +99,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, vdev->index = err; vdev->config = config; vdev->features_valid = false; + vdev->nvqs = nvqs; err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); if (err) diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 7b34d663778f..58baff89cc29 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -65,7 +65,7 @@ static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) | /* State of each vdpasim device */ struct vdpasim { struct vdpa_device vdpa; - struct vdpasim_virtqueue vqs[2]; + struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM]; struct work_struct work; /* spinlock to synchronize virtqueue state */ spinlock_t lock; @@ -352,7 +352,7 @@ static struct vdpasim *vdpasim_create(void) else ops = &vdpasim_net_config_ops; - vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops); + vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM); if (!vdpasim) goto err_alloc; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index e80db051845d..2d8c950ad3a8 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -32,9 +32,6 @@ enum { (1ULL << VHOST_BACKEND_F_IOTLB_BATCH), }; -/* Currently, only network backend w/o multiqueue is supported. */ -#define VHOST_VDPA_VQ_MAX 2 - #define VHOST_VDPA_DEV_MAX (1U << MINORBITS) struct vhost_vdpa { @@ -930,7 +927,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) { const struct vdpa_config_ops *ops = vdpa->config; struct vhost_vdpa *v; - int minor, nvqs = VHOST_VDPA_VQ_MAX; + int minor; int r; /* Currently, we only accept the network devices. 
*/ @@ -951,14 +948,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) atomic_set(&v->opened, 0); v->minor = minor; v->vdpa = vdpa; - v->nvqs = nvqs; + v->nvqs = vdpa->nvqs; v->virtio_id = ops->get_device_id(vdpa); device_initialize(&v->dev); v->dev.release = vhost_vdpa_release_dev; v->dev.parent = &vdpa->dev; v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor); - v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), + v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue), GFP_KERNEL); if (!v->vqs) { r = -ENOMEM; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 565298cb45d2..b5901cde73e0 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -41,6 +41,7 @@ struct vdpa_device { const struct vdpa_config_ops *config; unsigned int index; bool features_valid; + int nvqs; }; /** @@ -218,11 +219,12 @@ struct vdpa_config_ops { struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, + int nvqs, size_t size); -#define vdpa_alloc_device(dev_struct, member, parent, config) \ +#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \ container_of(__vdpa_alloc_device( \ - parent, config, \ + parent, config, nvqs, \ sizeof(dev_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ dev_struct, member))), \ -- cgit v1.2.3 From aac50c0bd434794b9950181349099e709ca4edad Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 4 Aug 2020 19:20:43 +0300 Subject: net/vdpa: Use struct for set/get vq state For now VQ state involves 16 bit available index value encoded in u64 variable. In the future it will be extended to contain more fields. Use struct to contain the state, now containing only a single u16 for the available index. In the future we can add fields to this struct. Reviewed-by: Parav Pandit Acked-by: Jason Wang Signed-off-by: Eli Cohen Link: https://lore.kernel.org/r/20200804162048.22587-8-eli@mellanox.com Signed-off-by: Michael S. 
Tsirkin --- drivers/vdpa/ifcvf/ifcvf_base.c | 4 ++-- drivers/vdpa/ifcvf/ifcvf_base.h | 4 ++-- drivers/vdpa/ifcvf/ifcvf_main.c | 9 +++++---- drivers/vdpa/vdpa_sim/vdpa_sim.c | 10 ++++++---- drivers/vhost/vdpa.c | 7 +++++-- include/linux/vdpa.h | 18 ++++++++++++++---- 6 files changed, 34 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 94bf0328b68d..f2a128e56de5 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -272,7 +272,7 @@ static int ifcvf_config_features(struct ifcvf_hw *hw) return 0; } -u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) +u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) { struct ifcvf_lm_cfg __iomem *ifcvf_lm; void __iomem *avail_idx_addr; @@ -287,7 +287,7 @@ u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) return last_avail_idx; } -int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num) +int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num) { struct ifcvf_lm_cfg __iomem *ifcvf_lm; void __iomem *avail_idx_addr; diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index 24af422b5a3e..08f267a2aafe 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -116,7 +116,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status); void io_write64_twopart(u64 val, u32 *lo, u32 *hi); void ifcvf_reset(struct ifcvf_hw *hw); u64 ifcvf_get_features(struct ifcvf_hw *hw); -u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); -int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num); +u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); +int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num); struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw); #endif /* _IFCVF_H_ */ diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 7c93225367db..dc311e972b9e 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -237,19 +237,20 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev) return IFCVF_QUEUE_MAX; } -static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid) +static void ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, + struct vdpa_vq_state *state) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - return ifcvf_get_vq_state(vf, qid); + state->avail_index = ifcvf_get_vq_state(vf, qid); } static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, - u64 num) + const struct vdpa_vq_state *state) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - return ifcvf_set_vq_state(vf, qid, num); + return ifcvf_set_vq_state(vf, qid, state->avail_index); } static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 58baff89cc29..c93126ad09d1 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -450,26 +450,28 @@ static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx) return vq->ready; } -static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state) +static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, + const struct vdpa_vq_state *state) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vringh *vrh = &vq->vring; spin_lock(&vdpasim->lock); - vrh->last_avail_idx = state; + vrh->last_avail_idx = state->avail_index; spin_unlock(&vdpasim->lock); 
return 0; } -static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx) +static void vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx, + struct vdpa_vq_state *state) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vringh *vrh = &vq->vring; - return vrh->last_avail_idx; + state->avail_index = vrh->last_avail_idx; } static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 2d8c950ad3a8..066b165c17b1 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -349,6 +349,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, { struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; + struct vdpa_vq_state vq_state; struct vdpa_callback cb; struct vhost_virtqueue *vq; struct vhost_vring_state s; @@ -374,7 +375,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ops->set_vq_ready(vdpa, idx, s.num); return 0; case VHOST_GET_VRING_BASE: - vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx); + ops->get_vq_state(v->vdpa, idx, &vq_state); + vq->last_avail_idx = vq_state.avail_index; break; case VHOST_GET_BACKEND_FEATURES: features = VHOST_VDPA_BACKEND_FEATURES; @@ -404,7 +406,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, break; case VHOST_SET_VRING_BASE: - if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx)) + vq_state.avail_index = vq->last_avail_idx; + if (ops->set_vq_state(vdpa, idx, &vq_state)) r = -EINVAL; break; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index b5901cde73e0..d7399c983734 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -27,6 +27,14 @@ struct vdpa_notification_area { resource_size_t size; }; +/** + * vDPA vq_state definition + * @avail_index: available index + */ +struct vdpa_vq_state { + u16 avail_index; +}; + /** * vDPA device - representation of a vDPA device * @dev: underlying device @@ -80,12 +88,12 @@ struct vdpa_device { * @set_vq_state: Set the state for a virtqueue * @vdev: vdpa device * @idx: virtqueue index - * @state: virtqueue state (last_avail_idx) + * @state: pointer to set virtqueue state (last_avail_idx) * Returns integer: success (0) or error (< 0) * @get_vq_state: Get the state for a virtqueue * @vdev: vdpa device * @idx: virtqueue index - * Returns virtqueue state (last_avail_idx) + * @state: pointer to returned state (last_avail_idx) * @get_vq_notification: Get the notification area for a virtqueue * @vdev: vdpa device * @idx: virtqueue index @@ -183,8 +191,10 @@ struct vdpa_config_ops { struct vdpa_callback *cb); void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready); bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); - int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); - u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); + int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, + const struct vdpa_vq_state *state); + void (*get_vq_state)(struct vdpa_device *vdev, u16 idx, + struct vdpa_vq_state *state); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* vq irq is not expected to be changed once DRIVER_OK is set */ -- cgit v1.2.3 From 23750e39d57433d0e3d89658f0bc448f9c42ff49 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 4 Aug 2020 19:20:44 +0300 Subject: vdpa: Modify get_vq_state() to return error code Modify get_vq_state() so it returns an error code. 
In case of hardware acceleration, the available index may be retrieved from the device, an operation that can possibly fail. Reviewed-by: Parav Pandit Signed-off-by: Eli Cohen Link: https://lore.kernel.org/r/20200804162048.22587-9-eli@mellanox.com Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang --- drivers/vdpa/ifcvf/ifcvf_main.c | 5 +++-- drivers/vdpa/vdpa_sim/vdpa_sim.c | 5 +++-- drivers/vhost/vdpa.c | 5 ++++- include/linux/vdpa.h | 4 ++-- 4 files changed, 12 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index dc311e972b9e..076d7ac5e723 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -237,12 +237,13 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev) return IFCVF_QUEUE_MAX; } -static void ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, - struct vdpa_vq_state *state) +static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, + struct vdpa_vq_state *state) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); state->avail_index = ifcvf_get_vq_state(vf, qid); + return 0; } static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index c93126ad09d1..df3224b138ee 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -464,14 +464,15 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, return 0; } -static void vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx, - struct vdpa_vq_state *state) +static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx, + struct vdpa_vq_state *state) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vringh *vrh = &vq->vring; state->avail_index = vrh->last_avail_idx; + return 0; } static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 066b165c17b1..3fab94f88894 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -375,7 +375,10 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ops->set_vq_ready(vdpa, idx, s.num); return 0; case VHOST_GET_VRING_BASE: - ops->get_vq_state(v->vdpa, idx, &vq_state); + r = ops->get_vq_state(v->vdpa, idx, &vq_state); + if (r) + return r; + vq->last_avail_idx = vq_state.avail_index; break; case VHOST_GET_BACKEND_FEATURES: diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index d7399c983734..eae0bfd87d91 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -193,8 +193,8 @@ struct vdpa_config_ops { bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state); - void (*get_vq_state)(struct vdpa_device *vdev, u16 idx, - struct vdpa_vq_state *state); + int (*get_vq_state)(struct vdpa_device *vdev, u16 idx, + struct vdpa_vq_state *state); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* vq irq is not expected to be changed once DRIVER_OK is set */ -- cgit v1.2.3 From c84f91e2622235bb742f9f20b8675cf095157026 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 5 Aug 2020 19:55:50 -0400 Subject: virtio_config: fix up warnings on parisc Apparently, on parisc le16_to_cpu returns an int. virtio_cread_le is very strict about type sizes so it causes a warning. Fix it up by casting to the correct type. 
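
The type issue behind this warning is easy to reproduce in plain user-space C. The sketch below stands in for an architecture whose le16_to_cpu() yields an int, as described above; it is an illustration, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for an architecture whose le16_to_cpu() returns int. */
	static inline int le16_to_cpu_int(uint16_t v)
	{
		return v;	/* byte swapping elided; only the type matters */
	}

	int main(void)
	{
		uint16_t raw = 0x1234;

		/* Without a cast the converted value has the width of int... */
		printf("%zu\n", sizeof(le16_to_cpu_int(raw)));			/* 4 */
		/* ...casting, as the patch does, restores the 16-bit width. */
		printf("%zu\n", sizeof((uint16_t)le16_to_cpu_int(raw)));	/* 2 */
		return 0;
	}
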
Reported-by: kernel test robot Signed-off-by: Michael S. Tsirkin Link: https://lore.kernel.org/r/20200805235550.1451637-1-mst@redhat.com --- include/linux/virtio_config.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index ecb166c824bb..8fe857e27ef3 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -357,10 +357,10 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) */ #define virtio_le_to_cpu(x) \ _Generic((x), \ - __u8: (x), \ - __le16: le16_to_cpu(x), \ - __le32: le32_to_cpu(x), \ - __le64: le64_to_cpu(x) \ + __u8: (u8)(x), \ + __le16: (u16)le16_to_cpu(x), \ + __le32: (u32)le32_to_cpu(x), \ + __le64: (u64)le64_to_cpu(x) \ ) #define virtio_cpu_to_le(x, m) \ @@ -400,7 +400,6 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) *(ptr) = virtio_le_to_cpu(virtio_cread_v); \ } while(0) -/* Config space accessors. */ #define virtio_cwrite_le(vdev, structname, member, ptr) \ do { \ typeof(((structname*)0)->member) virtio_cwrite_v = \ -- cgit v1.2.3 From 0cd39f4600ed4de859383018eb10f0f724900e1b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 6 Aug 2020 14:35:11 +0200 Subject: locking/seqlock, headers: Untangle the spaghetti monster By using lockdep_assert_*() from seqlock.h, the spaghetti monster attacked. Attack back by reducing seqlock.h dependencies from two key high level headers: - <linux/seqlock.h>: -Remove <linux/ww_mutex.h> - <linux/time.h>: -Remove <linux/seqlock.h> - <linux/sched.h>: +Add <linux/seqlock.h> The price was to add it to sched.h ... Core header fallout, we add direct header dependencies instead of gaining them parasitically from higher level headers: - <linux/dynamic_queue_limits.h>: +Add <asm/bug.h> - <linux/hrtimer.h>: +Add <linux/seqlock.h> - <linux/ktime.h>: +Add <asm/bug.h> - <linux/lockdep.h>: +Add <linux/smp.h> - <linux/sched.h>: +Add <linux/seqlock.h> - <linux/videodev2.h>: +Add <linux/kernel.h> Arch headers fallout: - PARISC: <asm/timex.h>: +Add <asm/special_insns.h> - SH: <asm/io.h>: +Add <asm/page.h> - SPARC: <asm/timer_64.h>: +Add <uapi/asm/asi.h> - SPARC: <asm/vvar.h>: +Add <asm/processor.h>, <asm/barrier.h>, -Remove <linux/seqlock.h> - X86: <asm/fixmap.h>: +Add <asm/pgtable_types.h> -Remove <asm/acpi.h> There's also a bunch of parasitic header dependency fallout in .c files, not listed separately. [ mingo: Extended the changelog, split up & fixed the original patch.
] Co-developed-by: Ingo Molnar Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200804133438.GK2674@hirez.programming.kicks-ass.net --- arch/sh/include/asm/io.h | 1 + arch/sh/kernel/machvec.c | 1 + arch/sparc/include/asm/timer_64.h | 1 + arch/sparc/include/asm/vvar.h | 3 ++- arch/sparc/kernel/vdso.c | 1 - arch/x86/include/asm/fixmap.h | 2 +- arch/x86/kernel/apic/apic_noop.c | 1 + arch/x86/kernel/apic/hw_nmi.c | 1 + arch/x86/kernel/apic/probe_64.c | 1 + arch/x86/kernel/cpu/amd.c | 1 + arch/x86/kernel/cpu/common.c | 1 + arch/x86/kernel/cpu/hygon.c | 1 + arch/x86/kernel/cpu/intel.c | 1 + arch/x86/kernel/jailhouse.c | 1 + arch/x86/kernel/tsc_msr.c | 1 + arch/x86/mm/init_32.c | 1 + arch/x86/xen/apic.c | 1 + arch/x86/xen/smp_hvm.c | 1 + arch/x86/xen/suspend_pv.c | 4 ++-- include/linux/dynamic_queue_limits.h | 2 ++ include/linux/hrtimer.h | 1 + include/linux/ktime.h | 1 + include/linux/lockdep.h | 1 + include/linux/mutex.h | 11 +++++++++++ include/linux/sched.h | 1 + include/linux/seqlock.h | 1 - include/linux/time.h | 1 - include/linux/videodev2.h | 1 + include/linux/ww_mutex.h | 8 -------- 29 files changed, 38 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 26f0f9b4658b..ec587b583822 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index beadbbdb4486..76bd8955d4fe 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -15,6 +15,7 @@ #include #include #include +#include #define MV_NAME_SIZE 32 diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h index c7e4fb601a57..dcfad4613e18 100644 --- a/arch/sparc/include/asm/timer_64.h +++ b/arch/sparc/include/asm/timer_64.h @@ -7,6 +7,7 @@ #ifndef _SPARC64_TIMER_H #define _SPARC64_TIMER_H +#include #include #include diff --git a/arch/sparc/include/asm/vvar.h b/arch/sparc/include/asm/vvar.h index 0289503d1cb0..6eaf5cfcaae1 100644 --- a/arch/sparc/include/asm/vvar.h +++ b/arch/sparc/include/asm/vvar.h @@ -6,7 +6,8 @@ #define _ASM_SPARC_VVAR_DATA_H #include -#include +#include +#include #include #include diff --git a/arch/sparc/kernel/vdso.c b/arch/sparc/kernel/vdso.c index 58880662b271..0e27437eb97b 100644 --- a/arch/sparc/kernel/vdso.c +++ b/arch/sparc/kernel/vdso.c @@ -7,7 +7,6 @@ * a different vsyscall implementation for Linux/IA32 and for the name. */ -#include #include #include diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index b9527a54db99..0f0dd645b594 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -26,9 +26,9 @@ #ifndef __ASSEMBLY__ #include -#include #include #include +#include #ifdef CONFIG_X86_32 #include #include diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index 98c9bb75d185..780c702969b7 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -10,6 +10,7 @@ * like self-ipi, etc... 
*/ #include +#include #include diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index d1fc62a67320..34a992e275ef 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -9,6 +9,7 @@ * Bits copied from original nmi.c file * */ +#include #include #include diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 29f0e0984557..bd3835d6b535 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -8,6 +8,7 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ +#include #include #include "local.h" diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d4806eac9325..dcc3d943c68f 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 95c090a45b4b..52b565016eb1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 4e28c1fc8749..ac6c30e5801d 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 0ab48f1cdf84..6eb42d7a3dfd 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -23,6 +23,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 #include diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index 2caf5b990bf6..4eb8f2d19a87 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 4fec6f3a1858..46c72f2ec32f 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c @@ -7,6 +7,7 @@ */ #include +#include #include #include diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8b4afad84f4a..d46a5cf6ccb0 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -52,6 +52,7 @@ #include #include #include +#include #include "mm_internal.h" diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c index 2df7d089ad54..1aff4ae65655 100644 --- a/arch/x86/xen/apic.c +++ b/arch/x86/xen/apic.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c index f8d39440b292..f5e7db4f82ab 100644 --- a/arch/x86/xen/smp_hvm.c +++ b/arch/x86/xen/smp_hvm.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c index 8303b58c79a9..cae9660f4c67 100644 --- a/arch/x86/xen/suspend_pv.c +++ b/arch/x86/xen/suspend_pv.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include - #include #include +#include + #include "xen-ops.h" void xen_pv_pre_suspend(void) diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 99fc06f0afc1..407c2f281b64 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -38,6 +38,8 @@ #ifdef __KERNEL__ +#include + struct dql { /* Fields accessed in enqueue path (dql_queued) */ unsigned int num_queued; /* 
Total ever queued */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 25993b86ac5c..107cedd7019a 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 42d2e6ac35f2..a12b5523cc18 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -23,6 +23,7 @@ #include #include +#include /* Nanosecond scalar representation for kernel time values */ typedef s64 ktime_t; diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 39a35699d0d6..62a382d1845b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -11,6 +11,7 @@ #define __LINUX_LOCKDEP_H #include +#include #include struct task_struct; diff --git a/include/linux/mutex.h b/include/linux/mutex.h index ae197cc00cc8..dcd185cbfe79 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -65,6 +65,17 @@ struct mutex { #endif }; +struct ww_class; +struct ww_acquire_ctx; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +#ifdef CONFIG_DEBUG_MUTEXES + struct ww_class *ww_class; +#endif +}; + /* * This is the control structure for tasks blocked on mutex, * which resides on the blocked task's kernel stack: diff --git a/include/linux/sched.h b/include/linux/sched.h index 9a9d8263962d..7c7a9499d7bc 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -31,6 +31,7 @@ #include #include #include +#include #include /* task_struct member predeclarations (sorted alphabetically): */ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index a076f783aa36..962d9768945f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -19,7 +19,6 @@ #include #include #include -#include #include diff --git a/include/linux/time.h b/include/linux/time.h index 4c325bf44ce0..b142cb5f5a53 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -3,7 +3,6 @@ #define _LINUX_TIME_H # include -# include # include # include diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 16c0ed6c50a7..219037f4c08d 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -57,6 +57,7 @@ #define __LINUX_VIDEODEV2_H #include /* need struct timeval */ +#include #include #endif /* __LINUX_VIDEODEV2_H */ diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index d7554252404c..850424e5d030 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -48,14 +48,6 @@ struct ww_acquire_ctx { #endif }; -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \ , .ww_class = class -- cgit v1.2.3 From 1fb497dd003009be95ce67689ac800c446b7acc5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 30 Jul 2020 12:14:06 +0200 Subject: posix-cpu-timers: Provide mechanisms to defer timer handling to task_work Running posix CPU timers in hard interrupt context has a few downsides: - For PREEMPT_RT it cannot work as the expiry code needs to take sighand lock, which is a 'sleeping spinlock' in RT. The original RT approach of offloading the posix CPU timer handling into a high priority thread was clumsy and provided no real benefit in general. - For fine grained accounting it's just wrong to run this in context of the timer interrupt because that way a process specific CPU time is accounted to the timer interrupt. 
- Long running timer interrupts caused by a large amount of expiring timers which can be created and armed by unprivileged user space. There is no hard requirement to expire them in interrupt context. If the signal is targeted at the task itself then it won't be delivered before the task returns to user space anyway. If the signal is targeted at a supervisor process then it might be slightly delayed, but posix CPU timers are inaccurate anyway due to the fact that they are tied to the tick. Provide infrastructure to schedule task work which allows splitting the posix CPU timer code into a quick check in interrupt context and a thread context expiry and signal delivery function. This has to be enabled by architectures as it requires that the architecture specific KVM implementation handles pending task work before exiting to guest mode. Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Reviewed-by: Oleg Nesterov Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20200730102337.783470146@linutronix.de --- include/linux/posix-timers.h | 17 ++++ include/linux/sched.h | 4 + kernel/time/Kconfig | 9 ++ kernel/time/posix-cpu-timers.c | 185 ++++++++++++++++++++++++++++++++++++++--- kernel/time/timer.c | 1 + 5 files changed, 204 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index e3f0f8585da4..896c16d2c5fb 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -6,6 +6,7 @@ #include #include #include +#include struct kernel_siginfo; struct task_struct; @@ -125,6 +126,16 @@ struct posix_cputimers { unsigned int expiry_active; }; +/** + * posix_cputimers_work - Container for task work based posix CPU timer expiry + * @work: The task work to be scheduled + * @scheduled: @work has been scheduled already, no further processing + */ +struct posix_cputimers_work { + struct callback_head work; + unsigned int scheduled; +}; + static inline void posix_cputimers_init(struct posix_cputimers *pct) { memset(pct, 0, sizeof(*pct)); @@ -165,6 +176,12 @@ static inline void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) { } #endif +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK +void posix_cputimers_init_work(void); +#else +static inline void posix_cputimers_init_work(void) { } +#endif + #define REQUEUE_PENDING 1 /** diff --git a/include/linux/sched.h b/include/linux/sched.h index 06ec60462af0..e9942ce07ef1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -889,6 +889,10 @@ struct task_struct { /* Empty if CONFIG_POSIX_CPUTIMERS=n */ struct posix_cputimers posix_cputimers; +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK + struct posix_cputimers_work posix_cputimers_work; +#endif + /* Process credentials: */ /* Tracer's credentials at attach: */ diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index fcc42353f125..a09b1d61df6a 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ -52,6 +52,15 @@ config GENERIC_CLOCKEVENTS_MIN_ADJUST config GENERIC_CMOS_UPDATE bool +# Select to handle posix CPU timers from task_work +# and not from the timer interrupt context +config HAVE_POSIX_CPU_TIMERS_TASK_WORK + bool + +config POSIX_CPU_TIMERS_TASK_WORK + bool + default y if POSIX_TIMERS && HAVE_POSIX_CPU_TIMERS_TASK_WORK + if GENERIC_CLOCKEVENTS menu "Timers subsystem" diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index e5ad87320468..a71758e34e45 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@
-377,6 +377,7 @@ static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp) */ static int posix_cpu_timer_create(struct k_itimer *new_timer) { + static struct lock_class_key posix_cpu_timers_key; struct pid *pid; rcu_read_lock(); @@ -386,6 +387,17 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer) return -EINVAL; } + /* + * If posix timer expiry is handled in task work context then + * timer::it_lock can be taken without disabling interrupts as all + * other locking happens in task context. This requires a separate + * lock class key otherwise regular posix timer expiry would record + * the lock class being taken in interrupt context and generate a + * false positive warning. + */ + if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK)) + lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key); + new_timer->kclock = &clock_posix_cpu; timerqueue_init(&new_timer->it.cpu.node); new_timer->it.cpu.pid = get_pid(pid); @@ -1080,26 +1092,163 @@ static inline bool fastpath_timer_check(struct task_struct *tsk) return false; } -static void __run_posix_cpu_timers(struct task_struct *tsk) +static void handle_posix_cpu_timers(struct task_struct *tsk); + +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK +static void posix_cpu_timers_work(struct callback_head *work) +{ + handle_posix_cpu_timers(current); +} + +/* + * Initialize posix CPU timers task work in init task. Out of line to + * keep the callback static and to avoid header recursion hell. + */ +void __init posix_cputimers_init_work(void) +{ + init_task_work(&current->posix_cputimers_work.work, + posix_cpu_timers_work); +} + +/* + * Note: All operations on tsk->posix_cputimer_work.scheduled happen either + * in hard interrupt context or in task context with interrupts + * disabled. Aside of that the writer/reader interaction is always in the + * context of the current task, which means they are strict per CPU. + */ +static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk) +{ + return tsk->posix_cputimers_work.scheduled; +} + +static inline void __run_posix_cpu_timers(struct task_struct *tsk) +{ + if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled)) + return; + + /* Schedule task work to actually expire the timers */ + tsk->posix_cputimers_work.scheduled = true; + task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME); +} + +static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk, + unsigned long start) +{ + bool ret = true; + + /* + * On !RT kernels interrupts are disabled while collecting expired + * timers, so no tick can happen and the fast path check can be + * reenabled without further checks. + */ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + tsk->posix_cputimers_work.scheduled = false; + return true; + } + + /* + * On RT enabled kernels ticks can happen while the expired timers + * are collected under sighand lock. But any tick which observes + * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath + * checks. So reenabling the tick work has to be done carefully: + * + * Disable interrupts and run the fast path check if jiffies have + * advanced since the collecting of expired timers started. If + * jiffies have not advanced or the fast path check did not find + * newly expired timers, reenable the fast path check in the timer + * interrupt. If there are newly expired timers, return false and + * let the collection loop repeat.
+ */ + local_irq_disable(); + if (start != jiffies && fastpath_timer_check(tsk)) + ret = false; + else + tsk->posix_cputimers_work.scheduled = false; + local_irq_enable(); + + return ret; +} +#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */ +static inline void __run_posix_cpu_timers(struct task_struct *tsk) +{ + lockdep_posixtimer_enter(); + handle_posix_cpu_timers(tsk); + lockdep_posixtimer_exit(); +} + +static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk) +{ + return false; +} + +static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk, + unsigned long start) +{ + return true; +} +#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */ + +static void handle_posix_cpu_timers(struct task_struct *tsk) { struct k_itimer *timer, *next; - unsigned long flags; + unsigned long flags, start; LIST_HEAD(firing); if (!lock_task_sighand(tsk, &flags)) return; - /* - * Here we take off tsk->signal->cpu_timers[N] and - * tsk->cpu_timers[N] all the timers that are firing, and - * put them on the firing list. - */ - check_thread_timers(tsk, &firing); + do { + /* + * On RT locking sighand lock does not disable interrupts, + * so this needs to be careful vs. ticks. Store the current + * jiffies value. + */ + start = READ_ONCE(jiffies); + barrier(); - check_process_timers(tsk, &firing); + /* + * Here we take off tsk->signal->cpu_timers[N] and + * tsk->cpu_timers[N] all the timers that are firing, and + * put them on the firing list. + */ + check_thread_timers(tsk, &firing); + + check_process_timers(tsk, &firing); + + /* + * The above timer checks have updated the exipry cache and + * because nothing can have queued or modified timers after + * sighand lock was taken above it is guaranteed to be + * consistent. So the next timer interrupt fastpath check + * will find valid data. + * + * If timer expiry runs in the timer interrupt context then + * the loop is not relevant as timers will be directly + * expired in interrupt context. The stub function below + * returns always true which allows the compiler to + * optimize the loop out. + * + * If timer expiry is deferred to task work context then + * the following rules apply: + * + * - On !RT kernels no tick can have happened on this CPU + * after sighand lock was acquired because interrupts are + * disabled. So reenabling task work before dropping + * sighand lock and reenabling interrupts is race free. + * + * - On RT kernels ticks might have happened but the tick + * work ignored posix CPU timer handling because the + * CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work + * must be done very carefully including a check whether + * ticks have happened since the start of the timer + * expiry checks. posix_cpu_timers_enable_work() takes + * care of that and eventually lets the expiry checks + * run again. + */ + } while (!posix_cpu_timers_enable_work(tsk, start)); /* - * We must release these locks before taking any timer's lock. + * We must release sighand lock before taking any timer's lock. * There is a potential race with timer deletion here, as the * siglock now protects our private firing list. We have set * the firing flag in each timer, so that a deletion attempt @@ -1117,6 +1266,13 @@ static void __run_posix_cpu_timers(struct task_struct *tsk) list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) { int cpu_firing; + /* + * spin_lock() is sufficient here even independent of the + * expiry context. If expiry happens in hard interrupt + * context it's obvious. 
For task work context it's safe + * because all other operations on timer::it_lock happen in + * task context (syscall or exit). + */ spin_lock(&timer->it_lock); list_del_init(&timer->it.cpu.elist); cpu_firing = timer->it.cpu.firing; @@ -1143,6 +1299,13 @@ void run_posix_cpu_timers(void) lockdep_assert_irqs_disabled(); + /* + * If the actual expiry is deferred to task work context and the + * work is already scheduled there is no point to do anything here. + */ + if (posix_cpu_timers_work_scheduled(tsk)) + return; + /* * The fast path checks that there are no expired thread or thread * group timers. If that's so, just return. @@ -1150,9 +1313,7 @@ void run_posix_cpu_timers(void) if (!fastpath_timer_check(tsk)) return; - lockdep_posixtimer_enter(); __run_posix_cpu_timers(tsk); - lockdep_posixtimer_exit(); } /* diff --git a/kernel/time/timer.c b/kernel/time/timer.c index ae5029f984a8..a16764b0116e 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2017,6 +2017,7 @@ static void __init init_timer_cpus(void) void __init init_timers(void) { init_timer_cpus(); + posix_cputimers_init_work(); open_softirq(TIMER_SOFTIRQ, run_timer_softirq); } -- cgit v1.2.3 From 09fc67b500c7f0bb1b5ed774197ac7f2c5285655 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 17 Jul 2020 17:42:55 +0900 Subject: kprobes: Remove show_registers() function prototype Remove the show_registers() function prototype because this function was renamed by commit: 57da8b960b9a ("x86: Avoid double stack traces with show_regs()") and this commit removed the caller in kprobes altogether: 80006dbee674 ("kprobes/x86: Remove jprobe implementation") So it doesn't exist anymore - remove the orphan prototype. Signed-off-by: Masami Hiramatsu Signed-off-by: Ingo Molnar --- include/linux/kprobes.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 45b8cdc9fad7..9be1bff4f586 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -227,7 +227,6 @@ extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); -extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); -- cgit v1.2.3 From b55b3fdce3e554a6bbe8f8ca6a01a892d720e64e Mon Sep 17 00:00:00 2001 From: Bhupesh Sharma Date: Fri, 17 Jul 2020 13:01:00 +0530 Subject: hw_breakpoint: Remove unused __register_perf_hw_breakpoint() declaration Commit: b326e9560a28 ("hw-breakpoints: Use overflow handler instead of the event callback") removed the __register_perf_hw_breakpoint() usage and replaced it with register_perf_hw_breakpoint(). Remove the left-over unused __register_perf_hw_breakpoint() declaration from <linux/hw_breakpoint.h> as well.
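
For reference, the register_wide_hw_breakpoint() API that remains in this header is used roughly as in the sketch below (modelled loosely on samples/hw_breakpoint/data_breakpoint.c; the handler body and the watched address are illustrative assumptions):

	#include <linux/hw_breakpoint.h>
	#include <linux/perf_event.h>

	static struct perf_event * __percpu *sample_hbp;

	/* Called whenever the watched data is written. */
	static void sample_hbp_handler(struct perf_event *bp,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
	{
		pr_info("watched variable was accessed\n");
	}

	static int watch_data(void *addr)	/* addr: data to watch */
	{
		struct perf_event_attr attr;

		hw_breakpoint_init(&attr);
		attr.bp_addr = (unsigned long)addr;
		attr.bp_len = HW_BREAKPOINT_LEN_4;
		attr.bp_type = HW_BREAKPOINT_W;

		sample_hbp = register_wide_hw_breakpoint(&attr,
							 sample_hbp_handler,
							 NULL);
		if (IS_ERR((void __force *)sample_hbp))
			return PTR_ERR((void __force *)sample_hbp);
		return 0;
	}
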
Signed-off-by: Bhupesh Sharma Signed-off-by: Ingo Molnar Acked-by: Mark Rutland Link: https://lore.kernel.org/r/1594971060-14180-1-git-send-email-bhsharma@redhat.com --- include/linux/hw_breakpoint.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index d7d4250cd1e4..78dd7035d1e5 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -72,7 +72,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, void *context); extern int register_perf_hw_breakpoint(struct perf_event *bp); -extern int __register_perf_hw_breakpoint(struct perf_event *bp); extern void unregister_hw_breakpoint(struct perf_event *bp); extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events); @@ -119,8 +118,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, void *context) { return NULL; } static inline int register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } -static inline int -__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline void unregister_hw_breakpoint(struct perf_event *bp) { } static inline void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { } -- cgit v1.2.3 From bb22d80b47d5dd641d09d31946c4be0f610f3f45 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 17 Jul 2020 16:36:40 -0700 Subject: LSM: drop duplicated words in header file comments Drop the doubled words "the" and "and" in comments. Signed-off-by: Randy Dunlap Acked-by: Serge Hallyn Cc: linux-security-module@vger.kernel.org Signed-off-by: James Morris --- include/linux/lsm_hook_defs.h | 2 +- include/linux/lsm_hooks.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 5616b2567aa7..dec0e5186834 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -15,7 +15,7 @@ */ /* - * The macro LSM_HOOK is used to define the data structures required by the + * The macro LSM_HOOK is used to define the data structures required by * the LSM framework using the pattern: * * LSM_HOOK(, , , args...) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 988ca0df7824..afd2c5becec5 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -798,7 +798,7 @@ * structure. Note that the security field was not added directly to the * socket structure, but rather, the socket security information is stored * in the associated inode. Typically, the inode alloc_security hook will - * allocate and and attach security information to + * allocate and attach security information to * SOCK_INODE(sock)->i_security. This hook may be used to update the * SOCK_INODE(sock)->i_security field with additional information that * wasn't available when the inode was allocated. -- cgit v1.2.3 From 5e7b30205cef80f6bb922e61834437ca7bff5837 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 4 Aug 2020 22:50:56 -0700 Subject: bpf: Change uapi for bpf iterator map elements Commit a5cbe05a6673 ("bpf: Implement bpf iterator for map elements") added bpf iterator support for map elements. The map element bpf iterator requires info to identify a particular map. 
In the above commit, the attr->link_create.target_fd is used to carry map_fd and an enum bpf_iter_link_info is added to uapi to specify that the target_fd actually represents a map_fd: enum bpf_iter_link_info { BPF_ITER_LINK_UNSPEC = 0, BPF_ITER_LINK_MAP_FD = 1, MAX_BPF_ITER_LINK_INFO, }; This is an extensible approach as we can grow the enumerator for pid, cgroup_id, etc. and we can unionize target_fd for pid, cgroup_id, etc. But in the future, there are chances that more complex customization may happen, e.g., for tasks, it could be filtered based on both cgroup_id and user_id. This patch changed the uapi to have the fields __aligned_u64 iter_info; __u32 iter_info_len; for additional iter_info for link_create. The iter_info is defined as union bpf_iter_link_info { struct { __u32 map_fd; } map; }; So future extension for additional customization will be easier. The bpf_iter_link_info will be passed to the target callback to validate, and the generic bpf_iter framework does not need to deal with it any more. Note that map_fd = 0 will be considered invalid and -EBADF will be returned to user space. Fixes: a5cbe05a6673 ("bpf: Implement bpf iterator for map elements") Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200805055056.1457463-1-yhs@fb.com --- include/linux/bpf.h | 10 ++++---- include/uapi/linux/bpf.h | 15 ++++++------ kernel/bpf/bpf_iter.c | 58 +++++++++++++++++++++++------------------------ kernel/bpf/map_iter.c | 37 +++++++++++++++++++++++------- kernel/bpf/syscall.c | 2 +- net/core/bpf_sk_storage.c | 37 +++++++++++++++++++++++------- 6 files changed, 102 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index cef4ef0d2b4e..55f694b63164 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1214,15 +1214,17 @@ struct bpf_iter_aux_info { struct bpf_map *map; }; -typedef int (*bpf_iter_check_target_t)(struct bpf_prog *prog, - struct bpf_iter_aux_info *aux); +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux); +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; - bpf_iter_check_target_t check_target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; u32 ctx_arg_info_size; - enum bpf_iter_link_info req_linfo; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; const struct bpf_iter_seq_info *seq_info; }; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b134e679e9db..0480f893facd 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key { __u32 attach_type; /* program attach type */ }; +union bpf_iter_link_info { + struct { + __u32 map_fd; + } map; +}; + /* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { BPF_MAP_CREATE, @@ -249,13 +255,6 @@ enum bpf_link_type { MAX_BPF_LINK_TYPE, }; -enum bpf_iter_link_info { - BPF_ITER_LINK_UNSPEC = 0, - BPF_ITER_LINK_MAP_FD = 1, - - MAX_BPF_ITER_LINK_INFO, -}; - /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree.
@@ -623,6 +622,8 @@ union bpf_attr { }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ + __aligned_u64 iter_info; /* extra bpf_iter_link_info */ + __u32 iter_info_len; /* iter_info length */ } link_create; struct { /* struct used by BPF_LINK_UPDATE command */ diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 363b9cafc2d8..b6715964b685 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -338,8 +338,8 @@ static void bpf_iter_link_release(struct bpf_link *link) struct bpf_iter_link *iter_link = container_of(link, struct bpf_iter_link, link); - if (iter_link->aux.map) - bpf_map_put_with_uref(iter_link->aux.map); + if (iter_link->tinfo->reg_info->detach_target) + iter_link->tinfo->reg_info->detach_target(&iter_link->aux); } static void bpf_iter_link_dealloc(struct bpf_link *link) @@ -390,15 +390,35 @@ bool bpf_link_is_iter(struct bpf_link *link) int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { + union bpf_iter_link_info __user *ulinfo; struct bpf_link_primer link_primer; struct bpf_iter_target_info *tinfo; - struct bpf_iter_aux_info aux = {}; + union bpf_iter_link_info linfo; struct bpf_iter_link *link; - u32 prog_btf_id, target_fd; + u32 prog_btf_id, linfo_len; bool existed = false; - struct bpf_map *map; int err; + if (attr->link_create.target_fd || attr->link_create.flags) + return -EINVAL; + + memset(&linfo, 0, sizeof(union bpf_iter_link_info)); + + ulinfo = u64_to_user_ptr(attr->link_create.iter_info); + linfo_len = attr->link_create.iter_info_len; + if (!ulinfo ^ !linfo_len) + return -EINVAL; + + if (ulinfo) { + err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo), + linfo_len); + if (err) + return err; + linfo_len = min_t(u32, linfo_len, sizeof(linfo)); + if (copy_from_user(&linfo, ulinfo, linfo_len)) + return -EFAULT; + } + prog_btf_id = prog->aux->attach_btf_id; mutex_lock(&targets_mutex); list_for_each_entry(tinfo, &targets, list) { @@ -411,13 +431,6 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) if (!existed) return -ENOENT; - /* Make sure user supplied flags are target expected. 
*/ - target_fd = attr->link_create.target_fd; - if (attr->link_create.flags != tinfo->reg_info->req_linfo) - return -EINVAL; - if (!attr->link_create.flags && target_fd) - return -EINVAL; - link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); if (!link) return -ENOMEM; @@ -431,28 +444,15 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) return err; } - if (tinfo->reg_info->req_linfo == BPF_ITER_LINK_MAP_FD) { - map = bpf_map_get_with_uref(target_fd); - if (IS_ERR(map)) { - err = PTR_ERR(map); - goto cleanup_link; - } - - aux.map = map; - err = tinfo->reg_info->check_target(prog, &aux); + if (tinfo->reg_info->attach_target) { + err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux); if (err) { - bpf_map_put_with_uref(map); - goto cleanup_link; + bpf_link_cleanup(&link_primer); + return err; } - - link->aux.map = map; } return bpf_link_settle(&link_primer); - -cleanup_link: - bpf_link_cleanup(&link_primer); - return err; } static void init_seq_meta(struct bpf_iter_priv_data *priv_data, diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index fbe1f557cb88..af86048e5afd 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -98,12 +98,21 @@ static struct bpf_iter_reg bpf_map_reg_info = { .seq_info = &bpf_map_seq_info, }; -static int bpf_iter_check_map(struct bpf_prog *prog, - struct bpf_iter_aux_info *aux) +static int bpf_iter_attach_map(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux) { u32 key_acc_size, value_acc_size, key_size, value_size; - struct bpf_map *map = aux->map; + struct bpf_map *map; bool is_percpu = false; + int err = -EINVAL; + + if (!linfo->map.map_fd) + return -EBADF; + + map = bpf_map_get_with_uref(linfo->map.map_fd); + if (IS_ERR(map)) + return PTR_ERR(map); if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || @@ -112,7 +121,7 @@ static int bpf_iter_check_map(struct bpf_prog *prog, else if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_LRU_HASH && map->map_type != BPF_MAP_TYPE_ARRAY) - return -EINVAL; + goto put_map; key_acc_size = prog->aux->max_rdonly_access; value_acc_size = prog->aux->max_rdwr_access; @@ -122,10 +131,22 @@ static int bpf_iter_check_map(struct bpf_prog *prog, else value_size = round_up(map->value_size, 8) * num_possible_cpus(); - if (key_acc_size > key_size || value_acc_size > value_size) - return -EACCES; + if (key_acc_size > key_size || value_acc_size > value_size) { + err = -EACCES; + goto put_map; + } + aux->map = map; return 0; + +put_map: + bpf_map_put_with_uref(map); + return err; +} + +static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux) +{ + bpf_map_put_with_uref(aux->map); } DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta, @@ -133,8 +154,8 @@ DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta, static const struct bpf_iter_reg bpf_map_elem_reg_info = { .target = "bpf_map_elem", - .check_target = bpf_iter_check_map, - .req_linfo = BPF_ITER_LINK_MAP_FD, + .attach_target = bpf_iter_attach_map, + .detach_target = bpf_iter_detach_map, .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_map_elem, key), diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 2f343ce15747..86299a292214 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3883,7 +3883,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog * return -EINVAL; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.flags 
+#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len static int link_create(union bpf_attr *attr) { enum bpf_prog_type ptype; diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index d3377c90a291..b988f48153a4 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -1384,18 +1384,39 @@ static int bpf_iter_init_sk_storage_map(void *priv_data, return 0; } -static int bpf_iter_check_map(struct bpf_prog *prog, - struct bpf_iter_aux_info *aux) +static int bpf_iter_attach_map(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux) { - struct bpf_map *map = aux->map; + struct bpf_map *map; + int err = -EINVAL; + + if (!linfo->map.map_fd) + return -EBADF; + + map = bpf_map_get_with_uref(linfo->map.map_fd); + if (IS_ERR(map)) + return PTR_ERR(map); if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) - return -EINVAL; + goto put_map; - if (prog->aux->max_rdonly_access > map->value_size) - return -EACCES; + if (prog->aux->max_rdonly_access > map->value_size) { + err = -EACCES; + goto put_map; + } + aux->map = map; return 0; + +put_map: + bpf_map_put_with_uref(map); + return err; +} + +static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux) +{ + bpf_map_put_with_uref(aux->map); } static const struct seq_operations bpf_sk_storage_map_seq_ops = { @@ -1414,8 +1435,8 @@ static const struct bpf_iter_seq_info iter_seq_info = { static struct bpf_iter_reg bpf_sk_storage_map_reg_info = { .target = "bpf_sk_storage_map", - .check_target = bpf_iter_check_map, - .req_linfo = BPF_ITER_LINK_MAP_FD, + .attach_target = bpf_iter_attach_map, + .detach_target = bpf_iter_detach_map, .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_sk_storage_map, sk), -- cgit v1.2.3 From d58669b093997e4e5f98c38a54f99761657c19d2 Mon Sep 17 00:00:00 2001 From: Akshu Agrawal Date: Fri, 31 Jul 2020 19:06:01 +0530 Subject: ACPI: APD: Change name from ST to FCH The AMD SoC general-purpose clk is present in new platforms with the same MMIO mappings, so we can reuse the same clk handler support for other platforms. Hence, change the name from ST (SoC) to FCH (IP). Signed-off-by: Akshu Agrawal Acked-by: Stephen Boyd Signed-off-by: Rafael J.
Wysocki --- drivers/acpi/acpi_apd.c | 14 +++++++------- drivers/clk/x86/clk-st.c | 4 ++-- include/linux/platform_data/clk-fch.h | 17 +++++++++++++++++ include/linux/platform_data/clk-st.h | 17 ----------------- 4 files changed, 26 insertions(+), 26 deletions(-) create mode 100644 include/linux/platform_data/clk-fch.h delete mode 100644 include/linux/platform_data/clk-st.h (limited to 'include/linux') diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index f24f6d3f1fa5..36319deb24d9 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -8,7 +8,7 @@ */ #include -#include +#include #include #include #include @@ -79,11 +79,11 @@ static int misc_check_res(struct acpi_resource *ares, void *data) return !acpi_dev_resource_memory(ares, &res); } -static int st_misc_setup(struct apd_private_data *pdata) +static int fch_misc_setup(struct apd_private_data *pdata) { struct acpi_device *adev = pdata->adev; struct platform_device *clkdev; - struct st_clk_data *clk_data; + struct fch_clk_data *clk_data; struct resource_entry *rentry; struct list_head resource_list; int ret; @@ -106,7 +106,7 @@ static int st_misc_setup(struct apd_private_data *pdata) acpi_dev_free_resource_list(&resource_list); - clkdev = platform_device_register_data(&adev->dev, "clk-st", + clkdev = platform_device_register_data(&adev->dev, "clk-fch", PLATFORM_DEVID_NONE, clk_data, sizeof(*clk_data)); return PTR_ERR_OR_ZERO(clkdev); @@ -135,8 +135,8 @@ static const struct apd_device_desc cz_uart_desc = { .properties = uart_properties, }; -static const struct apd_device_desc st_misc_desc = { - .setup = st_misc_setup, +static const struct apd_device_desc fch_misc_desc = { + .setup = fch_misc_setup, }; #endif @@ -239,7 +239,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "AMD0020", APD_ADDR(cz_uart_desc) }, { "AMDI0020", APD_ADDR(cz_uart_desc) }, { "AMD0030", }, - { "AMD0040", APD_ADDR(st_misc_desc)}, + { "AMD0040", APD_ADDR(fch_misc_desc)}, { "HYGO0010", APD_ADDR(wt_i2c_desc) }, #endif #ifdef CONFIG_ARM64 diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index 25d4b97aff9b..c2438874d9f2 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include /* Clock Driving Strength 2 register */ @@ -31,7 +31,7 @@ static struct clk_hw *hws[ST_MAX_CLKS]; static int st_clk_probe(struct platform_device *pdev) { - struct st_clk_data *st_data; + struct fch_clk_data *st_data; st_data = dev_get_platdata(&pdev->dev); if (!st_data || !st_data->base) diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h new file mode 100644 index 000000000000..850ca776156d --- /dev/null +++ b/include/linux/platform_data/clk-fch.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * clock framework for AMD misc clocks + * + * Copyright 2018 Advanced Micro Devices, Inc. + */ + +#ifndef __CLK_FCH_H +#define __CLK_FCH_H + +#include + +struct fch_clk_data { + void __iomem *base; +}; + +#endif /* __CLK_FCH_H */ diff --git a/include/linux/platform_data/clk-st.h b/include/linux/platform_data/clk-st.h deleted file mode 100644 index 7cdb6a402b35..000000000000 --- a/include/linux/platform_data/clk-st.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * clock framework for AMD Stoney based clock - * - * Copyright 2018 Advanced Micro Devices, Inc. 
- */ - -#ifndef __CLK_ST_H -#define __CLK_ST_H - -#include - -struct st_clk_data { - void __iomem *base; -}; - -#endif /* __CLK_ST_H */ -- cgit v1.2.3 From 7f8802f2d2ed67ffbac9264f946a52507f749e19 Mon Sep 17 00:00:00 2001 From: Akshu Agrawal Date: Fri, 31 Jul 2020 19:06:03 +0530 Subject: ACPI: APD: Add a fmw property is_raven There is a slight difference in the misc clk architecture of AMD RV-based SoCs. The fmw property will help in differentiating the SoCs. Signed-off-by: Akshu Agrawal Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpi_apd.c | 4 ++++ include/linux/platform_data/clk-fch.h | 1 + 2 files changed, 5 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 36319deb24d9..4c348377a39d 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -82,6 +82,7 @@ static int misc_check_res(struct acpi_resource *ares, void *data) static int fch_misc_setup(struct apd_private_data *pdata) { struct acpi_device *adev = pdata->adev; + const union acpi_object *obj; struct platform_device *clkdev; struct fch_clk_data *clk_data; struct resource_entry *rentry; @@ -98,6 +99,9 @@ static int fch_misc_setup(struct apd_private_data *pdata) if (ret < 0) return -ENOENT; + acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj); + clk_data->is_rv = obj->integer.value; + list_for_each_entry(rentry, &resource_list, node) { clk_data->base = devm_ioremap(&adev->dev, rentry->res->start, resource_size(rentry->res)); diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h index 850ca776156d..b9f682459f08 100644 --- a/include/linux/platform_data/clk-fch.h +++ b/include/linux/platform_data/clk-fch.h @@ -12,6 +12,7 @@ struct fch_clk_data { void __iomem *base; + u32 is_rv; }; #endif /* __CLK_FCH_H */ -- cgit v1.2.3 From 519a8a6cf91dda095be2d36216fc4ebc525270a1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 10 Aug 2020 18:42:14 +0200 Subject: net: Revert "net: optimize the sockptr_t for unified kernel/user address spaces" This reverts commits 6d04fe15f78acdf8e32329e208552e226f7a8ae6 and a31edb2059ed4e498f9aa8230c734b59d0ad797a. It turns out the idea to share a single pointer for both kernel and user space addresses causes various kinds of problems. So use the slightly less optimal version that uses an extra bit, but which is guaranteed to be safe everywhere. Fixes: 6d04fe15f78a ("net: optimize the sockptr_t for unified kernel/user address spaces") Reported-by: Eric Dumazet Reported-by: John Stultz Signed-off-by: Christoph Hellwig Signed-off-by: David S.
Miller --- include/linux/sockptr.h | 26 ++------------------------ net/ipv4/bpfilter/sockopt.c | 14 ++++++-------- net/socket.c | 6 +----- 3 files changed, 9 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 96840def9d69..ea193414298b 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -8,26 +8,9 @@ #ifndef _LINUX_SOCKPTR_H #define _LINUX_SOCKPTR_H -#include #include #include -#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE -typedef union { - void *kernel; - void __user *user; -} sockptr_t; - -static inline bool sockptr_is_kernel(sockptr_t sockptr) -{ - return (unsigned long)sockptr.kernel >= TASK_SIZE; -} - -static inline sockptr_t KERNEL_SOCKPTR(void *p) -{ - return (sockptr_t) { .kernel = p }; -} -#else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ typedef struct { union { void *kernel; @@ -45,15 +28,10 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p) { return (sockptr_t) { .kernel = p, .is_kernel = true }; } -#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ -static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p, - size_t size) +static inline sockptr_t USER_SOCKPTR(void __user *p) { - if (!access_ok(p, size)) - return -EFAULT; - *sp = (sockptr_t) { .user = p }; - return 0; + return (sockptr_t) { .user = p }; } static inline bool sockptr_is_null(sockptr_t sockptr) diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 545b2640f019..1b34cb9a7708 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -57,18 +57,16 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, return bpfilter_mbox_request(sk, optname, optval, optlen, true); } -int bpfilter_ip_get_sockopt(struct sock *sk, int optname, - char __user *user_optval, int __user *optlen) +int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, + int __user *optlen) { - sockptr_t optval; - int err, len; + int len; if (get_user(len, optlen)) return -EFAULT; - err = init_user_sockptr(&optval, user_optval, len); - if (err) - return err; - return bpfilter_mbox_request(sk, optname, optval, len, false); + + return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len, + false); } static int __init bpfilter_sockopt_init(void) diff --git a/net/socket.c b/net/socket.c index f4d5998bdcba..dbbe8ea7d395 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2095,7 +2095,7 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, int optlen) { - sockptr_t optval; + sockptr_t optval = USER_SOCKPTR(user_optval); char *kernel_optval = NULL; int err, fput_needed; struct socket *sock; @@ -2103,10 +2103,6 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, if (optlen < 0) return -EINVAL; - err = init_user_sockptr(&optval, user_optval, optlen); - if (err) - return err; - sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; -- cgit v1.2.3 From 444da3f52407d74c9aa12187ac6b01f76ee47d62 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 10 Aug 2020 11:21:11 -0700 Subject: bitfield.h: don't compile-time validate _val in FIELD_FIT When ur_load_imm_any() is inlined into jeq_imm(), it's possible for the compiler to deduce a case where _val can only have the value of -1 at compile time. 
Specifically, /* struct bpf_insn: _s32 imm */ u64 imm = insn->imm; /* sign extend */ if (imm >> 32) { /* non-zero only if insn->imm is negative */ /* inlined from ur_load_imm_any */ u32 __imm = imm >> 32; /* therefore, always 0xffffffff */ if (__builtin_constant_p(__imm) && __imm > 255) compiletime_assert_XXX() This can result in tripping a BUILD_BUG_ON() in __BF_FIELD_CHECK() that checks that a given value is representable in one byte (interpreted as unsigned). FIELD_FIT() should return true or false at runtime for whether a value can fit or not. Don't break the build over a value that's too large for the mask. We'd prefer to keep the inlining and compiler optimizations though we know this case will always return false. Cc: stable@vger.kernel.org Fixes: 1697599ee301a ("bitfield.h: add FIELD_FIT() helper") Link: https://lore.kernel.org/kernel-hardening/CAK7LNASvb0UDJ0U5wkYYRzTAdnEs64HjXpEUL7d=V0CXiAXcNw@mail.gmail.com/ Reported-by: Masahiro Yamada Debugged-by: Sami Tolvanen Signed-off-by: Jakub Kicinski Signed-off-by: Nick Desaulniers Signed-off-by: David S. Miller --- include/linux/bitfield.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 48ea093ff04c..4e035aca6f7e 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -77,7 +77,7 @@ */ #define FIELD_FIT(_mask, _val) \ ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) -- cgit v1.2.3 From efa8480a831673bb52400df9dbe5da0aacda97bf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 10 Aug 2020 18:44:24 -0600 Subject: fs: RWF_NOWAIT should imply IOCB_NOIO With the change allowing read-ahead for IOCB_NOWAIT, we changed the RWF_NOWAIT semantics of only doing cached reads. Since we now have IOCB_NOIO to manage that specific side of it, just make RWF_NOWAIT imply IOCB_NOIO as well to restore the previous behavior. Fixes: 2e85abf053b9 ("mm: allow read-ahead with IOCB_NOWAIT set") Reported-by: Dave Chinner Reviewed-by: Dave Chinner Signed-off-by: Jens Axboe --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index bd7ec3eaeed0..f1cca4bfdd7b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3293,7 +3293,7 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) if (flags & RWF_NOWAIT) { if (!(ki->ki_filp->f_mode & FMODE_NOWAIT)) return -EOPNOTSUPP; - kiocb_flags |= IOCB_NOWAIT; + kiocb_flags |= IOCB_NOWAIT | IOCB_NOIO; } if (flags & RWF_HIPRI) kiocb_flags |= IOCB_HIPRI; -- cgit v1.2.3 From f6ebbcf08f37b01827c51309a188e85165e498e7 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 6 Aug 2020 14:03:55 +0200 Subject: cpufreq: intel_pstate: Implement passive mode with HWP enabled Allow intel_pstate to work in the passive mode with HWP enabled and make it set the HWP minimum performance limit (HWP floor) to the P-state value given by the target frequency supplied by the cpufreq governor, so as to prevent the HWP algorithm and the CPU scheduler from working against each other, at least when the schedutil governor is in use, and update the intel_pstate documentation accordingly.
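As a rough sketch of that floor mechanism (simplified here for illustration only; the driver's actual version is intel_cpufreq_adjust_hwp() in the intel_pstate.c hunks below, which also refreshes the max-performance field):

	/* hwp_set_floor() is an illustrative name, not part of the patch. */
	static void hwp_set_floor(struct cpudata *cpu, u32 target_pstate)
	{
		u64 value = READ_ONCE(cpu->hwp_req_cached);

		/* The floor P-state lives in bits 7:0 of MSR_HWP_REQUEST. */
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(target_pstate);

		WRITE_ONCE(cpu->hwp_req_cached, value);
		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
	}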
Among other things, this allows utilization clamps to be taken into account, at least to a certain extent, when intel_pstate is in use and makes it more likely that sufficient capacity for deadline tasks will be provided. After this change, the resulting behavior of an HWP system with intel_pstate in the passive mode should be close to the behavior of the analogous non-HWP system with intel_pstate in the passive mode, except that the HWP algorithm is generally allowed to make the CPU run at a frequency above the floor P-state set by intel_pstate in the entire available range of P-states, while without HWP a CPU can run in a P-state above the requested one if the latter falls into the range of turbo P-states (referred to as the turbo range) or if the P-states of all CPUs in one package are coordinated with each other at the hardware level. [Note that in principle the HWP floor may not be taken into account by the processor if it falls into the turbo range, in which case the processor has a license to choose any P-state, either below or above the HWP floor, just like a non-HWP processor in the case when the target P-state falls into the turbo range.] With this change applied, intel_pstate in the passive mode assumes complete control over the HWP request MSR and concurrent changes of that MSR (e.g. via the direct MSR access interface) are overridden by it. Signed-off-by: Rafael J. Wysocki Acked-by: Srinivas Pandruvada Reviewed-by: Francisco Jerez --- Documentation/admin-guide/pm/intel_pstate.rst | 89 +++++----- drivers/cpufreq/cpufreq.c | 6 +- drivers/cpufreq/intel_pstate.c | 245 +++++++++++++++++++------- include/linux/cpufreq.h | 2 + 4 files changed, 229 insertions(+), 113 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst index 40d481cca368..f85767e09911 100644 --- a/Documentation/admin-guide/pm/intel_pstate.rst +++ b/Documentation/admin-guide/pm/intel_pstate.rst @@ -54,10 +54,13 @@ registered (see `below `_). Operation Modes =============== -``intel_pstate`` can operate in three different modes: in the active mode with -or without hardware-managed P-states support and in the passive mode. Which of -them will be in effect depends on what kernel command line options are used and -on the capabilities of the processor. +``intel_pstate`` can operate in two different modes, active or passive. In the +active mode, it uses its own internal performance scaling governor algorithm or +allows the hardware to do performance scaling by itself, while in the passive +mode it responds to requests made by a generic ``CPUFreq`` governor implementing +a certain performance scaling algorithm. Which of them will be in effect +depends on what kernel command line options are used and on the capabilities of +the processor. Active Mode ----------- @@ -194,10 +197,11 @@ This is the default operation mode of ``intel_pstate`` for processors without hardware-managed P-states (HWP) support. It is always used if the ``intel_pstate=passive`` argument is passed to the kernel in the command line regardless of whether or not the given processor supports HWP. [Note that the -``intel_pstate=no_hwp`` setting implies ``intel_pstate=passive`` if it is used -without ``intel_pstate=active``.] Like in the active mode without HWP support, -in this mode ``intel_pstate`` may refuse to work with processors that are not -recognized by it.
+``intel_pstate=no_hwp`` setting causes the driver to start in the passive mode +if it is not combined with ``intel_pstate=active``.] Like in the active mode +without HWP support, in this mode ``intel_pstate`` may refuse to work with +processors that are not recognized by it if HWP is prevented from being enabled +through the kernel command line. If the driver works in this mode, the ``scaling_driver`` policy attribute in ``sysfs`` for all ``CPUFreq`` policies contains the string "intel_cpufreq". @@ -318,10 +322,9 @@ manuals need to be consulted to get to it too. For this reason, there is a list of supported processors in ``intel_pstate`` and the driver initialization will fail if the detected processor is not in that -list, unless it supports the `HWP feature `_. [The interface to -obtain all of the information listed above is the same for all of the processors -supporting the HWP feature, which is why they all are supported by -``intel_pstate``.] +list, unless it supports the HWP feature. [The interface to obtain all of the +information listed above is the same for all of the processors supporting the +HWP feature, which is why ``intel_pstate`` works with all of them.] User Space Interface in ``sysfs`` @@ -425,22 +428,16 @@ argument is passed to the kernel in the command line. as well as the per-policy ones) are then reset to their default values, possibly depending on the target operation mode.] - That only is supported in some configurations, though (for example, if - the `HWP feature is enabled in the processor `_, - the operation mode of the driver cannot be changed), and if it is not - supported in the current configuration, writes to this attribute will - fail with an appropriate error. - ``energy_efficiency`` - This attribute is only present on platforms, which have CPUs matching - Kaby Lake or Coffee Lake desktop CPU model. By default - energy efficiency optimizations are disabled on these CPU models in HWP - mode by this driver. Enabling energy efficiency may limit maximum - operating frequency in both HWP and non HWP mode. In non HWP mode, - optimizations are done only in the turbo frequency range. In HWP mode, - optimizations are done in the entire frequency range. Setting this - attribute to "1" enables energy efficiency optimizations and setting - to "0" disables energy efficiency optimizations. + This attribute is only present on platforms with CPUs matching the Kaby + Lake or Coffee Lake desktop CPU model. By default, energy-efficiency + optimizations are disabled on these CPU models if HWP is enabled. + Enabling energy-efficiency optimizations may limit maximum operating + frequency with or without the HWP feature. With HWP enabled, the + optimizations are done only in the turbo frequency range. Without it, + they are done in the entire available frequency range. Setting this + attribute to "1" enables the energy-efficiency optimizations and setting + to "0" disables them. Interpretation of Policy Attributes ----------------------------------- @@ -484,8 +481,8 @@ Next, the following policy attributes have special meaning if policy for the time interval between the last two invocations of the driver's utilization update callback by the CPU scheduler for that CPU. -One more policy attribute is present if the `HWP feature is enabled in the -processor `_: +One more policy attribute is present if the HWP feature is enabled in the +processor: ``base_frequency`` Shows the base frequency of the CPU. 
Any frequency above this will be @@ -526,11 +523,11 @@ on the following rules, regardless of the current operation mode of the driver: 3. The global and per-policy limits can be set independently. -If the `HWP feature is enabled in the processor `_, the -resulting effective values are written into its registers whenever the limits -change in order to request its internal P-state selection logic to always set -P-states within these limits. Otherwise, the limits are taken into account by -scaling governors (in the `passive mode `_) and by the driver +In the `active mode with the HWP feature enabled `_, the +resulting effective values are written into hardware registers whenever the +limits change in order to request its internal P-state selection logic to always +set P-states within these limits. Otherwise, the limits are taken into account +by scaling governors (in the `passive mode `_) and by the driver every time before setting a new P-state for a CPU. Additionally, if the ``intel_pstate=per_cpu_perf_limits`` command line argument @@ -541,12 +538,11 @@ at all and the only way to set the limits is by using the policy attributes. Energy vs Performance Hints --------------------------- -If ``intel_pstate`` works in the `active mode with the HWP feature enabled -`_ in the processor, additional attributes are present -in every ``CPUFreq`` policy directory in ``sysfs``. They are intended to allow -user space to help ``intel_pstate`` to adjust the processor's internal P-state -selection logic by focusing it on performance or on energy-efficiency, or -somewhere between the two extremes: +If the hardware-managed P-states (HWP) is enabled in the processor, additional +attributes, intended to allow user space to help ``intel_pstate`` to adjust the +processor's internal P-state selection logic by focusing it on performance or on +energy-efficiency, or somewhere between the two extremes, are present in every +``CPUFreq`` policy directory in ``sysfs``. They are : ``energy_performance_preference`` Current value of the energy vs performance hint for the given policy @@ -650,12 +646,14 @@ of them have to be prepended with the ``intel_pstate=`` prefix. Do not register ``intel_pstate`` as the scaling driver even if the processor is supported by it. +``active`` + Register ``intel_pstate`` in the `active mode `_ to start + with. + ``passive`` Register ``intel_pstate`` in the `passive mode `_ to start with. - This option implies the ``no_hwp`` one described below. - ``force`` Register ``intel_pstate`` as the scaling driver instead of ``acpi-cpufreq`` even if the latter is preferred on the given system. @@ -670,13 +668,12 @@ of them have to be prepended with the ``intel_pstate=`` prefix. driver is used instead of ``acpi-cpufreq``. ``no_hwp`` - Do not enable the `hardware-managed P-states (HWP) feature - `_ even if it is supported by the processor. + Do not enable the hardware-managed P-states (HWP) feature even if it is + supported by the processor. ``hwp_only`` Register ``intel_pstate`` as the scaling driver only if the - `hardware-managed P-states (HWP) feature `_ is - supported by the processor. + hardware-managed P-states (HWP) feature is supported by the processor. ``support_acpi_ppc`` Take ACPI ``_PPC`` performance limits into account. 
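A side note on the EPP hints documented above: the hint is an 8-bit value packed into bits 31:24 of MSR_HWP_REQUEST, the same register whose cached copy the driver manipulates in the source hunks that follow. A minimal sketch of the packing (epp_pack() is an illustrative helper name; intel_pstate_set_epp() in the diff below performs the equivalent update on the cached MSR value):

	static u64 epp_pack(u64 hwp_req, u32 epp)
	{
		/* 0 = maximum performance ... 255 = maximum energy efficiency */
		hwp_req &= ~GENMASK_ULL(31, 24);	/* clear the old EPP field */
		hwp_req |= (u64)epp << 24;		/* insert the new hint */
		return hwp_req;
	}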
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index afad06b91c77..02ab56b2a0d8 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -73,8 +73,6 @@ static inline bool has_target(void) static unsigned int __cpufreq_get(struct cpufreq_policy *policy); static int cpufreq_init_governor(struct cpufreq_policy *policy); static void cpufreq_exit_governor(struct cpufreq_policy *policy); -static int cpufreq_start_governor(struct cpufreq_policy *policy); -static void cpufreq_stop_governor(struct cpufreq_policy *policy); static void cpufreq_governor_limits(struct cpufreq_policy *policy); static int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_governor *new_gov, @@ -2266,7 +2264,7 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy) module_put(policy->governor->owner); } -static int cpufreq_start_governor(struct cpufreq_policy *policy) +int cpufreq_start_governor(struct cpufreq_policy *policy) { int ret; @@ -2293,7 +2291,7 @@ static int cpufreq_start_governor(struct cpufreq_policy *policy) return 0; } -static void cpufreq_stop_governor(struct cpufreq_policy *policy) +void cpufreq_stop_governor(struct cpufreq_policy *policy) { if (cpufreq_suspended || !policy->governor) return; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index fc459c9c00ff..e0220a6fbc69 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -36,6 +36,7 @@ #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 +#define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000 #define INTEL_CPUFREQ_TRANSITION_DELAY 500 #ifdef CONFIG_ACPI @@ -220,6 +221,7 @@ struct global_params { * preference/bias * @epp_saved: Saved EPP/EPB during system suspend or CPU offline * operation + * @epp_cached Cached HWP energy-performance preference value * @hwp_req_cached: Cached value of the last HWP Request MSR * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR * @last_io_update: Last time when IO wake flag was set @@ -257,6 +259,7 @@ struct cpudata { s16 epp_policy; s16 epp_default; s16 epp_saved; + s16 epp_cached; u64 hwp_req_cached; u64 hwp_cap_cached; u64 last_io_update; @@ -639,6 +642,26 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw return index; } +static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) +{ + /* + * Use the cached HWP Request MSR value, because in the active mode the + * register itself may be updated by intel_pstate_hwp_boost_up() or + * intel_pstate_hwp_boost_down() at any time. + */ + u64 value = READ_ONCE(cpu->hwp_req_cached); + + value &= ~GENMASK_ULL(31, 24); + value |= (u64)epp << 24; + /* + * The only other updater of hwp_req_cached in the active mode, + * intel_pstate_hwp_set(), is called under the same lock as this + * function, so it cannot run in parallel with the update below. + */ + WRITE_ONCE(cpu->hwp_req_cached, value); + return wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); +} + static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, int pref_index, bool use_raw, u32 raw_epp) @@ -650,28 +673,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, epp = cpu_data->epp_default; if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { - /* - * Use the cached HWP Request MSR value, because the register - * itself may be updated by intel_pstate_hwp_boost_up() or - * intel_pstate_hwp_boost_down() at any time. 
- */ - u64 value = READ_ONCE(cpu_data->hwp_req_cached); - - value &= ~GENMASK_ULL(31, 24); - if (use_raw) epp = raw_epp; else if (epp == -EINVAL) epp = epp_values[pref_index - 1]; - value |= (u64)epp << 24; - /* - * The only other updater of hwp_req_cached in the active mode, - * intel_pstate_hwp_set(), is called under the same lock as this - * function, so it cannot run in parallel with the update below. - */ - WRITE_ONCE(cpu_data->hwp_req_cached, value); - ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value); + ret = intel_pstate_set_epp(cpu_data, epp); } else { if (epp == -EINVAL) epp = (pref_index - 1) << 2; @@ -697,10 +704,12 @@ static ssize_t show_energy_performance_available_preferences( cpufreq_freq_attr_ro(energy_performance_available_preferences); +static struct cpufreq_driver intel_pstate; + static ssize_t store_energy_performance_preference( struct cpufreq_policy *policy, const char *buf, size_t count) { - struct cpudata *cpu_data = all_cpu_data[policy->cpu]; + struct cpudata *cpu = all_cpu_data[policy->cpu]; char str_preference[21]; bool raw = false; ssize_t ret; @@ -725,15 +734,44 @@ static ssize_t store_energy_performance_preference( raw = true; } + /* + * This function runs with the policy R/W semaphore held, which + * guarantees that the driver pointer will not change while it is + * running. + */ + if (!intel_pstate_driver) + return -EAGAIN; + mutex_lock(&intel_pstate_limits_lock); - ret = intel_pstate_set_energy_pref_index(cpu_data, ret, raw, epp); - if (!ret) - ret = count; + if (intel_pstate_driver == &intel_pstate) { + ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp); + } else { + /* + * In the passive mode the governor needs to be stopped on the + * target CPU before the EPP update and restarted after it, + * which is super-heavy-weight, so make sure it is worth doing + * upfront. + */ + if (!raw) + epp = ret ? 
epp_values[ret - 1] : cpu->epp_default; + + if (cpu->epp_cached != epp) { + int err; + + cpufreq_stop_governor(policy); + ret = intel_pstate_set_epp(cpu, epp); + err = cpufreq_start_governor(policy); + if (!ret) { + cpu->epp_cached = epp; + ret = err; + } + } + } mutex_unlock(&intel_pstate_limits_lock); - return ret; + return ret ?: count; } static ssize_t show_energy_performance_preference( @@ -1145,8 +1183,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, return count; } -static struct cpufreq_driver intel_pstate; - static void update_qos_request(enum freq_qos_req_type type) { int max_state, turbo_max, freq, i, perf_pct; @@ -1330,9 +1366,10 @@ static const struct attribute_group intel_pstate_attr_group = { static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; +static struct kobject *intel_pstate_kobject; + static void __init intel_pstate_sysfs_expose_params(void) { - struct kobject *intel_pstate_kobject; int rc; intel_pstate_kobject = kobject_create_and_add("intel_pstate", @@ -1357,17 +1394,31 @@ static void __init intel_pstate_sysfs_expose_params(void) rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); WARN_ON(rc); - if (hwp_active) { - rc = sysfs_create_file(intel_pstate_kobject, - &hwp_dynamic_boost.attr); - WARN_ON(rc); - } - if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) { rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr); WARN_ON(rc); } } + +static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void) +{ + int rc; + + if (!hwp_active) + return; + + rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); + WARN_ON_ONCE(rc); +} + +static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) +{ + if (!hwp_active) + return; + + sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr); +} + /************************** sysfs end ************************/ static void intel_pstate_hwp_enable(struct cpudata *cpudata) @@ -2247,7 +2298,10 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy) { - intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); + if (hwp_active) + intel_pstate_hwp_force_min_perf(policy->cpu); + else + intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]); } static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) @@ -2255,12 +2309,10 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) pr_debug("CPU %d exiting\n", policy->cpu); intel_pstate_clear_update_util_hook(policy->cpu); - if (hwp_active) { + if (hwp_active) intel_pstate_hwp_save_state(policy); - intel_pstate_hwp_force_min_perf(policy->cpu); - } else { - intel_cpufreq_stop_cpu(policy); - } + + intel_cpufreq_stop_cpu(policy); } static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) @@ -2390,13 +2442,71 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in fp_toint(cpu->iowait_boost * 100)); } +static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate, + bool fast_switch) +{ + u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; + + value &= ~HWP_MIN_PERF(~0L); + value |= HWP_MIN_PERF(target_pstate); + + /* + * The entire MSR needs to be updated in order to update the HWP min + * field in it, so opportunistically update the max too if needed. 
+ */ + value &= ~HWP_MAX_PERF(~0L); + value |= HWP_MAX_PERF(cpu->max_perf_ratio); + + if (value == prev) + return; + + WRITE_ONCE(cpu->hwp_req_cached, value); + if (fast_switch) + wrmsrl(MSR_HWP_REQUEST, value); + else + wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); +} + +static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu, + u32 target_pstate, bool fast_switch) +{ + if (fast_switch) + wrmsrl(MSR_IA32_PERF_CTL, + pstate_funcs.get_val(cpu, target_pstate)); + else + wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, + pstate_funcs.get_val(cpu, target_pstate)); +} + +static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate, + bool fast_switch) +{ + int old_pstate = cpu->pstate.current_pstate; + + target_pstate = intel_pstate_prepare_request(cpu, target_pstate); + if (target_pstate != old_pstate) { + cpu->pstate.current_pstate = target_pstate; + if (hwp_active) + intel_cpufreq_adjust_hwp(cpu, target_pstate, + fast_switch); + else + intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, + fast_switch); + } + + intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : + INTEL_PSTATE_TRACE_TARGET, old_pstate); + + return target_pstate; +} + static int intel_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { struct cpudata *cpu = all_cpu_data[policy->cpu]; struct cpufreq_freqs freqs; - int target_pstate, old_pstate; + int target_pstate; update_turbo_state(); @@ -2404,6 +2514,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, freqs.new = target_freq; cpufreq_freq_transition_begin(policy, &freqs); + switch (relation) { case CPUFREQ_RELATION_L: target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); @@ -2415,15 +2526,11 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); break; } - target_pstate = intel_pstate_prepare_request(cpu, target_pstate); - old_pstate = cpu->pstate.current_pstate; - if (target_pstate != cpu->pstate.current_pstate) { - cpu->pstate.current_pstate = target_pstate; - wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, - pstate_funcs.get_val(cpu, target_pstate)); - } + + target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false); + freqs.new = target_pstate * cpu->pstate.scaling; - intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate); + cpufreq_freq_transition_end(policy, &freqs, false); return 0; @@ -2433,15 +2540,14 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { struct cpudata *cpu = all_cpu_data[policy->cpu]; - int target_pstate, old_pstate; + int target_pstate; update_turbo_state(); target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); - target_pstate = intel_pstate_prepare_request(cpu, target_pstate); - old_pstate = cpu->pstate.current_pstate; - intel_pstate_update_pstate(cpu, target_pstate); - intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); + + target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true); + return target_pstate * cpu->pstate.scaling; } @@ -2461,7 +2567,6 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) return ret; policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; - policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; /* This reflects the intel_pstate_get_cpu_pstates() setting. 
*/ policy->cur = policy->cpuinfo.min_freq; @@ -2473,10 +2578,18 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) cpu = all_cpu_data[policy->cpu]; - if (hwp_active) + if (hwp_active) { + u64 value; + intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state); - else + policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; + rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); + WRITE_ONCE(cpu->hwp_req_cached, value); + cpu->epp_cached = (value & GENMASK_ULL(31, 24)) >> 24; + } else { turbo_max = cpu->pstate.turbo_pstate; + policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; + } min_freq = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); min_freq *= cpu->pstate.scaling; @@ -2553,6 +2666,10 @@ static void intel_pstate_driver_cleanup(void) } } put_online_cpus(); + + if (intel_pstate_driver == &intel_pstate) + intel_pstate_sysfs_hide_hwp_dynamic_boost(); + intel_pstate_driver = NULL; } @@ -2560,6 +2677,9 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) { int ret; + if (driver == &intel_pstate) + intel_pstate_sysfs_expose_hwp_dynamic_boost(); + memset(&global, 0, sizeof(global)); global.max_perf_pct = 100; @@ -2577,9 +2697,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) static int intel_pstate_unregister_driver(void) { - if (hwp_active) - return -EBUSY; - cpufreq_unregister_driver(intel_pstate_driver); intel_pstate_driver_cleanup(); @@ -2835,7 +2952,10 @@ static int __init intel_pstate_init(void) hwp_active++; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; - default_driver = &intel_pstate; + intel_cpufreq.attr = hwp_cpufreq_attrs; + if (!default_driver) + default_driver = &intel_pstate; + goto hwp_cpu_matched; } } else { @@ -2906,14 +3026,13 @@ static int __init intel_pstate_setup(char *str) if (!str) return -EINVAL; - if (!strcmp(str, "disable")) { + if (!strcmp(str, "disable")) no_load = 1; - } else if (!strcmp(str, "active")) { + else if (!strcmp(str, "active")) default_driver = &intel_pstate; - } else if (!strcmp(str, "passive")) { + else if (!strcmp(str, "passive")) default_driver = &intel_cpufreq; - no_hwp = 1; - } + if (!strcmp(str, "no_hwp")) { pr_info("HWP disabled\n"); no_hwp = 1; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 58687a5bf9c8..8f141d4c859c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -576,6 +576,8 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); +int cpufreq_start_governor(struct cpufreq_policy *policy); +void cpufreq_stop_governor(struct cpufreq_policy *policy); #define cpufreq_governor_init(__governor) \ static int __init __governor##_init(void) \ -- cgit v1.2.3 From 772616b031f06e05846488b01dab46a7c832da13 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 11 Aug 2020 18:30:21 -0700 Subject: mm: memcg/percpu: per-memcg percpu memory statistics MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Percpu memory can represent a noticeable chunk of the total memory consumption, especially on big machines with many CPUs. Let's track percpu memory usage for each memcg and display it in memory.stat. A percpu allocation is usually scattered over multiple pages (and nodes), and can be significantly smaller than a page. 
So let's add a byte-sized counter on the memcg level: MEMCG_PERCPU_B. Byte-sized vmstat infra created for slabs can be perfectly reused for percpu case. [guro@fb.com: v3] Link: http://lkml.kernel.org/r/20200623184515.4132564-4-guro@fb.com Signed-off-by: Roman Gushchin Signed-off-by: Andrew Morton Reviewed-by: Shakeel Butt Acked-by: Dennis Zhou Acked-by: Johannes Weiner Cc: Christoph Lameter Cc: David Rientjes Cc: Joonsoo Kim Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Tejun Heo Cc: Tobin C. Harding Cc: Vlastimil Babka Cc: Waiman Long Cc: Bixuan Cui Cc: Michal Koutný Cc: Stephen Rothwell Link: http://lkml.kernel.org/r/20200608230819.832349-4-guro@fb.com Signed-off-by: Linus Torvalds --- Documentation/admin-guide/cgroup-v2.rst | 4 ++++ include/linux/memcontrol.h | 8 ++++++++ mm/memcontrol.c | 4 +++- mm/percpu.c | 10 ++++++++++ 4 files changed, 25 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index fa4018afa5a4..6be43781ec7f 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1274,6 +1274,10 @@ PAGE_SIZE multiple when read back. Amount of memory used for storing in-kernel data structures. + percpu + Amount of memory used for storing per-cpu kernel + data structures. + sock Amount of memory used in network transmission buffers diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1bb49b600310..2c2d301eac33 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -32,6 +32,7 @@ struct kmem_cache; enum memcg_stat_item { MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, MEMCG_SOCK, + MEMCG_PERCPU_B, MEMCG_NR_STAT, }; @@ -339,6 +340,13 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; +static __always_inline bool memcg_stat_item_in_bytes(int idx) +{ + if (idx == MEMCG_PERCPU_B) + return true; + return vmstat_item_in_bytes(idx); +} + static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return (memcg == root_mem_cgroup); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8d9ceea7fe4d..36d5300f9b69 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -781,7 +781,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) if (mem_cgroup_disabled()) return; - if (vmstat_item_in_bytes(idx)) + if (memcg_stat_item_in_bytes(idx)) threshold <<= PAGE_SHIFT; x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); @@ -1488,6 +1488,8 @@ static char *memory_stat_format(struct mem_cgroup *memcg) seq_buf_printf(&s, "slab %llu\n", (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B))); + seq_buf_printf(&s, "percpu %llu\n", + (u64)memcg_page_state(memcg, MEMCG_PERCPU_B)); seq_buf_printf(&s, "sock %llu\n", (u64)memcg_page_state(memcg, MEMCG_SOCK) * PAGE_SIZE); diff --git a/mm/percpu.c b/mm/percpu.c index dc1a213293aa..f4709629e6de 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1610,6 +1610,11 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, if (chunk) { chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; + + rcu_read_lock(); + mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, + size * num_possible_cpus()); + rcu_read_unlock(); } else { obj_cgroup_uncharge(objcg, size * num_possible_cpus()); obj_cgroup_put(objcg); @@ -1628,6 +1633,11 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) obj_cgroup_uncharge(objcg, size * num_possible_cpus()); + rcu_read_lock(); + 
mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, + -(size * num_possible_cpus())); + rcu_read_unlock(); + obj_cgroup_put(objcg); } -- cgit v1.2.3 From 8ca39e6874f812a393bb66d9fdbb7598d5f0451c Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 11 Aug 2020 18:30:32 -0700 Subject: mm/hugetlb: add mempolicy check in the reservation routine In the reservation routine, we only check whether the cpuset meets the memory allocation requirements. But we ignore the mempolicy in the MPOL_BIND case. If an mmap of hugetlb memory succeeds, the subsequent memory allocation may still fail due to mempolicy restrictions, and the process then receives the SIGBUS signal. This can be reproduced by the following steps.
1) Compile the test case.
   cd tools/testing/selftests/vm/
   gcc map_hugetlb.c -o map_hugetlb
2) Pre-allocate huge pages. Suppose there are 2 NUMA nodes in the system. Each node will pre-allocate one huge page.
   echo 2 > /proc/sys/vm/nr_hugepages
3) Run the test case (mmap 4MB). We receive the SIGBUS signal.
   numactl --membind=0 ./map_hugetlb 4
With this patch applied, the mmap will fail in step 3) and throw "mmap: Cannot allocate memory". [akpm@linux-foundation.org: include sched.h for `current'] Reported-by: Jianchao Guo Suggested-by: Michal Hocko Signed-off-by: Muchun Song Signed-off-by: Andrew Morton Reviewed-by: Mike Kravetz Cc: David Rientjes Cc: Mel Gorman Cc: Michel Lespinasse Cc: Baoquan He Link: http://lkml.kernel.org/r/20200728034938.14993-1-songmuchun@bytedance.com Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 16 +++++++++++++++- mm/hugetlb.c | 22 ++++++++++++++++++---- mm/mempolicy.c | 2 +- 3 files changed, 34 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index ea9c15b60a96..5f1648c23e29 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -6,7 +6,7 @@ #ifndef _LINUX_MEMPOLICY_H #define _LINUX_MEMPOLICY_H 1 - +#include #include #include #include @@ -152,6 +152,15 @@ extern int huge_node(struct vm_area_struct *vma, extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask); +extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy); + +static inline nodemask_t *policy_nodemask_current(gfp_t gfp) +{ + struct mempolicy *mpol = get_task_policy(current); + + return policy_nodemask(gfp, mpol); +} + extern unsigned int mempolicy_slab_node(void); extern enum zone_type policy_zone; @@ -281,5 +290,10 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, static inline void mpol_put_task_policy(struct task_struct *task) { } + +static inline nodemask_t *policy_nodemask_current(gfp_t gfp) +{ + return NULL; +} #endif /* CONFIG_NUMA */ #endif diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e52c878940bb..dffafb5bf2ed 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3458,13 +3458,21 @@ static int __init default_hugepagesz_setup(char *s) } __setup("default_hugepagesz=", default_hugepagesz_setup); -static unsigned int cpuset_mems_nr(unsigned int *array) +static unsigned int allowed_mems_nr(struct hstate *h) { int node; unsigned int nr = 0; + nodemask_t *mpol_allowed; + unsigned int *array = h->free_huge_pages_node; + gfp_t gfp_mask = htlb_alloc_mask(h); + + mpol_allowed = policy_nodemask_current(gfp_mask); - for_each_node_mask(node, cpuset_current_mems_allowed) - nr += array[node]; + for_each_node_mask(node, cpuset_current_mems_allowed) { + if (!mpol_allowed || + (mpol_allowed && 
node_isset(node, *mpol_allowed))) + nr += array[node]; + } return nr; } @@ -3643,12 +3651,18 @@ static int hugetlb_acct_memory(struct hstate *h, long delta) * we fall back to check against current free page availability as * a best attempt and hopefully to minimize the impact of changing * semantics that cpuset has. + * + * Apart from cpuset, we also have memory policy mechanism that + * also determines from which node the kernel will allocate memory + * in a NUMA system. So similar to cpuset, we also should consider + * the memory policy of the current task. Similar to the description + * above. */ if (delta > 0) { if (gather_surplus_pages(h, delta) < 0) goto out; - if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { + if (delta > allowed_mems_nr(h)) { return_unused_surplus_pages(h, delta); goto out; } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index b9e85d467352..7af44d7cdd11 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1890,7 +1890,7 @@ static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) * Return a nodemask representing a mempolicy for filtering nodes for * page allocation */ -static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) +nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) { /* Lower zones don't get a nodemask applied for MPOL_BIND */ if (unlikely(policy->mode == MPOL_BIND) && -- cgit v1.2.3 From b518154e59aab3ad0780a169c5cc84bd4ee4357e Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:30:40 -0700 Subject: mm/vmscan: protect the workingset on anonymous LRU In the current implementation, a newly created or swapped-in anonymous page starts out on the active list. Growing the active list results in rebalancing the active/inactive lists, so old pages on the active list are demoted to the inactive list. Hence, pages on the active list aren't protected at all. Following is an example of this situation. Assume there are 50 hot pages on the active list. Numbers denote the number of pages on the active/inactive list (active | inactive).
1. 50 hot pages on active list: 50(h) | 0
2. workload: 50 newly created (used-once) pages: 50(uo) | 50(h)
3. workload: another 50 newly created (used-once) pages: 50(uo) | 50(uo), swap-out 50(h)
This patch tries to fix this issue. Like the file LRU, newly created or swapped-in anonymous pages will be inserted into the inactive list. They are promoted to the active list if enough references happen. This simple modification changes the above example as follows.
1. 50 hot pages on active list: 50(h) | 0
2. workload: 50 newly created (used-once) pages: 50(h) | 50(uo)
3. workload: another 50 newly created (used-once) pages: 50(h) | 50(uo), swap-out 50(uo)
As you can see, the hot pages on the active list are now protected. Note that this implementation has a drawback: a page cannot be promoted and will be swapped out if its re-access interval is greater than the size of the inactive list but less than the total size (active + inactive). To solve this potential issue, a following patch will apply workingset detection similar to the one that's already applied to the file LRU.
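A condensed sketch of the new insertion path (simplified from the mm/swap.c hunk below; the mlock accounting is abbreviated to a single event counter here):

	void lru_cache_add_inactive_or_unevictable(struct page *page,
						   struct vm_area_struct *vma)
	{
		bool unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;

		if (unlikely(unevictable) && !TestSetPageMlocked(page))
			count_vm_event(UNEVICTABLE_PGMLOCKED);	/* accounting, abbreviated */

		/* No SetPageActive() any more: the page starts on the
		 * inactive list and must earn promotion through references. */
		lru_cache_add(page);
	}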
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Link: http://lkml.kernel.org/r/1595490560-15117-3-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/swap.h | 2 +- kernel/events/uprobes.c | 2 +- mm/huge_memory.c | 2 +- mm/khugepaged.c | 2 +- mm/memory.c | 9 ++++----- mm/migrate.c | 2 +- mm/swap.c | 13 +++++++------ mm/swapfile.c | 2 +- mm/userfaultfd.c | 2 +- mm/vmscan.c | 4 +--- 10 files changed, 19 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 7eb59bc552a5..51ec9cdb92c0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -352,7 +352,7 @@ extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); -extern void lru_cache_add_active_or_unevictable(struct page *page, +extern void lru_cache_add_inactive_or_unevictable(struct page *page, struct vm_area_struct *vma); /* linux/mm/vmscan.c */ diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 25de10c904e6..49047d479c57 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (new_page) { get_page(new_page); page_add_new_anon_rmap(new_page, vma, addr, false); - lru_cache_add_active_or_unevictable(new_page, vma); + lru_cache_add_inactive_or_unevictable(new_page, vma); } else /* no new page, just dec_mm_counter for old_page */ dec_mm_counter(mm, MM_ANONPAGES); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 206f52b36ffb..863c495776d7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -640,7 +640,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, entry = mk_huge_pmd(page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); page_add_new_anon_rmap(page, vma, haddr, true); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index b52bd46ad146..15a9af791014 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1173,7 +1173,7 @@ static void collapse_huge_page(struct mm_struct *mm, spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); page_add_new_anon_rmap(new_page, vma, address, true); - lru_cache_add_active_or_unevictable(new_page, vma); + lru_cache_add_inactive_or_unevictable(new_page, vma); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache_pmd(vma, address, pmd); diff --git a/mm/memory.c b/mm/memory.c index c39a13b09602..6fe8b5b22c57 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2715,7 +2715,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) */ ptep_clear_flush_notify(vma, vmf->address, vmf->pte); page_add_new_anon_rmap(new_page, vma, vmf->address, false); - lru_cache_add_active_or_unevictable(new_page, vma); + lru_cache_add_inactive_or_unevictable(new_page, vma); /* * We call the notify macro here because, when using secondary * mmu page tables (such as kvm shadow page tables), we want the @@ -3266,10 +3266,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) /* ksm created a completely new copy */ if (unlikely(page != swapcache && swapcache)) 
{ page_add_new_anon_rmap(page, vma, vmf->address, false); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); } else { do_page_add_anon_rmap(page, vma, vmf->address, exclusive); - activate_page(page); } swap_free(entry); @@ -3414,7 +3413,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, vmf->address, false); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); setpte: set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); @@ -3672,7 +3671,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page) if (write && !(vma->vm_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, vmf->address, false); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page, false); diff --git a/mm/migrate.c b/mm/migrate.c index d179657f8685..819e55130134 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2830,7 +2830,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, inc_mm_counter(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, addr, false); if (!is_zone_device_page(page)) - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); get_page(page); if (flush) { diff --git a/mm/swap.c b/mm/swap.c index de257c0a89b1..9285e60c7d6e 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -476,23 +476,24 @@ void lru_cache_add(struct page *page) EXPORT_SYMBOL(lru_cache_add); /** - * lru_cache_add_active_or_unevictable + * lru_cache_add_inactive_or_unevictable * @page: the page to be added to LRU * @vma: vma in which page is mapped for determining reclaimability * - * Place @page on the active or unevictable LRU list, depending on its + * Place @page on the inactive or unevictable LRU list, depending on its * evictability. Note that if the page is not evictable, it goes * directly back onto it's zone's unevictable list, it does NOT use a * per cpu pagevec. 
*/ -void lru_cache_add_active_or_unevictable(struct page *page, +void lru_cache_add_inactive_or_unevictable(struct page *page, struct vm_area_struct *vma) { + bool unevictable; + VM_BUG_ON_PAGE(PageLRU(page), page); - if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) - SetPageActive(page); - else if (!TestSetPageMlocked(page)) { + unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED; + if (unlikely(unevictable) && !TestSetPageMlocked(page)) { /* * We use the irq-unsafe __mod_zone_page_stat because this * counter is not modified from interrupt context, and the pte diff --git a/mm/swapfile.c b/mm/swapfile.c index 6c26916e95fd..82183432fdd0 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1915,7 +1915,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, page_add_anon_rmap(page, vma, addr, false); } else { /* ksm created a completely new copy */ page_add_new_anon_rmap(page, vma, addr, false); - lru_cache_add_active_or_unevictable(page, vma); + lru_cache_add_inactive_or_unevictable(page, vma); } swap_free(entry); /* diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index b80419320c7d..9a3d451402d7 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -123,7 +123,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm, inc_mm_counter(dst_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, dst_vma, dst_addr, false); - lru_cache_add_active_or_unevictable(page, dst_vma); + lru_cache_add_inactive_or_unevictable(page, dst_vma); set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); diff --git a/mm/vmscan.c b/mm/vmscan.c index e34fc04b7045..783cd7fdc61a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -998,8 +998,6 @@ static enum page_references page_check_references(struct page *page, return PAGEREF_RECLAIM; if (referenced_ptes) { - if (PageSwapBacked(page)) - return PAGEREF_ACTIVATE; /* * All mapped pages start out with page table * references from the instantiating fault, so we need @@ -1022,7 +1020,7 @@ static enum page_references page_check_references(struct page *page, /* * Activate file-backed executable pages after first usage. */ - if (vm_flags & VM_EXEC) + if ((vm_flags & VM_EXEC) && !PageSwapBacked(page)) return PAGEREF_ACTIVATE; return PAGEREF_KEEP; -- cgit v1.2.3 From 170b04b7ae49634df103810dad67b22cf8a99aa6 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:30:43 -0700 Subject: mm/workingset: prepare the workingset detection infrastructure for anon LRU To prepare the workingset detection for anon LRU, this patch splits workingset event counters for refault, activate and restore into anon and file variants, as well as the refaults counter in struct lruvec. 
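The layout enables a small indexing trick, visible in the workingset.c hunk below: because the anon and file entries of each pair are adjacent, page_is_file_lru() selects the right counter arithmetically.

	bool file = page_is_file_lru(page);	/* 0 for anon, 1 for file */

	/* Resolves to WORKINGSET_REFAULT_ANON or WORKINGSET_REFAULT_FILE. */
	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);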
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Link: http://lkml.kernel.org/r/1595490560-15117-4-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 16 +++++++++++----- mm/memcontrol.c | 16 +++++++++++----- mm/vmscan.c | 15 ++++++++++----- mm/vmstat.c | 9 ++++++--- mm/workingset.c | 8 +++++--- 5 files changed, 43 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 635a96cd9b1f..efbd95dd3bbf 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -173,9 +173,15 @@ enum node_stat_item { NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, - WORKINGSET_REFAULT, - WORKINGSET_ACTIVATE, - WORKINGSET_RESTORE, + WORKINGSET_REFAULT_BASE, + WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE, + WORKINGSET_REFAULT_FILE, + WORKINGSET_ACTIVATE_BASE, + WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE, + WORKINGSET_ACTIVATE_FILE, + WORKINGSET_RESTORE_BASE, + WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE, + WORKINGSET_RESTORE_FILE, WORKINGSET_NODERECLAIM, NR_ANON_MAPPED, /* Mapped anonymous pages */ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. @@ -277,8 +283,8 @@ struct lruvec { unsigned long file_cost; /* Non-resident age, driven by LRU movement */ atomic_long_t nonresident_age; - /* Refaults at the time of last reclaim cycle */ - unsigned long refaults; + /* Refaults at the time of last reclaim cycle, anon=0, file=1 */ + unsigned long refaults[2]; /* Various lruvec state flags (enum lruvec_flags) */ unsigned long flags; #ifdef CONFIG_MEMCG diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f1fd265b9f9e..c6c579217855 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1530,12 +1530,18 @@ static char *memory_stat_format(struct mem_cgroup *memcg) seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT), memcg_events(memcg, PGMAJFAULT)); - seq_buf_printf(&s, "workingset_refault %lu\n", - memcg_page_state(memcg, WORKINGSET_REFAULT)); - seq_buf_printf(&s, "workingset_activate %lu\n", - memcg_page_state(memcg, WORKINGSET_ACTIVATE)); + seq_buf_printf(&s, "workingset_refault_anon %lu\n", + memcg_page_state(memcg, WORKINGSET_REFAULT_ANON)); + seq_buf_printf(&s, "workingset_refault_file %lu\n", + memcg_page_state(memcg, WORKINGSET_REFAULT_FILE)); + seq_buf_printf(&s, "workingset_activate_anon %lu\n", + memcg_page_state(memcg, WORKINGSET_ACTIVATE_ANON)); + seq_buf_printf(&s, "workingset_activate_file %lu\n", + memcg_page_state(memcg, WORKINGSET_ACTIVATE_FILE)); seq_buf_printf(&s, "workingset_restore %lu\n", - memcg_page_state(memcg, WORKINGSET_RESTORE)); + memcg_page_state(memcg, WORKINGSET_RESTORE_ANON)); + seq_buf_printf(&s, "workingset_restore %lu\n", + memcg_page_state(memcg, WORKINGSET_RESTORE_FILE)); seq_buf_printf(&s, "workingset_nodereclaim %lu\n", memcg_page_state(memcg, WORKINGSET_NODERECLAIM)); diff --git a/mm/vmscan.c b/mm/vmscan.c index 783cd7fdc61a..017f323318a3 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2683,7 +2683,10 @@ again: if (!sc->force_deactivate) { unsigned long refaults; - if (inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) + refaults = lruvec_page_state(target_lruvec, + WORKINGSET_ACTIVATE_ANON); + if (refaults != target_lruvec->refaults[0] || + inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) 
sc->may_deactivate |= DEACTIVATE_ANON; else sc->may_deactivate &= ~DEACTIVATE_ANON; @@ -2694,8 +2697,8 @@ again: * rid of any stale active pages quickly. */ refaults = lruvec_page_state(target_lruvec, - WORKINGSET_ACTIVATE); - if (refaults != target_lruvec->refaults || + WORKINGSET_ACTIVATE_FILE); + if (refaults != target_lruvec->refaults[1] || inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) sc->may_deactivate |= DEACTIVATE_FILE; else @@ -2972,8 +2975,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) unsigned long refaults; target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); - refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE); - target_lruvec->refaults = refaults; + refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); + target_lruvec->refaults[0] = refaults; + refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); + target_lruvec->refaults[1] = refaults; } /* diff --git a/mm/vmstat.c b/mm/vmstat.c index 2b866cbab11d..fef03463a0cf 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1167,9 +1167,12 @@ const char * const vmstat_text[] = { "nr_isolated_anon", "nr_isolated_file", "workingset_nodes", - "workingset_refault", - "workingset_activate", - "workingset_restore", + "workingset_refault_anon", + "workingset_refault_file", + "workingset_activate_anon", + "workingset_activate_file", + "workingset_restore_anon", + "workingset_restore_file", "workingset_nodereclaim", "nr_anon_pages", "nr_mapped", diff --git a/mm/workingset.c b/mm/workingset.c index b199726924dd..941bbaa6c262 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -280,6 +281,7 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) */ void workingset_refault(struct page *page, void *shadow) { + bool file = page_is_file_lru(page); struct mem_cgroup *eviction_memcg; struct lruvec *eviction_lruvec; unsigned long refault_distance; @@ -346,7 +348,7 @@ void workingset_refault(struct page *page, void *shadow) memcg = page_memcg(page); lruvec = mem_cgroup_lruvec(memcg, pgdat); - inc_lruvec_state(lruvec, WORKINGSET_REFAULT); + inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file); /* * Compare the distance to the existing workingset size. We @@ -366,7 +368,7 @@ void workingset_refault(struct page *page, void *shadow) SetPageActive(page); workingset_age_nonresident(lruvec, hpage_nr_pages(page)); - inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE); + inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file); /* Page was active prior to eviction */ if (workingset) { @@ -375,7 +377,7 @@ void workingset_refault(struct page *page, void *shadow) spin_lock_irq(&page_pgdat(page)->lru_lock); lru_note_cost_page(page); spin_unlock_irq(&page_pgdat(page)->lru_lock); - inc_lruvec_state(lruvec, WORKINGSET_RESTORE); + inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file); } out: rcu_read_unlock(); -- cgit v1.2.3 From 3852f6768ede542ed48b9077bedf482c7ecb6327 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:30:47 -0700 Subject: mm/swapcache: support to handle the shadow entries Workingset detection for anonymous pages will be implemented in the following patch, and it requires storing the shadow entries in the swapcache. This patch implements an infrastructure to store the shadow entry in the swapcache.
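A note on the mechanism, since the diffs below lean on it: a swap cache slot can now hold either a struct page pointer or an XArray value entry (the shadow), and xa_is_value() tells the two apart. A hedged sketch of the test the patch applies in several places (the helper name is ours):

	/* Sketch, not patch code: a non-NULL swap cache slot is either a
	 * page pointer or a value entry carrying eviction information. */
	static bool swap_slot_is_shadow(void *entry)
	{
		return entry && xa_is_value(entry);
	}

The nrexceptional bookkeeping in the hunks below counts exactly these value entries.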
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Vlastimil Babka Link: http://lkml.kernel.org/r/1595490560-15117-5-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/swap.h | 17 ++++++++++++---- mm/shmem.c | 3 ++- mm/swap_state.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++------ mm/swapfile.c | 2 ++ mm/vmscan.c | 2 +- 5 files changed, 69 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 51ec9cdb92c0..8a4c592698a9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -414,9 +414,13 @@ extern struct address_space *swapper_spaces[]; extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); -extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); -extern void __delete_from_swap_cache(struct page *, swp_entry_t entry); +extern int add_to_swap_cache(struct page *page, swp_entry_t entry, + gfp_t gfp, void **shadowp); +extern void __delete_from_swap_cache(struct page *page, + swp_entry_t entry, void *shadow); extern void delete_from_swap_cache(struct page *); +extern void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); extern struct page *lookup_swap_cache(swp_entry_t entry, @@ -570,13 +574,13 @@ static inline int add_to_swap(struct page *page) } static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, - gfp_t gfp_mask) + gfp_t gfp_mask, void **shadowp) { return -1; } static inline void __delete_from_swap_cache(struct page *page, - swp_entry_t entry) + swp_entry_t entry, void *shadow) { } @@ -584,6 +588,11 @@ static inline void delete_from_swap_cache(struct page *page) { } +static inline void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end) +{ +} + static inline int page_swapcount(struct page *page) { return 0; diff --git a/mm/shmem.c b/mm/shmem.c index eb6b36d89722..49bde088a7ea 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1434,7 +1434,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) list_add(&info->swaplist, &shmem_swaplist); if (add_to_swap_cache(page, swap, - __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN) == 0) { + __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, + NULL) == 0) { spin_lock_irq(&info->lock); shmem_recalc_inode(inode); info->swapped++; diff --git a/mm/swap_state.c b/mm/swap_state.c index e82f4f8b1f63..a29b33c7c236 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -110,12 +110,14 @@ void show_swap_cache_info(void) * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space, * but sets SwapCache flag and private instead of mapping and index. 
*/ -int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp) +int add_to_swap_cache(struct page *page, swp_entry_t entry, + gfp_t gfp, void **shadowp) { struct address_space *address_space = swap_address_space(entry); pgoff_t idx = swp_offset(entry); XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); unsigned long i, nr = hpage_nr_pages(page); + void *old; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageSwapCache(page), page); @@ -125,16 +127,25 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp) SetPageSwapCache(page); do { + unsigned long nr_shadows = 0; + xas_lock_irq(&xas); xas_create_range(&xas); if (xas_error(&xas)) goto unlock; for (i = 0; i < nr; i++) { VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); + old = xas_load(&xas); + if (xa_is_value(old)) { + nr_shadows++; + if (shadowp) + *shadowp = old; + } set_page_private(page + i, entry.val + i); xas_store(&xas, page); xas_next(&xas); } + address_space->nrexceptional -= nr_shadows; address_space->nrpages += nr; __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); ADD_CACHE_INFO(add_total, nr); @@ -154,7 +165,8 @@ unlock: * This must be called only on pages that have * been verified to be in the swap cache. */ -void __delete_from_swap_cache(struct page *page, swp_entry_t entry) +void __delete_from_swap_cache(struct page *page, + swp_entry_t entry, void *shadow) { struct address_space *address_space = swap_address_space(entry); int i, nr = hpage_nr_pages(page); @@ -166,12 +178,14 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry) VM_BUG_ON_PAGE(PageWriteback(page), page); for (i = 0; i < nr; i++) { - void *entry = xas_store(&xas, NULL); + void *entry = xas_store(&xas, shadow); VM_BUG_ON_PAGE(entry != page, entry); set_page_private(page + i, 0); xas_next(&xas); } ClearPageSwapCache(page); + if (shadow) + address_space->nrexceptional += nr; address_space->nrpages -= nr; __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); ADD_CACHE_INFO(del_total, nr); @@ -208,7 +222,7 @@ int add_to_swap(struct page *page) * Add it to the swap cache. */ err = add_to_swap_cache(page, entry, - __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN); + __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL); if (err) /* * add_to_swap_cache() doesn't return -EEXIST, so we can safely @@ -246,13 +260,44 @@ void delete_from_swap_cache(struct page *page) struct address_space *address_space = swap_address_space(entry); xa_lock_irq(&address_space->i_pages); - __delete_from_swap_cache(page, entry); + __delete_from_swap_cache(page, entry, NULL); xa_unlock_irq(&address_space->i_pages); put_swap_page(page, entry); page_ref_sub(page, hpage_nr_pages(page)); } +void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end) +{ + unsigned long curr = begin; + void *old; + + for (;;) { + unsigned long nr_shadows = 0; + swp_entry_t entry = swp_entry(type, curr); + struct address_space *address_space = swap_address_space(entry); + XA_STATE(xas, &address_space->i_pages, curr); + + xa_lock_irq(&address_space->i_pages); + xas_for_each(&xas, old, end) { + if (!xa_is_value(old)) + continue; + xas_store(&xas, NULL); + nr_shadows++; + } + address_space->nrexceptional -= nr_shadows; + xa_unlock_irq(&address_space->i_pages); + + /* search the next swapcache until we meet end */ + curr >>= SWAP_ADDRESS_SPACE_SHIFT; + curr++; + curr <<= SWAP_ADDRESS_SPACE_SHIFT; + if (curr > end) + break; + } +} + /* * If we are the only user, then try to free up the swap cache. 
* @@ -429,7 +474,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, __SetPageSwapBacked(page); /* May fail (-ENOMEM) if XArray node allocation failed. */ - if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) { + if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) { put_swap_page(page, entry); goto fail_unlock; } diff --git a/mm/swapfile.c b/mm/swapfile.c index 82183432fdd0..e653eea1eb88 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -696,6 +696,7 @@ static void add_to_avail_list(struct swap_info_struct *p) static void swap_range_free(struct swap_info_struct *si, unsigned long offset, unsigned int nr_entries) { + unsigned long begin = offset; unsigned long end = offset + nr_entries - 1; void (*swap_slot_free_notify)(struct block_device *, unsigned long); @@ -721,6 +722,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, swap_slot_free_notify(si->bdev, offset); offset++; } + clear_shadow_from_swap_cache(si->type, begin, end); } static void set_cluster_next(struct swap_info_struct *si, unsigned long next) diff --git a/mm/vmscan.c b/mm/vmscan.c index 017f323318a3..e84c4dd08c4e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -896,7 +896,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; mem_cgroup_swapout(page, swap); - __delete_from_swap_cache(page, swap); + __delete_from_swap_cache(page, swap, NULL); xa_unlock_irqrestore(&mapping->i_pages, flags); put_swap_page(page, swap); workingset_eviction(page, target_memcg); -- cgit v1.2.3 From aae466b0052e1888edd1d7f473d4310d64936196 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:30:50 -0700 Subject: mm/swap: implement workingset detection for anonymous LRU This patch implements workingset detection for anonymous LRU. All the infrastructure is implemented by the previous patches so this patch just activates the workingset detection by installing/retrieving the shadow entry and adding refault calculation. 
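Condensed, the swap-in path after this patch looks roughly like the sketch below (simplified from the do_swap_page() hunk that follows; locking and error handling omitted):

	shadow = get_shadow_from_swap_cache(entry);
	if (shadow)
		workingset_refault(page, shadow);  /* may SetPageActive() */
	lru_cache_add(page);  /* pages now start on the inactive list */

The same pattern replaces the old lru_note_cost_page() sequence in __read_swap_cache_async().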
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Link: http://lkml.kernel.org/r/1595490560-15117-6-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/swap.h | 6 ++++++ mm/memory.c | 11 ++++------- mm/swap_state.c | 23 ++++++++++++++++++----- mm/vmscan.c | 7 ++++--- mm/workingset.c | 15 +++++++++++---- 5 files changed, 43 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 8a4c592698a9..661046994db4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -414,6 +414,7 @@ extern struct address_space *swapper_spaces[]; extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); +extern void *get_shadow_from_swap_cache(swp_entry_t entry); extern int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp, void **shadowp); extern void __delete_from_swap_cache(struct page *page, @@ -573,6 +574,11 @@ static inline int add_to_swap(struct page *page) return 0; } +static inline void *get_shadow_from_swap_cache(swp_entry_t entry) +{ + return NULL; +} + static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask, void **shadowp) { diff --git a/mm/memory.c b/mm/memory.c index 6fe8b5b22c57..de311fc7639e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3098,6 +3098,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) int locked; int exclusive = 0; vm_fault_t ret = 0; + void *shadow = NULL; if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) goto out; @@ -3149,13 +3150,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) goto out_page; } - /* - * XXX: Move to lru_cache_add() when it - * supports new vs putback - */ - spin_lock_irq(&page_pgdat(page)->lru_lock); - lru_note_cost_page(page); - spin_unlock_irq(&page_pgdat(page)->lru_lock); + shadow = get_shadow_from_swap_cache(entry); + if (shadow) + workingset_refault(page, shadow); lru_cache_add(page); swap_readpage(page, true); diff --git a/mm/swap_state.c b/mm/swap_state.c index a29b33c7c236..b73aabdfd35a 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -106,6 +106,20 @@ void show_swap_cache_info(void) printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10)); } +void *get_shadow_from_swap_cache(swp_entry_t entry) +{ + struct address_space *address_space = swap_address_space(entry); + pgoff_t idx = swp_offset(entry); + struct page *page; + + page = find_get_entry(address_space, idx); + if (xa_is_value(page)) + return page; + if (page) + put_page(page); + return NULL; +} + /* * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space, * but sets SwapCache flag and private instead of mapping and index. @@ -406,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, { struct swap_info_struct *si; struct page *page; + void *shadow = NULL; *new_page_allocated = false; @@ -474,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, __SetPageSwapBacked(page); /* May fail (-ENOMEM) if XArray node allocation failed. 
*/ - if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) { + if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) { put_swap_page(page, entry); goto fail_unlock; } @@ -484,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, goto fail_unlock; } - /* XXX: Move to lru_cache_add() when it supports new vs putback */ - spin_lock_irq(&page_pgdat(page)->lru_lock); - lru_note_cost_page(page); - spin_unlock_irq(&page_pgdat(page)->lru_lock); + if (shadow) + workingset_refault(page, shadow); /* Caller will initiate read into locked page */ SetPageWorkingset(page); diff --git a/mm/vmscan.c b/mm/vmscan.c index e84c4dd08c4e..66d73fea80e4 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, { unsigned long flags; int refcount; + void *shadow = NULL; BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); @@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; mem_cgroup_swapout(page, swap); - __delete_from_swap_cache(page, swap, NULL); + if (reclaimed && !mapping_exiting(mapping)) + shadow = workingset_eviction(page, target_memcg); + __delete_from_swap_cache(page, swap, shadow); xa_unlock_irqrestore(&mapping->i_pages, flags); put_swap_page(page, swap); - workingset_eviction(page, target_memcg); } else { void (*freepage)(struct page *); - void *shadow = NULL; freepage = mapping->a_ops->freepage; /* diff --git a/mm/workingset.c b/mm/workingset.c index 941bbaa6c262..8cbe4e3cbe5c 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -353,15 +353,22 @@ void workingset_refault(struct page *page, void *shadow) /* * Compare the distance to the existing workingset size. We * don't activate pages that couldn't stay resident even if - * all the memory was available to the page cache. Whether - * cache can compete with anon or not depends on having swap. + * all the memory was available to the workingset. Whether + * workingset competition needs to consider anon or not depends + * on having swap. */ workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE); - if (mem_cgroup_get_nr_swap_pages(memcg) > 0) { + if (!file) { workingset_size += lruvec_page_state(eviction_lruvec, - NR_INACTIVE_ANON); + NR_INACTIVE_FILE); + } + if (mem_cgroup_get_nr_swap_pages(memcg) > 0) { workingset_size += lruvec_page_state(eviction_lruvec, NR_ACTIVE_ANON); + if (file) { + workingset_size += lruvec_page_state(eviction_lruvec, + NR_INACTIVE_ANON); + } } if (refault_distance > workingset_size) goto out; -- cgit v1.2.3 From facdaa917c4d5a376d09d25865f5a863f906234a Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Tue, 11 Aug 2020 18:31:00 -0700 Subject: mm: proactive compaction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For some applications, we need to allocate almost all memory as hugepages. However, on a running system, higher-order allocations can fail if the memory is fragmented. The Linux kernel currently does on-demand compaction as we request more hugepages, but this style of compaction incurs very high latency. Experiments with one-time full memory compaction (followed by hugepage allocations) show that the kernel is able to restore a highly fragmented memory state to a fairly compacted memory state within <1 sec for a 32G system.
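Jumping ahead for a moment: the tunable introduced below is translated into a pair of per-node low/high "fragmentation score" thresholds. A hedged sketch of that translation (the helper name is illustrative, though it mirrors fragmentation_score_wmark() in the diff further down; kernel-style max()/min() assumed; with the default proactiveness of 20 it yields low = 80 and high = 90):

	static unsigned int score_wmark(unsigned int proactiveness, bool low)
	{
		/* Low threshold: 100 - proactiveness, never below 5. */
		unsigned int wmark_low = max(100U - proactiveness, 5U);

		/* High threshold: 10 points above low, capped at 100. */
		return low ? wmark_low : min(wmark_low + 10, 100U);
	}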
The experiment data above suggests that a more proactive compaction can help us allocate a large fraction of memory as hugepages while keeping allocation latencies low. For a more proactive compaction, the approach taken here is to define a new sysctl called 'vm.compaction_proactiveness' which dictates bounds for the external fragmentation that kcompactd tries to maintain. The tunable takes a value in the range [0, 100], with a default of 20. Note that a previous version of this patch [1] was found to introduce too many tunables (per-order extfrag{low, high}), but this one reduces them to just one sysctl. Also, the new tunable is an opaque value instead of asking for specific bounds of "external fragmentation", which would have been difficult to estimate. The internal interpretation of this opaque value allows for future fine-tuning. Currently, we use a simple translation from this tunable to [low, high] "fragmentation score" thresholds (low = 100 - proactiveness, high = low + 10). The score for a node is defined as the weighted mean of per-zone external fragmentation. A zone's present_pages determines its weight. To periodically check the per-node score, we reuse the per-node kcompactd threads, which are woken up every 500 milliseconds for this check. If a node's score exceeds its high threshold (derived from the user-provided proactiveness value), proactive compaction is started until its score reaches its low threshold value. By default, proactiveness is set to 20, which implies threshold values of low=80 and high=90. This patch is largely based on ideas from Michal Hocko [2]. See also the LWN article [3].

Performance data
================

System: x86_64, 1T RAM, 80 CPU threads.
Kernel: 5.6.0-rc3 + this patch

echo madvise | sudo tee /sys/kernel/mm/transparent_hugepage/enabled
echo madvise | sudo tee /sys/kernel/mm/transparent_hugepage/defrag

Before starting the driver, the system was fragmented from a userspace program that allocates all memory and then, for each 2M aligned section, frees 3/4 of the base pages using munmap. The workload is mainly anonymous userspace pages, which are easy to move around. I intentionally avoided unmovable pages in this test to see how much latency we incur when hugepage allocations hit direct compaction.

1. Kernel hugepage allocation latencies

With the system in such a fragmented state, a kernel driver then allocates as many hugepages as possible and measures allocation latency (all latency values are in microseconds):

- With vanilla 5.6.0-rc3

percentile latency
–––––––––– –––––––
         5    7894
        10    9496
        25   12561
        30   15295
        40   18244
        50   21229
        60   27556
        75   30147
        80   31047
        90   32859
        95   33799

Total 2M hugepages allocated = 383859 (749G worth of hugepages out of 762G total free => 98% of free memory could be allocated as hugepages)

- With 5.6.0-rc3 + this patch, with proactiveness=20

sysctl -w vm.compaction_proactiveness=20

percentile latency
–––––––––– –––––––
         5       2
        10       2
        25       3
        30       3
        40       3
        50       4
        60       4
        75       4
        80       4
        90       5
        95     429

Total 2M hugepages allocated = 384105 (750G worth of hugepages out of 762G total free => 98% of free memory could be allocated as hugepages)

2. JAVA heap allocation

In this test, we first fragment memory using the same method as for (1). Then, we start a Java process with a heap size set to 700G and request the heap to be allocated with THP hugepages. We also set THP to madvise to allow hugepage backing of this heap.

/usr/bin/time java -Xms700G -Xmx700G -XX:+UseTransparentHugePages -XX:+AlwaysPreTouch

The above command allocates 700G of Java heap using hugepages.
- With vanilla 5.6.0-rc3

17.39user 1666.48system 27:37.89elapsed

- With 5.6.0-rc3 + this patch, with proactiveness=20

8.35user 194.58system 3:19.62elapsed

Elapsed time remains around 3:15 as proactiveness is increased further. Note that proactive compaction happens throughout the runtime of these workloads. The situation of one-time compaction, sufficient to supply hugepages for the following allocation stream, can probably happen for more extreme proactiveness values, like 80 or 90.

In the above Java workload, proactiveness is set to 20. The test starts with a node's score of 80 or higher, depending on the delay between the fragmentation step and starting the benchmark, which gives more-or-less time for the initial round of compaction. As the benchmark consumes hugepages, the node's score quickly rises above the high threshold (90) and proactive compaction starts again, which brings the score down to the low threshold level (80). Repeat.

bpftrace also confirms proactive compaction running 20+ times during the runtime of this Java benchmark. kcompactd threads consume 100% of one of the CPUs while they try to bring a node's score within thresholds.

Backoff behavior
================

The above workloads produce a memory state which is easy to compact. However, if memory is filled with unmovable pages, proactive compaction should essentially back off. To test this aspect:

- Created a kernel driver that allocates almost all memory as hugepages followed by freeing the first 3/4 of each hugepage.
- Set proactiveness=40
- Note that proactive_compact_node() is deferred the maximum number of times with HPAGE_FRAG_CHECK_INTERVAL_MSEC of wait between each check (=> ~30 seconds between retries).

[1] https://patchwork.kernel.org/patch/11098289/
[2] https://lore.kernel.org/linux-mm/20161230131412.GI13301@dhcp22.suse.cz/
[3] https://lwn.net/Articles/817905/

Signed-off-by: Nitin Gupta Signed-off-by: Andrew Morton Tested-by: Oleksandr Natalenko Reviewed-by: Vlastimil Babka Reviewed-by: Khalid Aziz Reviewed-by: Oleksandr Natalenko Cc: Vlastimil Babka Cc: Khalid Aziz Cc: Michal Hocko Cc: Mel Gorman Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Joonsoo Kim Cc: David Rientjes Cc: Nitin Gupta Cc: Oleksandr Natalenko Link: http://lkml.kernel.org/r/20200616204527.19185-1-nigupta@nvidia.com Signed-off-by: Linus Torvalds --- Documentation/admin-guide/sysctl/vm.rst | 15 +++ include/linux/compaction.h | 2 + kernel/sysctl.c | 9 ++ mm/compaction.c | 183 +++++++++++++++++++++++++++++++- mm/internal.h | 1 + mm/vmstat.c | 18 ++++ 6 files changed, 223 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index d997cc3c26d0..4b9d2e8e9142 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -119,6 +119,21 @@ all zones are compacted such that free memory is available in contiguous blocks where possible. This can be important for example in the allocation of huge pages although processes will also directly compact memory as required. +compaction_proactiveness +======================== + +This tunable takes a value in the range [0, 100] with a default value of +20. This tunable determines how aggressively compaction is done in the +background. Setting it to 0 disables proactive compaction. + +Note that compaction has a non-trivial system-wide impact as pages +belonging to different processes are moved around, which could also lead +to latency spikes in unsuspecting applications.
The kernel employs +various heuristics to avoid wasting CPU cycles if it detects that +proactive compaction is not being effective. + +Be careful when setting it to extreme values like 100, as that may +cause excessive background compaction activity. compact_unevictable_allowed =========================== diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 6fa0eea3f530..7a242d46454e 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -85,11 +85,13 @@ static inline unsigned long compact_gap(unsigned int order) #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; +extern int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; +extern int extfrag_for_order(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f785de3caac0..ab72e52f8a7b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2851,6 +2851,15 @@ static struct ctl_table vm_table[] = { .mode = 0200, .proc_handler = sysctl_compaction_handler, }, + { + .procname = "compaction_proactiveness", + .data = &sysctl_compaction_proactiveness, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &one_hundred, + }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, diff --git a/mm/compaction.c b/mm/compaction.c index 86375605faa9..544a98811c82 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -50,6 +50,24 @@ static inline void count_compact_events(enum vm_event_item item, long delta) #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order) #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order) +/* + * Fragmentation score check interval for proactive compaction purposes. + */ +static const int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500; + +/* + * Page order with-respect-to which proactive compaction + * calculates external fragmentation, which is used as + * the "fragmentation score" of a node/zone. + */ +#if defined CONFIG_TRANSPARENT_HUGEPAGE +#define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER +#elif defined HUGETLB_PAGE_ORDER +#define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER +#else +#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) +#endif + static unsigned long release_freepages(struct list_head *freelist) { struct page *page, *next; @@ -1857,6 +1875,76 @@ static inline bool is_via_compact_memory(int order) return order == -1; } +static bool kswapd_is_running(pg_data_t *pgdat) +{ + return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING); +} + +/* + * A zone's fragmentation score is the external fragmentation wrt to the + * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value + * in the range [0, 100]. + * + * The scaling factor ensures that proactive compaction focuses on larger + * zones like ZONE_NORMAL, rather than smaller, specialized zones like + * ZONE_DMA32. For smaller zones, the score value remains close to zero, + * and thus never exceeds the high threshold for proactive compaction. 
+ */ +static int fragmentation_score_zone(struct zone *zone) +{ + unsigned long score; + + score = zone->present_pages * + extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); + return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); +} + +/* + * The per-node proactive (background) compaction process is started by its + * corresponding kcompactd thread when the node's fragmentation score + * exceeds the high threshold. The compaction process remains active till + * the node's score falls below the low threshold, or one of the back-off + * conditions is met. + */ +static int fragmentation_score_node(pg_data_t *pgdat) +{ + unsigned long score = 0; + int zoneid; + + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + struct zone *zone; + + zone = &pgdat->node_zones[zoneid]; + score += fragmentation_score_zone(zone); + } + + return score; +} + +static int fragmentation_score_wmark(pg_data_t *pgdat, bool low) +{ + int wmark_low; + + /* + * Cap the low watermak to avoid excessive compaction + * activity in case a user sets the proactivess tunable + * close to 100 (maximum). + */ + wmark_low = max(100 - sysctl_compaction_proactiveness, 5); + return low ? wmark_low : min(wmark_low + 10, 100); +} + +static bool should_proactive_compact_node(pg_data_t *pgdat) +{ + int wmark_high; + + if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) + return false; + + wmark_high = fragmentation_score_wmark(pgdat, false); + return fragmentation_score_node(pgdat) > wmark_high; +} + static enum compact_result __compact_finished(struct compact_control *cc) { unsigned int order; @@ -1883,6 +1971,25 @@ static enum compact_result __compact_finished(struct compact_control *cc) return COMPACT_PARTIAL_SKIPPED; } + if (cc->proactive_compaction) { + int score, wmark_low; + pg_data_t *pgdat; + + pgdat = cc->zone->zone_pgdat; + if (kswapd_is_running(pgdat)) + return COMPACT_PARTIAL_SKIPPED; + + score = fragmentation_score_zone(cc->zone); + wmark_low = fragmentation_score_wmark(pgdat, true); + + if (score > wmark_low) + ret = COMPACT_CONTINUE; + else + ret = COMPACT_SUCCESS; + + goto out; + } + if (is_via_compact_memory(cc->order)) return COMPACT_CONTINUE; @@ -1941,6 +2048,7 @@ static enum compact_result __compact_finished(struct compact_control *cc) } } +out: if (cc->contended || fatal_signal_pending(current)) ret = COMPACT_CONTENDED; @@ -2421,6 +2529,41 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, return rc; } +/* + * Compact all zones within a node till each zone's fragmentation score + * reaches within proactive compaction thresholds (as determined by the + * proactiveness tunable). + * + * It is possible that the function returns before reaching score targets + * due to various back-off conditions, such as, contention on per-node or + * per-zone locks. 
+ */ +static void proactive_compact_node(pg_data_t *pgdat) +{ + int zoneid; + struct zone *zone; + struct compact_control cc = { + .order = -1, + .mode = MIGRATE_SYNC_LIGHT, + .ignore_skip_hint = true, + .whole_zone = true, + .gfp_mask = GFP_KERNEL, + .proactive_compaction = true, + }; + + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { + zone = &pgdat->node_zones[zoneid]; + if (!populated_zone(zone)) + continue; + + cc.zone = zone; + + compact_zone(&cc, NULL); + + VM_BUG_ON(!list_empty(&cc.freepages)); + VM_BUG_ON(!list_empty(&cc.migratepages)); + } +} /* Compact all zones within a node */ static void compact_node(int nid) @@ -2467,6 +2610,13 @@ static void compact_nodes(void) /* The written value is actually unused, all memory is compacted */ int sysctl_compact_memory; +/* + * Tunable for proactive compaction. It determines how + * aggressively the kernel should compact memory in the + * background. It takes values in the range [0, 100]. + */ +int __read_mostly sysctl_compaction_proactiveness = 20; + /* * This is the entry point for compacting all nodes via * /proc/sys/vm/compact_memory @@ -2646,6 +2796,7 @@ static int kcompactd(void *p) { pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; + unsigned int proactive_defer = 0; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); @@ -2661,12 +2812,34 @@ static int kcompactd(void *p) unsigned long pflags; trace_mm_compaction_kcompactd_sleep(pgdat->node_id); - wait_event_freezable(pgdat->kcompactd_wait, - kcompactd_work_requested(pgdat)); + if (wait_event_freezable_timeout(pgdat->kcompactd_wait, + kcompactd_work_requested(pgdat), + msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC))) { + + psi_memstall_enter(&pflags); + kcompactd_do_work(pgdat); + psi_memstall_leave(&pflags); + continue; + } - psi_memstall_enter(&pflags); - kcompactd_do_work(pgdat); - psi_memstall_leave(&pflags); + /* kcompactd wait timeout */ + if (should_proactive_compact_node(pgdat)) { + unsigned int prev_score, score; + + if (proactive_defer) { + proactive_defer--; + continue; + } + prev_score = fragmentation_score_node(pgdat); + proactive_compact_node(pgdat); + score = fragmentation_score_node(pgdat); + /* + * Defer proactive compaction if the fragmentation + * score did not go down i.e. no progress made. + */ + proactive_defer = score < prev_score ? + 0 : 1 << COMPACT_MAX_DEFER_SHIFT; + } } return 0; diff --git a/mm/internal.h b/mm/internal.h index 9886db20d94f..42cf0b610847 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -239,6 +239,7 @@ struct compact_control { bool no_set_skip_hint; /* Don't mark blocks for skipping */ bool ignore_block_suitable; /* Scan blocks considered unsuitable */ bool direct_compaction; /* False from kcompactd or /proc/... */ + bool proactive_compaction; /* kcompactd proactive compaction */ bool whole_zone; /* Whole zone should/has been scanned */ bool contended; /* Signal lock or sched contention */ bool rescan; /* Rescanning the same pageblock */ diff --git a/mm/vmstat.c b/mm/vmstat.c index fef03463a0cf..f183aa37994e 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1096,6 +1096,24 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total); } +/* + * Calculates external fragmentation within a zone wrt the given order. + * It is defined as the percentage of pages found in blocks of size + * less than 1 << order. It returns values in range [0, 100]. 
+ */ +int extfrag_for_order(struct zone *zone, unsigned int order) +{ + struct contig_page_info info; + + fill_contig_page_info(zone, order, &info); + if (info.free_pages == 0) + return 0; + + return div_u64((info.free_pages - + (info.free_blocks_suitable << order)) * 100, + info.free_pages); +} + /* Same as __fragmentation index but allocs contig_page_info on stack */ int fragmentation_index(struct zone *zone, unsigned int order) { -- cgit v1.2.3 From d34c0a7599ea8c301bc471dfa1eb2bf2db6752d1 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Tue, 11 Aug 2020 18:31:07 -0700 Subject: mm: use unsigned types for fragmentation score Proactive compaction uses per-node/zone "fragmentation score" which is always in range [0, 100], so use unsigned type of these scores as well as for related constants. Signed-off-by: Nitin Gupta Signed-off-by: Andrew Morton Reviewed-by: Baoquan He Cc: Luis Chamberlain Cc: Kees Cook Cc: Iurii Zaikin Cc: Vlastimil Babka Cc: Joonsoo Kim Link: http://lkml.kernel.org/r/20200618010319.13159-1-nigupta@nvidia.com Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 4 ++-- kernel/sysctl.c | 2 +- mm/compaction.c | 18 +++++++++--------- mm/vmstat.c | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 7a242d46454e..25a521d299c1 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -85,13 +85,13 @@ static inline unsigned long compact_gap(unsigned int order) #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; -extern int sysctl_compaction_proactiveness; +extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; -extern int extfrag_for_order(struct zone *zone, unsigned int order); +extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ab72e52f8a7b..287862f91717 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2854,7 +2854,7 @@ static struct ctl_table vm_table[] = { { .procname = "compaction_proactiveness", .data = &sysctl_compaction_proactiveness, - .maxlen = sizeof(int), + .maxlen = sizeof(sysctl_compaction_proactiveness), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, diff --git a/mm/compaction.c b/mm/compaction.c index ed8ea1511634..b7d433f1706a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -53,7 +53,7 @@ static inline void count_compact_events(enum vm_event_item item, long delta) /* * Fragmentation score check interval for proactive compaction purposes. */ -static const int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500; +static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500; /* * Page order with-respect-to which proactive compaction @@ -1890,7 +1890,7 @@ static bool kswapd_is_running(pg_data_t *pgdat) * ZONE_DMA32. For smaller zones, the score value remains close to zero, * and thus never exceeds the high threshold for proactive compaction. 
*/ -static int fragmentation_score_zone(struct zone *zone) +static unsigned int fragmentation_score_zone(struct zone *zone) { unsigned long score; @@ -1906,9 +1906,9 @@ static int fragmentation_score_zone(struct zone *zone) * the node's score falls below the low threshold, or one of the back-off * conditions is met. */ -static int fragmentation_score_node(pg_data_t *pgdat) +static unsigned int fragmentation_score_node(pg_data_t *pgdat) { - unsigned long score = 0; + unsigned int score = 0; int zoneid; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { @@ -1921,17 +1921,17 @@ static int fragmentation_score_node(pg_data_t *pgdat) return score; } -static int fragmentation_score_wmark(pg_data_t *pgdat, bool low) +static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) { - int wmark_low; + unsigned int wmark_low; /* * Cap the low watermak to avoid excessive compaction * activity in case a user sets the proactivess tunable * close to 100 (maximum). */ - wmark_low = max(100 - sysctl_compaction_proactiveness, 5); - return low ? wmark_low : min(wmark_low + 10, 100); + wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); + return low ? wmark_low : min(wmark_low + 10, 100U); } static bool should_proactive_compact_node(pg_data_t *pgdat) @@ -2615,7 +2615,7 @@ int sysctl_compact_memory; * aggressively the kernel should compact memory in the * background. It takes values in the range [0, 100]. */ -int __read_mostly sysctl_compaction_proactiveness = 20; +unsigned int __read_mostly sysctl_compaction_proactiveness = 20; /* * This is the entry point for compacting all nodes via diff --git a/mm/vmstat.c b/mm/vmstat.c index f183aa37994e..3bb034c99887 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1101,7 +1101,7 @@ static int __fragmentation_index(unsigned int order, struct contig_page_info *in * It is defined as the percentage of pages found in blocks of size * less than 1 << order. It returns values in range [0, 100]. */ -int extfrag_for_order(struct zone *zone, unsigned int order) +unsigned int extfrag_for_order(struct zone *zone, unsigned int order) { struct contig_page_info info; -- cgit v1.2.3 From 860b32729a21e0565585a9e2ecea2e5244d65acd Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 11 Aug 2020 18:31:10 -0700 Subject: mm/compaction: correct the comments of compact_defer_shift There is no compact_defer_limit. It should be compact_defer_shift in use. and add compact_order_failed explanation. Signed-off-by: Alex Shi Signed-off-by: Andrew Morton Reviewed-by: Alexander Duyck Link: http://lkml.kernel.org/r/3bd60e1b-a74e-050d-ade4-6e8f54e00b92@linux.alibaba.com Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 1 + mm/compaction.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index efbd95dd3bbf..8379432f4f2f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -536,6 +536,7 @@ struct zone { * On compaction failure, 1< Date: Tue, 11 Aug 2020 18:31:19 -0700 Subject: include/linux/mempolicy.h: fix typo Change "interlave" to "interleave". 
Signed-off-by: Yanfei Xu Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Link: http://lkml.kernel.org/r/20200810063454.9357-1-yanfei.xu@windriver.com Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5f1648c23e29..5f1c74df264d 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -28,7 +28,7 @@ struct mm_struct; * the process policy is used. Interrupts ignore the memory policy * of the current process. * - * Locking policy for interlave: + * Locking policy for interleave: * In process context there is no locking because only the process accesses * its own state. All vma manipulation is somewhat protected by a down_read on * mmap_lock. -- cgit v1.2.3 From 9066e5cfb73cdbcdbb49e87999482ab615e9fc76 Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Tue, 11 Aug 2020 18:31:22 -0700 Subject: mm, oom: make the calculation of oom badness more accurate

Recently we found an issue in our production environment: when memcg oom is triggered, the oom killer doesn't choose the process with the largest resident memory but chooses the first scanned process. Note that all processes in this memcg have the same oom_score_adj, so the oom killer should choose the process with the largest resident memory. Below is part of the oom info, which is enough to analyze this issue.

[7516987.983223] memory: usage 16777216kB, limit 16777216kB, failcnt 52843037
[7516987.983224] memory+swap: usage 16777216kB, limit 9007199254740988kB, failcnt 0
[7516987.983225] kmem: usage 301464kB, limit 9007199254740988kB, failcnt 0
[...]
[7516987.983293] [ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name
[7516987.983510] [ 5740] 0 5740 257 1 32768 0 -998 pause
[7516987.983574] [58804] 0 58804 4594 771 81920 0 -998 entry_point.bas
[7516987.983577] [58908] 0 58908 7089 689 98304 0 -998 cron
[7516987.983580] [58910] 0 58910 16235 5576 163840 0 -998 supervisord
[7516987.983590] [59620] 0 59620 18074 1395 188416 0 -998 sshd
[7516987.983594] [59622] 0 59622 18680 6679 188416 0 -998 python
[7516987.983598] [59624] 0 59624 1859266 5161 548864 0 -998 odin-agent
[7516987.983600] [59625] 0 59625 707223 9248 983040 0 -998 filebeat
[7516987.983604] [59627] 0 59627 416433 64239 774144 0 -998 odin-log-agent
[7516987.983607] [59631] 0 59631 180671 15012 385024 0 -998 python3
[7516987.983612] [61396] 0 61396 791287 3189 352256 0 -998 client
[7516987.983615] [61641] 0 61641 1844642 29089 946176 0 -998 client
[7516987.983765] [ 9236] 0 9236 2642 467 53248 0 -998 php_scanner
[7516987.983911] [42898] 0 42898 15543 838 167936 0 -998 su
[7516987.983915] [42900] 1000 42900 3673 867 77824 0 -998 exec_script_vr2
[7516987.983918] [42925] 1000 42925 36475 19033 335872 0 -998 python
[7516987.983921] [57146] 1000 57146 3673 848 73728 0 -998 exec_script_J2p
[7516987.983925] [57195] 1000 57195 186359 22958 491520 0 -998 python2
[7516987.983928] [58376] 1000 58376 275764 14402 290816 0 -998 rosmaster
[7516987.983931] [58395] 1000 58395 155166 4449 245760 0 -998 rosout
[7516987.983935] [58406] 1000 58406 18285584 3967322 37101568 0 -998 data_sim
[7516987.984221]
oom-kill:constraint=CONSTRAINT_MEMCG,nodemask=(null),cpuset=3aa16c9482ae3a6f6b78bda68a55d32c87c99b985e0f11331cddf05af6c4d753,mems_allowed=0-1,oom_memcg=/kubepods/podf1c273d3-9b36-11ea-b3df-246e9693c184,task_memcg=/kubepods/podf1c273d3-9b36-11ea-b3df-246e9693c184/1f246a3eeea8f70bf91141eeaf1805346a666e225f823906485ea0b6c37dfc3d,task=pause,pid=5740,uid=0
[7516987.984254] Memory cgroup out of memory: Killed process 5740 (pause) total-vm:1028kB, anon-rss:4kB, file-rss:0kB, shmem-rss:0kB
[7516988.092344] oom_reaper: reaped process 5740 (pause), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB

We can find that the first scanned process 5740 (pause) was killed, but its rss is only one page. That is because, when we calculate the oom badness in oom_badness(), we always ignore the negative points and convert all of these negative points to 1. Now, as the oom_score_adj of all the processes in this targeted memcg has the same value -998, the points of these processes are all negative. As a result, the first scanned process will be killed.

The oom_score_adj (-998) in this memcg is set by kubelet, because it is a Guaranteed pod, which has a higher priority and should be protected from being killed by a system oom.

To fix this issue, we should make the calculation of oom points more accurate. We can achieve that by converting chosen_points from 'unsigned long' to 'long'.

[cai@lca.pw: reported an issue in the previous version] [mhocko@suse.com: fixed the issue reported by Cai] [mhocko@suse.com: add the comment in proc_oom_score()] [laoar.shao@gmail.com: v3] Link: http://lkml.kernel.org/r/1594396651-9931-1-git-send-email-laoar.shao@gmail.com Signed-off-by: Yafang Shao Signed-off-by: Andrew Morton Tested-by: Naresh Kamboju Acked-by: Michal Hocko Cc: David Rientjes Cc: Qian Cai Link: http://lkml.kernel.org/r/1594309987-9919-1-git-send-email-laoar.shao@gmail.com Signed-off-by: Linus Torvalds --- fs/proc/base.c | 11 ++++++++++- include/linux/oom.h | 4 ++-- mm/oom_kill.c | 22 ++++++++++------------ 3 files changed, 22 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/base.c b/fs/proc/base.c index a333caeca291..617db4e0faa0 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -551,8 +551,17 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, { unsigned long totalpages = totalram_pages() + total_swap_pages; unsigned long points = 0; + long badness; + + badness = oom_badness(task, totalpages); + /* + * Special case OOM_SCORE_ADJ_MIN for all others scale the + * badness value into [0, 2000] range which we have been + * exporting for a long time so userspace might depend on it. + */ + if (badness != LONG_MIN) + points = (1000 + badness * 1000 / (long)totalpages) * 2 / 3; - points = oom_badness(task, totalpages) * 1000 / totalpages; seq_printf(m, "%lu\n", points); return 0; diff --git a/include/linux/oom.h b/include/linux/oom.h index c696c265f019..f022f581ac29 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -48,7 +48,7 @@ struct oom_control { /* Used by oom implementation, do not set */ unsigned long totalpages; struct task_struct *chosen; - unsigned long chosen_points; + long chosen_points; /* Used to print the constraint info.
*/ enum oom_constraint constraint; @@ -107,7 +107,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) bool __oom_reap_task_mm(struct mm_struct *mm); -extern unsigned long oom_badness(struct task_struct *p, +long oom_badness(struct task_struct *p, unsigned long totalpages); extern bool out_of_memory(struct oom_control *oc); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index d30ce75f23fb..48e0db54d838 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -196,17 +196,17 @@ static bool is_dump_unreclaim_slabs(void) * predictable as possible. The goal is to return the highest value for the * task consuming the most memory to avoid subsequent oom failures. */ -unsigned long oom_badness(struct task_struct *p, unsigned long totalpages) +long oom_badness(struct task_struct *p, unsigned long totalpages) { long points; long adj; if (oom_unkillable_task(p)) - return 0; + return LONG_MIN; p = find_lock_task_mm(p); if (!p) - return 0; + return LONG_MIN; /* * Do not even consider tasks which are explicitly marked oom @@ -218,7 +218,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned long totalpages) test_bit(MMF_OOM_SKIP, &p->mm->flags) || in_vfork(p)) { task_unlock(p); - return 0; + return LONG_MIN; } /* @@ -233,11 +233,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned long totalpages) adj *= totalpages / 1000; points += adj; - /* - * Never return 0 for an eligible task regardless of the root bonus and - * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). - */ - return points > 0 ? points : 1; + return points; } static const char * const oom_constraint_text[] = { @@ -310,7 +306,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc) static int oom_evaluate_task(struct task_struct *task, void *arg) { struct oom_control *oc = arg; - unsigned long points; + long points; if (oom_unkillable_task(task)) goto next; @@ -336,12 +332,12 @@ static int oom_evaluate_task(struct task_struct *task, void *arg) * killed first if it triggers an oom, then select it. */ if (oom_task_origin(task)) { - points = ULONG_MAX; + points = LONG_MAX; goto select; } points = oom_badness(task, oc->totalpages); - if (!points || points < oc->chosen_points) + if (points == LONG_MIN || points < oc->chosen_points) goto next; select: @@ -365,6 +361,8 @@ abort: */ static void select_bad_process(struct oom_control *oc) { + oc->chosen_points = LONG_MIN; + if (is_memcg_oom(oc)) mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); else { -- cgit v1.2.3 From 34ae204f18519f0920bd50a644abd6fefc8dbfcf Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Tue, 11 Aug 2020 18:31:38 -0700 Subject: hugetlbfs: remove call to huge_pte_alloc without i_mmap_rwsem Commit c0d0381ade79 ("hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization") requires callers of huge_pte_alloc to hold i_mmap_rwsem in at least read mode. This is because the explicit locking in huge_pmd_share (called by huge_pte_alloc) was removed. When restructuring the code, the call to huge_pte_alloc in the else block at the beginning of hugetlb_fault was missed. Unfortunately, that else clause is exercised when there is no page table entry. This will likely lead to a call to huge_pmd_share. If huge_pmd_share thinks pmd sharing is possible, it will traverse the mapping tree (i_mmap) without holding i_mmap_rwsem. If someone else is modifying the tree, bad things such as addressing exceptions or worse could happen. Simply remove the else clause. It should have been removed previously. 
The code following the else will call huge_pte_alloc with the appropriate locking. To prevent this type of issue in the future, add routines to assert that i_mmap_rwsem is held, and call these routines in huge pmd sharing routines. Fixes: c0d0381ade79 ("hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization") Suggested-by: Matthew Wilcox Signed-off-by: Mike Kravetz Signed-off-by: Andrew Morton Cc: Michal Hocko Cc: Hugh Dickins Cc: Naoya Horiguchi Cc: "Aneesh Kumar K.V" Cc: Andrea Arcangeli Cc: "Kirill A.Shutemov" Cc: Davidlohr Bueso Cc: Prakash Sangappa Cc: Link: http://lkml.kernel.org/r/e670f327-5cf9-1959-96e4-6dc7cc30d3d5@oracle.com Signed-off-by: Linus Torvalds --- include/linux/fs.h | 10 ++++++++++ include/linux/hugetlb.h | 8 +++++--- mm/hugetlb.c | 15 +++++++-------- mm/rmap.c | 2 +- 4 files changed, 23 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 011af396aa17..7c69dd7c6160 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -518,6 +518,16 @@ static inline void i_mmap_unlock_read(struct address_space *mapping) up_read(&mapping->i_mmap_rwsem); } +static inline void i_mmap_assert_locked(struct address_space *mapping) +{ + lockdep_assert_held(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_assert_write_locked(struct address_space *mapping) +{ + lockdep_assert_held_write(&mapping->i_mmap_rwsem); +} + /* * Might pages of this file be mapped into userspace? */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 50650d0d01b9..a520bf26e5d8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -164,7 +164,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); +int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep); void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, @@ -203,8 +204,9 @@ static inline struct address_space *hugetlb_page_mapping_lock_write( return NULL; } -static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, - pte_t *ptep) +static inline int huge_pmd_unshare(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep) { return 0; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index dffafb5bf2ed..8a18f1234e80 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3967,7 +3967,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, continue; ptl = huge_pte_lock(h, mm, ptep); - if (huge_pmd_unshare(mm, &address, ptep)) { + if (huge_pmd_unshare(mm, vma, &address, ptep)) { spin_unlock(ptl); /* * We just unmapped a page of PMDs by clearing a PUD. 
@@ -4554,10 +4554,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); - } else { - ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); - if (!ptep) - return VM_FAULT_OOM; } /* @@ -5034,7 +5030,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); - if (huge_pmd_unshare(mm, &address, ptep)) { + if (huge_pmd_unshare(mm, vma, &address, ptep)) { pages++; spin_unlock(ptl); shared_pmd = true; @@ -5415,12 +5411,14 @@ out: * returns: 1 successfully unmapped a shared pte page * 0 the underlying pte page is not shared, or it is the last user */ -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep) { pgd_t *pgd = pgd_offset(mm, *addr); p4d_t *p4d = p4d_offset(pgd, *addr); pud_t *pud = pud_offset(p4d, *addr); + i_mmap_assert_write_locked(vma->vm_file->f_mapping); BUG_ON(page_count(virt_to_page(ptep)) == 0); if (page_count(virt_to_page(ptep)) == 1) return 0; @@ -5438,7 +5436,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) return NULL; } -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep) { return 0; } diff --git a/mm/rmap.c b/mm/rmap.c index 5fe2dedce1fc..6cce9ef06753 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1469,7 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * do this outside rmap routines. */ VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); - if (huge_pmd_unshare(mm, &address, pvmw.pte)) { + if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { /* * huge_pmd_unshare unmapped an entire PMD * page. There is no way of knowing exactly -- cgit v1.2.3 From 4958e4d86ecb011a81f5d80ce2023ef9f10692d8 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Tue, 11 Aug 2020 18:31:48 -0700 Subject: mm: thp: remove debug_cow switch Since commit 3917c80280c93a7123f ("thp: change CoW semantics for anon-THP"), the CoW page fault of THP has been rewritten, and debug_cow is not used anymore. So, just remove it. Signed-off-by: Yang Shi Signed-off-by: Andrew Morton Reviewed-by: Zi Yan Cc: Kirill A. Shutemov Link: http://lkml.kernel.org/r/1592270980-116062-1-git-send-email-yang.shi@linux.alibaba.com Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 7 ------- mm/huge_memory.c | 21 --------------------- 2 files changed, 28 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 17c4c4975145..467302056e17 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -181,13 +181,6 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) -- cgit v1.2.3 From: Anshuman Khandual Date: Tue, 11 Aug 2020 18:31:51 -0700 Subject: mm/vmstat: add events for THP migration without split Add the following new vmstat events which will help in validating THP migration without split. Statistics reported through these new VM events will help in performance debugging. 1. THP_MIGRATION_SUCCESS 2. THP_MIGRATION_FAILURE 3. THP_MIGRATION_SPLIT In addition, these new events also update normal page migration statistics appropriately via PGMIGRATE_SUCCESS and PGMIGRATE_FAILURE.
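As a rough sketch of how these counters relate (illustrative only: count_vm_event() and count_vm_events() are the existing vmstat helpers, while the migrated_without_split flag is hypothetical), a successful migration of one 2MB THP backed by 512 base pages would be accounted roughly as:

	if (migrated_without_split) {
		count_vm_event(THP_MIGRATION_SUCCESS);		/* one event per THP */
		count_vm_events(PGMIGRATE_SUCCESS, 512);	/* one per subpage */
	}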
While here, also update the current trace event 'mm_migrate_pages' to accommodate the now-available THP statistics. [akpm@linux-foundation.org: s/hpage_nr_pages/thp_nr_pages/] [ziy@nvidia.com: v2] Link: http://lkml.kernel.org/r/C5E3C65C-8253-4638-9D3C-71A61858BB8B@nvidia.com [anshuman.khandual@arm.com: s/thp_nr_pages/hpage_nr_pages/] Link: http://lkml.kernel.org/r/1594287583-16568-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Signed-off-by: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Daniel Jordan Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Zi Yan Cc: John Hubbard Cc: Naoya Horiguchi Link: http://lkml.kernel.org/r/1594080415-27924-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Linus Torvalds --- Documentation/vm/page_migration.rst | 27 +++++++++++++++++++ include/linux/vm_event_item.h | 3 +++ include/trace/events/migrate.h | 17 +++++++++--- mm/migrate.c | 52 +++++++++++++++++++++++++++++++------ mm/vmstat.c | 3 +++ 5 files changed, 91 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/Documentation/vm/page_migration.rst b/Documentation/vm/page_migration.rst index 1d6cd7db4e43..68883ac485fa 100644 --- a/Documentation/vm/page_migration.rst +++ b/Documentation/vm/page_migration.rst @@ -253,5 +253,32 @@ which are function pointers of struct address_space_operations. PG_isolated is alias with PG_reclaim flag so driver shouldn't use the flag for own purpose. +Monitoring Migration +===================== + +The following events (counters) can be used to monitor page migration. + +1. PGMIGRATE_SUCCESS: Normal page migration success. Each count means that a + page was migrated. If the page was a non-THP page, then this counter is + increased by one. If the page was a THP, then this counter is increased by + the number of THP subpages. For example, migration of a single 2MB THP that + has 4KB-size base pages (subpages) will cause this counter to increase by + 512. + +2. PGMIGRATE_FAIL: Normal page migration failure. Same counting rules as for + _SUCCESS, above: this will be increased by the number of subpages, if it was + a THP. + +3. THP_MIGRATION_SUCCESS: A THP was migrated without being split. + +4. THP_MIGRATION_FAIL: A THP could not be migrated, nor could it be split. + +5. THP_MIGRATION_SPLIT: A THP was migrated, but not as such: first, the THP had + to be split. After splitting, a migration retry was used for its subpages. + +THP_MIGRATION_* events also update the appropriate PGMIGRATE_SUCCESS or +PGMIGRATE_FAIL events. For example, a THP migration failure will cause both +THP_MIGRATION_FAIL and PGMIGRATE_FAIL to increase. + Christoph Lameter, May 8, 2006. Minchan Kim, Mar 28, 2016.
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 24fc7c3ae7d6..2e6ca53b9bbd 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -56,6 +56,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #endif #ifdef CONFIG_MIGRATION PGMIGRATE_SUCCESS, PGMIGRATE_FAIL, + THP_MIGRATION_SUCCESS, + THP_MIGRATION_FAIL, + THP_MIGRATION_SPLIT, #endif #ifdef CONFIG_COMPACTION COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 705b33d1e395..4d434398d64d 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -46,13 +46,18 @@ MIGRATE_REASON TRACE_EVENT(mm_migrate_pages, TP_PROTO(unsigned long succeeded, unsigned long failed, - enum migrate_mode mode, int reason), + unsigned long thp_succeeded, unsigned long thp_failed, + unsigned long thp_split, enum migrate_mode mode, int reason), - TP_ARGS(succeeded, failed, mode, reason), + TP_ARGS(succeeded, failed, thp_succeeded, thp_failed, + thp_split, mode, reason), TP_STRUCT__entry( __field( unsigned long, succeeded) __field( unsigned long, failed) + __field( unsigned long, thp_succeeded) + __field( unsigned long, thp_failed) + __field( unsigned long, thp_split) __field( enum migrate_mode, mode) __field( int, reason) ), @@ -60,13 +65,19 @@ TRACE_EVENT(mm_migrate_pages, TP_fast_assign( __entry->succeeded = succeeded; __entry->failed = failed; + __entry->thp_succeeded = thp_succeeded; + __entry->thp_failed = thp_failed; + __entry->thp_split = thp_split; __entry->mode = mode; __entry->reason = reason; ), - TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s", + TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s", __entry->succeeded, __entry->failed, + __entry->thp_succeeded, + __entry->thp_failed, + __entry->thp_split, __print_symbolic(__entry->mode, MIGRATE_MODE), __print_symbolic(__entry->reason, MIGRATE_REASON)) ); diff --git a/mm/migrate.c b/mm/migrate.c index b52f30113366..5ea275878383 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1418,22 +1418,35 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, enum migrate_mode mode, int reason) { int retry = 1; + int thp_retry = 1; int nr_failed = 0; int nr_succeeded = 0; + int nr_thp_succeeded = 0; + int nr_thp_failed = 0; + int nr_thp_split = 0; int pass = 0; + bool is_thp = false; struct page *page; struct page *page2; int swapwrite = current->flags & PF_SWAPWRITE; - int rc; + int rc, nr_subpages; if (!swapwrite) current->flags |= PF_SWAPWRITE; - for(pass = 0; pass < 10 && retry; pass++) { + for (pass = 0; pass < 10 && (retry || thp_retry); pass++) { retry = 0; + thp_retry = 0; list_for_each_entry_safe(page, page2, from, lru) { retry: + /* + * THP statistics is based on the source huge page. + * Capture required information that might get lost + * during migration. 
+ */ + is_thp = PageTransHuge(page); + nr_subpages = hpage_nr_pages(page); cond_resched(); if (PageHuge(page)) @@ -1464,15 +1477,30 @@ retry: unlock_page(page); if (!rc) { list_safe_reset_next(page, page2, lru); + nr_thp_split++; goto retry; } } + if (is_thp) { + nr_thp_failed++; + nr_failed += nr_subpages; + goto out; + } nr_failed++; goto out; case -EAGAIN: + if (is_thp) { + thp_retry++; + break; + } retry++; break; case MIGRATEPAGE_SUCCESS: + if (is_thp) { + nr_thp_succeeded++; + nr_succeeded += nr_subpages; + break; + } nr_succeeded++; break; default: @@ -1482,19 +1510,27 @@ retry: * removed from migration page list and not * retried in the next outer loop. */ + if (is_thp) { + nr_thp_failed++; + nr_failed += nr_subpages; + break; + } nr_failed++; break; } } } - nr_failed += retry; + nr_failed += retry + thp_retry; + nr_thp_failed += thp_retry; rc = nr_failed; out: - if (nr_succeeded) - count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); - if (nr_failed) - count_vm_events(PGMIGRATE_FAIL, nr_failed); - trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason); + count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); + count_vm_events(PGMIGRATE_FAIL, nr_failed); + count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded); + count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed); + count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split); + trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded, + nr_thp_failed, nr_thp_split, mode, reason); if (!swapwrite) current->flags &= ~PF_SWAPWRITE; diff --git a/mm/vmstat.c b/mm/vmstat.c index 3bb034c99887..727a26d1ec1d 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1277,6 +1277,9 @@ const char * const vmstat_text[] = { #ifdef CONFIG_MIGRATION "pgmigrate_success", "pgmigrate_fail", + "thp_migration_success", + "thp_migration_fail", + "thp_migration_split", #endif #ifdef CONFIG_COMPACTION "compact_migrate_scanned", -- cgit v1.2.3 From af161bee93332a1ff10ba029f41936d21850ae82 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 11 Aug 2020 18:32:06 -0700 Subject: include/linux/sched/mm.h: optimize current_gfp_context() The current_gfp_context() converts a number of PF_MEMALLOC_* per-process flags into the corresponding GFP_* flags for memory allocation. In that function, current->flags is accessed 3 times. That may lead to duplicated access of the same memory location. This is not usually a problem with minimal debug config options on as the compiler can optimize away the duplicated memory accesses. With most of the debug config options on, however, that may not be the case. For example, the x86-64 object size of the __need_fs_reclaim() in a debug kernel that calls current_gfp_context() was 309 bytes. With this patch applied, the object size is reduced to 202 bytes. This is a saving of 107 bytes and will probably be slightly faster too. Use READ_ONCE() to access current->flags to prevent the compiler from possibly accessing current->flags multiple times. 
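The fix is simply to take one snapshot of the flags word and test only the local copy; a minimal sketch of the pattern (mirroring the hunk below):

	unsigned int pflags = READ_ONCE(current->flags);	/* single load */

	if (pflags & PF_MEMALLOC_NOIO)			/* later tests hit the local */
		flags &= ~(__GFP_IO | __GFP_FS);	/* copy, not current->flags */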
Signed-off-by: Waiman Long Signed-off-by: Andrew Morton Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Mathieu Desnoyers Cc: Michel Lespinasse Link: http://lkml.kernel.org/r/20200618212936.9776-1-longman@redhat.com Signed-off-by: Linus Torvalds --- include/linux/sched/mm.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 85023ddc2dc2..f889e332912f 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -178,14 +178,16 @@ static inline bool in_vfork(struct task_struct *tsk) */ static inline gfp_t current_gfp_context(gfp_t flags) { - if (unlikely(current->flags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { + unsigned int pflags = READ_ONCE(current->flags); + + if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it makes precedence */ - if (current->flags & PF_MEMALLOC_NOIO) + if (pflags & PF_MEMALLOC_NOIO) flags &= ~(__GFP_IO | __GFP_FS); - else if (current->flags & PF_MEMALLOC_NOFS) + else if (pflags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; } return flags; -- cgit v1.2.3 From 1067b261cc973d68c29de54ef0587c56786e6bd5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:27 -0700 Subject: mm: drop duplicated words in Drop the doubled words "used" and "by". Drop the repeated acronym "TLB" and make several other fixes around it. (capital letters, spellos) Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: SeongJae Park Link: http://lkml.kernel.org/r/2bb6e13e-44df-4920-52d9-4d3539945f73@infradead.org Signed-off-by: Linus Torvalds --- include/linux/pgtable.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 53e97da1e8e2..a124c21e3204 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -804,7 +804,7 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, /* * No-op macros that just return the current protection value. Defined here - * because these macros can be used used even if CONFIG_MMU is not defined. + * because these macros can be used even if CONFIG_MMU is not defined. */ #ifndef pgprot_nx @@ -1234,7 +1234,7 @@ static inline int pmd_trans_unstable(pmd_t *pmd) * Technically a PTE can be PROTNONE even when not doing NUMA balancing but * the only case the kernel cares is for NUMA balancing and is only ever set * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked - * _PAGE_PROTNONE so by by default, implement the helper as "always no". It + * _PAGE_PROTNONE so by default, implement the helper as "always no". It * is the responsibility of the caller to distinguish between PROT_NONE * protections and NUMA hinting fault protections. */ @@ -1318,10 +1318,10 @@ static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) /* * ARCHes with special requirements for evicting THP backing TLB entries can * implement this. Otherwise also, it can help optimize normal TLB flush in - * THP regime. stock flush_tlb_range() typically has optimization to nuke the - * entire TLB TLB if flush span is greater than a threshold, which will - * likely be true for a single huge page. Thus a single thp flush will - * invalidate the entire TLB which is not desitable. + * THP regime. 
Stock flush_tlb_range() typically has optimization to nuke the + * entire TLB if flush span is greater than a threshold, which will + * likely be true for a single huge page. Thus a single THP flush will + * invalidate the entire TLB which is not desirable. * e.g. see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) -- cgit v1.2.3 From 11192337206d2211bce3ba4ec1575439b6045654 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:30 -0700 Subject: mm: drop duplicated words in Drop the doubled words "to" and "the". Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: SeongJae Park Link: http://lkml.kernel.org/r/d9fae8d6-0d60-4d52-9385-3199ee98de49@infradead.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index f6a82f9bccd7..f97b10117d44 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -479,7 +479,7 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags) { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } /* - * vm_fault is filled by the the pagefault handler and passed to the vma's + * vm_fault is filled by the pagefault handler and passed to the vma's * ->fault function. The vma's ->fault is responsible for returning a bitmask * of VM_FAULT_xxx flags that give details about how the fault was handled. * @@ -2599,7 +2599,7 @@ extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); -/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */ +/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ extern int expand_downwards(struct vm_area_struct *vma, unsigned long address); #if VM_GROWSUP -- cgit v1.2.3 From 3ecabd31341fb4307f156a2bf4467c3f8fa9101b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:33 -0700 Subject: include/linux/highmem.h: fix duplicated words in a comment Change the doubled word "is" in a comment to "it is". Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Link: http://lkml.kernel.org/r/ad605959-0083-4794-8d31-6b073300dd6f@infradead.org Signed-off-by: Linus Torvalds --- include/linux/highmem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/highmem.h b/include/linux/highmem.h index d6e82e3de027..14e6202ce47f 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -73,7 +73,7 @@ static inline void kunmap(struct page *page) * no global lock is needed and because the kmap code must perform a global TLB * invalidation when the kmap pool wraps. * - * However when holding an atomic kmap is is not legal to sleep, so atomic + * However when holding an atomic kmap it is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. * * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap -- cgit v1.2.3 From c82f16b52cb3b50ac1b47e6c590e3dddefc5a97c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:36 -0700 Subject: include/linux/frontswap.h: drop duplicated word in a comment Drop the doubled word "in" in a comment. 
Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Konrad Rzeszutek Wilk Link: http://lkml.kernel.org/r/3af7ed91-ad62-8445-40a4-9e07a64b9523@infradead.org Signed-off-by: Linus Torvalds --- include/linux/frontswap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index 6d775984905b..b07d88c92bb2 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -10,7 +10,7 @@ /* * Return code to denote that requested number of * frontswap pages are unused(moved to page cache). - * Used in in shmem_unuse and try_to_unuse. + * Used in shmem_unuse and try_to_unuse. */ #define FRONTSWAP_PAGES_UNUSED 2 -- cgit v1.2.3 From 0845f83122d93ae3bafa7ea10209de2148a3b9bc Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:32:40 -0700 Subject: include/linux/memcontrol.h: drop duplicate word and fix spello Drop the doubled word "for" in a comment. Fix spello of "incremented". Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Acked-by: Chris Down Cc: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Link: http://lkml.kernel.org/r/b04aa2e4-7c95-12f0-599d-43d07fb28134@infradead.org Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 2c2d301eac33..385237e4cb44 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -65,8 +65,8 @@ struct mem_cgroup_id { /* * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremated by the number of pages. This counter is used for - * for trigger some periodic events. This is straightforward and better + * it will be incremented by the number of pages. This counter is used + * to trigger some periodic events. This is straightforward and better * than using jiffies etc. to handle periodic memcg event. */ enum mem_cgroup_events_target { -- cgit v1.2.3 From bfe00c5bbd9ee37b99c281429556c335271d027b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Aug 2020 18:33:34 -0700 Subject: syscalls: use uaccess_kernel in addr_limit_user_check Patch series "clean up address limit helpers", v2. In preparation for eventually phasing out direct use of set_fs(), this series removes the segment_eq() arch helper that is only used to implement or duplicate the uaccess_kernel() API, and then adds descriptive helpers to force the kernel address limit. This patch (of 6): Use the uaccess_kernel helper instead of duplicating it. 
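At this point uaccess_kernel() is still defined in terms of the very comparison being removed, so the substitution is purely a cleanup; schematically:

	/* include/linux/uaccess.h, before the later removal patch */
	#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

	/* so the check on the user-mode return path becomes */
	if (CHECK_DATA_CORRUPTION(uaccess_kernel(),
				  "Invalid address limit on user-mode return"))
		force_sig(SIGKILL);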
[hch@lst.de: arm: don't call addr_limit_user_check for nommu] Link: http://lkml.kernel.org/r/20200721045834.GA9613@lst.de Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Tested-by: Guenter Roeck Acked-by: Linus Torvalds Cc: Nick Hu Cc: Greentime Hu Cc: Vincent Chen Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Geert Uytterhoeven Link: http://lkml.kernel.org/r/20200714105505.935079-1-hch@lst.de Link: http://lkml.kernel.org/r/20200710135706.537715-1-hch@lst.de Link: http://lkml.kernel.org/r/20200710135706.537715-2-hch@lst.de Signed-off-by: Linus Torvalds --- arch/arm/kernel/signal.c | 2 ++ include/linux/syscalls.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index ab2568996ddb..c9dc912b83f0 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -713,7 +713,9 @@ struct page *get_signal_page(void) /* Defer to generic check */ asmlinkage void addr_limit_check_failed(void) { +#ifdef CONFIG_MMU addr_limit_user_check(); +#endif } #ifdef CONFIG_DEBUG_RSEQ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a2429d336593..dc2b827c81e5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -263,7 +263,7 @@ static inline void addr_limit_user_check(void) return; #endif - if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS), + if (CHECK_DATA_CORRUPTION(uaccess_kernel(), "Invalid address limit on user-mode return")) force_sig(SIGKILL); -- cgit v1.2.3 From 428e2976a5bf7e7f5554286d7a5a33b8147b106a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Aug 2020 18:33:44 -0700 Subject: uaccess: remove segment_eq segment_eq is only used to implement uaccess_kernel. Just open code uaccess_kernel in the arch uaccess headers and remove one layer of indirection. 
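The conversion is mechanical in each arch header; schematically, for an arch whose mm_segment_t carries a .seg member:

	/* before: uaccess_kernel() built on the per-arch segment_eq() */
	#define segment_eq(a, b)	((a).seg == (b).seg)
	#define uaccess_kernel()	segment_eq(get_fs(), KERNEL_DS)

	/* after: the arch open codes uaccess_kernel() directly */
	#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)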
Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Acked-by: Linus Torvalds Acked-by: Greentime Hu Acked-by: Geert Uytterhoeven Cc: Nick Hu Cc: Vincent Chen Cc: Paul Walmsley Cc: Palmer Dabbelt Link: http://lkml.kernel.org/r/20200710135706.537715-5-hch@lst.de Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/uaccess.h | 2 +- arch/arc/include/asm/segment.h | 3 +-- arch/arm/include/asm/uaccess.h | 4 ++-- arch/arm64/include/asm/uaccess.h | 2 +- arch/csky/include/asm/segment.h | 2 +- arch/h8300/include/asm/segment.h | 2 +- arch/ia64/include/asm/uaccess.h | 2 +- arch/m68k/include/asm/segment.h | 2 +- arch/microblaze/include/asm/uaccess.h | 2 +- arch/mips/include/asm/uaccess.h | 2 +- arch/nds32/include/asm/uaccess.h | 2 +- arch/nios2/include/asm/uaccess.h | 2 +- arch/openrisc/include/asm/uaccess.h | 2 +- arch/parisc/include/asm/uaccess.h | 2 +- arch/powerpc/include/asm/uaccess.h | 3 +-- arch/riscv/include/asm/uaccess.h | 4 +--- arch/s390/include/asm/uaccess.h | 2 +- arch/sh/include/asm/segment.h | 3 +-- arch/sparc/include/asm/uaccess_32.h | 2 +- arch/sparc/include/asm/uaccess_64.h | 2 +- arch/x86/include/asm/uaccess.h | 2 +- arch/xtensa/include/asm/uaccess.h | 2 +- include/asm-generic/uaccess.h | 4 ++-- include/linux/uaccess.h | 2 -- 24 files changed, 25 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 1fe2b56cb861..1b6f25efa247 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -20,7 +20,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* * Is a address valid? This does a straightforward calculation rather diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h index 6a2a5be5026d..871f8ab11bfd 100644 --- a/arch/arc/include/asm/segment.h +++ b/arch/arc/include/asm/segment.h @@ -14,8 +14,7 @@ typedef unsigned long mm_segment_t; #define KERNEL_DS MAKE_MM_SEG(0) #define USER_DS MAKE_MM_SEG(TASK_SIZE) - -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) #endif /* __ASSEMBLY__ */ #endif /* __ASMARC_SEGMENT_H */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index b5fdd30252f8..a13d90206472 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -76,7 +76,7 @@ static inline void set_fs(mm_segment_t fs) modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); } -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) /* * We use 33-bit arithmetic here. Success returns zero, failure returns @@ -267,7 +267,7 @@ extern int __put_user_8(void *, unsigned long long); */ #define USER_DS KERNEL_DS -#define segment_eq(a, b) (1) +#define uaccess_kernel() (true) #define __addr_ok(addr) ((void)(addr), 1) #define __range_ok(addr, size) ((void)(addr), 0) #define get_fs() (KERNEL_DS) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 8d7c466f809b..991dd5f031e4 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -50,7 +50,7 @@ static inline void set_fs(mm_segment_t fs) CONFIG_ARM64_UAO)); } -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) /* * Test whether a block of memory is a valid user space address. 
diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index db2640d5f575..79ede9b1a646 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -13,6 +13,6 @@ typedef struct { #define USER_DS ((mm_segment_t) { 0x80000000UL }) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASM_CSKY_SEGMENT_H */ diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h index a407978f9f9f..37950725d9b9 100644 --- a/arch/h8300/include/asm/segment.h +++ b/arch/h8300/include/asm/segment.h @@ -33,7 +33,7 @@ static inline mm_segment_t get_fs(void) return USER_DS; } -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 8aa473a4b0f4..179243c3dfc7 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -50,7 +50,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* * When accessing user memory, we need to make sure the entire area really is in diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index c6686559e9b7..2b5e68a71ef7 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h @@ -52,7 +52,7 @@ static inline void set_fs(mm_segment_t val) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #endif -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 6723c56ec378..304b04ffea2f 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -41,7 +41,7 @@ # define get_fs() (current_thread_info()->addr_limit) # define set_fs(val) (current_thread_info()->addr_limit = (val)) -# define segment_eq(a, b) ((a).seg == (b).seg) +# define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #ifndef CONFIG_MMU diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 62b298c50905..61fc01f177a6 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -72,7 +72,7 @@ extern u64 __ua_limit; #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* * eva_kernel_access() - determine whether kernel memory access on an EVA system diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 3a9219f53ee0..010ba5f1d7dd 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -44,7 +44,7 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index e83f831a76f9..a741abbed6fb 100644 --- a/arch/nios2/include/asm/uaccess.h 
+++ b/arch/nios2/include/asm/uaccess.h @@ -30,7 +30,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define __access_ok(addr, len) \ (((signed long)(((long)get_fs().seg) & \ diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 17c24f14615f..48b691530d3e 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -43,7 +43,7 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a, b) ((a) == (b)) +#define uaccess_kernel() (get_fs() == KERNEL_DS) /* Ensure that the range from addr to addr+size is all within the process' * address space diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index ebbb9ffe038c..ed2cd4fb479b 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -14,7 +14,7 @@ #define KERNEL_DS ((mm_segment_t){0}) #define USER_DS ((mm_segment_t){1}) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 64c04ab09112..00699903f1ef 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -38,8 +38,7 @@ static inline void set_fs(mm_segment_t fs) set_thread_flag(TIF_FSCHECK); } -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (get_fs().seg) #ifdef __powerpc64__ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 22de922d6ecb..f56c66b3f5fe 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -64,11 +64,9 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (get_fs().seg) - /** * access_ok: - Checks if a user space pointer is valid * @addr: User space pointer to start of block to check diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 324438889fe1..f09444d6aeab 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -32,7 +32,7 @@ #define USER_DS_SACF (3) #define get_fs() (current->thread.mm_segment) -#define segment_eq(a,b) (((a) & 2) == ((b) & 2)) +#define uaccess_kernel() ((get_fs() & 2) == KERNEL_DS) void set_fs(mm_segment_t fs); diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 33d1d28057cb..02e54a3335d6 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h @@ -24,8 +24,7 @@ typedef struct { #define USER_DS KERNEL_DS #endif -#define segment_eq(a, b) ((a).seg == (b).seg) - +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index d6d8413eca83..0a2d3ebc4bb8 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -28,7 +28,7 @@ #define 
get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test * can be fairly lightweight. diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index bf9d330073b2..698cf69f74e9 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -32,7 +32,7 @@ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define set_fs(val) \ do { \ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 2f3e8f2a958f..ecefaffd15d4 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -33,7 +33,7 @@ static inline void set_fs(mm_segment_t fs) set_thread_flag(TIF_FSCHECK); } -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define user_addr_max() (current->thread.addr_limit.seg) /* diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index e57f0d0a88d8..b9758119feca 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -35,7 +35,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) -#define segment_eq(a, b) ((a).seg == (b).seg) +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) \ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index e935318804f8..ba68ee4dabfa 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -86,8 +86,8 @@ static inline void set_fs(mm_segment_t fs) } #endif -#ifndef segment_eq -#define segment_eq(a, b) ((a).seg == (b).seg) +#ifndef uaccess_kernel +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) #endif #define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 0a76ddc07d59..5c62d0c6f15b 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -6,8 +6,6 @@ #include #include -#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) - #include /* -- cgit v1.2.3 From 3d13f313ce4c34c524ccc37986fe77172f601ff3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Aug 2020 18:33:47 -0700 Subject: uaccess: add force_uaccess_{begin,end} helpers Add helpers to wrap the get_fs/set_fs magic for undoing any damage done by set_fs(KERNEL_DS). There is no real functional benefit, but this documents the intent of these calls better, and will allow stubbing the functions out easily for kernel builds that do not allow address space overrides in the future.
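The intended use is a simple bracketed pattern around code that must access real user memory; a minimal sketch:

	mm_segment_t old_fs;

	old_fs = force_uaccess_begin();	/* set_fs(USER_DS), returns old limit */
	/* ... user memory accesses ... */
	force_uaccess_end(old_fs);	/* restore the previous address limit */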
[hch@lst.de: drop two incorrect hunks, fix a commit log typo] Link: http://lkml.kernel.org/r/20200714105505.935079-6-hch@lst.de Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Acked-by: Linus Torvalds Acked-by: Mark Rutland Acked-by: Greentime Hu Acked-by: Geert Uytterhoeven Cc: Nick Hu Cc: Vincent Chen Cc: Paul Walmsley Cc: Palmer Dabbelt Link: http://lkml.kernel.org/r/20200710135706.537715-6-hch@lst.de Signed-off-by: Linus Torvalds --- arch/arm64/kernel/sdei.c | 2 +- arch/m68k/include/asm/tlbflush.h | 6 +++--- arch/mips/kernel/unaligned.c | 27 +++++++++++++-------------- arch/nds32/mm/alignment.c | 7 +++---- arch/sh/kernel/traps_32.c | 12 +++++------- drivers/firmware/arm_sdei.c | 5 ++--- include/linux/uaccess.h | 18 ++++++++++++++++++ kernel/events/callchain.c | 5 ++--- kernel/events/core.c | 5 ++--- kernel/kthread.c | 5 ++--- kernel/stacktrace.c | 5 ++--- mm/maccess.c | 22 ++++++++++------------ 12 files changed, 63 insertions(+), 56 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index dab88260b137..7689f2031c0c 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -180,7 +180,7 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, /* * We didn't take an exception to get here, set PAN. UAO will be cleared - * by sdei_event_handler()s set_fs(USER_DS) call. + * by sdei_event_handler()s force_uaccess_begin() call. */ __uaccess_enable_hw_pan(); diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h index 191e75a6bb24..5337bc2c262f 100644 --- a/arch/m68k/include/asm/tlbflush.h +++ b/arch/m68k/include/asm/tlbflush.h @@ -85,10 +85,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { if (vma->vm_mm == current->active_mm) { - mm_segment_t old_fs = get_fs(); - set_fs(USER_DS); + mm_segment_t old_fs = force_uaccess_begin(); + __flush_tlb_one(addr); - set_fs(old_fs); + force_uaccess_end(old_fs); } } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 0adce604fa44..126a5f3f4e4c 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -191,17 +191,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, * memory, so we need to "switch" the address limit to * user space, so that address check can work properly. 
*/ - seg = get_fs(); - set_fs(USER_DS); + seg = force_uaccess_begin(); switch (insn.spec3_format.func) { case lhe_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadHWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -209,12 +208,12 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lwe_op: if (!access_ok(addr, 4)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -222,12 +221,12 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lhue_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } LoadHWUE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } compute_return_epc(regs); @@ -235,35 +234,35 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case she_op: if (!access_ok(addr, 2)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreHWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } break; case swe_op: if (!access_ok(addr, 4)) { - set_fs(seg); + force_uaccess_end(seg); goto sigbus; } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreWE(addr, value, res); if (res) { - set_fs(seg); + force_uaccess_end(seg); goto fault; } break; default: - set_fs(seg); + force_uaccess_end(seg); goto sigill; } - set_fs(seg); + force_uaccess_end(seg); } #endif break; diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c index c8b9061a2ee3..1eb7ded6992b 100644 --- a/arch/nds32/mm/alignment.c +++ b/arch/nds32/mm/alignment.c @@ -512,7 +512,7 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs) { unsigned long inst; int ret = -EFAULT; - mm_segment_t seg = get_fs(); + mm_segment_t seg; inst = get_inst(regs->ipc); @@ -520,13 +520,12 @@ int do_unaligned_access(unsigned long addr, struct pt_regs *regs) "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr, regs->ipc, inst); - set_fs(USER_DS); - + seg = force_uaccess_begin(); if (inst & NDS32_16BIT_INSTRUCTION) ret = do_16((inst >> 16) & 0xffff, regs); else ret = do_32(inst, regs); - set_fs(seg); + force_uaccess_end(seg); return ret; } diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 058c6181bb30..b62ad0ba2395 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -482,8 +482,6 @@ asmlinkage void do_address_error(struct pt_regs *regs, error_code = lookup_exception_vector(); #endif - oldfs = get_fs(); - if (user_mode(regs)) { int si_code = BUS_ADRERR; unsigned int user_action; @@ -491,13 +489,13 @@ asmlinkage void do_address_error(struct pt_regs *regs, local_irq_enable(); inc_unaligned_user_access(); - set_fs(USER_DS); + oldfs = force_uaccess_begin(); if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1), sizeof(instruction))) { - set_fs(oldfs); + force_uaccess_end(oldfs); goto uspace_segv; } - set_fs(oldfs); + force_uaccess_end(oldfs); /* shout about userspace fixups */ unaligned_fixups_notify(current, instruction, regs); @@ -520,11 +518,11 @@ fixup: goto uspace_segv; } - set_fs(USER_DS); + oldfs = force_uaccess_begin(); tmp = handle_unaligned_access(instruction, regs, &user_mem_access, 0, address); - set_fs(oldfs); + force_uaccess_end(oldfs); if (tmp == 0) return; /* sorted */ diff --git 
a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index e7e36aab2386..b4b9ce97f415 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1136,15 +1136,14 @@ int sdei_event_handler(struct pt_regs *regs, * access kernel memory. * Do the same here because this doesn't come via the same entry code. */ - orig_addr_limit = get_fs(); - set_fs(USER_DS); + orig_addr_limit = force_uaccess_begin(); err = arg->callback(event_num, regs, arg->callback_arg); if (err) pr_err_ratelimited("event %u on CPU %u failed with error: %d\n", event_num, smp_processor_id(), err); - set_fs(orig_addr_limit); + force_uaccess_end(orig_addr_limit); return err; } diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 5c62d0c6f15b..94b285411659 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -8,6 +8,24 @@ #include +/* + * Force the uaccess routines to be wired up for actual userspace access, + * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone + * using force_uaccess_end below. + */ +static inline mm_segment_t force_uaccess_begin(void) +{ + mm_segment_t fs = get_fs(); + + set_fs(USER_DS); + return fs; +} + +static inline void force_uaccess_end(mm_segment_t oldfs) +{ + set_fs(oldfs); +} + /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index c6ce894e4ce9..58cbe357fb2b 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -217,10 +217,9 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, if (add_mark) perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); - fs = get_fs(); - set_fs(USER_DS); + fs = force_uaccess_begin(); perf_callchain_user(&ctx, regs); - set_fs(fs); + force_uaccess_end(fs); } } diff --git a/kernel/events/core.c b/kernel/events/core.c index d1f0a7e5b182..6961333ebad5 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6453,10 +6453,9 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, /* Data. 
*/ sp = perf_user_stack_pointer(regs); - fs = get_fs(); - set_fs(USER_DS); + fs = force_uaccess_begin(); rem = __output_copy_user(handle, (void *) sp, dump_size); - set_fs(fs); + force_uaccess_end(fs); dyn_size = dump_size - rem; perf_output_skip(handle, rem); diff --git a/kernel/kthread.c b/kernel/kthread.c index b2807e7be772..3edaa380dc7b 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -1258,8 +1258,7 @@ void kthread_use_mm(struct mm_struct *mm) if (active_mm != mm) mmdrop(active_mm); - to_kthread(tsk)->oldfs = get_fs(); - set_fs(USER_DS); + to_kthread(tsk)->oldfs = force_uaccess_begin(); } EXPORT_SYMBOL_GPL(kthread_use_mm); @@ -1274,7 +1273,7 @@ void kthread_unuse_mm(struct mm_struct *mm) WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); WARN_ON_ONCE(!tsk->mm); - set_fs(to_kthread(tsk)->oldfs); + force_uaccess_end(to_kthread(tsk)->oldfs); task_lock(tsk); sync_mm_rss(mm); diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index 2af66e449aa6..946f44a9e86a 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c @@ -233,10 +233,9 @@ unsigned int stack_trace_save_user(unsigned long *store, unsigned int size) if (current->flags & PF_KTHREAD) return 0; - fs = get_fs(); - set_fs(USER_DS); + fs = force_uaccess_begin(); arch_stack_walk_user(consume_entry, &c, task_pt_regs(current)); - set_fs(fs); + force_uaccess_end(fs); return c.len; } diff --git a/mm/maccess.c b/mm/maccess.c index f98ff91e32c6..3bd70405f2d8 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -205,15 +205,14 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) long copy_from_user_nofault(void *dst, const void __user *src, size_t size) { long ret = -EFAULT; - mm_segment_t old_fs = get_fs(); + mm_segment_t old_fs = force_uaccess_begin(); - set_fs(USER_DS); if (access_ok(src, size)) { pagefault_disable(); ret = __copy_from_user_inatomic(dst, src, size); pagefault_enable(); } - set_fs(old_fs); + force_uaccess_end(old_fs); if (ret) return -EFAULT; @@ -233,15 +232,14 @@ EXPORT_SYMBOL_GPL(copy_from_user_nofault); long copy_to_user_nofault(void __user *dst, const void *src, size_t size) { long ret = -EFAULT; - mm_segment_t old_fs = get_fs(); + mm_segment_t old_fs = force_uaccess_begin(); - set_fs(USER_DS); if (access_ok(dst, size)) { pagefault_disable(); ret = __copy_to_user_inatomic(dst, src, size); pagefault_enable(); } - set_fs(old_fs); + force_uaccess_end(old_fs); if (ret) return -EFAULT; @@ -270,17 +268,17 @@ EXPORT_SYMBOL_GPL(copy_to_user_nofault); long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count) { - mm_segment_t old_fs = get_fs(); + mm_segment_t old_fs; long ret; if (unlikely(count <= 0)) return 0; - set_fs(USER_DS); + old_fs = force_uaccess_begin(); pagefault_disable(); ret = strncpy_from_user(dst, unsafe_addr, count); pagefault_enable(); - set_fs(old_fs); + force_uaccess_end(old_fs); if (ret >= count) { ret = count; @@ -310,14 +308,14 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, */ long strnlen_user_nofault(const void __user *unsafe_addr, long count) { - mm_segment_t old_fs = get_fs(); + mm_segment_t old_fs; int ret; - set_fs(USER_DS); + old_fs = force_uaccess_begin(); pagefault_disable(); ret = strnlen_user(unsafe_addr, count); pagefault_enable(); - set_fs(old_fs); + force_uaccess_end(old_fs); return ret; } -- cgit v1.2.3 From c5f748e2f2ad970078de11bd6b12bcd81147c636 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:33:57 -0700 Subject: include/linux/compiler-clang.h: drop duplicated word in a comment Drop 
the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Nathan Chancellor Link: http://lkml.kernel.org/r/6a18c301-3505-742f-4dd7-0f38d0e537b9@infradead.org Signed-off-by: Linus Torvalds --- include/linux/compiler-clang.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 8a072d00e688..cee0c728d39a 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -40,7 +40,7 @@ #endif /* - * Not all versions of clang implement the the type-generic versions + * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements * __has_builtin allowing us to avoid awkward version * checks. Unfortunately, we don't know which version of gcc clang -- cgit v1.2.3 From cd1a406fa46f81b1a859ef874b8413a6fa6ac7e8 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:34:00 -0700 Subject: include/linux/exportfs.h: drop duplicated word in a comment Drop the doubled word "a" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Alexander Viro Link: http://lkml.kernel.org/r/c61b707a-8fd8-5b1b-aab0-679122881543@infradead.org Signed-off-by: Linus Torvalds --- include/linux/exportfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index d896b8657085..3ceb72b67a7a 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -178,7 +178,7 @@ struct fid { * get_name: * @get_name should find a name for the given @child in the given @parent * directory. The name should be stored in the @name (with the - * understanding that it is already pointing to a a %NAME_MAX+1 sized + * understanding that it is already pointing to a %NAME_MAX+1 sized * buffer. get_name() should return %0 on success, a negative error code * or error. @get_name will be called without @parent->i_mutex held. * -- cgit v1.2.3 From 121ae8da9cd49f7139bf80f26352337458e63fe1 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:34:04 -0700 Subject: include/linux/async_tx.h: drop duplicated word in a comment Drop the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Dan Williams Link: http://lkml.kernel.org/r/e85802f7-8f48-8b4c-29b3-ea237a2c7ae9@infradead.org Signed-off-by: Linus Torvalds --- include/linux/async_tx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 75e582b8d2d9..4c328fef403c 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -36,7 +36,7 @@ struct dma_chan_ref { /** * async_tx_flags - modifiers for the async_* calls * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the - * the destination address is not a source. The asynchronous case handles this + * destination address is not a source. The asynchronous case handles this * implicitly, the synchronous case needs to zero the destination block. * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is * also one of the source addresses. 
In the synchronous case the destination -- cgit v1.2.3 From f48ff83e9c1a8fc2e8bfedb4855933eb1ed159b2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 11 Aug 2020 18:34:07 -0700 Subject: include/linux/xz.h: drop duplicated word Drop the doubled word "than" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Cc: Lasse Collin Link: http://lkml.kernel.org/r/05ebba7a-c1e4-01ae-fc7b-15c081b33f3e@infradead.org Signed-off-by: Linus Torvalds --- include/linux/xz.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/xz.h b/include/linux/xz.h index 64cffa6ddfce..24ad7d875977 100644 --- a/include/linux/xz.h +++ b/include/linux/xz.h @@ -28,7 +28,7 @@ * enum xz_mode - Operation mode * * @XZ_SINGLE: Single-call mode. This uses less RAM than - * than multi-call modes, because the LZMA2 + * multi-call modes, because the LZMA2 * dictionary doesn't need to be allocated as * part of the decoder state. All required data * structures are allocated at initialization, -- cgit v1.2.3 From 8043fc147a97ec2eefc582487f344f2cbe86d12e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Aug 2020 18:34:10 -0700 Subject: kernel: add a kernel_wait helper Add a helper that waits for a pid and stores the status in the passed in kernel pointer. Use it to fix the usage of kernel_wait4 in call_usermodehelper_exec_sync that only happens to work due to the implicit set_fs(KERNEL_DS) for kernel threads. Signed-off-by: Christoph Hellwig Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Acked-by: "Eric W. Biederman" Cc: Luis Chamberlain Link: http://lkml.kernel.org/r/20200721130449.5008-1-hch@lst.de Signed-off-by: Linus Torvalds --- include/linux/sched/task.h | 1 + kernel/exit.c | 16 ++++++++++++++++ kernel/umh.c | 29 ++++------------------------- 3 files changed, 21 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index ae3060f0b0c9..a98965007eef 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -88,6 +88,7 @@ struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); +int kernel_wait(pid_t pid, int *stat); extern void free_task(struct task_struct *tsk); diff --git a/kernel/exit.c b/kernel/exit.c index c2d2961576f2..733e80f334e7 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1626,6 +1626,22 @@ long kernel_wait4(pid_t upid, int __user *stat_addr, int options, return ret; } +int kernel_wait(pid_t pid, int *stat) +{ + struct wait_opts wo = { + .wo_type = PIDTYPE_PID, + .wo_pid = find_get_pid(pid), + .wo_flags = WEXITED, + }; + int ret; + + ret = do_wait(&wo); + if (ret > 0 && wo.wo_stat) + *stat = wo.wo_stat; + put_pid(wo.wo_pid); + return ret; +} + SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, int, options, struct rusage __user *, ru) { diff --git a/kernel/umh.c b/kernel/umh.c index a25433f9cd9a..fcf3ee803630 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -119,37 +119,16 @@ static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info) { pid_t pid; - /* If SIGCLD is ignored kernel_wait4 won't populate the status. */ + /* If SIGCLD is ignored do_wait won't populate the status. 
*/ kernel_sigaction(SIGCHLD, SIG_DFL); pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD); - if (pid < 0) { + if (pid < 0) sub_info->retval = pid; - } else { - int ret = -ECHILD; - /* - * Normally it is bogus to call wait4() from in-kernel because - * wait4() wants to write the exit code to a userspace address. - * But call_usermodehelper_exec_sync() always runs as kernel - * thread (workqueue) and put_user() to a kernel address works - * OK for kernel threads, due to their having an mm_segment_t - * which spans the entire address space. - * - * Thus the __user pointer cast is valid here. - */ - kernel_wait4(pid, (int __user *)&ret, 0, NULL); - - /* - * If ret is 0, either call_usermodehelper_exec_async failed and - * the real error code is already in sub_info->retval or - * sub_info->retval is 0 anyway, so don't mess with it then. - */ - if (ret) - sub_info->retval = ret; - } + else + kernel_wait(pid, &sub_info->retval); /* Restore default kernel sig handler */ kernel_sigaction(SIGCHLD, SIG_IGN); - umh_complete(sub_info); } -- cgit v1.2.3 From 376653435dacf84a8aca87e66aff94079a817cf2 Mon Sep 17 00:00:00 2001 From: Arvind Sankar Date: Tue, 11 Aug 2020 18:34:16 -0700 Subject: kernel.h: remove duplicate include of asm/div64.h This seems to have been added inadvertently in commit 72deb455b5ec ("block: remove CONFIG_LBDAF") Fixes: 72deb455b5ec ("block: remove CONFIG_LBDAF") Signed-off-by: Arvind Sankar Signed-off-by: Andrew Morton Reviewed-by: Christoph Hellwig Link: http://lkml.kernel.org/r/20200727034852.2813453-1-nivedita@alum.mit.edu Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 7339a00c895e..e19c13616666 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -17,7 +17,6 @@ #include #include #include -#include #define STACK_MAGIC 0xdeadbeef -- cgit v1.2.3 From 7f317d34906c1033f0752fc137dda04e43979bb8 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Tue, 11 Aug 2020 18:34:19 -0700 Subject: include/: replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Andrew Morton Reviewed-by: Kees Cook Link: http://lkml.kernel.org/r/20200726110117.16346-1-grandmaster@al2klimov.de Signed-off-by: Linus Torvalds --- include/clocksource/timer-ti-dm.h | 2 +- include/linux/btree.h | 2 +- include/linux/delay.h | 2 +- include/linux/dma/k3-psil.h | 2 +- include/linux/dma/k3-udma-glue.h | 2 +- include/linux/dma/ti-cppi5.h | 2 +- include/linux/irqchip/irq-omap-intc.h | 2 +- include/linux/jhash.h | 2 +- include/linux/leds-ti-lmu-common.h | 2 +- include/linux/platform_data/davinci-cpufreq.h | 2 +- include/linux/platform_data/davinci_asp.h | 2 +- include/linux/platform_data/elm.h | 2 +- include/linux/platform_data/gpio-davinci.h | 2 +- include/linux/platform_data/gpmc-omap.h | 2 +- include/linux/platform_data/mtd-davinci-aemif.h | 2 +- include/linux/platform_data/omap-twl4030.h | 2 +- include/linux/platform_data/uio_pruss.h | 2 +- include/linux/platform_data/usb-omap.h | 2 +- include/linux/soc/ti/k3-ringacc.h | 2 +- include/linux/soc/ti/knav_qmss.h | 2 +- include/linux/soc/ti/ti-msgmgr.h | 2 +- include/linux/wkup_m3_ipc.h | 2 +- include/linux/xxhash.h | 2 +- include/linux/xz.h | 2 +- include/linux/zlib.h | 2 +- include/soc/arc/aux.h | 2 +- include/uapi/linux/elf.h | 2 +- include/uapi/linux/map_to_7segment.h | 2 +- include/uapi/linux/types.h | 2 +- include/uapi/linux/usb/ch9.h | 2 +- 30 files changed, 30 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h index 531ca87fcd08..4c61dade8835 100644 --- a/include/clocksource/timer-ti-dm.h +++ b/include/clocksource/timer-ti-dm.h @@ -1,7 +1,7 @@ /* * OMAP Dual-Mode Timers * - * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/ * Tarun Kanti DebBarma * Thara Gopinath * diff --git a/include/linux/btree.h b/include/linux/btree.h index 68f858c831b1..243ee544397a 100644 --- a/include/linux/btree.h +++ b/include/linux/btree.h @@ -10,7 +10,7 @@ * * A B+Tree is a data structure for looking up arbitrary (currently allowing * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure - * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not + * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not * use binary search to find the key on lookups. * * Each B+Tree consists of a head, that contains bookkeeping information and diff --git a/include/linux/delay.h b/include/linux/delay.h index 5e016a4029d9..1d0e2ce6b6d9 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -16,7 +16,7 @@ * 3. CPU clock rate changes. 
* * Please see this thread: - * http://lists.openwall.net/linux-kernel/2011/01/09/56 + * https://lists.openwall.net/linux-kernel/2011/01/09/56 */ #include diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h index 61d5cc0ad601..1962f75fa2d3 100644 --- a/include/linux/dma/k3-psil.h +++ b/include/linux/dma/k3-psil.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef K3_PSIL_H_ diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h index caadbab1632a..5eb34ad973a7 100644 --- a/include/linux/dma/k3-udma-glue.h +++ b/include/linux/dma/k3-udma-glue.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef K3_UDMA_GLUE_H_ diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h index 579356ae447e..5896441ee604 100644 --- a/include/linux/dma/ti-cppi5.h +++ b/include/linux/dma/ti-cppi5.h @@ -2,7 +2,7 @@ /* * CPPI5 descriptors interface * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef __TI_CPPI5_H__ diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h index 216e5adf80ce..dca379c0d7eb 100644 --- a/include/linux/irqchip/irq-omap-intc.h +++ b/include/linux/irqchip/irq-omap-intc.h @@ -2,7 +2,7 @@ /** * irq-omap-intc.h - INTC Idle Functions * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * * Author: Felipe Balbi */ diff --git a/include/linux/jhash.h b/include/linux/jhash.h index ba2f6a9776b6..19ddd43aee68 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -5,7 +5,7 @@ * * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net) * - * http://burtleburtle.net/bob/hash/ + * https://burtleburtle.net/bob/hash/ * * These are the credits from Bob's sources: * diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h index 5eb111f38803..420b61e5a213 100644 --- a/include/linux/leds-ti-lmu-common.h +++ b/include/linux/leds-ti-lmu-common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ // TI LMU Common Core -// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/ #ifndef _TI_LMU_COMMON_H_ #define _TI_LMU_COMMON_H_ diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h index 3fbf9f2793b5..bc208c64e3d7 100644 --- a/include/linux/platform_data/davinci-cpufreq.h +++ b/include/linux/platform_data/davinci-cpufreq.h @@ -2,7 +2,7 @@ /* * TI DaVinci CPUFreq platform support. * - * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/ + * Copyright (C) 2009 Texas Instruments, Inc. 
https://www.ti.com/ */ #ifndef _MACH_DAVINCI_CPUFREQ_H diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 7fe80f1c7e08..5d1fb0d78a22 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -1,7 +1,7 @@ /* * TI DaVinci Audio Serial Port support * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h index 0f491d8abfdd..3cc78f0447b1 100644 --- a/include/linux/platform_data/elm.h +++ b/include/linux/platform_data/elm.h @@ -2,7 +2,7 @@ /* * BCH Error Location Module * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __ELM_H diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index a93841bfb9f7..e182a46e609f 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -1,7 +1,7 @@ /* * DaVinci GPIO Platform Related Defines * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h index ef663e570552..c9cc4e32435d 100644 --- a/include/linux/platform_data/gpmc-omap.h +++ b/include/linux/platform_data/gpmc-omap.h @@ -2,7 +2,7 @@ /* * OMAP GPMC Platform data * - * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com + * Copyright (C) 2014 Texas Instruments, Inc. - https://www.ti.com * Roger Quadros */ diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h index a403dd51dacc..a49826214a39 100644 --- a/include/linux/platform_data/mtd-davinci-aemif.h +++ b/include/linux/platform_data/mtd-davinci-aemif.h @@ -1,7 +1,7 @@ /* * TI DaVinci AEMIF support * - * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/ + * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/ * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h index 8419c8caf54e..0dd851ea1c72 100644 --- a/include/linux/platform_data/omap-twl4030.h +++ b/include/linux/platform_data/omap-twl4030.h @@ -3,7 +3,7 @@ * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030 * codec, header. * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * All rights reserved. 
* * Author: Peter Ujfalusi diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h index 3d47d219827f..31f2e22661bc 100644 --- a/include/linux/platform_data/uio_pruss.h +++ b/include/linux/platform_data/uio_pruss.h @@ -3,7 +3,7 @@ * * Platform data for uio_pruss driver * - * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h index fa579b4c666b..5e70d667031c 100644 --- a/include/linux/platform_data/usb-omap.h +++ b/include/linux/platform_data/usb-omap.h @@ -1,7 +1,7 @@ /* * usb-omap.h - Platform data for the various OMAP USB IPs * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h index 7ac115432fa1..5a472eca5ee4 100644 --- a/include/linux/soc/ti/k3-ringacc.h +++ b/include/linux/soc/ti/k3-ringacc.h @@ -2,7 +2,7 @@ /* * K3 Ring Accelerator (RA) subsystem interface * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef __SOC_TI_K3_RINGACC_API_H_ diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h index 9745df6ed9d3..c75ef99c99ca 100644 --- a/include/linux/soc/ti/knav_qmss.h +++ b/include/linux/soc/ti/knav_qmss.h @@ -1,7 +1,7 @@ /* * Keystone Navigator Queue Management Sub-System header * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * Author: Sandeep Nair * Cyril Chemparathy * Santosh Shilimkar diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h index eac8e0c6fe11..1f6e76d423cf 100644 --- a/include/linux/soc/ti/ti-msgmgr.h +++ b/include/linux/soc/ti/ti-msgmgr.h @@ -1,7 +1,7 @@ /* * Texas Instruments' Message Manager * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h index e497e621dbb7..3f496967b538 100644 --- a/include/linux/wkup_m3_ipc.h +++ b/include/linux/wkup_m3_ipc.h @@ -1,7 +1,7 @@ /* * TI Wakeup M3 for AMx3 SoCs Power Management Routines * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Dave Gerlach * * This program is free software; you can redistribute it and/or diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h index 52b073fea17f..df42511438d0 100644 --- a/include/linux/xxhash.h +++ b/include/linux/xxhash.h @@ -34,7 +34,7 @@ * ("BSD"). 
* * You can contact the author at: - * - xxHash homepage: http://cyan4973.github.io/xxHash/ + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository: https://github.com/Cyan4973/xxHash */ diff --git a/include/linux/xz.h b/include/linux/xz.h index 24ad7d875977..9884c8440188 100644 --- a/include/linux/xz.h +++ b/include/linux/xz.h @@ -2,7 +2,7 @@ * XZ decompressor * * Authors: Lasse Collin - * Igor Pavlov + * Igor Pavlov * * This file has been put into the public domain. * You can do whatever you want with this file. diff --git a/include/linux/zlib.h b/include/linux/zlib.h index c757d848a758..78ede944c082 100644 --- a/include/linux/zlib.h +++ b/include/linux/zlib.h @@ -23,7 +23,7 @@ The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt + Comments) 1950 to 1952 in the files https://www.ietf.org/rfc/rfc1950.txt (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). */ diff --git a/include/soc/arc/aux.h b/include/soc/arc/aux.h index e223c4ffa153..9c2eff6140b6 100644 --- a/include/soc/arc/aux.h +++ b/include/soc/arc/aux.h @@ -22,7 +22,7 @@ static inline int read_aux_reg(u32 r) /* * function helps elide unused variable warning - * see: http://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html + * see: https://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html */ static inline void write_aux_reg(u32 r, u32 v) { diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index c6dd0215482e..22220945a5fd 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -53,7 +53,7 @@ typedef __s64 Elf64_Sxword; * * - Oracle: Linker and Libraries. * Part No: 817–1984–19, August 2011. - * http://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf + * https://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf * * - System V ABI AMD64 Architecture Processor Supplement * Draft Version 0.99.4, diff --git a/include/uapi/linux/map_to_7segment.h b/include/uapi/linux/map_to_7segment.h index f9ed18134b83..13a06e5e966e 100644 --- a/include/uapi/linux/map_to_7segment.h +++ b/include/uapi/linux/map_to_7segment.h @@ -24,7 +24,7 @@ * of (ASCII) characters to a 7-segments notation. * * The 7 segment's wikipedia notation below is used as standard. - * See: http://en.wikipedia.org/wiki/Seven_segment_display + * See: https://en.wikipedia.org/wiki/Seven_segment_display * * Notation: +-a-+ * f b diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index 2fce8b6876e9..f6d2f83cbe29 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -7,7 +7,7 @@ #ifndef __ASSEMBLY__ #ifndef __KERNEL__ #ifndef __EXPORTED_HEADERS__ -#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" +#warning "Attempt to use kernel headers from user space, see https://kernelnewbies.org/KernelHeaders" #endif /* __EXPORTED_HEADERS__ */ #endif diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 48766fdf6580..0f865ae4ba89 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -1229,7 +1229,7 @@ struct usb_set_sel_req { * As per USB compliance update, a device that is actively drawing * more than 100mA from USB must report itself as bus-powered in * the GetStatus(DEVICE) call. 
- * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 + * https://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 */ #define USB_SELF_POWER_VBUS_MAX_DRAW 100 -- cgit v1.2.3 From 9e58c5e2fcd8d8d8820ea277e0f577f563eed1f1 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 11 Aug 2020 18:34:23 -0700 Subject: include/linux/poison.h: remove obsolete comment When the definition was changed, the comment became stale. Just remove it since there isn't anything useful to say here. Fixes: b8a0255db958 ("include/linux/poison.h: use POISON_POINTER_DELTA for poison pointers") Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Cc: Vasily Kulikov Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20200730174108.GJ23808@casper.infradead.org Signed-off-by: Linus Torvalds --- include/linux/poison.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/poison.h b/include/linux/poison.h index df34330b4e34..dc8ae5d8db03 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -24,10 +24,6 @@ #define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) /********** include/linux/timer.h **********/ -/* - * Magic number "tsta" to indicate a static timer initializer - * for the object debugging code. - */ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/page_poison.c **********/ -- cgit v1.2.3 From 25fd529c34d063d1bef23742f2e8f8341c639dc3 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 11 Aug 2020 18:34:26 -0700 Subject: sparse: group the defines by functionality By popular demand, reorder the defines for sparse annotations and group them by functionality. Signed-off-by: Luc Van Oostenryck Signed-off-by: Andrew Morton Acked-by: Miguel Ojeda Cc: Geert Uytterhoeven Link: lore.kernel.org/r/CAMuHMdWQsirja-h3wBcZezk+H2Q_HShhAks8Hc8ps5fTAp=ObQ@mail.gmail.com Link: http://lkml.kernel.org/r/20200621143652.53798-1-luc.vanoostenryck@gmail.com Signed-off-by: Linus Torvalds --- include/linux/compiler_types.h | 44 ++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 2e231ba8fe3f..4b33cb385f96 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -5,48 +5,54 @@ #ifndef __ASSEMBLY__ #ifdef __CHECKER__ +/* address spaces */ # define __kernel __attribute__((address_space(0))) # define __user __attribute__((noderef, address_space(__user))) -# define __safe __attribute__((safe)) -# define __force __attribute__((force)) -# define __nocast __attribute__((nocast)) # define __iomem __attribute__((noderef, address_space(__iomem))) +# define __percpu __attribute__((noderef, address_space(__percpu))) +# define __rcu __attribute__((noderef, address_space(__rcu))) +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +/* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(__percpu))) -# define __rcu __attribute__((noderef, address_space(__rcu))) +/* other */ +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __safe __attribute__((safe)) # define __private __attribute__((noderef)) -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) #else /* __CHECKER__ */ +/* address spaces */ +# define __kernel # ifdef STRUCTLEAK_PLUGIN -# define __user __attribute__((user)) +# define __user __attribute__((user)) # else # define __user # endif -# define __kernel -# define __safe -# define __force -# define __nocast # define __iomem -# define __chk_user_ptr(x) (void)0 -# define __chk_io_ptr(x) (void)0 -# define __builtin_warning(x, y...) (1) +# define __percpu +# define __rcu +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +/* context/locking */ # define __must_hold(x) # define __acquires(x) # define __releases(x) -# define __acquire(x) (void)0 -# define __release(x) (void)0 +# define __acquire(x) (void)0 +# define __release(x) (void)0 # define __cond_lock(x,c) (c) -# define __percpu -# define __rcu +/* other */ +# define __force +# define __nocast +# define __safe # define __private # define ACCESS_PRIVATE(p, member) ((p)->member) +# define __builtin_warning(x, y...) (1) #endif /* __CHECKER__ */ /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ -- cgit v1.2.3 From 0a650e472d2039a5f768acd82f2a7068bf338dfd Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 11 Aug 2020 18:34:35 -0700 Subject: lib/generic-radix-tree.c: remove unneeded __rcu struct __genradix is defined as having its member 'root' annotated as __rcu. But in the corresponding API RCU is not used. Sparse reports this type mismatch as: lib/generic-radix-tree.c:56:35: warning: incorrect type in initializer (different address spaces) lib/generic-radix-tree.c:56:35: expected struct genradix_root *r lib/generic-radix-tree.c:56:35: got struct genradix_root [noderef] *__val along with 6 other similar warnings. So, correct root's type by removing this unneeded __rcu. Signed-off-by: Luc Van Oostenryck Signed-off-by: Andrew Morton Cc: Kent Overstreet Link: http://lkml.kernel.org/r/20200621161745.55396-1-luc.vanoostenryck@gmail.com Signed-off-by: Linus Torvalds --- include/linux/generic-radix-tree.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h index 02393c0c98f9..bfd00320c7f3 100644 --- a/include/linux/generic-radix-tree.h +++ b/include/linux/generic-radix-tree.h @@ -44,7 +44,7 @@ struct genradix_root; struct __genradix { - struct genradix_root __rcu *root; + struct genradix_root *root; }; /* -- cgit v1.2.3 From b642e44e8ab335868b549fe5753b783ca47bf3a3 Mon Sep 17 00:00:00 2001 From: Kars Mulder Date: Tue, 11 Aug 2020 18:34:53 -0700 Subject: kstrto*: correct documentation references to simple_strto*() The documentation of the kstrto*() functions references the simple_strtoull() function with the phrase "used as a replacement for [the obsolete] simple_strtoull". All these functions describe themselves as replacements for simple_strtoull(), even though a function like kstrtol() would be more aptly described as a replacement for simple_strtol().
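As an aside, the checked-return idiom this documentation insists on looks like the following minimal sketch; parse_threshold() is purely illustrative and not part of any patch here:

	/* Hypothetical caller: parse an unsigned decimal value with kstrtoul(),
	 * always checking the return code as the documentation requires. */
	static int parse_threshold(const char *buf, unsigned long *out)
	{
		int ret;

		ret = kstrtoul(buf, 10, out);	/* base 10; base 0 would auto-detect */
		if (ret)
			return ret;	/* -ERANGE on overflow, -EINVAL on bad input */

		return 0;
	}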
Fix these references by making the documentation of kstrto*() reference the closest simple_strto*() equivalent available. The functions kstrto[u]int() do not have direct simple_strto[u]int() equivalences, so these are made to refer to simple_strto[u]l() instead. Furthermore, add parentheses after function names, as is standard in kernel documentation. Fixes: 4c925d6031f71 ("kstrto*: add documentation") Signed-off-by: Kars Mulder Signed-off-by: Andrew Morton Reviewed-by: Andy Shevchenko Cc: Eldad Zack Cc: Miguel Ojeda Cc: Geert Uytterhoeven Cc: Mans Rullgard Cc: Petr Mladek Link: http://lkml.kernel.org/r/1ee1-5f234c00-f3-165a6440@234394593 Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 4 ++-- lib/kstrtox.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e19c13616666..47b1dad63ffc 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -346,7 +346,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoull. Return code must be checked. + * Used as a replacement for the simple_strtoul(). Return code must be checked. */ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) { @@ -374,7 +374,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoull. Return code must be checked. + * Used as a replacement for the simple_strtol(). Return code must be checked. */ static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) { diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 1006bf70bf74..252ac414ba9a 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -115,7 +115,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull. Return code must + * Used as a replacement for the obsolete simple_strtoull(). Return code must * be checked. */ int kstrtoull(const char *s, unsigned int base, unsigned long long *res) @@ -139,7 +139,7 @@ EXPORT_SYMBOL(kstrtoull); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull. Return code must + * Used as a replacement for the obsolete simple_strtoll(). Return code must * be checked. */ int kstrtoll(const char *s, unsigned int base, long long *res) @@ -211,7 +211,7 @@ EXPORT_SYMBOL(_kstrtol); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull. Return code must + * Used as a replacement for the obsolete simple_strtoul(). Return code must * be checked. */ int kstrtouint(const char *s, unsigned int base, unsigned int *res) @@ -242,7 +242,7 @@ EXPORT_SYMBOL(kstrtouint); * @res: Where to write the result of the conversion on success. 
* * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull. Return code must + * Used as a replacement for the obsolete simple_strtol(). Return code must * be checked. */ int kstrtoint(const char *s, unsigned int base, int *res) -- cgit v1.2.3 From ef0f2685336bbc334e8b6997ce9b155e5f7edd31 Mon Sep 17 00:00:00 2001 From: Kars Mulder Date: Tue, 11 Aug 2020 18:34:56 -0700 Subject: kstrto*: do not describe simple_strto*() as obsolete/replaced The documentation of the kstrto*() functions describes kstrto*() as "replacements" of the "obsolete" simple_strto*() functions. Both of these terms are inaccurate: they're not replacements because they have different behaviour, and the simple_strto*() are not obsolete because there are cases where they have benefits over kstrto*(). Remove usage of the terms "replacement" and "obsolete" in reference to simple_strto*(), and instead use the term "preferred over". Fixes: 4c925d6031f71 ("kstrto*: add documentation") Fixes: 885e68e8b7b13 ("kernel.h: update comment about simple_strto() functions") Signed-off-by: Kars Mulder Signed-off-by: Andrew Morton Reviewed-by: Andy Shevchenko Cc: Eldad Zack Cc: Miguel Ojeda Cc: Geert Uytterhoeven Cc: Mans Rullgard Cc: Petr Mladek Link: http://lkml.kernel.org/r/29b9-5f234c80-13-4e3aa200@244003027 Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 4 ++-- lib/kstrtox.c | 12 ++++-------- 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 47b1dad63ffc..92517ae53e7c 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -346,7 +346,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoul(). Return code must be checked. + * Preferred over simple_strtoul(). Return code must be checked. */ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) { @@ -374,7 +374,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtol(). Return code must be checked. + * Preferred over simple_strtol(). Return code must be checked. */ static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) { diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 252ac414ba9a..a14ccf905055 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -115,8 +115,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoull(). Return code must - * be checked. + * Preferred over simple_strtoull(). Return code must be checked. */ int kstrtoull(const char *s, unsigned int base, unsigned long long *res) { @@ -139,8 +138,7 @@ EXPORT_SYMBOL(kstrtoull); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoll(). Return code must - * be checked. 
+ * Preferred over simple_strtoll(). Return code must be checked. */ int kstrtoll(const char *s, unsigned int base, long long *res) { @@ -211,8 +209,7 @@ EXPORT_SYMBOL(_kstrtol); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtoul(). Return code must - * be checked. + * Preferred over simple_strtoul(). Return code must be checked. */ int kstrtouint(const char *s, unsigned int base, unsigned int *res) { @@ -242,8 +239,7 @@ EXPORT_SYMBOL(kstrtouint); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the obsolete simple_strtol(). Return code must - * be checked. + * Preferred over simple_strtol(). Return code must be checked. */ int kstrtoint(const char *s, unsigned int base, int *res) { -- cgit v1.2.3 From 0935288c6e008c0682ab6171fb5605dcc049e7bd Mon Sep 17 00:00:00 2001 From: Vijay Balakrishna Date: Tue, 11 Aug 2020 18:36:33 -0700 Subject: kdump: append kernel build-id string to VMCOREINFO Make the kernel's GNU build-id available in VMCOREINFO. Having the build-id in VMCOREINFO facilitates presenting the appropriate kernel namelist image, with its debug information file, to kernel crash dump analysis tools. Currently VMCOREINFO lacks a uniquely identifiable key for crash analysis automation. Regarding whether this patch is necessary, or whether matching linux_banner and OSRELEASE in VMCOREINFO, as employed by crash(8), already meets the need -- IMO, the build-id approach is more foolproof: in most instances it is a cryptographic hash generated from internal code/ELF bits, unlike the kernel version string that linux_banner is based on, which is external to the code. I feel each is intended for a different purpose. Also, OSRELEASE is not suitable when comparing two different kernel builds of the same version with different features enabled. Currently, for most linux (and non-linux) systems the build-id can be extracted using standard methods for file types such as user mode crash dumps, shared libraries, loadable kernel modules, etc. The linux kernel dump is an exception. Having the build-id in VMCOREINFO brings some uniformity for automation tools. Tyler said: : I think this is a nice improvement over today's linux_banner approach for : correlating vmlinux to a kernel dump. : : The elf notes parsing in this patch lines up with what is described in : the "Notes (Nhdr)" section of the elf(5) man page. : : BUILD_ID_MAX is sufficient to hold a sha1 build-id, which is the default : build-id type today in GNU ld(2). It is also sufficient to hold the : "fast" build-id, which is the default build-id type today in LLVM lld(2).
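For orientation, the note entry layout that the parser introduced below walks is, as a sketch based on the elf(5) description (the struct is shown here only for illustration; the kernel's own definition lives in the UAPI ELF headers):

	/* One entry in the kernel's .notes section, per elf(5). The name
	 * ("GNU\0" for a build-id note) follows this header, padded to a
	 * 4-byte boundary, then comes the descriptor -- the build-id bytes
	 * themselves, 20 bytes for the default sha1 build-id. */
	struct elf_note {
		Elf32_Word n_namesz;	/* length of the name, including NUL */
		Elf32_Word n_descsz;	/* length of the descriptor */
		Elf32_Word n_type;	/* NT_GNU_BUILD_ID (3) for a build-id */
	};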
Signed-off-by: Vijay Balakrishna Signed-off-by: Andrew Morton Reviewed-by: Tyler Hicks Acked-by: Baoquan He Cc: Dave Young Cc: Vivek Goyal Link: http://lkml.kernel.org/r/1591849672-34104-1-git-send-email-vijayb@linux.microsoft.com Signed-off-by: Linus Torvalds --- include/linux/crash_core.h | 6 ++++++ kernel/crash_core.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) (limited to 'include/linux') diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 525510a9f965..6594dbc34a37 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -38,6 +38,8 @@ phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_OSRELEASE(value) \ vmcoreinfo_append_str("OSRELEASE=%s\n", value) +#define VMCOREINFO_BUILD_ID(value) \ + vmcoreinfo_append_str("BUILD-ID=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ @@ -64,6 +66,10 @@ extern unsigned char *vmcoreinfo_data; extern size_t vmcoreinfo_size; extern u32 *vmcoreinfo_note; +/* raw contents of kernel .notes section */ +extern const void __start_notes __weak; +extern const void __stop_notes __weak; + Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); void final_note(Elf_Word *buf); diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 18175687133a..106e4500fd53 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -11,6 +11,8 @@ #include #include +#include + /* vmcoreinfo stuff */ unsigned char *vmcoreinfo_data; size_t vmcoreinfo_size; @@ -376,6 +378,53 @@ phys_addr_t __weak paddr_vmcoreinfo_note(void) } EXPORT_SYMBOL(paddr_vmcoreinfo_note); +#define NOTES_SIZE (&__stop_notes - &__start_notes) +#define BUILD_ID_MAX SHA1_DIGEST_SIZE +#define NT_GNU_BUILD_ID 3 + +struct elf_note_section { + struct elf_note n_hdr; + u8 n_data[]; +}; + +/* + * Add build ID from .notes section as generated by the GNU ld(1) + * or LLVM lld(1) --build-id option. 
+ */ +static void add_build_id_vmcoreinfo(void) +{ + char build_id[BUILD_ID_MAX * 2 + 1]; + int n_remain = NOTES_SIZE; + + while (n_remain >= sizeof(struct elf_note)) { + const struct elf_note_section *note_sec = + &__start_notes + NOTES_SIZE - n_remain; + const u32 n_namesz = note_sec->n_hdr.n_namesz; + + if (note_sec->n_hdr.n_type == NT_GNU_BUILD_ID && + n_namesz != 0 && + !strcmp((char *)¬e_sec->n_data[0], "GNU")) { + if (note_sec->n_hdr.n_descsz <= BUILD_ID_MAX) { + const u32 n_descsz = note_sec->n_hdr.n_descsz; + const u8 *s = ¬e_sec->n_data[n_namesz]; + + s = PTR_ALIGN(s, 4); + bin2hex(build_id, s, n_descsz); + build_id[2 * n_descsz] = '\0'; + VMCOREINFO_BUILD_ID(build_id); + return; + } + pr_warn("Build ID is too large to include in vmcoreinfo: %u > %u\n", + note_sec->n_hdr.n_descsz, + BUILD_ID_MAX); + return; + } + n_remain -= sizeof(struct elf_note) + + ALIGN(note_sec->n_hdr.n_namesz, 4) + + ALIGN(note_sec->n_hdr.n_descsz, 4); + } +} + static int __init crash_save_vmcoreinfo_init(void) { vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL); @@ -394,6 +443,7 @@ static int __init crash_save_vmcoreinfo_init(void) } VMCOREINFO_OSRELEASE(init_uts_ns.name.release); + add_build_id_vmcoreinfo(); VMCOREINFO_PAGESIZE(PAGE_SIZE); VMCOREINFO_SYMBOL(init_uts_ns); -- cgit v1.2.3 From 79076e1241bb3bf02d0aac7d39120d8161fe07b1 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 11 Aug 2020 18:36:46 -0700 Subject: kernel/panic.c: make oops_may_print() return bool The return value of oops_may_print() is true or false, so change its type to reflect that. Signed-off-by: Tiezhu Yang Signed-off-by: Andrew Morton Reviewed-by: Kees Cook Cc: Xuefeng Li Link: http://lkml.kernel.org/r/1591103358-32087-1-git-send-email-yangtiezhu@loongson.cn Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 2 +- kernel/panic.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 92517ae53e7c..211005163682 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -322,7 +322,7 @@ void nmi_panic(struct pt_regs *regs, const char *msg); extern void oops_enter(void); extern void oops_exit(void); void print_oops_end_marker(void); -extern int oops_may_print(void); +extern bool oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; diff --git a/kernel/panic.c b/kernel/panic.c index e2157ca387c8..93ba061d1c79 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -505,7 +505,7 @@ static void do_oops_enter_exit(void) * Return true if the calling CPU is allowed to print oops-related info. * This is a bit racy.. */ -int oops_may_print(void) +bool oops_may_print(void) { return pause_on_oops_flag == 0; } -- cgit v1.2.3 From 63037f74725ddd8a767ed2ad0369e60a3bf1f2ce Mon Sep 17 00:00:00 2001 From: Yue Hu Date: Tue, 11 Aug 2020 18:36:53 -0700 Subject: panic: make print_oops_end_marker() static Since print_oops_end_marker() is not used externally, also remove it in kernel.h at the same time. 
Signed-off-by: Yue Hu Signed-off-by: Andrew Morton Cc: Kees Cook Link: http://lkml.kernel.org/r/20200724011516.12756-1-zbestahu@gmail.com Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 1 - kernel/panic.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 211005163682..500def620d8f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -321,7 +321,6 @@ void panic(const char *fmt, ...) __noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); extern void oops_enter(void); extern void oops_exit(void); -void print_oops_end_marker(void); extern bool oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; diff --git a/kernel/panic.c b/kernel/panic.c index 93ba061d1c79..aef8872ba843 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -551,7 +551,7 @@ static int init_oops_id(void) } late_initcall(init_oops_id); -void print_oops_end_marker(void) +static void print_oops_end_marker(void) { init_oops_id(); pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id); -- cgit v1.2.3 From b4b382238ed2f94f0d3860f9120b66404fa99463 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:14 -0700 Subject: mm/migrate: move migration helper from .h to .c It's not a performance-sensitive function. Move it to .c. This is a preparation step for a future change. Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Reviewed-by: Vlastimil Babka Acked-by: Mike Kravetz Acked-by: Michal Hocko Cc: Christoph Hellwig Cc: Naoya Horiguchi Cc: Roman Gushchin Link: http://lkml.kernel.org/r/1594622517-20681-3-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 33 +++++----------------------------- mm/migrate.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 540998d9810b..abeb4b15b297 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -31,34 +31,6 @@ enum migrate_reason { /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ extern const char *migrate_reason_names[MR_TYPES]; -static inline struct page *new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask) -{ - gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; - unsigned int order = 0; - struct page *new_page = NULL; - - if (PageHuge(page)) - return alloc_huge_page_nodemask(page_hstate(compound_head(page)), - preferred_nid, nodemask); - - if (PageTransHuge(page)) { - gfp_mask |= GFP_TRANSHUGE; - order = HPAGE_PMD_ORDER; - } - - if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) - gfp_mask |= __GFP_HIGHMEM; - - new_page = __alloc_pages_nodemask(gfp_mask, order, - preferred_nid, nodemask); - - if (new_page && PageTransHuge(new_page)) - prep_transhuge_page(new_page); - - return new_page; -} - #ifdef CONFIG_MIGRATION extern void putback_movable_pages(struct list_head *l); @@ -67,6 +39,8 @@ extern int migrate_page(struct address_space *mapping, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); +extern struct page *new_page_nodemask(struct page *page, + int preferred_nid, nodemask_t *nodemask); extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page); @@ -85,6 +59,9 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason) { return -ENOSYS; } +static inline struct page *new_page_nodemask(struct page *page, + int preferred_nid, nodemask_t *nodemask) + { return NULL; } static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } diff --git a/mm/migrate.c b/mm/migrate.c index 52896b4921a7..5269bc520aee 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1538,6 +1538,35 @@ out: return rc; } +struct page *new_page_nodemask(struct page *page, + int preferred_nid, nodemask_t *nodemask) +{ + gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; + unsigned int order = 0; + struct page *new_page = NULL; + + if (PageHuge(page)) + return alloc_huge_page_nodemask( + page_hstate(compound_head(page)), + preferred_nid, nodemask); + + if (PageTransHuge(page)) { + gfp_mask |= GFP_TRANSHUGE; + order = HPAGE_PMD_ORDER; + } + + if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) + gfp_mask |= __GFP_HIGHMEM; + + new_page = __alloc_pages_nodemask(gfp_mask, order, + preferred_nid, nodemask); + + if (new_page && PageTransHuge(new_page)) + prep_transhuge_page(new_page); + + return new_page; +} + #ifdef CONFIG_NUMA static int store_status(int __user *status, int start, int value, int nr) -- cgit v1.2.3 From d92bbc2719bd2be237ee336113b63492a6baca3b Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:17 -0700 Subject: mm/hugetlb: unify migration callbacks There is no difference between the two migration callback functions, alloc_huge_page_node() and alloc_huge_page_nodemask(), except the __GFP_THISNODE handling. It's redundant to have two nearly identical functions just to handle this flag. So, this patch removes one of them by introducing a new argument, gfp_mask, to alloc_huge_page_nodemask(). After introducing the gfp_mask argument, it's the caller's job to provide the correct gfp_mask. So, every call site of alloc_huge_page_nodemask() is changed to provide a gfp_mask. Note that it's safe to remove the node id check in alloc_huge_page_node() since there is no caller passing NUMA_NO_NODE as a node id.
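As a sketch of the resulting calling convention, a NUMA-pinned migration callback now composes the mask itself. This mirrors the mm/mempolicy.c hunk further below; migrate_to_node() is a made-up name used only for illustration, not part of the patch:

	static struct page *migrate_to_node(struct page *page, int nid)
	{
		struct hstate *h = page_hstate(compound_head(page));
		/* caller-composed mask: regular hugetlb mask, pinned to one node */
		gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

		return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
	}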
Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Reviewed-by: Mike Kravetz Reviewed-by: Vlastimil Babka Acked-by: Michal Hocko Cc: Christoph Hellwig Cc: Naoya Horiguchi Cc: Roman Gushchin Link: http://lkml.kernel.org/r/1594622517-20681-4-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 26 ++++++++++++++++++-------- mm/hugetlb.c | 35 ++--------------------------------- mm/mempolicy.c | 10 ++++++---- mm/migrate.c | 11 +++++++---- 4 files changed, 33 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index a520bf26e5d8..3517edde681e 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -10,6 +10,7 @@ #include #include #include +#include struct ctl_table; struct user_struct; @@ -506,9 +507,8 @@ struct huge_bootmem_page { struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); -struct page *alloc_huge_page_node(struct hstate *h, int nid); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask); + nodemask_t *nmask, gfp_t gfp_mask); struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address); struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, @@ -694,6 +694,15 @@ static inline bool hugepage_movable_supported(struct hstate *h) return true; } +/* Movability of hugepages depends on migration support. */ +static inline gfp_t htlb_alloc_mask(struct hstate *h) +{ + if (hugepage_movable_supported(h)) + return GFP_HIGHUSER_MOVABLE; + else + return GFP_HIGHUSER; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -761,13 +770,9 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma, return NULL; } -static inline struct page *alloc_huge_page_node(struct hstate *h, int nid) -{ - return NULL; -} - static inline struct page * -alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask) +alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, + nodemask_t *nmask, gfp_t gfp_mask) { return NULL; } @@ -880,6 +885,11 @@ static inline bool hugepage_movable_supported(struct hstate *h) return false; } +static inline gfp_t htlb_alloc_mask(struct hstate *h) +{ + return 0; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b66bf74e999e..eaab9ef88e9d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1093,15 +1093,6 @@ retry_cpuset: return NULL; } -/* Movability of hugepages depends on migration support. 
*/ -static inline gfp_t htlb_alloc_mask(struct hstate *h) -{ - if (hugepage_movable_supported(h)) - return GFP_HIGHUSER_MOVABLE; - else - return GFP_HIGHUSER; -} - static struct page *dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, @@ -1985,32 +1976,10 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, return page; } -/* page migration callback function */ -struct page *alloc_huge_page_node(struct hstate *h, int nid) -{ - gfp_t gfp_mask = htlb_alloc_mask(h); - struct page *page = NULL; - - if (nid != NUMA_NO_NODE) - gfp_mask |= __GFP_THISNODE; - - spin_lock(&hugetlb_lock); - if (h->free_huge_pages - h->resv_huge_pages > 0) - page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL); - spin_unlock(&hugetlb_lock); - - if (!page) - page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL); - - return page; -} - /* page migration callback function */ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask) + nodemask_t *nmask, gfp_t gfp_mask) { - gfp_t gfp_mask = htlb_alloc_mask(h); - spin_lock(&hugetlb_lock); if (h->free_huge_pages - h->resv_huge_pages > 0) { struct page *page; @@ -2038,7 +2007,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, gfp_mask = htlb_alloc_mask(h); node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); - page = alloc_huge_page_nodemask(h, node, nodemask); + page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); mpol_cond_put(mpol); return page; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 25b7e412c20b..9ae2b704bdf6 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1068,10 +1068,12 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist, /* page allocation callback for NUMA node migration */ struct page *alloc_new_node_page(struct page *page, unsigned long node) { - if (PageHuge(page)) - return alloc_huge_page_node(page_hstate(compound_head(page)), - node); - else if (PageTransHuge(page)) { + if (PageHuge(page)) { + struct hstate *h = page_hstate(compound_head(page)); + gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; + + return alloc_huge_page_nodemask(h, node, NULL, gfp_mask); + } else if (PageTransHuge(page)) { struct page *thp; thp = alloc_pages_node(node, diff --git a/mm/migrate.c b/mm/migrate.c index 5269bc520aee..8d084e9bc13b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1545,10 +1545,13 @@ struct page *new_page_nodemask(struct page *page, unsigned int order = 0; struct page *new_page = NULL; - if (PageHuge(page)) - return alloc_huge_page_nodemask( - page_hstate(compound_head(page)), - preferred_nid, nodemask); + if (PageHuge(page)) { + struct hstate *h = page_hstate(compound_head(page)); + + gfp_mask = htlb_alloc_mask(h); + return alloc_huge_page_nodemask(h, preferred_nid, + nodemask, gfp_mask); + } if (PageTransHuge(page)) { gfp_mask |= GFP_TRANSHUGE; -- cgit v1.2.3 From 19fc7bed252c16ace29491e4cfa2bafb264eb505 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:25 -0700 Subject: mm/migrate: introduce a standard migration target allocation function There are some similar functions for migration target allocation. Since there is no fundamental difference, it's better to keep just one rather than keeping all variants. This patch implements base migration target allocation function. In the following patches, variants will be converted to use this function. Changes should be mechanical, but, unfortunately, there are some differences. 
First, some callers' nodemask is assigned to NULL since a NULL nodemask will be considered as all available nodes, that is, &node_states[N_MEMORY]. Second, for hugetlb page allocation, gfp_mask is redefined as the regular hugetlb allocation gfp_mask plus __GFP_THISNODE if the user-provided gfp_mask has it. This is because a future caller of this function requires this node constraint to be set. Lastly, if the provided nodeid is NUMA_NO_NODE, nodeid is set to the node where the migration source lives. It helps to remove simple wrappers for setting up the nodeid. Note that the PageHighMem() call in the previous function is changed to the open-coded "is_highmem_idx()" since it provides more readability. [akpm@linux-foundation.org: tweak patch title, per Vlastimil] [akpm@linux-foundation.org: fix typo in comment] Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Vlastimil Babka Acked-by: Michal Hocko Cc: Christoph Hellwig Cc: Mike Kravetz Cc: Naoya Horiguchi Cc: Roman Gushchin Link: http://lkml.kernel.org/r/1594622517-20681-6-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 15 +++++++++++++++ include/linux/migrate.h | 9 +++++---- mm/internal.h | 7 +++++++ mm/memory-failure.c | 7 +++++-- mm/memory_hotplug.c | 12 ++++++++---- mm/migrate.c | 26 ++++++++++++++++---------- mm/page_isolation.c | 7 +++++-- 7 files changed, 61 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 3517edde681e..30e1f14119c8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -703,6 +703,16 @@ static inline gfp_t htlb_alloc_mask(struct hstate *h) return GFP_HIGHUSER; } +static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) +{ + gfp_t modified_mask = htlb_alloc_mask(h); + + /* Some callers might want to enforce node */ + modified_mask |= (gfp_mask & __GFP_THISNODE); + + return modified_mask; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -890,6 +900,11 @@ static inline gfp_t htlb_alloc_mask(struct hstate *h) return 0; } +static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) +{ + return 0; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/include/linux/migrate.h b/include/linux/migrate.h index abeb4b15b297..0f8d1583fa8e 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -10,6 +10,8 @@ typedef struct page *new_page_t(struct page *page, unsigned long private); typedef void free_page_t(struct page *page, unsigned long private); +struct migration_target_control; + /* * Return values from addresss_space_operations.migratepage(): * - negative errno on page migration failure; @@ -39,8 +41,7 @@ extern int migrate_page(struct address_space *mapping, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); -extern struct page *new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask); +extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); @@ -59,8 +60,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason) { return -ENOSYS; } -static inline struct
page *new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask) +static inline struct page *alloc_migration_target(struct page *page, + unsigned long private) { return NULL; } static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } diff --git a/mm/internal.h b/mm/internal.h index 42cf0b610847..f725aa8a9698 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -614,4 +614,11 @@ static inline bool is_migrate_highatomic_page(struct page *page) void setup_zone_pageset(struct zone *zone); extern struct page *alloc_new_node_page(struct page *page, unsigned long node); + +struct migration_target_control { + int nid; /* preferred node id */ + nodemask_t *nmask; + gfp_t gfp_mask; +}; + #endif /* __MM_INTERNAL_H */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 47b8ccb1fb9b..f1aa6433f404 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1648,9 +1648,12 @@ EXPORT_SYMBOL(unpoison_memory); static struct page *new_page(struct page *p, unsigned long private) { - int nid = page_to_nid(p); + struct migration_target_control mtc = { + .nid = page_to_nid(p), + .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + }; - return new_page_nodemask(p, nid, &node_states[N_MEMORY]); + return alloc_migration_target(p, (unsigned long)&mtc); } /* diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 0a9e1972fbe7..c32ead89c911 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1276,19 +1276,23 @@ found: static struct page *new_node_page(struct page *page, unsigned long private) { - int nid = page_to_nid(page); nodemask_t nmask = node_states[N_MEMORY]; + struct migration_target_control mtc = { + .nid = page_to_nid(page), + .nmask = &nmask, + .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + }; /* * try to allocate from a different node but reuse this node if there * are no other online nodes to be used (e.g. 
we are offlining a part of the only existing node) */ - node_clear(nid, nmask); + node_clear(mtc.nid, nmask); if (nodes_empty(nmask)) - node_set(nid, nmask); + node_set(mtc.nid, nmask); - return new_page_nodemask(page, nid, &nmask); + return alloc_migration_target(page, (unsigned long)&mtc); } static int diff --git a/mm/migrate.c b/mm/migrate.c index 46cca5c2ebff..48b1f149494b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1538,19 +1538,26 @@ out: return rc; } -struct page *new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask) +struct page *alloc_migration_target(struct page *page, unsigned long private) { - gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; + struct migration_target_control *mtc; + gfp_t gfp_mask; unsigned int order = 0; struct page *new_page = NULL; + int nid; + int zidx; + + mtc = (struct migration_target_control *)private; + gfp_mask = mtc->gfp_mask; + nid = mtc->nid; + if (nid == NUMA_NO_NODE) + nid = page_to_nid(page); if (PageHuge(page)) { struct hstate *h = page_hstate(compound_head(page)); - gfp_mask = htlb_alloc_mask(h); - return alloc_huge_page_nodemask(h, preferred_nid, - nodemask, gfp_mask); + gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); + return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask); } if (PageTransHuge(page)) { @@ -1562,12 +1569,11 @@ struct page *new_page_nodemask(struct page *page, gfp_mask |= GFP_TRANSHUGE; order = HPAGE_PMD_ORDER; } - - if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) + zidx = zone_idx(page_zone(page)); + if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) gfp_mask |= __GFP_HIGHMEM; - new_page = __alloc_pages_nodemask(gfp_mask, order, - preferred_nid, nodemask); + new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask); if (new_page && PageTransHuge(new_page)) prep_transhuge_page(new_page); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index aec26d972b9f..f25c66ea37ac 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -309,7 +309,10 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, struct page *alloc_migrate_target(struct page *page, unsigned long private) { - int nid = page_to_nid(page); + struct migration_target_control mtc = { + .nid = page_to_nid(page), + .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + }; - return new_page_nodemask(page, nid, &node_states[N_MEMORY]); + return alloc_migration_target(page, (unsigned long)&mtc); } -- cgit v1.2.3 From 41b4dc14ee807cb1bd15e67cad287534046f92dc Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:34 -0700 Subject: mm/gup: restrict CMA region by using allocation scope API We have a well-defined scope API to exclude the CMA region. Use it rather than manipulating gfp_mask manually. With this change, we can now restore __GFP_MOVABLE in gfp_mask, as for usual migration target allocation. As a result, ZONE_MOVABLE is also searched by the page allocator. For hugetlb, gfp_mask is redefined since it has a regular allocation mask filter for migration targets. __GFP_NOWARN is added to the hugetlb gfp_mask filter since a new user of the gfp_mask filter, gup, wants to be silent when allocation fails. Note that this can be considered a fix for commit 9a4e9f3b2d73 ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region"). However, a "Fixes" tag isn't added here since the old behaviour is just suboptimal and doesn't cause any problem.
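For reference, the scope API referred to here brackets a region of code so that every allocation inside it implicitly avoids CMA pageblocks. A minimal sketch follows; the save side is the counterpart of the memalloc_nocma_restore() calls visible in the mm/gup.c hunk below, and nid stands in for any valid node id:

	unsigned int flags = memalloc_nocma_save();
	struct page *new_page;

	/* Allocations in this scope skip CMA pageblocks, so __GFP_MOVABLE
	 * no longer needs to be stripped from gfp_mask by hand. */
	new_page = __alloc_pages_nodemask(GFP_USER | __GFP_MOVABLE, 0, nid, NULL);

	memalloc_nocma_restore(flags);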
Suggested-by: Michal Hocko Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Vlastimil Babka Cc: Christoph Hellwig Cc: Roman Gushchin Cc: Mike Kravetz Cc: Naoya Horiguchi Cc: "Aneesh Kumar K . V" Link: http://lkml.kernel.org/r/1596180906-8442-1-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 2 ++ mm/gup.c | 17 ++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 30e1f14119c8..d86c82749836 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -710,6 +710,8 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) /* Some callers might want to enforce node */ modified_mask |= (gfp_mask & __GFP_THISNODE); + modified_mask |= (gfp_mask & __GFP_NOWARN); + return modified_mask; } diff --git a/mm/gup.c b/mm/gup.c index d8a33dd1430d..6c20c6e37635 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1620,10 +1620,12 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private) * Trying to allocate a page for migration. Ignore allocation * failure warnings. We don't force __GFP_THISNODE here because * this node here is the node where we have CMA reservation and - * in some case these nodes will have really less non movable + * in some case these nodes will have really less non CMA * allocation memory. + * + * Note that CMA region is prohibited by allocation scope. */ - gfp_t gfp_mask = GFP_USER | __GFP_NOWARN; + gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN; if (PageHighMem(page)) gfp_mask |= __GFP_HIGHMEM; @@ -1631,6 +1633,8 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private) #ifdef CONFIG_HUGETLB_PAGE if (PageHuge(page)) { struct hstate *h = page_hstate(page); + + gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); /* * We don't want to dequeue from the pool because pool pages will * mostly be from the CMA region. @@ -1645,11 +1649,6 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private) */ gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN; - /* - * Remove the movable mask so that we don't allocate from - * CMA area again. - */ - thp_gfpmask &= ~__GFP_MOVABLE; thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER); if (!thp) return NULL; @@ -1795,7 +1794,6 @@ static long __gup_longterm_locked(struct task_struct *tsk, vmas_tmp, NULL, gup_flags); if (gup_flags & FOLL_LONGTERM) { - memalloc_nocma_restore(flags); if (rc < 0) goto out; @@ -1808,9 +1806,10 @@ static long __gup_longterm_locked(struct task_struct *tsk, rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages, vmas_tmp, gup_flags); +out: + memalloc_nocma_restore(flags); } -out: if (vmas_tmp != vmas) kfree(vmas_tmp); return rc; -- cgit v1.2.3 From bbe88753bd42b1faf1458dde8f58ff1239990436 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 11 Aug 2020 18:37:38 -0700 Subject: mm/hugetlb: make hugetlb migration callback CMA aware new_non_cma_page() in gup.c needs to allocate a new page that is not in the CMA area. new_non_cma_page() implements this by using the allocation scope APIs. However, there is a work-around for hugetlb: the normal hugetlb page allocation API for migration is alloc_huge_page_nodemask(), which consists of two steps. The first is dequeuing a page from the pool; the second, if no page is available on the queue, is allocating one with the page allocator, as in the sketch below.
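(Hand-written illustration of those two steps; the wrapper name is invented, dequeue_huge_page_nodemask() and alloc_migrate_huge_page() are the hugetlb-internal helpers the real API builds on, and hugetlb_lock plus pool accounting are omitted:)

static struct page *
alloc_huge_page_nodemask_sketch(struct hstate *h, int nid,
				nodemask_t *nmask, gfp_t gfp_mask)
{
	struct page *page;

	/* Step 1: try to dequeue an existing free huge page from the pool */
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nmask);
	if (page)
		return page;

	/* Step 2: fall back to a fresh allocation from the page allocator */
	return alloc_migrate_huge_page(h, gfp_mask, nid, nmask);
}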
new_non_cma_page() can't use this API since the first step (the dequeue) isn't aware of the scope API that excludes the CMA area. So, new_non_cma_page() exports the hugetlb-internal function for the second step, alloc_migrate_huge_page(), to global scope and uses it directly. This is suboptimal since hugetlb pages already on the queue cannot be utilized. This patch fixes the situation by making the hugetlb dequeue function CMA aware: in the dequeue function, CMA memory is skipped if the PF_MEMALLOC_NOCMA flag is found on the current task. Signed-off-by: Joonsoo Kim Signed-off-by: Andrew Morton Acked-by: Mike Kravetz Acked-by: Vlastimil Babka Acked-by: Michal Hocko Cc: "Aneesh Kumar K . V" Cc: Christoph Hellwig Cc: Naoya Horiguchi Cc: Roman Gushchin Link: http://lkml.kernel.org/r/1596180906-8442-2-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Linus Torvalds --- include/linux/hugetlb.h | 2 -- mm/gup.c | 6 +----- mm/hugetlb.c | 11 +++++++++-- 3 files changed, 10 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d86c82749836..d5cc5f802dd4 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -511,8 +511,6 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask); struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address); -struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, - int nid, nodemask_t *nmask); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); diff --git a/mm/gup.c b/mm/gup.c index 6c20c6e37635..c55427beeb26 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1635,11 +1635,7 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private) struct hstate *h = page_hstate(page); gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); - /* - * We don't want to dequeue from the pool because pool pages will - * mostly be from the CMA region. - */ - return alloc_migrate_huge_page(h, gfp_mask, nid, NULL); + return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask); } #endif if (PageTransHuge(page)) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index eaab9ef88e9d..a301c2d672bf 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -1040,10 +1041,16 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) { struct page *page; + bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA); + + list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { + if (nocma && is_migrate_cma_page(page)) + continue; - list_for_each_entry(page, &h->hugepage_freelists[nid], lru) if (!PageHWPoison(page)) break; + } + /* * if 'non-isolated free hugepage' not found on the list, * the allocation fails. @@ -1935,7 +1942,7 @@ out_unlock: return page; } -struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, +static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { struct page *page; -- cgit v1.2.3 From bce617edecada007aee8610fbe2c14d10b8de2f6 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 11 Aug 2020 18:37:44 -0700 Subject: mm: do page fault accounting in handle_mm_fault Patch series "mm: Page fault accounting cleanups", v5.
It originates from Gerald Schaefer's report a week ago of an issue regarding incorrect page fault accounting for retried page faults after commit 4064b9827063 ("mm: allow VM_FAULT_RETRY for multiple times"): https://lore.kernel.org/lkml/20200610174811.44b94525@thinkpad/ What this series does: - Correct page fault accounting: we do accounting for a page fault (no matter whether it's from #PF handling, or gup, or anything else) only once, with the attempt that completed the fault. For example, page fault retries should not be counted in the page fault counters. The same applies to the perf events. - Unify the definition of PERF_COUNT_SW_PAGE_FAULTS: currently this perf event is used in an ad-hoc way across different archs. Case (1): for many archs it's done at the entry of a page fault handler, so that it will also cover e.g. erroneous faults. Case (2): for some other archs, it is only accounted when the page fault is resolved successfully. Case (3): there are still quite a few archs that have not enabled this perf event at all. Since this series touches nearly all the archs, we unify this perf event to always follow case (1), which is the one that makes the most sense. And since we moved the accounting into handle_mm_fault(), the other two MAJ/MIN perf events are taken care of naturally. - Unify the definition of "major faults": the definition of "major fault" is slightly changed when used in accounting (not VM_FAULT_MAJOR). More information in patch 1. - Always account the page fault onto the task that triggered the page fault. This does not matter much for #PF handling, but it does for gup. More information on this in patch 25. Patchset layout: Patch 1: Introduce the accounting in handle_mm_fault(), not yet enabled. Patches 2-23: Enable the new accounting for the arch #PF handlers one by one. Patch 24: Enable the new accounting for the remaining outliers (gup, iommu, etc.) Patch 25: Clean up the GUP task_struct pointer since it's not needed any more This patch (of 25): This is a preparation patch to move page fault accounting into the generic code in handle_mm_fault(). This includes both the per-task flt_maj/flt_min counters and the major/minor page fault perf events. To do this, the pt_regs pointer is passed into handle_mm_fault(). PERF_COUNT_SW_PAGE_FAULTS should still be kept in the per-arch page fault handlers. So far, every pt_regs pointer passed into handle_mm_fault() is NULL, which means this patch should have no intended functional change. Suggested-by: Linus Torvalds Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Cc: Albert Ou Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Brian Cain Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Chris Zankel Cc: Dave Hansen Cc: David S. Miller Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Greentime Hu Cc: Guo Ren Cc: Heiko Carstens Cc: Helge Deller Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Ivan Kokshaysky Cc: James E.J.
Bottomley Cc: John Hubbard Cc: Jonas Bonn Cc: Ley Foon Tan Cc: "Luck, Tony" Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Nick Hu Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Stafford Horne Cc: Stefan Kristiansson Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vincent Chen Cc: Vineet Gupta Cc: Will Deacon Cc: Yoshinori Sato Link: http://lkml.kernel.org/r/20200707225021.200906-1-peterx@redhat.com Link: http://lkml.kernel.org/r/20200707225021.200906-2-peterx@redhat.com Signed-off-by: Linus Torvalds --- arch/alpha/mm/fault.c | 2 +- arch/arc/mm/fault.c | 2 +- arch/arm/mm/fault.c | 2 +- arch/arm64/mm/fault.c | 2 +- arch/csky/mm/fault.c | 3 +- arch/hexagon/mm/vm_fault.c | 2 +- arch/ia64/mm/fault.c | 2 +- arch/m68k/mm/fault.c | 2 +- arch/microblaze/mm/fault.c | 2 +- arch/mips/mm/fault.c | 2 +- arch/nds32/mm/fault.c | 2 +- arch/nios2/mm/fault.c | 2 +- arch/openrisc/mm/fault.c | 2 +- arch/parisc/mm/fault.c | 2 +- arch/powerpc/mm/copro_fault.c | 2 +- arch/powerpc/mm/fault.c | 2 +- arch/riscv/mm/fault.c | 2 +- arch/s390/mm/fault.c | 2 +- arch/sh/mm/fault.c | 2 +- arch/sparc/mm/fault_32.c | 4 +-- arch/sparc/mm/fault_64.c | 2 +- arch/um/kernel/trap.c | 2 +- arch/x86/mm/fault.c | 2 +- arch/xtensa/mm/fault.c | 2 +- drivers/iommu/amd/iommu_v2.c | 2 +- drivers/iommu/intel/svm.c | 3 +- include/linux/mm.h | 7 +++-- mm/gup.c | 4 +-- mm/hmm.c | 3 +- mm/ksm.c | 3 +- mm/memory.c | 64 ++++++++++++++++++++++++++++++++++++++++++- 31 files changed, 103 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index c2303a8c2b9f..1983e43a5e2f 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -148,7 +148,7 @@ retry: /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 7287c793d1c9..587dea524e6b 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -130,7 +130,7 @@ retry: goto bad_area; } - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index c6550eddfce1..01a8e0f8fef7 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -224,7 +224,7 @@ good_area: goto out; } - return handle_mm_fault(vma, addr & PAGE_MASK, flags); + return handle_mm_fault(vma, addr & PAGE_MASK, flags, NULL); check_stack: /* Don't allow expansion below FIRST_USER_ADDRESS */ diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 8afb238ff335..be29f4076fe3 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -428,7 +428,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, */ if (!(vma->vm_flags & vm_flags)) return VM_FAULT_BADACCESS; - return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags); + return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, NULL); } static bool is_el0_instruction_abort(unsigned int esr) diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index b1dce9f2f04d..b252e6e4d32f 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -150,7 +150,8 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0); + fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0, + NULL); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index cd3808f96b93..f12f330e7946 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -88,7 +88,7 @@ good_area: break; } - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 3a4dec334cc5..abf2808f9b4b 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -143,7 +143,7 @@ retry: * sure we exit gracefully rather than endlessly redo the * fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 508abb63da67..08b35a318ebe 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -134,7 +134,7 @@ good_area: * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); pr_debug("handle_mm_fault returns %x\n", fault); if (fault_signal_pending(fault, regs)) diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index a2bfe587b491..1a3d4c4ca28b 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -214,7 +214,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 01b168a90434..b1db39784db9 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -152,7 +152,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c index 8fb73f6401a0..d0ecc8fb5b23 100644 --- a/arch/nds32/mm/fault.c +++ b/arch/nds32/mm/fault.c @@ -206,7 +206,7 @@ good_area: * the fault. */ - fault = handle_mm_fault(vma, addr, flags); + fault = handle_mm_fault(vma, addr, flags, NULL); /* * If we need to retry but a fatal signal is pending, handle the diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 4112ef0e247e..86beb9a2698e 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -131,7 +131,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index d2224ccca294..3daa491d1edb 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -159,7 +159,7 @@ good_area: * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 66ac0719bd49..e32d06928c24 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -302,7 +302,7 @@ good_area: * fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index b83abbead4a2..2d0276abe0a6 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -64,7 +64,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, } ret = 0; - *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0); + *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL); if (unlikely(*flt & VM_FAULT_ERROR)) { if (*flt & VM_FAULT_OOM) { ret = -ENOMEM; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 925a7231abb3..c6a5225a3521 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -511,7 +511,7 @@ retry: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); major |= fault & VM_FAULT_MAJOR; diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 5873835a3e6b..30c1124d0fb6 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -109,7 +109,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, addr, flags); + fault = handle_mm_fault(vma, addr, flags, NULL); /* * If we need to retry but a fatal signal is pending, handle the diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index aebf9183bedd..ad783aaaf649 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -476,7 +476,7 @@ retry: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) { fault = VM_FAULT_SIGNAL; if (flags & FAULT_FLAG_RETRY_NOWAIT) diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index fbe1f2fe9a8c..3c0a11827f7e 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -482,7 +482,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR))) if (mm_fault_error(regs, error_code, address, fault)) diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index cfef656eda0f..06af03db4417 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -234,7 +234,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; @@ -410,7 +410,7 @@ good_area: if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } - switch (handle_mm_fault(vma, address, flags)) { + switch (handle_mm_fault(vma, address, flags, NULL)) { case VM_FAULT_SIGBUS: case VM_FAULT_OOM: goto do_sigbus; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index a3806614e4dc..9ebee14ee893 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -422,7 +422,7 @@ good_area: goto bad_area; } - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) goto exit_exception; diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 2b3afa354a90..8d9870d76da1 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -71,7 +71,7 @@ good_area: do { vm_fault_t fault; - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) goto out_nosemaphore; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 0c7643d9f7cb..e1bf5555d80a 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1291,7 +1291,7 @@ good_area: * userland). The return to userland is identified whenever * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags. */ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); major |= fault & VM_FAULT_MAJOR; /* Quick path to respond to signals */ diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index c128dcc7c85b..e72c8c1359a6 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -107,7 +107,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(vma, address, flags); + fault = handle_mm_fault(vma, address, flags, NULL); if (fault_signal_pending(fault, regs)) return; diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index e4b025c5637c..c259108ab6dd 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -495,7 +495,7 @@ static void do_fault(struct work_struct *work) if (access_error(vma, fault)) goto out; - ret = handle_mm_fault(vma, address, flags); + ret = handle_mm_fault(vma, address, flags, NULL); out: mmap_read_unlock(mm); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 6c87c807a0ab..5ae59a6ad681 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -872,7 +872,8 @@ static irqreturn_t prq_event_thread(int irq, void *d) goto invalid; ret = handle_mm_fault(vma, address, - req->wr_req ? FAULT_FLAG_WRITE : 0); + req->wr_req ? FAULT_FLAG_WRITE : 0, + NULL); if (ret & VM_FAULT_ERROR) goto invalid; diff --git a/include/linux/mm.h b/include/linux/mm.h index f97b10117d44..ec0ffb423769 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -38,6 +38,7 @@ struct file_ra_state; struct user_struct; struct writeback_control; struct bdi_writeback; +struct pt_regs; void init_mm_internals(void); @@ -1658,7 +1659,8 @@ int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags); + unsigned long address, unsigned int flags, + struct pt_regs *regs); extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); @@ -1668,7 +1670,8 @@ void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); #else static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags) + unsigned long address, unsigned int flags, + struct pt_regs *regs) { /* should never happen if there's no MMU */ BUG(); diff --git a/mm/gup.c b/mm/gup.c index e9d1d0cc18f0..ae7121d729fa 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -884,7 +884,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, fault_flags |= FAULT_FLAG_TRIED; } - ret = handle_mm_fault(vma, address, fault_flags); + ret = handle_mm_fault(vma, address, fault_flags, NULL); if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, *flags); @@ -1238,7 +1238,7 @@ retry: fatal_signal_pending(current)) return -EINTR; - ret = handle_mm_fault(vma, address, fault_flags); + ret = handle_mm_fault(vma, address, fault_flags, NULL); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, 0); diff --git a/mm/hmm.c b/mm/hmm.c index bb279319bf40..943cb2ba4442 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -75,7 +75,8 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end, } for (; addr < end; addr += PAGE_SIZE) - if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR) + if (handle_mm_fault(vma, addr, fault_flags, NULL) & + VM_FAULT_ERROR) return -EFAULT; return -EBUSY; } diff --git a/mm/ksm.c b/mm/ksm.c index 217842a66912..0aa2247bddd7 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -480,7 +480,8 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) break; if (PageKsm(page)) ret = handle_mm_fault(vma, addr, - FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE); + FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE, + NULL); else ret = VM_FAULT_WRITE; put_page(page); diff 
--git a/mm/memory.c b/mm/memory.c index 325bb575e7ec..9b7d35734caa 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -71,6 +71,8 @@ #include #include #include +#include +#include #include @@ -4356,6 +4358,64 @@ retry_pud: return handle_pte_fault(&vmf); } +/** + * mm_account_fault - Do page fault accountings + * + * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting + * of perf event counters, but we'll still do the per-task accounting to + * the task who triggered this page fault. + * @address: the faulted address. + * @flags: the fault flags. + * @ret: the fault retcode. + * + * This will take care of most of the page fault accountings. Meanwhile, it + * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter + * updates. However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should + * still be in per-arch page fault handlers at the entry of page fault. + */ +static inline void mm_account_fault(struct pt_regs *regs, + unsigned long address, unsigned int flags, + vm_fault_t ret) +{ + bool major; + + /* + * We don't do accounting for some specific faults: + * + * - Unsuccessful faults (e.g. when the address wasn't valid). That + * includes arch_vma_access_permitted() failing before reaching here. + * So this is not a "this many hardware page faults" counter. We + * should use the hw profiling for that. + * + * - Incomplete faults (VM_FAULT_RETRY). They will only be counted + * once they're completed. + */ + if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY)) + return; + + /* + * We define the fault as a major fault when the final successful fault + * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't + * handle it immediately previously). + */ + major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); + + /* + * If the fault is done for GUP, regs will be NULL, and we will skip + * the fault accounting. + */ + if (!regs) + return; + + if (major) { + current->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); + } else { + current->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); + } +} + /* * By the time we get here, we already hold the mm semaphore * @@ -4363,7 +4423,7 @@ retry_pud: * return value. See filemap_fault() and __lock_page_or_retry(). */ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, - unsigned int flags) + unsigned int flags, struct pt_regs *regs) { vm_fault_t ret; @@ -4404,6 +4464,8 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, mem_cgroup_oom_synchronize(false); } + mm_account_fault(regs, address, flags, ret); + return ret; } EXPORT_SYMBOL_GPL(handle_mm_fault); -- cgit v1.2.3 From 64019a2e467a288a16b65ab55ddcbf58c1b00187 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 11 Aug 2020 18:39:01 -0700 Subject: mm/gup: remove task_struct pointer for all gup code After the cleanup of page fault accounting, gup does not need to pass task_struct around any more. Remove that parameter in the whole gup stack. 
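To make the mechanical conversion concrete, here is a condensed before/after view of a typical call site, abbreviated from the s390 get_map_page() hunk in the diff below:

	/* before: a task_struct had to be supplied for accounting */
	get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
			      &page, NULL, NULL);

	/* after: the mm alone identifies the target address space */
	get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
			      &page, NULL, NULL);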
Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Reviewed-by: John Hubbard Link: http://lkml.kernel.org/r/20200707225021.200906-26-peterx@redhat.com Signed-off-by: Linus Torvalds --- arch/arc/kernel/process.c | 2 +- arch/s390/kvm/interrupt.c | 2 +- arch/s390/kvm/kvm-s390.c | 2 +- arch/s390/kvm/priv.c | 8 +-- arch/s390/mm/gmap.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +- drivers/infiniband/core/umem_odp.c | 2 +- drivers/vfio/vfio_iommu_type1.c | 4 +- fs/exec.c | 2 +- include/linux/mm.h | 9 ++- kernel/events/uprobes.c | 6 +- kernel/futex.c | 2 +- mm/gup.c | 101 ++++++++++++---------------- mm/memory.c | 2 +- mm/process_vm_access.c | 2 +- security/tomoyo/domain.c | 2 +- virt/kvm/async_pf.c | 2 +- virt/kvm/kvm_main.c | 2 +- 18 files changed, 69 insertions(+), 87 deletions(-) (limited to 'include/linux') diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index e12c80d71b78..efeba1fe7252 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -91,7 +91,7 @@ fault: goto fail; mmap_read_lock(current->mm); - ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr, + ret = fixup_user_fault(current->mm, (unsigned long) uaddr, FAULT_FLAG_WRITE, NULL); mmap_read_unlock(current->mm); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 1608fd99bbee..2f177298c663 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2768,7 +2768,7 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr) struct page *page = NULL; mmap_read_lock(kvm->mm); - get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE, + get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE, &page, NULL, NULL); mmap_read_unlock(kvm->mm); return page; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 66da278a67fb..6b74b92c1a58 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1892,7 +1892,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) r = set_guest_storage_key(current->mm, hva, keys[i], 0); if (r) { - r = fixup_user_fault(current, current->mm, hva, + r = fixup_user_fault(current->mm, hva, FAULT_FLAG_WRITE, &unlocked); if (r) break; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 2f721a923b54..cd74989ce0b0 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -273,7 +273,7 @@ retry: rc = get_guest_storage_key(current->mm, vmaddr, &key); if (rc) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); if (!rc) { mmap_read_unlock(current->mm); @@ -319,7 +319,7 @@ retry: mmap_read_lock(current->mm); rc = reset_guest_reference_bit(current->mm, vmaddr); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); if (!rc) { mmap_read_unlock(current->mm); @@ -390,7 +390,7 @@ static int handle_sske(struct kvm_vcpu *vcpu) m3 & SSKE_MC); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); rc = !rc ? -EAGAIN : rc; } @@ -1094,7 +1094,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) rc = cond_set_guest_storage_key(current->mm, vmaddr, key, NULL, nq, mr, mc); if (rc < 0) { - rc = fixup_user_fault(current, current->mm, vmaddr, + rc = fixup_user_fault(current->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked); rc = !rc ? 
-EAGAIN : rc; } diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 190357ff86b3..8747487c50a8 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -649,7 +649,7 @@ retry: rc = vmaddr; goto out_up; } - if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags, + if (fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked)) { rc = -EFAULT; goto out_up; @@ -879,7 +879,7 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr, BUG_ON(gmap_is_shadow(gmap)); fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0; - if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked)) + if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked)) return -EFAULT; if (unlocked) /* lost mmap_lock, caller has to retry __gmap_translate */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index e946032b13e4..2c2bf24140c9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -469,7 +469,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) locked = 1; } ret = pin_user_pages_remote - (work->task, mm, + (mm, obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, flags, diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 5e32f61a2fe4..cc6b4befde7c 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -439,7 +439,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt, * complex (and doesn't gain us much performance in most use * cases). */ - npages = get_user_pages_remote(owning_process, owning_mm, + npages = get_user_pages_remote(owning_mm, user_virt, gup_num_pages, flags, local_page_list, NULL, NULL); mmap_read_unlock(owning_mm); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 5e556ac9102a..9d41105bfd01 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -425,7 +425,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, if (ret) { bool unlocked = false; - ret = fixup_user_fault(NULL, mm, vaddr, + ret = fixup_user_fault(mm, vaddr, FAULT_FLAG_REMOTE | (write_fault ? FAULT_FLAG_WRITE : 0), &unlocked); @@ -453,7 +453,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, flags |= FOLL_WRITE; mmap_read_lock(mm); - ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM, + ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, page, NULL, NULL); if (ret == 1) { *pfn = page_to_pfn(page[0]); diff --git a/fs/exec.c b/fs/exec.c index a57d9785832b..a91003e28eaa 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -217,7 +217,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * We are doing an exec(). 'current' is the process * doing the exec and bprm->mm is the new process's mm. 
*/ - ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags, + ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags, &page, NULL, NULL); if (ret <= 0) return NULL; diff --git a/include/linux/mm.h b/include/linux/mm.h index ec0ffb423769..e7602a3bcef1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1661,7 +1661,7 @@ int invalidate_inode_page(struct page *page); extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs); -extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, +extern int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); void unmap_mapping_pages(struct address_space *mapping, @@ -1677,8 +1677,7 @@ static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, BUG(); return VM_FAULT_SIGBUS; } -static inline int fixup_user_fault(struct task_struct *tsk, - struct mm_struct *mm, unsigned long address, +static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { /* should never happen if there's no MMU */ @@ -1704,11 +1703,11 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); -long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); -long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 49047d479c57..649fd53dc9ad 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -376,7 +376,7 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) if (!vaddr || !d) return -EINVAL; - ret = get_user_pages_remote(NULL, mm, vaddr, 1, + ret = get_user_pages_remote(mm, vaddr, 1, FOLL_WRITE, &page, &vma, NULL); if (unlikely(ret <= 0)) { /* @@ -477,7 +477,7 @@ retry: if (is_register) gup_flags |= FOLL_SPLIT_PMD; /* Read the page with vaddr into memory */ - ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags, + ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, &old_page, &vma, NULL); if (ret <= 0) return ret; @@ -2029,7 +2029,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) * but we treat this as a 'remote' access since it is * essentially a kernel access to the memory. 
*/ - result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page, + result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, NULL, NULL); if (result < 0) return result; diff --git a/kernel/futex.c b/kernel/futex.c index 83404124b77b..61e8153e6c76 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -678,7 +678,7 @@ static int fault_in_user_writeable(u32 __user *uaddr) int ret; mmap_read_lock(mm); - ret = fixup_user_fault(current, mm, (unsigned long)uaddr, + ret = fixup_user_fault(mm, (unsigned long)uaddr, FAULT_FLAG_WRITE, NULL); mmap_read_unlock(mm); diff --git a/mm/gup.c b/mm/gup.c index d5d44c68fa19..39e58df6925d 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -859,7 +859,7 @@ unmap: * does not include FOLL_NOWAIT, the mmap_lock may be released. If it * is, *@locked will be set to 0 and -EBUSY returned. */ -static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, +static int faultin_page(struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *locked) { unsigned int fault_flags = 0; @@ -962,7 +962,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) /** * __get_user_pages() - pin user pages in memory - * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin @@ -1021,7 +1020,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) * instead of __get_user_pages. __get_user_pages should be used only if * you need some special @gup_flags. */ -static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, +static long __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1103,8 +1102,7 @@ retry: page = follow_page_mask(vma, start, foll_flags, &ctx); if (!page) { - ret = faultin_page(tsk, vma, start, &foll_flags, - locked); + ret = faultin_page(vma, start, &foll_flags, locked); switch (ret) { case 0: goto retry; @@ -1178,8 +1176,6 @@ static bool vma_permits_fault(struct vm_area_struct *vma, /** * fixup_user_fault() - manually resolve a user page fault - * @tsk: the task_struct to use for page fault accounting, or - * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @address: user address * @fault_flags:flags to pass down to handle_mm_fault() @@ -1207,7 +1203,7 @@ static bool vma_permits_fault(struct vm_area_struct *vma, * This function will not return with an unlocked mmap_lock. So it has not the * same semantics wrt the @mm->mmap_lock as does filemap_fault(). 
*/ -int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, +int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { @@ -1256,8 +1252,7 @@ EXPORT_SYMBOL_GPL(fixup_user_fault); * Please note that this function, unlike __get_user_pages will not * return 0 for nr_pages > 0 without FOLL_NOWAIT */ -static __always_inline long __get_user_pages_locked(struct task_struct *tsk, - struct mm_struct *mm, +static __always_inline long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1290,7 +1285,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk, pages_done = 0; lock_dropped = false; for (;;) { - ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, + ret = __get_user_pages(mm, start, nr_pages, flags, pages, vmas, locked); if (!locked) /* VM_FAULT_RETRY couldn't trigger, bypass */ @@ -1350,7 +1345,7 @@ retry: } *locked = 1; - ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, + ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, pages, NULL, locked); if (!*locked) { /* Continue to retry until we succeeded */ @@ -1437,7 +1432,7 @@ long populate_vma_page_range(struct vm_area_struct *vma, * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ - return __get_user_pages(current, mm, start, nr_pages, gup_flags, + return __get_user_pages(mm, start, nr_pages, gup_flags, NULL, NULL, locked); } @@ -1521,7 +1516,7 @@ struct page *get_dump_page(unsigned long addr) struct vm_area_struct *vma; struct page *page; - if (__get_user_pages(current, current->mm, addr, 1, + if (__get_user_pages(current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; @@ -1530,8 +1525,7 @@ struct page *get_dump_page(unsigned long addr) } #endif /* CONFIG_ELF_CORE */ #else /* CONFIG_MMU */ -static long __get_user_pages_locked(struct task_struct *tsk, - struct mm_struct *mm, unsigned long start, +static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int foll_flags) @@ -1596,8 +1590,7 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages) } #ifdef CONFIG_CMA -static long check_and_migrate_cma_pages(struct task_struct *tsk, - struct mm_struct *mm, +static long check_and_migrate_cma_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1675,7 +1668,7 @@ check_again: * again migrating any new CMA pages which we failed to isolate * earlier. */ - ret = __get_user_pages_locked(tsk, mm, start, nr_pages, + ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL, gup_flags); @@ -1689,8 +1682,7 @@ check_again: return ret; } #else -static long check_and_migrate_cma_pages(struct task_struct *tsk, - struct mm_struct *mm, +static long check_and_migrate_cma_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1705,8 +1697,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk, * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which * allows us to process the FOLL_LONGTERM flag. 
*/ -static long __gup_longterm_locked(struct task_struct *tsk, - struct mm_struct *mm, +static long __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, @@ -1731,7 +1722,7 @@ static long __gup_longterm_locked(struct task_struct *tsk, flags = memalloc_nocma_save(); } - rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, + rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas_tmp, NULL, gup_flags); if (gup_flags & FOLL_LONGTERM) { @@ -1745,7 +1736,7 @@ static long __gup_longterm_locked(struct task_struct *tsk, goto out; } - rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages, + rc = check_and_migrate_cma_pages(mm, start, rc, pages, vmas_tmp, gup_flags); out: memalloc_nocma_restore(flags); @@ -1756,22 +1747,20 @@ out: return rc; } #else /* !CONFIG_FS_DAX && !CONFIG_CMA */ -static __always_inline long __gup_longterm_locked(struct task_struct *tsk, - struct mm_struct *mm, +static __always_inline long __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int flags) { - return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, + return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL, flags); } #endif /* CONFIG_FS_DAX || CONFIG_CMA */ #ifdef CONFIG_MMU -static long __get_user_pages_remote(struct task_struct *tsk, - struct mm_struct *mm, +static long __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1790,20 +1779,18 @@ static long __get_user_pages_remote(struct task_struct *tsk, * This will check the vmas (even if our vmas arg is NULL) * and return -ENOTSUPP if DAX isn't allowed in this case: */ - return __gup_longterm_locked(tsk, mm, start, nr_pages, pages, + return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } - return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, + return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, locked, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } /** * get_user_pages_remote() - pin user pages in memory - * @tsk: the task_struct to use for page fault accounting, or - * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin @@ -1862,7 +1849,7 @@ static long __get_user_pages_remote(struct task_struct *tsk, * should use get_user_pages_remote because it cannot pass * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. 
*/ -long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1874,13 +1861,13 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) return -EINVAL; - return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, + return __get_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked); } EXPORT_SYMBOL(get_user_pages_remote); #else /* CONFIG_MMU */ -long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1888,8 +1875,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, return 0; } -static long __get_user_pages_remote(struct task_struct *tsk, - struct mm_struct *mm, +static long __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -1909,11 +1895,10 @@ static long __get_user_pages_remote(struct task_struct *tsk, * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * - * This is the same as get_user_pages_remote(), just with a - * less-flexible calling convention where we assume that the task - * and mm being operated on are the current task's and don't allow - * passing of a locked parameter. We also obviously don't pass - * FOLL_REMOTE in here. + * This is the same as get_user_pages_remote(), just with a less-flexible + * calling convention where we assume that the mm being operated on belongs to + * the current task, and doesn't allow passing of a locked parameter. We also + * obviously don't pass FOLL_REMOTE in here. 
*/ long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, @@ -1926,7 +1911,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) return -EINVAL; - return __gup_longterm_locked(current, current->mm, start, nr_pages, + return __gup_longterm_locked(current->mm, start, nr_pages, pages, vmas, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages); @@ -1936,7 +1921,7 @@ EXPORT_SYMBOL(get_user_pages); * * mmap_read_lock(mm); * do_something() - * get_user_pages(tsk, mm, ..., pages, NULL); + * get_user_pages(mm, ..., pages, NULL); * mmap_read_unlock(mm); * * to: @@ -1944,7 +1929,7 @@ EXPORT_SYMBOL(get_user_pages); * int locked = 1; * mmap_read_lock(mm); * do_something() - * get_user_pages_locked(tsk, mm, ..., pages, &locked); + * get_user_pages_locked(mm, ..., pages, &locked); * if (locked) * mmap_read_unlock(mm); * @@ -1982,7 +1967,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) return -EINVAL; - return __get_user_pages_locked(current, current->mm, start, nr_pages, + return __get_user_pages_locked(current->mm, start, nr_pages, pages, NULL, locked, gup_flags | FOLL_TOUCH); } @@ -1992,12 +1977,12 @@ EXPORT_SYMBOL(get_user_pages_locked); * get_user_pages_unlocked() is suitable to replace the form: * * mmap_read_lock(mm); - * get_user_pages(tsk, mm, ..., pages, NULL); + * get_user_pages(mm, ..., pages, NULL); * mmap_read_unlock(mm); * * with: * - * get_user_pages_unlocked(tsk, mm, ..., pages); + * get_user_pages_unlocked(mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead if specific gup_flags @@ -2020,7 +2005,7 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, return -EINVAL; mmap_read_lock(mm); - ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, + ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, &locked, gup_flags | FOLL_TOUCH); if (locked) mmap_read_unlock(mm); @@ -2665,7 +2650,7 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages, */ if (gup_flags & FOLL_LONGTERM) { mmap_read_lock(current->mm); - ret = __gup_longterm_locked(current, current->mm, + ret = __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL, gup_flags); mmap_read_unlock(current->mm); @@ -2908,10 +2893,8 @@ int pin_user_pages_fast_only(unsigned long start, int nr_pages, EXPORT_SYMBOL_GPL(pin_user_pages_fast_only); /** - * pin_user_pages_remote() - pin pages of a remote process (task != current) + * pin_user_pages_remote() - pin pages of a remote process * - * @tsk: the task_struct to use for page fault accounting, or - * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin @@ -2932,7 +2915,7 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast_only); * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. 
*/ -long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) @@ -2942,7 +2925,7 @@ long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, return -EINVAL; gup_flags |= FOLL_PIN; - return __get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags, + return __get_user_pages_remote(mm, start, nr_pages, gup_flags, pages, vmas, locked); } EXPORT_SYMBOL(pin_user_pages_remote); @@ -2974,7 +2957,7 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages, return -EINVAL; gup_flags |= FOLL_PIN; - return __gup_longterm_locked(current, current->mm, start, nr_pages, + return __gup_longterm_locked(current->mm, start, nr_pages, pages, vmas, gup_flags); } EXPORT_SYMBOL(pin_user_pages); @@ -3019,7 +3002,7 @@ long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, return -EINVAL; gup_flags |= FOLL_PIN; - return __get_user_pages_locked(current, current->mm, start, nr_pages, + return __get_user_pages_locked(current->mm, start, nr_pages, pages, NULL, locked, gup_flags | FOLL_TOUCH); } diff --git a/mm/memory.c b/mm/memory.c index 2b7f0e00f312..228efaca75d3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4742,7 +4742,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, void *maddr; struct page *page = NULL; - ret = get_user_pages_remote(tsk, mm, addr, 1, + ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page, &vma, NULL); if (ret <= 0) { #ifndef CONFIG_HAVE_IOREMAP_PROT diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index cc85ce81914a..29c052099aff 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -105,7 +105,7 @@ static int process_vm_rw_single_vec(unsigned long addr, * current/current->mm */ mmap_read_lock(mm); - pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages, + pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages, flags, process_pages, NULL, &locked); if (locked) diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 53b3e1f5f227..dc4ecc0b2038 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -914,7 +914,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, * (represented by bprm). 'current' is the process doing * the execve(). */ - if (get_user_pages_remote(current, bprm->mm, pos, 1, + if (get_user_pages_remote(bprm->mm, pos, 1, FOLL_FORCE, &page, NULL, NULL) <= 0) return false; #else diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 390f758d5a27..dd777688d14a 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -61,7 +61,7 @@ static void async_pf_execute(struct work_struct *work) * access remotely. */ mmap_read_lock(mm); - get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL, + get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL, &locked); if (locked) mmap_read_unlock(mm); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2c2c0254c2d8..737666db02de 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1893,7 +1893,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, * not call the fault handler, so do it here. */ bool unlocked = false; - r = fixup_user_fault(current, current->mm, addr, + r = fixup_user_fault(current->mm, addr, (write_fault ? 
FAULT_FLAG_WRITE : 0), &unlocked); if (unlocked) -- cgit v1.2.3 From 466a62d7642f02f36d37d9b30c19a725538a01ca Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 11 Jun 2020 07:35:33 +0100 Subject: mfd: core: Make a best effort attempt to match devices with the correct of_nodes Currently, when a child platform device (sometimes referred to as a sub-device) is registered via the Multi-Functional Device (MFD) API, the framework attempts to match the newly registered platform device with its associated Device Tree (OF) node. Until now, the device has been allocated the first node found with an identical OF compatible string. Unfortunately, if there are, say for example '3' devices which are to be handled by the same driver and therefore have the same compatible string, each of them will be allocated a pointer to the *first* node. An example Device Tree entry might look like this: mfd_of_test { compatible = "mfd,of-test-parent"; #address-cells = <0x02>; #size-cells = <0x02>; child@aaaaaaaaaaaaaaaa { compatible = "mfd,of-test-child"; reg = <0xaaaaaaaa 0xaaaaaaaa 0 0x11>, <0xbbbbbbbb 0xbbbbbbbb 0 0x22>; }; child@cccccccc { compatible = "mfd,of-test-child"; reg = <0x00000000 0xcccccccc 0 0x33>; }; child@dddddddd00000000 { compatible = "mfd,of-test-child"; reg = <0xdddddddd 0x00000000 0 0x44>; }; }; When used with example sub-device registration like this: static const struct mfd_cell mfd_of_test_cell[] = { OF_MFD_CELL("mfd-of-test-child", NULL, NULL, 0, 0, "mfd,of-test-child"), OF_MFD_CELL("mfd-of-test-child", NULL, NULL, 0, 1, "mfd,of-test-child"), OF_MFD_CELL("mfd-of-test-child", NULL, NULL, 0, 2, "mfd,of-test-child") }; ... the current implementation will result in all devices being allocated the first OF node found containing a matching compatible string: [0.712511] mfd-of-test-child mfd-of-test-child.0: Probing platform device: 0 [0.712710] mfd-of-test-child mfd-of-test-child.0: Using OF node: child@aaaaaaaaaaaaaaaa [0.713033] mfd-of-test-child mfd-of-test-child.1: Probing platform device: 1 [0.713381] mfd-of-test-child mfd-of-test-child.1: Using OF node: child@aaaaaaaaaaaaaaaa [0.713691] mfd-of-test-child mfd-of-test-child.2: Probing platform device: 2 [0.713889] mfd-of-test-child mfd-of-test-child.2: Using OF node: child@aaaaaaaaaaaaaaaa After this patch each device will be allocated a unique OF node: [0.712511] mfd-of-test-child mfd-of-test-child.0: Probing platform device: 0 [0.712710] mfd-of-test-child mfd-of-test-child.0: Using OF node: child@aaaaaaaaaaaaaaaa [0.713033] mfd-of-test-child mfd-of-test-child.1: Probing platform device: 1 [0.713381] mfd-of-test-child mfd-of-test-child.1: Using OF node: child@cccccccc [0.713691] mfd-of-test-child mfd-of-test-child.2: Probing platform device: 2 [0.713889] mfd-of-test-child mfd-of-test-child.2: Using OF node: child@dddddddd00000000 Which is fine if all OF nodes are identical. However if we wish to apply an attribute to particular device, we really need to ensure the correct OF node will be associated with the device containing the correct address. We accomplish this by matching the device's address expressed in DT with one provided during sub-device registration. 
Like this: static const struct mfd_cell mfd_of_test_cell[] = { OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 1, "mfd,of-test-child", 0xdddddddd00000000), OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 2, "mfd,of-test-child", 0xaaaaaaaaaaaaaaaa), OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 3, "mfd,of-test-child", 0x00000000cccccccc) }; This will ensure a specific device (designated here using the platform_ids: 1, 2 and 3) is matched with a particular OF node: [0.712511] mfd-of-test-child mfd-of-test-child.0: Probing platform device: 0 [0.712710] mfd-of-test-child mfd-of-test-child.0: Using OF node: child@dddddddd00000000 [0.713033] mfd-of-test-child mfd-of-test-child.1: Probing platform device: 1 [0.713381] mfd-of-test-child mfd-of-test-child.1: Using OF node: child@aaaaaaaaaaaaaaaa [0.713691] mfd-of-test-child mfd-of-test-child.2: Probing platform device: 2 [0.713889] mfd-of-test-child mfd-of-test-child.2: Using OF node: child@cccccccc This implementation is still not infallible, hence the mention of "best effort" in the commit subject. Since we have not *insisted* on the existence of 'reg' properties (in some scenarios they just do not make sense) and no device currently uses the new 'of_reg' attribute, we have to make an on-the-fly judgement call whether to associate the OF node anyway, which we do in cases where parent drivers haven't specified a particular OF node to match to. So there is a *slight* possibility of the following result (note: the implementation here is convoluted, but it shows you one means by which this process can still break): /* * First entry will match to the first OF node with matching compatible * Second will fail, since the first took its OF node and is no longer available * Third will succeed */ static const struct mfd_cell mfd_of_test_cell[] = { OF_MFD_CELL("mfd-of-test-child", NULL, NULL, 0, 1, "mfd,of-test-child"), OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 2, "mfd,of-test-child", 0xaaaaaaaaaaaaaaaa), OF_MFD_CELL_REG("mfd-of-test-child", NULL, NULL, 0, 3, "mfd,of-test-child", 0x00000000cccccccc) }; The result: [0.753869] mfd-of-test-parent mfd_of_test: Registering 3 devices [0.756597] mfd-of-test-child: Failed to locate of_node [id: 2] [0.759999] mfd-of-test-child mfd-of-test-child.1: Probing platform device: 1 [0.760314] mfd-of-test-child mfd-of-test-child.1: Using OF node: child@aaaaaaaaaaaaaaaa [0.760908] mfd-of-test-child mfd-of-test-child.2: Probing platform device: 2 [0.761183] mfd-of-test-child mfd-of-test-child.2: No OF node associated with this device [0.761621] mfd-of-test-child mfd-of-test-child.3: Probing platform device: 3 [0.761899] mfd-of-test-child mfd-of-test-child.3: Using OF node: child@cccccccc We could code around this with some pre-parsing semantics, but the added complexity required to cover each and every corner-case is not justified. Merely patching the current failing (via this patch) already works, save for some pretty small corner-cases. Other issues should be patched in the parent drivers, which can be achieved simply by implementing OF_MFD_CELL_REG().
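For readers skimming the diff below, the matching rule reduces to comparing a cell's expected address against the first address found in a node's 'reg' property. A minimal sketch of that comparison using the usual OF address helpers (the function name and the simplified error handling are illustrative, not the exact code added by this patch):

static bool example_of_node_matches_cell(struct device_node *np, u64 of_reg)
{
	/* First address cell group of the node's 'reg' property */
	const __be32 *reg = of_get_address(np, 0, NULL, NULL);

	if (!reg)
		return false;	/* no 'reg' property to match against */

	/* Convert the big-endian address cells to a u64 and compare */
	return of_read_number(reg, of_n_addr_cells(np)) == of_reg;
}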
Signed-off-by: Lee Jones --- drivers/mfd/mfd-core.c | 95 ++++++++++++++++++++++++++++++++++++++++++------ include/linux/mfd/core.h | 10 +++++ 2 files changed, 93 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 720e5c8b1588..b201842f82ad 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -17,8 +18,17 @@ #include #include #include +#include #include +static LIST_HEAD(mfd_of_node_list); + +struct mfd_of_node_entry { + struct list_head list; + struct device *dev; + struct device_node *np; +}; + static struct device_type mfd_dev_type = { .name = "mfd_device", }; @@ -107,6 +117,55 @@ static inline void mfd_acpi_add_device(const struct mfd_cell *cell, } #endif +static int mfd_match_of_node_to_dev(struct platform_device *pdev, + struct device_node *np, + const struct mfd_cell *cell) +{ +#if IS_ENABLED(CONFIG_OF) + struct mfd_of_node_entry *of_entry; + const __be32 *reg; + u64 of_node_addr; + + /* Skip devices 'disabled' by Device Tree */ + if (!of_device_is_available(np)) + return -ENODEV; + + /* Skip if OF node has previously been allocated to a device */ + list_for_each_entry(of_entry, &mfd_of_node_list, list) + if (of_entry->np == np) + return -EAGAIN; + + if (!cell->use_of_reg) + /* No of_reg defined - allocate first free compatible match */ + goto allocate_of_node; + + /* We only care about each node's first defined address */ + reg = of_get_address(np, 0, NULL, NULL); + if (!reg) + /* OF node does not contain a 'reg' property to match to */ + return -EAGAIN; + + of_node_addr = of_read_number(reg, of_n_addr_cells(np)); + + if (cell->of_reg != of_node_addr) + /* No match */ + return -EAGAIN; + +allocate_of_node: + of_entry = kzalloc(sizeof(*of_entry), GFP_KERNEL); + if (!of_entry) + return -ENOMEM; + + of_entry->dev = &pdev->dev; + of_entry->np = np; + list_add_tail(&of_entry->list, &mfd_of_node_list); + + pdev->dev.of_node = np; + pdev->dev.fwnode = &np->fwnode; +#endif + return 0; +} + static int mfd_add_device(struct device *parent, int id, const struct mfd_cell *cell, struct resource *mem_base, @@ -115,6 +174,7 @@ static int mfd_add_device(struct device *parent, int id, struct resource *res; struct platform_device *pdev; struct device_node *np = NULL; + struct mfd_of_node_entry *of_entry, *tmp; int ret = -ENOMEM; int platform_id; int r; @@ -149,19 +209,22 @@ static int mfd_add_device(struct device *parent, int id, if (ret < 0) goto fail_res; - if (parent->of_node && cell->of_compatible) { + if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) { for_each_child_of_node(parent->of_node, np) { if (of_device_is_compatible(np, cell->of_compatible)) { - if (!of_device_is_available(np)) { - /* Ignore disabled devices error free */ - ret = 0; + ret = mfd_match_of_node_to_dev(pdev, np, cell); + if (ret == -EAGAIN) + continue; + if (ret) goto fail_alias; - } - pdev->dev.of_node = np; - pdev->dev.fwnode = &np->fwnode; + break; } } + + if (!pdev->dev.of_node) + pr_warn("%s: Failed to locate of_node [id: %d]\n", + cell->name, platform_id); } mfd_acpi_add_device(cell, pdev); @@ -170,13 +233,13 @@ static int mfd_add_device(struct device *parent, int id, ret = platform_device_add_data(pdev, cell->platform_data, cell->pdata_size); if (ret) - goto fail_alias; + goto fail_of_entry; } if (cell->properties) { ret = platform_device_add_properties(pdev, cell->properties); if (ret) - goto fail_alias; + goto fail_of_entry;
} for (r = 0; r < cell->num_resources; r++) { @@ -213,18 +276,18 @@ static int mfd_add_device(struct device *parent, int id, if (has_acpi_companion(&pdev->dev)) { ret = acpi_check_resource_conflict(&res[r]); if (ret) - goto fail_alias; + goto fail_of_entry; } } } ret = platform_device_add_resources(pdev, res, cell->num_resources); if (ret) - goto fail_alias; + goto fail_of_entry; ret = platform_device_add(pdev); if (ret) - goto fail_alias; + goto fail_of_entry; if (cell->pm_runtime_no_callbacks) pm_runtime_no_callbacks(&pdev->dev); @@ -233,6 +296,12 @@ static int mfd_add_device(struct device *parent, int id, return 0; +fail_of_entry: + list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list) + if (of_entry->dev == &pdev->dev) { + list_del(&of_entry->list); + kfree(of_entry); + } fail_alias: regulator_bulk_unregister_supply_alias(&pdev->dev, cell->parent_supplies, @@ -297,6 +366,8 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies, cell->num_parent_supplies); + kfree(cell); + platform_device_unregister(pdev); return 0; } diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index ab76cdd06199..c437a73b43a3 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -78,6 +78,16 @@ struct mfd_cell { */ const char *of_compatible; + /* + * Address as defined in Device Tree. Used to complement 'of_compatible' + * (above) when matching OF nodes with devices that have identical + * compatible strings + */ + const u64 of_reg; + + /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */ + bool use_of_reg; + /* Matches ACPI */ const struct mfd_cell_acpi_match *acpi_match; -- cgit v1.2.3 From d097965bb6682afe1f8481923b16c033f708923b Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 11 Jun 2020 10:18:43 +0100 Subject: mfd: core: Fix formatting of MFD helpers Remove unnecessary '\'s and leading tabs. This will help to clean up future diffs when subsequent changes are made. Hint: The aforementioned changes follow this patch.
Signed-off-by: Lee Jones --- include/linux/mfd/core.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index c437a73b43a3..da9b066e1594 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -26,20 +26,20 @@ .id = (_id), \ } -#define OF_MFD_CELL(_name, _res, _pdata, _pdsize,_id, _compat) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) \ +#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) -#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) \ +#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) -#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) \ +#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) -#define MFD_CELL_RES(_name, _res) \ - MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) \ +#define MFD_CELL_RES(_name, _res) \ + MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) -#define MFD_CELL_NAME(_name) \ - MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) \ +#define MFD_CELL_NAME(_name) \ + MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) struct irq_domain; struct property_entry; -- cgit v1.2.3 From 44e6171ed04a0cd0378e2503f03a444ebdd4e8e3 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Thu, 11 Jun 2020 10:12:05 +0100 Subject: mfd: core: Add OF_MFD_CELL_REG() helper Extend the current list of helpers to provide support for parent drivers wishing to match specific child devices to particular OF nodes.
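As a usage sketch, assuming a hypothetical parent driver with two children that share one compatible string but live at different unit addresses (all names and addresses below are invented for illustration):

static const struct mfd_cell example_cells[] = {
	/* The final argument is matched against each child node's
	 * first 'reg' address. */
	OF_MFD_CELL_REG("example-child", NULL, NULL, 0, 0,
			"vendor,example-child", 0x1000),
	OF_MFD_CELL_REG("example-child", NULL, NULL, 0, 1,
			"vendor,example-child", 0x2000),
};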
Signed-off-by: Lee Jones --- include/linux/mfd/core.h | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index da9b066e1594..6d68f44a26a1 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -14,7 +14,7 @@ #define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource)) -#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _match)\ +#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \ { \ .name = (_name), \ .resources = (_res), \ @@ -22,24 +22,29 @@ .platform_data = (_pdata), \ .pdata_size = (_pdsize), \ .of_compatible = (_compat), \ + .of_reg = (_of_reg), \ + .use_of_reg = (_use_of_reg), \ .acpi_match = (_match), \ .id = (_id), \ } +#define OF_MFD_CELL_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL) + #define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL) #define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match) #define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL) #define MFD_CELL_RES(_name, _res) \ - MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) + MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL) #define MFD_CELL_NAME(_name) \ - MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) + MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL) struct irq_domain; struct property_entry; -- cgit v1.2.3 From 7d2594cd1fa0b03b2746ce811926ee150a3a14fa Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Wed, 1 Jul 2020 23:23:48 +0200 Subject: mfd: smsc-ece1099: Remove driver This MFD driver has no user. The keypad driver of this device never made it into the kernel. Therefore, this driver is useless. Remove it. Signed-off-by: Michael Walle Cc: Sourav Poddar Signed-off-by: Lee Jones --- Documentation/driver-api/index.rst | 1 - Documentation/driver-api/smsc_ece1099.rst | 60 ----------------- drivers/mfd/Kconfig | 12 ---- drivers/mfd/Makefile | 1 - drivers/mfd/smsc-ece1099.c | 87 ------------------------- include/linux/mfd/smsc.h | 104 ------------------------------ 6 files changed, 265 deletions(-) delete mode 100644 Documentation/driver-api/smsc_ece1099.rst delete mode 100644 drivers/mfd/smsc-ece1099.c delete mode 100644 include/linux/mfd/smsc.h (limited to 'include/linux') diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index 6567187e7687..1397a30188eb 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -98,7 +98,6 @@ available subsections can be seen below. 
rfkill serial/index sm501 - smsc_ece1099 switchtec sync_file vfio-mediated-device diff --git a/Documentation/driver-api/smsc_ece1099.rst b/Documentation/driver-api/smsc_ece1099.rst deleted file mode 100644 index 079277421eaf..000000000000 --- a/Documentation/driver-api/smsc_ece1099.rst +++ /dev/null @@ -1,60 +0,0 @@ -================================================= -Msc Keyboard Scan Expansion/GPIO Expansion device -================================================= - -What is smsc-ece1099? ----------------------- - -The ECE1099 is a 40-Pin 3.3V Keyboard Scan Expansion -or GPIO Expansion device. The device supports a keyboard -scan matrix of 23x8. The device is connected to a Master -via the SMSC BC-Link interface or via the SMBus. -Keypad scan Input(KSI) and Keypad Scan Output(KSO) signals -are multiplexed with GPIOs. - -Interrupt generation --------------------- - -Interrupts can be generated by an edge detection on a GPIO -pin or an edge detection on one of the bus interface pins. -Interrupts can also be detected on the keyboard scan interface. -The bus interrupt pin (BC_INT# or SMBUS_INT#) is asserted if -any bit in one of the Interrupt Status registers is 1 and -the corresponding Interrupt Mask bit is also 1. - -In order for software to determine which device is the source -of an interrupt, it should first read the Group Interrupt Status Register -to determine which Status register group is a source for the interrupt. -Software should read both the Status register and the associated Mask register, -then AND the two values together. Bits that are 1 in the result of the AND -are active interrupts. Software clears an interrupt by writing a 1 to the -corresponding bit in the Status register. - -Communication Protocol ----------------------- - -- SMbus slave Interface - The host processor communicates with the ECE1099 device - through a series of read/write registers via the SMBus - interface. SMBus is a serial communication protocol between - a computer host and its peripheral devices. The SMBus data - rate is 10KHz minimum to 400 KHz maximum - -- Slave Bus Interface - The ECE1099 device SMBus implementation is a subset of the - SMBus interface to the host. The device is a slave-only SMBus device. - The implementation in the device is a subset of SMBus since it - only supports four protocols. - - The Write Byte, Read Byte, Send Byte, and Receive Byte protocols are the - only valid SMBus protocols for the device. - -- BC-LinkTM Interface - The BC-Link is a proprietary bus that allows communication - between a Master device and a Companion device. The Master - device uses this serial bus to read and write registers - located on the Companion device. The bus comprises three signals, - BC_CLK, BC_DAT and BC_INT#. The Master device always provides the - clock, BC_CLK, and the Companion device is the source for an - independent asynchronous interrupt signal, BC_INT#. The ECE1099 - supports BC-Link speeds up to 24MHz. diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index d13bb0abfd6f..33df0837ab41 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1193,18 +1193,6 @@ config MFD_SKY81452 This driver can also be built as a module. If so, the module will be called sky81452. -config MFD_SMSC - bool "SMSC ECE1099 series chips" - depends on I2C=y - select MFD_CORE - select REGMAP_I2C - help - If you say yes here you get support for the - ece1099 chips from SMSC. - - To compile this driver as a module, choose M here: the - module will be called smsc. 
- config MFD_SC27XX_PMIC tristate "Spreadtrum SC27xx PMICs" depends on ARCH_SPRD || COMPILE_TEST diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 1c8d6be3347d..a60e5f835283 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -127,7 +127,6 @@ obj-$(CONFIG_MFD_CPCAP) += motorola-cpcap.o obj-$(CONFIG_MCP) += mcp-core.o obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o -obj-$(CONFIG_MFD_SMSC) += smsc-ece1099.o obj-$(CONFIG_MCP_UCB1200_TS) += ucb1x00-ts.o ifeq ($(CONFIG_SA1100_ASSABET),y) diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c deleted file mode 100644 index 57b792eb58fd..000000000000 --- a/drivers/mfd/smsc-ece1099.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * TI SMSC MFD Driver - * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com - * - * Author: Sourav Poddar - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; GPL v2. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static const struct regmap_config smsc_regmap_config = { - .reg_bits = 8, - .val_bits = 8, - .max_register = SMSC_VEN_ID_H, - .cache_type = REGCACHE_RBTREE, -}; - -static int smsc_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) -{ - struct smsc *smsc; - int devid, rev, venid_l, venid_h; - int ret; - - smsc = devm_kzalloc(&i2c->dev, sizeof(*smsc), GFP_KERNEL); - if (!smsc) - return -ENOMEM; - - smsc->regmap = devm_regmap_init_i2c(i2c, &smsc_regmap_config); - if (IS_ERR(smsc->regmap)) - return PTR_ERR(smsc->regmap); - - i2c_set_clientdata(i2c, smsc); - smsc->dev = &i2c->dev; - -#ifdef CONFIG_OF - of_property_read_u32(i2c->dev.of_node, "clock", &smsc->clk); -#endif - - regmap_read(smsc->regmap, SMSC_DEV_ID, &devid); - regmap_read(smsc->regmap, SMSC_DEV_REV, &rev); - regmap_read(smsc->regmap, SMSC_VEN_ID_L, &venid_l); - regmap_read(smsc->regmap, SMSC_VEN_ID_H, &venid_h); - - dev_info(&i2c->dev, "SMSCxxx devid: %02x rev: %02x venid: %02x\n", - devid, rev, (venid_h << 8) | venid_l); - - ret = regmap_write(smsc->regmap, SMSC_CLK_CTRL, smsc->clk); - if (ret) - return ret; - -#ifdef CONFIG_OF - if (i2c->dev.of_node) - ret = devm_of_platform_populate(&i2c->dev); -#endif - - return ret; -} - -static const struct i2c_device_id smsc_i2c_id[] = { - { "smscece1099", 0}, - {}, -}; - -static struct i2c_driver smsc_i2c_driver = { - .driver = { - .name = "smsc", - }, - .probe = smsc_i2c_probe, - .id_table = smsc_i2c_id, -}; -builtin_i2c_driver(smsc_i2c_driver); diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h deleted file mode 100644 index 83944124e886..000000000000 --- a/include/linux/mfd/smsc.h +++ /dev/null @@ -1,104 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * SMSC ECE1099 - * - * Copyright 2012 Texas Instruments Inc. 
- * - * Author: Sourav Poddar */ - -#ifndef __LINUX_MFD_SMSC_H -#define __LINUX_MFD_SMSC_H - -#include - -#define SMSC_ID_ECE1099 1 -#define SMSC_NUM_CLIENTS 2 - -#define SMSC_BASE_ADDR 0x38 -#define OMAP_GPIO_SMSC_IRQ 151 - -#define SMSC_MAXGPIO 32 -#define SMSC_BANK(offs) ((offs) >> 3) -#define SMSC_BIT(offs) (1u << ((offs) & 0x7)) - -struct smsc { - struct device *dev; - struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS]; - struct regmap *regmap; - int clk; - /* Stored chip id */ - int id; -}; - -struct smsc_gpio; -struct smsc_keypad; - -static inline int smsc_read(struct device *child, unsigned int reg, - unsigned int *dest) -{ - struct smsc *smsc = dev_get_drvdata(child->parent); - - return regmap_read(smsc->regmap, reg, dest); -} - -static inline int smsc_write(struct device *child, unsigned int reg, - unsigned int value) -{ - struct smsc *smsc = dev_get_drvdata(child->parent); - - return regmap_write(smsc->regmap, reg, value); -} - -/* Registers for SMSC */ -#define SMSC_RESET 0xF5 -#define SMSC_GRP_INT 0xF9 -#define SMSC_CLK_CTRL 0xFA -#define SMSC_WKUP_CTRL 0xFB -#define SMSC_DEV_ID 0xFC -#define SMSC_DEV_REV 0xFD -#define SMSC_VEN_ID_L 0xFE -#define SMSC_VEN_ID_H 0xFF - -/* CLK VALUE */ -#define SMSC_CLK_VALUE 0x13 - -/* Registers for function GPIO INPUT */ -#define SMSC_GPIO_DATA_IN_START 0x00 - -/* Registers for function GPIO OUPUT */ -#define SMSC_GPIO_DATA_OUT_START 0x05 - -/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/ -#define SMSC_GPIO_INPUT_LOW 0x01 -#define SMSC_GPIO_INPUT_RISING 0x09 -#define SMSC_GPIO_INPUT_FALLING 0x11 -#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19 -#define SMSC_GPIO_OUTPUT_PP 0x21 -#define SMSC_GPIO_OUTPUT_OP 0x31 - -#define GRP_INT_STAT 0xf9 -#define SMSC_GPI_INT 0x0f -#define SMSC_CFG_START 0x0A - -/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/ -#define SMSC_GPIO_INT_STAT_START 0x32 - -/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/ -#define SMSC_GPIO_INT_MASK_START 0x37 - -/* Registers for SMSC function KEYPAD*/ -#define SMSC_KP_OUT 0x40 -#define SMSC_KP_IN 0x41 -#define SMSC_KP_INT_STAT 0x42 -#define SMSC_KP_INT_MASK 0x43 - -/* Definitions for keypad */ -#define SMSC_KP_KSO 0x70 -#define SMSC_KP_KSI 0x51 -#define SMSC_KSO_ALL_LOW 0x20 -#define SMSC_KP_SET_LOW_PWR 0x0B -#define SMSC_KP_SET_HIGH 0xFF -#define SMSC_KSO_EVAL 0x00 - -#endif /* __LINUX_MFD_SMSC_H */ -- cgit v1.2.3 From 091c6110862bce4e2380e353cb062dcb6a56bcb6 Mon Sep 17 00:00:00 2001 From: Adam Thomson Date: Mon, 13 Jul 2020 10:38:57 +0100 Subject: mfd: da9063: Fix revision handling to correctly select reg tables The current implementation checks the variant_code in the i2c_probe() function, but does so immediately after the containing struct has been initialised as all zero. This means the check for variant code will always default to using the BB tables and will never select AD. The variant code is subsequently set by device_init() and later used by the RTC, so really it's a little fortunate this mismatch works. This update adds raw I2C read access functionality to read the chip and variant/revision information (common to all revisions) so that the driver can subsequently choose the proper regmap tables for real initialisation.
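The raw access implemented below amounts to a page-select write, a register-select write and a data read combined in one I2C transfer. A simplified sketch of that pattern, assuming a single-byte read and minimal error handling (the function name is illustrative; the real code in the diff is more thorough):

static int example_paged_read(struct i2c_client *client, u8 page, u8 reg, u8 *val)
{
	/* Select the register page via the page-control register first */
	u8 page_sel[2] = { DA9063_REG_PAGE_CON,
			   page << DA9063_I2C_PAGE_SEL_SHIFT };
	struct i2c_msg msgs[3] = {
		{ .addr = client->addr, .flags = 0, .len = 2, .buf = page_sel },
		{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msgs, 3);

	if (ret < 0)
		return ret;
	return ret == 3 ? 0 : -EIO;	/* all three messages must complete */
}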
Signed-off-by: Adam Thomson Signed-off-by: Lee Jones --- drivers/mfd/da9063-core.c | 31 ------ drivers/mfd/da9063-i2c.c | 184 +++++++++++++++++++++++++++++++---- include/linux/mfd/da9063/registers.h | 15 ++- 3 files changed, 177 insertions(+), 53 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c index b125f90dd375..a353d52210a9 100644 --- a/drivers/mfd/da9063-core.c +++ b/drivers/mfd/da9063-core.c @@ -160,7 +160,6 @@ static int da9063_clear_fault_log(struct da9063 *da9063) int da9063_device_init(struct da9063 *da9063, unsigned int irq) { - int model, variant_id, variant_code; int ret; ret = da9063_clear_fault_log(da9063); @@ -171,36 +170,6 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq) da9063->irq_base = -1; da9063->chip_irq = irq; - ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model); - if (ret < 0) { - dev_err(da9063->dev, "Cannot read chip model id.\n"); - return -EIO; - } - if (model != PMIC_CHIP_ID_DA9063) { - dev_err(da9063->dev, "Invalid chip model id: 0x%02x\n", model); - return -ENODEV; - } - - ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &variant_id); - if (ret < 0) { - dev_err(da9063->dev, "Cannot read chip variant id.\n"); - return -EIO; - } - - variant_code = variant_id >> DA9063_CHIP_VARIANT_SHIFT; - - dev_info(da9063->dev, - "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n", - model, variant_id); - - if (variant_code < PMIC_DA9063_BB && variant_code != PMIC_DA9063_AD) { - dev_err(da9063->dev, - "Cannot support variant code: 0x%02X\n", variant_code); - return -ENODEV; - } - - da9063->variant_code = variant_code; - ret = da9063_irq_init(da9063); if (ret) { dev_err(da9063->dev, "Cannot initialize interrupts.\n"); diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c index 455de74c0dd2..481548948bc0 100644 --- a/drivers/mfd/da9063-i2c.c +++ b/drivers/mfd/da9063-i2c.c @@ -22,12 +22,124 @@ #include #include +/* + * Raw I2C access required for just accessing chip and variant info before we + * know which device is present. The info read from the device using this + * approach is then used to select the correct regmap tables. 
+ */ + +#define DA9063_REG_PAGE_SIZE 0x100 +#define DA9063_REG_PAGED_ADDR_MASK 0xFF + +enum da9063_page_sel_buf_fmt { + DA9063_PAGE_SEL_BUF_PAGE_REG = 0, + DA9063_PAGE_SEL_BUF_PAGE_VAL, + DA9063_PAGE_SEL_BUF_SIZE, +}; + +enum da9063_paged_read_msgs { + DA9063_PAGED_READ_MSG_PAGE_SEL = 0, + DA9063_PAGED_READ_MSG_REG_SEL, + DA9063_PAGED_READ_MSG_DATA, + DA9063_PAGED_READ_MSG_CNT, +}; + +static int da9063_i2c_blockreg_read(struct i2c_client *client, u16 addr, + u8 *buf, int count) +{ + struct i2c_msg xfer[DA9063_PAGED_READ_MSG_CNT]; + u8 page_sel_buf[DA9063_PAGE_SEL_BUF_SIZE]; + u8 page_num, paged_addr; + int ret; + + /* Determine page info based on register address */ + page_num = (addr / DA9063_REG_PAGE_SIZE); + if (page_num > 1) { + dev_err(&client->dev, "Invalid register address provided\n"); + return -EINVAL; + } + + paged_addr = (addr % DA9063_REG_PAGE_SIZE) & DA9063_REG_PAGED_ADDR_MASK; + page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_REG] = DA9063_REG_PAGE_CON; + page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_VAL] = + (page_num << DA9063_I2C_PAGE_SEL_SHIFT) & DA9063_REG_PAGE_MASK; + + /* Write reg address, page selection */ + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].flags = 0; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].len = DA9063_PAGE_SEL_BUF_SIZE; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].buf = page_sel_buf; + + /* Select register address */ + xfer[DA9063_PAGED_READ_MSG_REG_SEL].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_REG_SEL].flags = 0; + xfer[DA9063_PAGED_READ_MSG_REG_SEL].len = sizeof(paged_addr); + xfer[DA9063_PAGED_READ_MSG_REG_SEL].buf = &paged_addr; + + /* Read data */ + xfer[DA9063_PAGED_READ_MSG_DATA].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_DATA].flags = I2C_M_RD; + xfer[DA9063_PAGED_READ_MSG_DATA].len = count; + xfer[DA9063_PAGED_READ_MSG_DATA].buf = buf; + + ret = i2c_transfer(client->adapter, xfer, DA9063_PAGED_READ_MSG_CNT); + if (ret < 0) { + dev_err(&client->dev, "Paged block read failed: %d\n", ret); + return ret; + } + + if (ret != DA9063_PAGED_READ_MSG_CNT) { + dev_err(&client->dev, "Paged block read failed to complete\n"); + return -EIO; + } + + return 0; +} + +enum { + DA9063_DEV_ID_REG = 0, + DA9063_VAR_ID_REG, + DA9063_CHIP_ID_REGS, +}; + +static int da9063_get_device_type(struct i2c_client *i2c, struct da9063 *da9063) +{ + u8 buf[DA9063_CHIP_ID_REGS]; + int ret; + + ret = da9063_i2c_blockreg_read(i2c, DA9063_REG_DEVICE_ID, buf, + DA9063_CHIP_ID_REGS); + if (ret) + return ret; + + if (buf[DA9063_DEV_ID_REG] != PMIC_CHIP_ID_DA9063) { + dev_err(da9063->dev, + "Invalid chip device ID: 0x%02x\n", + buf[DA9063_DEV_ID_REG]); + return -ENODEV; + } + + dev_info(da9063->dev, + "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n", + buf[DA9063_DEV_ID_REG], buf[DA9063_VAR_ID_REG]); + + da9063->variant_code = + (buf[DA9063_VAR_ID_REG] & DA9063_VARIANT_ID_MRC_MASK) + >> DA9063_VARIANT_ID_MRC_SHIFT; + + return 0; +} + +/* + * Variant specific regmap configs + */ + static const struct regmap_range da9063_ad_readable_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_AD_REG_SECOND_D), regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_AD_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063_ad_writeable_ranges[] = { @@ -72,7 +184,7 @@ static const struct regmap_range 
da9063_bb_readable_ranges[] = { regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063_bb_writeable_ranges[] = { @@ -117,7 +229,7 @@ static const struct regmap_range da9063l_bb_readable_ranges[] = { regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063l_bb_writeable_ranges[] = { @@ -159,7 +271,7 @@ static const struct regmap_access_table da9063l_bb_volatile_table = { static const struct regmap_range_cfg da9063_range_cfg[] = { { .range_min = DA9063_REG_PAGE_CON, - .range_max = DA9063_REG_CHIP_VARIANT, + .range_max = DA9063_REG_CONFIG_ID, .selector_reg = DA9063_REG_PAGE_CON, .selector_mask = 1 << DA9063_I2C_PAGE_SEL_SHIFT, .selector_shift = DA9063_I2C_PAGE_SEL_SHIFT, @@ -173,7 +285,7 @@ static struct regmap_config da9063_regmap_config = { .val_bits = 8, .ranges = da9063_range_cfg, .num_ranges = ARRAY_SIZE(da9063_range_cfg), - .max_register = DA9063_REG_CHIP_VARIANT, + .max_register = DA9063_REG_CONFIG_ID, .cache_type = REGCACHE_RBTREE, }; @@ -199,18 +311,56 @@ static int da9063_i2c_probe(struct i2c_client *i2c, da9063->chip_irq = i2c->irq; da9063->type = id->driver_data; - if (da9063->variant_code == PMIC_DA9063_AD) { - da9063_regmap_config.rd_table = &da9063_ad_readable_table; - da9063_regmap_config.wr_table = &da9063_ad_writeable_table; - da9063_regmap_config.volatile_table = &da9063_ad_volatile_table; - } else if (da9063->type == PMIC_TYPE_DA9063L) { - da9063_regmap_config.rd_table = &da9063l_bb_readable_table; - da9063_regmap_config.wr_table = &da9063l_bb_writeable_table; - da9063_regmap_config.volatile_table = &da9063l_bb_volatile_table; - } else { - da9063_regmap_config.rd_table = &da9063_bb_readable_table; - da9063_regmap_config.wr_table = &da9063_bb_writeable_table; - da9063_regmap_config.volatile_table = &da9063_bb_volatile_table; + ret = da9063_get_device_type(i2c, da9063); + if (ret) + return ret; + + switch (da9063->type) { + case PMIC_TYPE_DA9063: + switch (da9063->variant_code) { + case PMIC_DA9063_AD: + da9063_regmap_config.rd_table = + &da9063_ad_readable_table; + da9063_regmap_config.wr_table = + &da9063_ad_writeable_table; + da9063_regmap_config.volatile_table = + &da9063_ad_volatile_table; + break; + case PMIC_DA9063_BB: + case PMIC_DA9063_CA: + da9063_regmap_config.rd_table = + &da9063_bb_readable_table; + da9063_regmap_config.wr_table = + &da9063_bb_writeable_table; + da9063_regmap_config.volatile_table = + &da9063_bb_volatile_table; + break; + default: + dev_err(da9063->dev, + "Chip variant not supported for DA9063\n"); + return -ENODEV; + } + break; + case PMIC_TYPE_DA9063L: + switch (da9063->variant_code) { + case PMIC_DA9063_BB: + case PMIC_DA9063_CA: + da9063_regmap_config.rd_table = + &da9063l_bb_readable_table; + da9063_regmap_config.wr_table = + &da9063l_bb_writeable_table; + da9063_regmap_config.volatile_table = + &da9063l_bb_volatile_table; + break; + default: + dev_err(da9063->dev, + "Chip variant not supported for DA9063L\n"); + return -ENODEV; + } + break; + default: + 
dev_err(da9063->dev, "Chip type not supported\n"); + return -ENODEV; } da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config); diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h index ba706b0e28c2..1dbabf1b3cb8 100644 --- a/include/linux/mfd/da9063/registers.h +++ b/include/linux/mfd/da9063/registers.h @@ -292,8 +292,10 @@ #define DA9063_BB_REG_GP_ID_19 0x134 /* Chip ID and variant */ -#define DA9063_REG_CHIP_ID 0x181 -#define DA9063_REG_CHIP_VARIANT 0x182 +#define DA9063_REG_DEVICE_ID 0x181 +#define DA9063_REG_VARIANT_ID 0x182 +#define DA9063_REG_CUSTOMER_ID 0x183 +#define DA9063_REG_CONFIG_ID 0x184 /* * PMIC registers bits @@ -929,9 +931,6 @@ #define DA9063_RTC_CLOCK 0x40 #define DA9063_OUT_32K_EN 0x80 -/* DA9063_REG_CHIP_VARIANT */ -#define DA9063_CHIP_VARIANT_SHIFT 4 - /* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */ #define DA9063_BIO_ILIM_MASK 0x0F #define DA9063_BMEM_ILIM_MASK 0xF0 @@ -1065,4 +1064,10 @@ #define DA9063_MON_A10_IDX_LDO9 0x04 #define DA9063_MON_A10_IDX_LDO10 0x05 +/* DA9063_REG_VARIANT_ID (addr=0x182) */ +#define DA9063_VARIANT_ID_VRC_SHIFT 0 +#define DA9063_VARIANT_ID_VRC_MASK 0x0F +#define DA9063_VARIANT_ID_MRC_SHIFT 4 +#define DA9063_VARIANT_ID_MRC_MASK 0xF0 + #endif /* _DA9063_REG_H */ -- cgit v1.2.3 From 9ece3601aed46f7b460b79cd7d60908b47b2b0d4 Mon Sep 17 00:00:00 2001 From: Adam Thomson Date: Mon, 13 Jul 2020 10:38:59 +0100 Subject: mfd: da9063: Add support for latest DA silicon revision This update adds new regmap tables to support the latest DA silicon which will automatically be selected based on the chip and variant information read from the device. Signed-off-by: Adam Thomson Signed-off-by: Lee Jones --- drivers/mfd/da9063-i2c.c | 91 ++++++++++++++++++++++++++++++++++++----- include/linux/mfd/da9063/core.h | 1 + 2 files changed, 82 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c index 481548948bc0..b8217ad303ce 100644 --- a/drivers/mfd/da9063-i2c.c +++ b/drivers/mfd/da9063-i2c.c @@ -197,7 +197,7 @@ static const struct regmap_range da9063_bb_writeable_ranges[] = { regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19), }; -static const struct regmap_range da9063_bb_volatile_ranges[] = { +static const struct regmap_range da9063_bb_da_volatile_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D), regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B), regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F), @@ -219,9 +219,9 @@ static const struct regmap_access_table da9063_bb_writeable_table = { .n_yes_ranges = ARRAY_SIZE(da9063_bb_writeable_ranges), }; -static const struct regmap_access_table da9063_bb_volatile_table = { - .yes_ranges = da9063_bb_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063_bb_volatile_ranges), +static const struct regmap_access_table da9063_bb_da_volatile_table = { + .yes_ranges = da9063_bb_da_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_bb_da_volatile_ranges), }; static const struct regmap_range da9063l_bb_readable_ranges[] = { @@ -241,7 +241,7 @@ static const struct regmap_range da9063l_bb_writeable_ranges[] = { regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19), }; -static const struct regmap_range da9063l_bb_volatile_ranges[] = { +static const struct regmap_range da9063l_bb_da_volatile_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D), regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B), regmap_reg_range(DA9063_REG_CONTROL_E, 
DA9063_REG_CONTROL_F), @@ -263,9 +263,64 @@ static const struct regmap_access_table da9063l_bb_writeable_table = { .n_yes_ranges = ARRAY_SIZE(da9063l_bb_writeable_ranges), }; -static const struct regmap_access_table da9063l_bb_volatile_table = { - .yes_ranges = da9063l_bb_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063l_bb_volatile_ranges), +static const struct regmap_access_table da9063l_bb_da_volatile_table = { + .yes_ranges = da9063l_bb_da_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_bb_da_volatile_ranges), +}; + +static const struct regmap_range da9063_da_readable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_BB_REG_SECOND_D), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), +}; + +static const struct regmap_range da9063_da_writeable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON), + regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON), + regmap_reg_range(DA9063_REG_COUNT_S, DA9063_BB_REG_ALARM_Y), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4), + regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11), +}; + +static const struct regmap_access_table da9063_da_readable_table = { + .yes_ranges = da9063_da_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_da_readable_ranges), +}; + +static const struct regmap_access_table da9063_da_writeable_table = { + .yes_ranges = da9063_da_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_da_writeable_ranges), +}; + +static const struct regmap_range da9063l_da_readable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_MON_A10_RES), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), +}; + +static const struct regmap_range da9063l_da_writeable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON), + regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4), + regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11), +}; + +static const struct regmap_access_table da9063l_da_readable_table = { + .yes_ranges = da9063l_da_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_da_readable_ranges), +}; + +static const struct regmap_access_table da9063l_da_writeable_table = { + .yes_ranges = da9063l_da_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_da_writeable_ranges), }; static const struct regmap_range_cfg da9063_range_cfg[] = { @@ -333,7 +388,15 @@ static int da9063_i2c_probe(struct i2c_client *i2c, da9063_regmap_config.wr_table = &da9063_bb_writeable_table; da9063_regmap_config.volatile_table = - &da9063_bb_volatile_table; + &da9063_bb_da_volatile_table; + break; + case PMIC_DA9063_DA: + da9063_regmap_config.rd_table = + &da9063_da_readable_table; + da9063_regmap_config.wr_table = + &da9063_da_writeable_table; + da9063_regmap_config.volatile_table = + &da9063_bb_da_volatile_table; break; default: dev_err(da9063->dev, @@ -350,7 
+413,15 @@ static int da9063_i2c_probe(struct i2c_client *i2c, da9063_regmap_config.wr_table = &da9063l_bb_writeable_table; da9063_regmap_config.volatile_table = - &da9063l_bb_volatile_table; + &da9063l_bb_da_volatile_table; + break; + case PMIC_DA9063_DA: + da9063_regmap_config.rd_table = + &da9063l_da_readable_table; + da9063_regmap_config.wr_table = + &da9063l_da_writeable_table; + da9063_regmap_config.volatile_table = + &da9063l_bb_da_volatile_table; break; default: dev_err(da9063->dev, diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h index 5cd06ab26352..fa7a43f02f27 100644 --- a/include/linux/mfd/da9063/core.h +++ b/include/linux/mfd/da9063/core.h @@ -35,6 +35,7 @@ enum da9063_variant_codes { PMIC_DA9063_AD = 0x3, PMIC_DA9063_BB = 0x5, PMIC_DA9063_CA = 0x6, + PMIC_DA9063_DA = 0x7, }; /* Interrupts */ -- cgit v1.2.3 From 23ef2b642b85f03a109fbf5958134a5e40193dbd Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:29:17 -0700 Subject: mfd: da9055: pdata.h: Drop a duplicated word Drop the repeated word "that" in a comment. Signed-off-by: Randy Dunlap Acked-by: Adam Thomson Signed-off-by: Lee Jones --- include/linux/mfd/da9055/pdata.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h index eac48e483190..d3f126990ad0 100644 --- a/include/linux/mfd/da9055/pdata.h +++ b/include/linux/mfd/da9055/pdata.h @@ -35,7 +35,7 @@ struct da9055_pdata { int *gpio_rsel; /* * Regulator mode control bits value (GPI offset) that - * that controls the regulator state, 0 if not available. + * controls the regulator state, 0 if not available. */ enum gpio_select *reg_ren; /* -- cgit v1.2.3 From e7b85500885f2a70129f5d3a72153e23b37d0fe5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 18 Jul 2020 17:29:31 -0700 Subject: mfd: max77693-private: Drop a duplicated word Drop the repeated word "in" in a comment. Signed-off-by: Randy Dunlap Signed-off-by: Lee Jones --- include/linux/mfd/max77693-private.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index e798c81aec31..311f7d3d2323 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -131,7 +131,7 @@ enum max77693_pmic_reg { #define FLASH_INT_FLED1_SHORT BIT(3) #define FLASH_INT_OVER_CURRENT BIT(4) -/* Fast charge timer in in hours */ +/* Fast charge timer in hours */ #define DEFAULT_FAST_CHARGE_TIMER 4 /* microamps */ #define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000 -- cgit v1.2.3 From 114294d276279d6cda81f9c685452239ea89cdb8 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Thu, 23 Jul 2020 11:54:58 +0100 Subject: mfd: mfd-core: Add mechanism for removal of a subset of children Currently, the only way to remove MFD children is with a call to mfd_remove_devices, which will remove all the children. Under some circumstances it is useful to remove only a subset of the child devices, for example if some additional clean-up is required between removal of certain child devices. To accomplish this, a level field is added to mfd_cell, the normal mfd_remove_devices is modified to not remove devices that are set to a higher level, and a corresponding mfd_remove_devices_late function is added to remove those children.
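A hypothetical parent driver would then mark the cell that must outlive the others with a higher level and tear down in two stages (cell names and the teardown function below are invented for illustration):

static const struct mfd_cell example_cells[] = {
	MFD_CELL_NAME("example-codec"),	/* level 0, MFD_DEP_LEVEL_NORMAL */
	{ .name = "example-irqchip", .level = MFD_DEP_LEVEL_HIGH },
};

static void example_parent_teardown(struct device *parent)
{
	mfd_remove_devices(parent);	/* removes normal-level children only */
	/* ... clean-up that must happen before the last children go ... */
	mfd_remove_devices_late(parent);	/* removes MFD_DEP_LEVEL_HIGH children */
}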
See further discussion at: https://lore.kernel.org/lkml/20200616075834.GF2608702@dell/ Suggested-by: Lee Jones Signed-off-by: Charles Keepax Signed-off-by: Lee Jones --- drivers/mfd/mfd-core.c | 16 +++++++++++++++- include/linux/mfd/core.h | 5 +++++ 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index b201842f82ad..c3651f06684f 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -356,6 +356,7 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) { struct platform_device *pdev; const struct mfd_cell *cell; + int *level = data; if (dev->type != &mfd_dev_type) return 0; @@ -363,6 +364,9 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) pdev = to_platform_device(dev); cell = mfd_get_cell(pdev); + if (level && cell->level > *level) + return 0; + regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies, cell->num_parent_supplies); @@ -372,9 +376,19 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) return 0; } +void mfd_remove_devices_late(struct device *parent) +{ + int level = MFD_DEP_LEVEL_HIGH; + + device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn); +} +EXPORT_SYMBOL(mfd_remove_devices_late); + void mfd_remove_devices(struct device *parent) { - device_for_each_child_reverse(parent, NULL, mfd_remove_devices_fn); + int level = MFD_DEP_LEVEL_NORMAL; + + device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn); } EXPORT_SYMBOL(mfd_remove_devices); diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 6d68f44a26a1..4b35baa14d30 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -46,6 +46,9 @@ #define MFD_CELL_NAME(_name) \ MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL) +#define MFD_DEP_LEVEL_NORMAL 0 +#define MFD_DEP_LEVEL_HIGH 1 + struct irq_domain; struct property_entry; @@ -63,6 +66,7 @@ struct mfd_cell_acpi_match { struct mfd_cell { const char *name; int id; + int level; int (*enable)(struct platform_device *dev); int (*disable)(struct platform_device *dev); @@ -150,6 +154,7 @@ static inline int mfd_add_hotplug_devices(struct device *parent, } extern void mfd_remove_devices(struct device *parent); +extern void mfd_remove_devices_late(struct device *parent); extern int devm_mfd_add_devices(struct device *dev, int id, const struct mfd_cell *cells, int n_devs, -- cgit v1.2.3 From 4f4ed4543e2096dc0158ff79bd6d8bc09e27fa93 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Wed, 22 Jul 2020 21:24:54 +0200 Subject: mfd: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Acked-by: Rob Herring Signed-off-by: Lee Jones --- Documentation/devicetree/bindings/mfd/twl-family.txt | 2 +- drivers/mfd/hi6421-pmic-core.c | 2 +- drivers/mfd/lp873x.c | 2 +- drivers/mfd/lp87565.c | 2 +- drivers/mfd/omap-usb-host.c | 2 +- drivers/mfd/omap-usb-tll.c | 2 +- drivers/mfd/ti_am335x_tscadc.c | 2 +- drivers/mfd/tps65086.c | 2 +- drivers/mfd/tps65217.c | 2 +- drivers/mfd/tps65218.c | 2 +- drivers/mfd/tps65912-core.c | 2 +- drivers/mfd/tps65912-i2c.c | 2 +- drivers/mfd/tps65912-spi.c | 2 +- include/linux/mfd/hi6421-pmic.h | 2 +- include/linux/mfd/lp873x.h | 2 +- include/linux/mfd/lp87565.h | 2 +- include/linux/mfd/ti_am335x_tscadc.h | 2 +- include/linux/mfd/tps65086.h | 2 +- include/linux/mfd/tps65217.h | 2 +- include/linux/mfd/tps65218.h | 2 +- include/linux/mfd/tps65912.h | 2 +- 21 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/mfd/twl-family.txt b/Documentation/devicetree/bindings/mfd/twl-family.txt index 56f244b5d8a4..c2f9302965de 100644 --- a/Documentation/devicetree/bindings/mfd/twl-family.txt +++ b/Documentation/devicetree/bindings/mfd/twl-family.txt @@ -26,7 +26,7 @@ Optional node: Example: /* * Integrated Power Management Chip - * http://www.ti.com/lit/ds/symlink/twl6030.pdf + * https://www.ti.com/lit/ds/symlink/twl6030.pdf */ twl@48 { compatible = "ti,twl6030"; diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c index edfc172b8607..eba88b80d969 100644 --- a/drivers/mfd/hi6421-pmic-core.c +++ b/drivers/mfd/hi6421-pmic-core.c @@ -5,7 +5,7 @@ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com * Copyright (c) <2013-2017> Linaro Ltd. - * http://www.linaro.org + * https://www.linaro.org * * Author: Guodong Xu */ diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c index 873c608e6a5d..858c9e0a49a4 100644 --- a/drivers/mfd/lp873x.c +++ b/drivers/mfd/lp873x.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ * * Author: Keerthy * diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c index 4a5c8ade4ae0..2268be9113f1 100644 --- a/drivers/mfd/lp87565.c +++ b/drivers/mfd/lp87565.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/ * * Author: Keerthy */ diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index aca5a160c1b2..1e6431cb8536 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -2,7 +2,7 @@ /** * omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI * - * Copyright (C) 2011-2013 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2011-2013 Texas Instruments Incorporated - https://www.ti.com * Author: Keshava Munegowda * Author: Roger Quadros */ diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index 04a444007cf4..16fad79c73f1 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -2,7 +2,7 @@ /** * omap-usb-tll.c - The USB TLL driver for OMAP EHCI & OHCI * - * Copyright (C) 2012-2013 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012-2013 Texas Instruments Incorporated - https://www.ti.com * Author: Keshava Munegowda * Author: Roger Quadros */ diff --git a/drivers/mfd/ti_am335x_tscadc.c 
b/drivers/mfd/ti_am335x_tscadc.c index 926c289cb040..0e6e25308190 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -1,7 +1,7 @@ /* * TI Touch Screen / ADC MFD driver * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c index 43119a6867fe..341466ef20cc 100644 --- a/drivers/mfd/tps65086.c +++ b/drivers/mfd/tps65086.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c index 923602599549..2d9c282ec917 100644 --- a/drivers/mfd/tps65217.c +++ b/drivers/mfd/tps65217.c @@ -3,7 +3,7 @@ * * TPS65217 chip family multi-function driver * - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index d41dd864b472..167e9fc308ef 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -1,7 +1,7 @@ /* * Driver for TPS65218 Integrated power management chipsets * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 as diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c index f33567bc428d..b55b1d5d6955 100644 --- a/drivers/mfd/tps65912-core.c +++ b/drivers/mfd/tps65912-core.c @@ -1,7 +1,7 @@ /* * Core functions for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c index 785d19f6f7c9..f7c22ea7b36c 100644 --- a/drivers/mfd/tps65912-i2c.c +++ b/drivers/mfd/tps65912-i2c.c @@ -1,7 +1,7 @@ /* * I2C access driver for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index f78be039e463..21a8d6ac5c4a 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -1,7 +1,7 @@ /* * SPI access driver for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. 
Davis * * This program is free software; you can redistribute it and/or diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h index bbc64484c021..2cadf8897c64 100644 --- a/include/linux/mfd/hi6421-pmic.h +++ b/include/linux/mfd/hi6421-pmic.h @@ -5,7 +5,7 @@ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com * Copyright (c) <2013-2014> Linaro Ltd. - * http://www.linaro.org + * https://www.linaro.org * * Author: Guodong Xu */ diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h index edbec8350a49..5546688c7da7 100644 --- a/include/linux/mfd/lp873x.h +++ b/include/linux/mfd/lp873x.h @@ -1,7 +1,7 @@ /* * Functions to access LP873X power management chip. * - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h index ce965354bbad..43716aca46fa 100644 --- a/include/linux/mfd/lp87565.h +++ b/include/linux/mfd/lp87565.h @@ -2,7 +2,7 @@ /* * Functions to access LP87565 power management chip. * - * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __LINUX_MFD_LP87565_H diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 483168403ae5..ffc091b77633 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -4,7 +4,7 @@ /* * TI Touch Screen / ADC MFD driver * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h index a228ae4c88d9..e0a417e53766 100644 --- a/include/linux/mfd/tps65086.h +++ b/include/linux/mfd/tps65086.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index b5dd108421c8..db7091824ed0 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -3,7 +3,7 @@ * * Functions to access TPS65217 power management chip. * - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index b0470c35162d..f4ca367e3473 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -3,7 +3,7 @@ * * Functions to access TPS65219 power management chip. 
* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index b25d0297ba88..7943e413deae 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis * * This program is free software; you can redistribute it and/or -- cgit v1.2.3 From 5848dc5b1b76d83599dcec1b39f188a7cbdca7e2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 14 Aug 2020 15:22:43 -0700 Subject: dma-debug: remove debug_dma_assert_idle() function This removes the code from the COW path to call debug_dma_assert_idle(), which was added many years ago. Google shows that it hasn't caught anything in the 6+ years we've had it, apart from a false positive, and Hugh just noticed how it had a very unfortunate spinlock serialization in the COW path. He fixed that issue in the previous commit (a85ffd59bd36: "dma-debug: fix debug_dma_assert_idle(), use rcu_read_lock()"), but let's see if anybody even notices when we remove this function entirely. NOTE! We keep the dma tracking infrastructure that was added by the commit that introduced it. Partly to make it easier to resurrect this debug code if we ever decide to, and partly because that tracking by pfn and offset looks quite reasonable. The problem with this debug code was simply that it was expensive and didn't seem worth it, not that it was wrong per se. Acked-by: Dan Williams Acked-by: Christoph Hellwig Signed-off-by: Linus Torvalds --- include/linux/dma-debug.h | 6 ------ kernel/dma/Kconfig | 5 ----- kernel/dma/debug.c | 46 +--------------------------------------------- mm/memory.c | 2 -- 4 files changed, 1 insertion(+), 58 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index 4208f94d93f7..7b3b04ba60f3 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -67,8 +67,6 @@ extern void debug_dma_sync_sg_for_device(struct device *dev, extern void debug_dma_dump_mappings(struct device *dev); -extern void debug_dma_assert_idle(struct page *page); - #else /* CONFIG_DMA_API_DEBUG */ static inline void dma_debug_add_bus(struct bus_type *bus) @@ -157,10 +155,6 @@ static inline void debug_dma_dump_mappings(struct device *dev) { } -static inline void debug_dma_assert_idle(struct page *page) -{ -} - #endif /* CONFIG_DMA_API_DEBUG */ #endif /* __DMA_DEBUG_H */ diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index f4770fcfa62b..5732b2b3ef17 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -186,11 +186,6 @@ config DMA_API_DEBUG drivers like double-freeing of DMA mappings or freeing mappings that were never allocated. - This also attempts to catch cases where a page owned by DMA is - accessed by the cpu in a way that could cause data corruption. For - example, this enables cow_user_page() to check that the source page is - not undergoing DMA. - This option causes a performance degradation. Use only if you want to debug device drivers and dma interactions.
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 6f4f4b9d2d03..8e9f7b301c6d 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -448,9 +448,6 @@ void debug_dma_dump_mappings(struct device *dev) * dma_active_cacheline entry to track per event. dma_map_sg(), on the * other hand, consumes a single dma_debug_entry, but inserts 'nents' * entries into the tree. - * - * At any time debug_dma_assert_idle() can be called to trigger a - * warning if any cachelines in the given page are in the active set. */ static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); static DEFINE_SPINLOCK(radix_lock); @@ -497,10 +494,7 @@ static void active_cacheline_inc_overlap(phys_addr_t cln) overlap = active_cacheline_set_overlap(cln, ++overlap); /* If we overflowed the overlap counter then we're potentially - * leaking dma-mappings. Otherwise, if maps and unmaps are - * balanced then this overflow may cause false negatives in - * debug_dma_assert_idle() as the cacheline may be marked idle - * prematurely. + * leaking dma-mappings. */ WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"), @@ -555,44 +549,6 @@ static void active_cacheline_remove(struct dma_debug_entry *entry) spin_unlock_irqrestore(&radix_lock, flags); } -/** - * debug_dma_assert_idle() - assert that a page is not undergoing dma - * @page: page to lookup in the dma_active_cacheline tree - * - * Place a call to this routine in cases where the cpu touching the page - * before the dma completes (page is dma_unmapped) will lead to data - * corruption. - */ -void debug_dma_assert_idle(struct page *page) -{ - struct dma_debug_entry *entry; - unsigned long pfn; - phys_addr_t cln; - - if (dma_debug_disabled()) - return; - - if (!page) - return; - - pfn = page_to_pfn(page); - cln = (phys_addr_t) pfn << CACHELINE_PER_PAGE_SHIFT; - - rcu_read_lock(); - if (!radix_tree_gang_lookup(&dma_active_cacheline, (void **) &entry, - cln, 1) || entry->pfn != pfn) - entry = NULL; - rcu_read_unlock(); - - if (!entry) - return; - - cln = to_cacheline_number(entry); - err_printk(entry->dev, entry, - "cpu touching an active dma mapped cacheline [cln=%pa]\n", - &cln); -} - /* * Wrapper function for adding an entry to the hash. * This function takes care of locking itself. diff --git a/mm/memory.c b/mm/memory.c index 228efaca75d3..d3c3bbd65a7e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2411,8 +2411,6 @@ static inline bool cow_user_page(struct page *dst, struct page *src, struct mm_struct *mm = vma->vm_mm; unsigned long addr = vmf->address; - debug_dma_assert_idle(src); - if (likely(src)) { copy_user_highpage(dst, src, addr, vma); return true; -- cgit v1.2.3 From 1378a5ee451a5e87d0d8dd6356a0b7844db231f6 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:23 -0700 Subject: mm: store compound_nr as well as compound_order Patch series "THP prep patches". These are some generic cleanups and improvements, which I would like merged into mmotm soon. The first one should be a performance improvement for all users of compound pages, and the others are aimed at getting code to compile away when CONFIG_TRANSPARENT_HUGEPAGE is disabled (ie small systems). Also better documented / less confusing than the current prefix mixture of compound, hpage and thp. This patch (of 7): This removes a few instructions from functions which need to know how many pages are in a compound page. The storage used is either page->mapping on 64-bit or page->index on 32-bit. 
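As a rough illustration of the idea (a toy sketch with invented names, not the kernel's actual struct page definition), the cached count occupies a word in the first tail page that would otherwise hold ->mapping or ->index:

/* Toy model only: the real struct page packs these unions far more tightly. */
struct toy_page {
	union {
		void *mapping;			/* the word reused on 64-bit */
		unsigned long index;		/* the word reused on 32-bit */
		unsigned int compound_nr;	/* cached copy of 1 << compound_order */
	};
};

/* On a compound page, the count becomes one load from the first tail page. */
static unsigned long toy_compound_nr(struct toy_page *page, int is_head)
{
	if (!is_head)
		return 1;		/* a base page is exactly one page */
	return page[1].compound_nr;	/* no shift needed any more */
}
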
Both of these are fine to overlay on tail pages. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-1-willy@infradead.org Link: http://lkml.kernel.org/r/20200629151959.15779-2-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 5 ++++- include/linux/mm_types.h | 1 + mm/page_alloc.c | 5 +++-- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index e7602a3bcef1..10b1e6e5f885 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -922,12 +922,15 @@ static inline int compound_pincount(struct page *page) static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; + page[1].compound_nr = 1U << order; } /* Returns the number of pages in this potentially compound page. */ static inline unsigned long compound_nr(struct page *page) { - return 1UL << compound_order(page); + if (!PageHead(page)) + return 1; + return page[1].compound_nr; } /* Returns the number of bytes in this potentially compound page. */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0277fbab7c93..496c3ff97cce 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -134,6 +134,7 @@ struct page { unsigned char compound_dtor; unsigned char compound_order; atomic_t compound_mapcount; + unsigned int compound_nr; /* 1 << compound_order */ }; struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8b7d0ecf30b1..0e2bab486fea 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -666,8 +666,6 @@ void prep_compound_page(struct page *page, unsigned int order) int i; int nr_pages = 1 << order; - set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); - set_compound_order(page, order); __SetPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; @@ -675,6 +673,9 @@ void prep_compound_page(struct page *page, unsigned int order) p->mapping = TAIL_MAPPING; set_compound_head(p, page); } + + set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); + set_compound_order(page, order); atomic_set(compound_mapcount_ptr(page), -1); if (hpage_pincount_available(page)) atomic_set(compound_pincount_ptr(page), 0); -- cgit v1.2.3 From 419015675fef6c4b28689bd2ace559564c2e106c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:26 -0700 Subject: mm: move page-flags include to top of file Give up on the notion that we can remove page-flags.h from mm.h. There are currently 14 inline functions which use a PageFoo function. Also, two of the files directly included by mm.h include page-flags.h themselves, and there are probably more indirect inclusions. So just include it at the top like any other header file. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. 
Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-3-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 10b1e6e5f885..fd0cd4e93029 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -668,11 +669,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma); struct mmu_gather; struct inode; -/* - * FIXME: take this include out, include page-flags.h in - * files which need it (119 of them) - */ -#include #include /* -- cgit v1.2.3 From 6ffbb45826f5d9ae09aa60cd88594b7816c96190 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:30 -0700 Subject: mm: add thp_order This function returns the order of a transparent huge page. It compiles to 0 if CONFIG_TRANSPARENT_HUGEPAGE is disabled. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-4-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 467302056e17..9521cfdf18ca 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -258,6 +258,19 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, else return NULL; } + +/** + * thp_order - Order of a transparent huge page. + * @page: Head page of a transparent huge page. + */ +static inline unsigned int thp_order(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + if (PageHead(page)) + return HPAGE_PMD_ORDER; + return 0; +} + static inline int hpage_nr_pages(struct page *page) { if (unlikely(PageTransHuge(page))) @@ -317,6 +330,12 @@ static inline struct list_head *page_deferred_list(struct page *page) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) +static inline unsigned int thp_order(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return 0; +} + static inline int hpage_nr_pages(struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); -- cgit v1.2.3 From af3bbc12df80e8c279b94c752b6edca29841f4f5 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:33 -0700 Subject: mm: add thp_size This function returns the number of bytes in a THP. It is like page_size(), but compiles to just PAGE_SIZE if CONFIG_TRANSPARENT_HUGEPAGE is disabled. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. 
Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-5-willy@infradead.org Signed-off-by: Linus Torvalds --- drivers/nvdimm/btt.c | 4 +--- drivers/nvdimm/pmem.c | 6 ++---- include/linux/huge_mm.h | 11 +++++++++++ mm/internal.h | 2 +- mm/page_io.c | 2 +- mm/page_vma_mapped.c | 4 ++-- 6 files changed, 18 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 412d21d8f643..0ff610e728ff 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1490,10 +1490,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector, { struct btt *btt = bdev->bd_disk->private_data; int rc; - unsigned int len; - len = hpage_nr_pages(page) * PAGE_SIZE; - rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector); + rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector); if (rc == 0) page_endio(page, op_is_write(op), 0); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 94790e6e0e4c..fab29b514372 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -238,11 +238,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, blk_status_t rc; if (op_is_write(op)) - rc = pmem_do_write(pmem, page, 0, sector, - hpage_nr_pages(page) * PAGE_SIZE); + rc = pmem_do_write(pmem, page, 0, sector, thp_size(page)); else - rc = pmem_do_read(pmem, page, 0, sector, - hpage_nr_pages(page) * PAGE_SIZE); + rc = pmem_do_read(pmem, page, 0, sector, thp_size(page)); /* * The ->rw_page interface is subtle and tricky. The core * retries on any error, so we can only invoke page_endio() in diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9521cfdf18ca..9b33ac774fdd 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -462,4 +462,15 @@ static inline bool thp_migration_supported(void) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +/** + * thp_size - Size of a transparent huge page. + * @page: Head page of a transparent huge page. + * + * Return: Number of bytes in this page. + */ +static inline unsigned long thp_size(struct page *page) +{ + return PAGE_SIZE << thp_order(page); +} + #endif /* _LINUX_HUGE_MM_H */ diff --git a/mm/internal.h b/mm/internal.h index d11a9a8d2135..912bb1a1c10e 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -396,7 +396,7 @@ vma_address(struct page *page, struct vm_area_struct *vma) unsigned long start, end; start = __vma_address(page, vma); - end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); + end = start + thp_size(page) - PAGE_SIZE; /* page should be within @vma mapping range */ VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma); diff --git a/mm/page_io.c b/mm/page_io.c index 9e362567d454..f5e8bec8a8c7 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -40,7 +40,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; bio->bi_end_io = end_io; - bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0); + bio_add_page(bio, page, thp_size(page), 0); } return bio; } diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 719c35246cfa..e65629c056e8 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -227,7 +227,7 @@ next_pte: if (pvmw->address >= pvmw->vma->vm_end || pvmw->address >= __vma_address(pvmw->page, pvmw->vma) + - hpage_nr_pages(pvmw->page) * PAGE_SIZE) + thp_size(pvmw->page)) return not_found(pvmw); /* Did we cross page table boundary? 
*/ if (pvmw->address % PMD_SIZE == 0) { @@ -268,7 +268,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) unsigned long start, end; start = __vma_address(page, vma); - end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); + end = start + thp_size(page) - PAGE_SIZE; if (unlikely(end < vma->vm_start || start >= vma->vm_end)) return 0; -- cgit v1.2.3 From 6c357848b44b4016ca422178aa368a7472245f6f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:37 -0700 Subject: mm: replace hpage_nr_pages with thp_nr_pages The thp prefix is more frequently used than hpage and we should be consistent between the various functions. [akpm@linux-foundation.org: fix mm/migrate.c] Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: Mike Kravetz Cc: David Hildenbrand Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-6-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 13 +++++++++---- include/linux/mm_inline.h | 6 +++--- include/linux/pagemap.h | 6 +++--- mm/compaction.c | 2 +- mm/filemap.c | 2 +- mm/gup.c | 2 +- mm/internal.h | 2 +- mm/memcontrol.c | 10 +++++----- mm/memory_hotplug.c | 7 +++---- mm/mempolicy.c | 2 +- mm/migrate.c | 18 +++++++++--------- mm/mlock.c | 9 ++++----- mm/page_io.c | 2 +- mm/page_vma_mapped.c | 2 +- mm/rmap.c | 8 ++++---- mm/swap.c | 16 ++++++++-------- mm/swap_state.c | 6 +++--- mm/swapfile.c | 2 +- mm/vmscan.c | 6 +++--- mm/workingset.c | 6 +++--- 20 files changed, 65 insertions(+), 62 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9b33ac774fdd..229f986d535a 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -271,9 +271,14 @@ static inline unsigned int thp_order(struct page *page) return 0; } -static inline int hpage_nr_pages(struct page *page) +/** + * thp_nr_pages - The number of regular pages in this huge page. + * @page: The head page of a huge page. 
+ */ +static inline int thp_nr_pages(struct page *page) { - if (unlikely(PageTransHuge(page))) + VM_BUG_ON_PGFLAGS(PageTail(page), page); + if (PageHead(page)) return HPAGE_PMD_NR; return 1; } @@ -336,9 +341,9 @@ static inline unsigned int thp_order(struct page *page) return 0; } -static inline int hpage_nr_pages(struct page *page) +static inline int thp_nr_pages(struct page *page) { - VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PGFLAGS(PageTail(page), page); return 1; } diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 219bef41d87c..8fc71e9d7bb0 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -48,14 +48,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec, static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); } static __always_inline void add_page_to_lru_list_tail(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add_tail(&page->lru, &lruvec->lists[lru]); } @@ -63,7 +63,7 @@ static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { list_del(&page->lru); - update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page)); } /** diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d1f4eff605ad..7de11dcd534d 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -381,7 +381,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) if (PageHuge(head)) return head; - return head + (index & (hpage_nr_pages(head) - 1)); + return head + (index & (thp_nr_pages(head) - 1)); } struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); @@ -773,7 +773,7 @@ static inline struct page *readahead_page(struct readahead_control *rac) page = xa_load(&rac->mapping->i_pages, rac->_index); VM_BUG_ON_PAGE(!PageLocked(page), page); - rac->_batch_count = hpage_nr_pages(page); + rac->_batch_count = thp_nr_pages(page); return page; } @@ -796,7 +796,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac, VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageTail(page), page); array[i++] = page; - rac->_batch_count += hpage_nr_pages(page); + rac->_batch_count += thp_nr_pages(page); /* * The page cache isn't using multi-index entries yet, diff --git a/mm/compaction.c b/mm/compaction.c index b89581bf859c..176dcded298e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1009,7 +1009,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, del_page_from_lru_list(page, lruvec, page_lru(page)); mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), - hpage_nr_pages(page)); + thp_nr_pages(page)); isolate_success: list_add(&page->lru, &cc->migratepages); diff --git a/mm/filemap.c b/mm/filemap.c index 8e75bce0346d..653190943aa7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -198,7 +198,7 @@ static void unaccount_page_cache_page(struct address_space *mapping, if (PageHuge(page)) return; - nr = hpage_nr_pages(page); + nr = thp_nr_pages(page); 
__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); if (PageSwapBacked(page)) { diff --git a/mm/gup.c b/mm/gup.c index 39e58df6925d..ae096ea7583f 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1637,7 +1637,7 @@ check_again: mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_lru(head), - hpage_nr_pages(head)); + thp_nr_pages(head)); } } } diff --git a/mm/internal.h b/mm/internal.h index 912bb1a1c10e..10c677655912 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -369,7 +369,7 @@ extern void clear_page_mlock(struct page *page); static inline void mlock_migrate_page(struct page *newpage, struct page *page) { if (TestClearPageMlocked(page)) { - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); /* Holding pmd lock, no change in irq context: __mod is safe */ __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9d87082e64aa..b807952b4d43 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5589,7 +5589,7 @@ static int mem_cgroup_move_account(struct page *page, { struct lruvec *from_vec, *to_vec; struct pglist_data *pgdat; - unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; + unsigned int nr_pages = compound ? thp_nr_pages(page) : 1; int ret; VM_BUG_ON(from == to); @@ -6682,7 +6682,7 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root, */ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { - unsigned int nr_pages = hpage_nr_pages(page); + unsigned int nr_pages = thp_nr_pages(page); struct mem_cgroup *memcg = NULL; int ret = 0; @@ -6912,7 +6912,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) return; /* Force-charge the new page. The old one will be freed soon */ - nr_pages = hpage_nr_pages(newpage); + nr_pages = thp_nr_pages(newpage); page_counter_charge(&memcg->memory, nr_pages); if (do_memsw_account()) @@ -7114,7 +7114,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * ancestor for the swap instead and transfer the memory+swap charge. 
*/ swap_memcg = mem_cgroup_id_get_online(memcg); - nr_entries = hpage_nr_pages(page); + nr_entries = thp_nr_pages(page); /* Get references for the tail pages, too */ if (nr_entries > 1) mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); @@ -7158,7 +7158,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) */ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) { - unsigned int nr_pages = hpage_nr_pages(page); + unsigned int nr_pages = thp_nr_pages(page); struct page_counter *counter; struct mem_cgroup *memcg; unsigned short oldid; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index c32ead89c911..e9d5ab5d3ca0 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1299,7 +1299,7 @@ static int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; - struct page *page; + struct page *page, *head; int ret = 0; LIST_HEAD(source); @@ -1307,15 +1307,14 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); + head = compound_head(page); if (PageHuge(page)) { - struct page *head = compound_head(page); pfn = page_to_pfn(head) + compound_nr(head) - 1; isolate_huge_page(head, &source); continue; } else if (PageTransHuge(page)) - pfn = page_to_pfn(compound_head(page)) - + hpage_nr_pages(page) - 1; + pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; /* * HWPoison pages have elevated reference counts so the migration would diff --git a/mm/mempolicy.c b/mm/mempolicy.c index afaa09ff9f6c..eddbe4e56c73 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1049,7 +1049,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist, list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_lru(head), - hpage_nr_pages(head)); + thp_nr_pages(head)); } else if (flags & MPOL_MF_STRICT) { /* * Non-movable page may reach here. And, there may be diff --git a/mm/migrate.c b/mm/migrate.c index 5053439be6ab..34a842a8eb6a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l) put_page(page); } else { mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_lru(page), -hpage_nr_pages(page)); + page_is_file_lru(page), -thp_nr_pages(page)); putback_lru_page(page); } } @@ -386,7 +386,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page) */ expected_count += is_device_private_page(page); if (mapping) - expected_count += hpage_nr_pages(page) + page_has_private(page); + expected_count += thp_nr_pages(page) + page_has_private(page); return expected_count; } @@ -441,7 +441,7 @@ int migrate_page_move_mapping(struct address_space *mapping, */ newpage->index = page->index; newpage->mapping = page->mapping; - page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ + page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */ if (PageSwapBacked(page)) { __SetPageSwapBacked(newpage); if (PageSwapCache(page)) { @@ -474,7 +474,7 @@ int migrate_page_move_mapping(struct address_space *mapping, * to one less reference. * We know this isn't the last reference. 
*/ - page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); + page_ref_unfreeze(page, expected_count - thp_nr_pages(page)); xas_unlock(&xas); /* Leave irq disabled to prevent preemption while updating stats */ @@ -591,7 +591,7 @@ static void copy_huge_page(struct page *dst, struct page *src) } else { /* thp page */ BUG_ON(!PageTransHuge(src)); - nr_pages = hpage_nr_pages(src); + nr_pages = thp_nr_pages(src); } for (i = 0; i < nr_pages; i++) { @@ -1213,7 +1213,7 @@ out: */ if (likely(!__PageMovable(page))) mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_lru(page), -hpage_nr_pages(page)); + page_is_file_lru(page), -thp_nr_pages(page)); } /* @@ -1446,7 +1446,7 @@ retry: * during migration. */ is_thp = PageTransHuge(page); - nr_subpages = hpage_nr_pages(page); + nr_subpages = thp_nr_pages(page); cond_resched(); if (PageHuge(page)) @@ -1670,7 +1670,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_lru(head), - hpage_nr_pages(head)); + thp_nr_pages(head)); } out_putpage: /* @@ -2034,7 +2034,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) page_lru = page_is_file_lru(page); mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, - hpage_nr_pages(page)); + thp_nr_pages(page)); /* * Isolating the page has taken another reference, so the diff --git a/mm/mlock.c b/mm/mlock.c index f8736136fad7..93ca2bf30b4f 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -61,8 +61,7 @@ void clear_page_mlock(struct page *page) if (!TestClearPageMlocked(page)) return; - mod_zone_page_state(page_zone(page), NR_MLOCK, - -hpage_nr_pages(page)); + mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page)); count_vm_event(UNEVICTABLE_PGCLEARED); /* * The previous TestClearPageMlocked() corresponds to the smp_mb() @@ -95,7 +94,7 @@ void mlock_vma_page(struct page *page) if (!TestSetPageMlocked(page)) { mod_zone_page_state(page_zone(page), NR_MLOCK, - hpage_nr_pages(page)); + thp_nr_pages(page)); count_vm_event(UNEVICTABLE_PGMLOCKED); if (!isolate_lru_page(page)) putback_lru_page(page); @@ -192,7 +191,7 @@ unsigned int munlock_vma_page(struct page *page) /* * Serialize with any parallel __split_huge_page_refcount() which * might otherwise copy PageMlocked to part of the tail pages before - * we clear it in the head page. It also stabilizes hpage_nr_pages(). + * we clear it in the head page. It also stabilizes thp_nr_pages(). 
*/ spin_lock_irq(&pgdat->lru_lock); @@ -202,7 +201,7 @@ unsigned int munlock_vma_page(struct page *page) goto unlock_out; } - nr_pages = hpage_nr_pages(page); + nr_pages = thp_nr_pages(page); __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); if (__munlock_isolate_lru_page(page, true)) { diff --git a/mm/page_io.c b/mm/page_io.c index f5e8bec8a8c7..454b70d8cda7 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -274,7 +274,7 @@ static inline void count_swpout_vm_event(struct page *page) if (unlikely(PageTransHuge(page))) count_vm_event(THP_SWPOUT); #endif - count_vm_events(PSWPOUT, hpage_nr_pages(page)); + count_vm_events(PSWPOUT, thp_nr_pages(page)); } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index e65629c056e8..5e77b269c330 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -61,7 +61,7 @@ static inline bool pfn_is_match(struct page *page, unsigned long pfn) return page_pfn == pfn; /* THP can be referenced by any subpage */ - return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page); + return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page); } /** diff --git a/mm/rmap.c b/mm/rmap.c index 6cce9ef06753..4ace1e32f705 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1130,7 +1130,7 @@ void do_page_add_anon_rmap(struct page *page, } if (first) { - int nr = compound ? hpage_nr_pages(page) : 1; + int nr = compound ? thp_nr_pages(page) : 1; /* * We use the irq-unsafe __{inc|mod}_zone_page_stat because * these counters are not modified in interrupt context, and @@ -1169,7 +1169,7 @@ void do_page_add_anon_rmap(struct page *page, void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, bool compound) { - int nr = compound ? hpage_nr_pages(page) : 1; + int nr = compound ? 
thp_nr_pages(page) : 1; VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); __SetPageSwapBacked(page); @@ -1860,7 +1860,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, return; pgoff_start = page_to_pgoff(page); - pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; + pgoff_end = pgoff_start + thp_nr_pages(page) - 1; anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff_start, pgoff_end) { struct vm_area_struct *vma = avc->vma; @@ -1913,7 +1913,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, return; pgoff_start = page_to_pgoff(page); - pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; + pgoff_end = pgoff_start + thp_nr_pages(page) - 1; if (!locked) i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, diff --git a/mm/swap.c b/mm/swap.c index 9285e60c7d6e..d26c22baf7c5 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, del_page_from_lru_list(page, lruvec, page_lru(page)); ClearPageActive(page); add_page_to_lru_list_tail(page, lruvec, page_lru(page)); - (*pgmoved) += hpage_nr_pages(page); + (*pgmoved) += thp_nr_pages(page); } } @@ -312,7 +312,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages) void lru_note_cost_page(struct page *page) { lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)), - page_is_file_lru(page), hpage_nr_pages(page)); + page_is_file_lru(page), thp_nr_pages(page)); } static void __activate_page(struct page *page, struct lruvec *lruvec, @@ -320,7 +320,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec, { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { int lru = page_lru_base_type(page); - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); del_page_from_lru_list(page, lruvec, lru); SetPageActive(page); @@ -500,7 +500,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page, * lock is held(spinlock), which implies preemption disabled. 
*/ __mod_zone_page_state(page_zone(page), NR_MLOCK, - hpage_nr_pages(page)); + thp_nr_pages(page)); count_vm_event(UNEVICTABLE_PGMLOCKED); } lru_cache_add(page); @@ -532,7 +532,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, { int lru; bool active; - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); if (!PageLRU(page)) return; @@ -580,7 +580,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, { if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { int lru = page_lru_base_type(page); - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE); ClearPageActive(page); @@ -599,7 +599,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page) && !PageUnevictable(page)) { bool active = PageActive(page); - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); del_page_from_lru_list(page, lruvec, LRU_INACTIVE_ANON + active); @@ -972,7 +972,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, { enum lru_list lru; int was_unevictable = TestClearPageUnevictable(page); - int nr_pages = hpage_nr_pages(page); + int nr_pages = thp_nr_pages(page); VM_BUG_ON_PAGE(PageLRU(page), page); diff --git a/mm/swap_state.c b/mm/swap_state.c index b73aabdfd35a..d9d4a49f3241 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -130,7 +130,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, struct address_space *address_space = swap_address_space(entry); pgoff_t idx = swp_offset(entry); XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); - unsigned long i, nr = hpage_nr_pages(page); + unsigned long i, nr = thp_nr_pages(page); void *old; VM_BUG_ON_PAGE(!PageLocked(page), page); @@ -183,7 +183,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry, void *shadow) { struct address_space *address_space = swap_address_space(entry); - int i, nr = hpage_nr_pages(page); + int i, nr = thp_nr_pages(page); pgoff_t idx = swp_offset(entry); XA_STATE(xas, &address_space->i_pages, idx); @@ -278,7 +278,7 @@ void delete_from_swap_cache(struct page *page) xa_unlock_irq(&address_space->i_pages); put_swap_page(page, entry); - page_ref_sub(page, hpage_nr_pages(page)); + page_ref_sub(page, thp_nr_pages(page)); } void clear_shadow_from_swap_cache(int type, unsigned long begin, diff --git a/mm/swapfile.c b/mm/swapfile.c index e653eea1eb88..eb410d3c8de8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1370,7 +1370,7 @@ void put_swap_page(struct page *page, swp_entry_t entry) unsigned char *map; unsigned int i, free_entries = 0; unsigned char val; - int size = swap_entry_size(hpage_nr_pages(page)); + int size = swap_entry_size(thp_nr_pages(page)); si = _swap_info_get(entry); if (!si) diff --git a/mm/vmscan.c b/mm/vmscan.c index 738115ed75e2..99e1796eb833 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1354,7 +1354,7 @@ static unsigned int shrink_page_list(struct list_head *page_list, case PAGE_ACTIVATE: goto activate_locked; case PAGE_SUCCESS: - stat->nr_pageout += hpage_nr_pages(page); + stat->nr_pageout += thp_nr_pages(page); if (PageWriteback(page)) goto keep; @@ -1862,7 +1862,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, SetPageLRU(page); lru = page_lru(page); - nr_pages = hpage_nr_pages(page); + nr_pages = thp_nr_pages(page); 
update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); list_move(&page->lru, &lruvec->lists[lru]); @@ -2065,7 +2065,7 @@ static void shrink_active_list(unsigned long nr_to_scan, * so we ignore them here. */ if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) { - nr_rotated += hpage_nr_pages(page); + nr_rotated += thp_nr_pages(page); list_add(&page->lru, &l_active); continue; } diff --git a/mm/workingset.c b/mm/workingset.c index 8cbe4e3cbe5c..92e66113a577 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -263,7 +263,7 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) VM_BUG_ON_PAGE(!PageLocked(page), page); lruvec = mem_cgroup_lruvec(target_memcg, pgdat); - workingset_age_nonresident(lruvec, hpage_nr_pages(page)); + workingset_age_nonresident(lruvec, thp_nr_pages(page)); /* XXX: target_memcg can be NULL, go through lruvec */ memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); eviction = atomic_long_read(&lruvec->nonresident_age); @@ -374,7 +374,7 @@ void workingset_refault(struct page *page, void *shadow) goto out; SetPageActive(page); - workingset_age_nonresident(lruvec, hpage_nr_pages(page)); + workingset_age_nonresident(lruvec, thp_nr_pages(page)); inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file); /* Page was active prior to eviction */ @@ -411,7 +411,7 @@ void workingset_activation(struct page *page) if (!mem_cgroup_disabled() && !memcg) goto out; lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); - workingset_age_nonresident(lruvec, hpage_nr_pages(page)); + workingset_age_nonresident(lruvec, thp_nr_pages(page)); out: rcu_read_unlock(); } -- cgit v1.2.3 From 2be1d71841b7ecfb01ce4c59f6e1d082c3c18a8a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:40 -0700 Subject: mm: add thp_head This is like compound_head() but compiles away when CONFIG_TRANSPARENT_HUGEPAGE is not enabled. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: Mike Kravetz Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-7-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 229f986d535a..8a8bc46a2432 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -259,6 +259,15 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, return NULL; } +/** + * thp_head - Head page of a transparent huge page. + * @page: Any page (tail, head or regular) found in the page cache. + */ +static inline struct page *thp_head(struct page *page) +{ + return compound_head(page); +} + /** * thp_order - Order of a transparent huge page. * @page: Head page of a transparent huge page. 
@@ -335,6 +344,12 @@ static inline struct list_head *page_deferred_list(struct page *page) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) +static inline struct page *thp_head(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return page; +} + static inline unsigned int thp_order(struct page *page) { VM_BUG_ON_PGFLAGS(PageTail(page), page); return 0; } -- cgit v1.2.3 From ee6c400f5c05459b8c5f2884a176a1287ce2f68f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 14 Aug 2020 17:30:43 -0700 Subject: mm: introduce offset_in_thp Mirroring offset_in_page(), this gives you the offset within this particular page, no matter what size page it is. It optimises down to offset_in_page() if CONFIG_TRANSPARENT_HUGEPAGE is not set. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Reviewed-by: David Hildenbrand Reviewed-by: William Kucharski Reviewed-by: Zi Yan Cc: Mike Kravetz Cc: "Kirill A. Shutemov" Link: http://lkml.kernel.org/r/20200629151959.15779-8-willy@infradead.org Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index fd0cd4e93029..2e7ec3ee34cf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1594,6 +1594,7 @@ static inline void clear_page_pfmemalloc(struct page *page) extern void pagefault_out_of_memory(void); #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) +#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1)) /* * Flags passed to show_mem() and show_free_areas() to suppress output in -- cgit v1.2.3 From 88db0aa2421666d2f73486d15b239a4521983d55 Mon Sep 17 00:00:00 2001 From: Xiaoming Ni Date: Fri, 14 Aug 2020 17:31:07 -0700 Subject: all arch: remove system call sys_sysctl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since commit 61a47c1ad3a4dc ("sysctl: Remove the sysctl system call"), sys_sysctl is actually unavailable: any input can only return an error. We have been warning about people using the sysctl system call for years and believe there are no more users. Even if there are users of this interface, if they have not complained or fixed their code by now, they probably are not going to, so there is no point in warning them any longer. So completely remove sys_sysctl on all architectures. [nixiaoming@huawei.com: s390: fix build error for sys_call_table_emu] Link: http://lkml.kernel.org/r/20200618141426.16884-1-nixiaoming@huawei.com Signed-off-by: Xiaoming Ni Signed-off-by: Andrew Morton Acked-by: Will Deacon [arm/arm64] Acked-by: "Eric W. Biederman" Cc: Aleksa Sarai Cc: Alexander Shishkin Cc: Al Viro Cc: Andi Kleen Cc: Andrew Morton Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Bin Meng Cc: Borislav Petkov Cc: Brian Gerst Cc: Catalin Marinas Cc: chenzefeng Cc: Christian Borntraeger Cc: Christian Brauner Cc: Chris Zankel Cc: David Howells Cc: David S. Miller Cc: Diego Elio Pettenò Cc: Dmitry Vyukov Cc: Dominik Brodowski Cc: Fenghua Yu Cc: Geert Uytterhoeven Cc: Heiko Carstens Cc: Helge Deller Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Iurii Zaikin Cc: Ivan Kokshaysky Cc: James Bottomley Cc: Jens Axboe Cc: Jiri Olsa Cc: Kars de Jong Cc: Kees Cook Cc: Krzysztof Kozlowski Cc: Luis Chamberlain Cc: Marco Elver Cc: Mark Rutland Cc: Martin K.
Petersen Cc: Masahiro Yamada Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Michal Simek Cc: Miklos Szeredi Cc: Minchan Kim Cc: Namhyung Kim Cc: Naveen N. Rao Cc: Nick Piggin Cc: Oleg Nesterov Cc: Olof Johansson Cc: Paul Burton Cc: "Paul E. McKenney" Cc: Paul Mackerras Cc: Peter Zijlstra (Intel) Cc: Randy Dunlap Cc: Ravi Bangoria Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sami Tolvanen Cc: Sargun Dhillon Cc: Stephen Rothwell Cc: Sudeep Holla Cc: Sven Schnelle Cc: Thiago Jung Bauermann Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Tony Luck Cc: Vasily Gorbik Cc: Vlastimil Babka Cc: Yoshinori Sato Cc: Zhou Yanjie Link: http://lkml.kernel.org/r/20200616030734.87257-1-nixiaoming@huawei.com Signed-off-by: Linus Torvalds --- arch/alpha/kernel/syscalls/syscall.tbl | 2 +- arch/arm/configs/am200epdkit_defconfig | 1 - arch/arm/tools/syscall.tbl | 2 +- arch/arm64/include/asm/unistd32.h | 4 +- arch/ia64/kernel/syscalls/syscall.tbl | 2 +- arch/m68k/kernel/syscalls/syscall.tbl | 2 +- arch/microblaze/kernel/syscalls/syscall.tbl | 2 +- arch/mips/configs/cu1000-neo_defconfig | 1 - arch/mips/kernel/syscalls/syscall_n32.tbl | 2 +- arch/mips/kernel/syscalls/syscall_n64.tbl | 2 +- arch/mips/kernel/syscalls/syscall_o32.tbl | 2 +- arch/parisc/kernel/syscalls/syscall.tbl | 2 +- arch/powerpc/kernel/syscalls/syscall.tbl | 2 +- arch/s390/kernel/syscalls/syscall.tbl | 2 +- arch/sh/configs/dreamcast_defconfig | 1 - arch/sh/configs/espt_defconfig | 1 - arch/sh/configs/hp6xx_defconfig | 1 - arch/sh/configs/landisk_defconfig | 1 - arch/sh/configs/lboxre2_defconfig | 1 - arch/sh/configs/microdev_defconfig | 1 - arch/sh/configs/migor_defconfig | 1 - arch/sh/configs/r7780mp_defconfig | 1 - arch/sh/configs/r7785rp_defconfig | 1 - arch/sh/configs/rts7751r2d1_defconfig | 1 - arch/sh/configs/rts7751r2dplus_defconfig | 1 - arch/sh/configs/se7206_defconfig | 1 - arch/sh/configs/se7343_defconfig | 1 - arch/sh/configs/se7619_defconfig | 1 - arch/sh/configs/se7705_defconfig | 1 - arch/sh/configs/se7750_defconfig | 1 - arch/sh/configs/se7751_defconfig | 1 - arch/sh/configs/secureedge5410_defconfig | 1 - arch/sh/configs/sh03_defconfig | 1 - arch/sh/configs/sh7710voipgw_defconfig | 1 - arch/sh/configs/sh7757lcr_defconfig | 1 - arch/sh/configs/sh7763rdp_defconfig | 1 - arch/sh/configs/shmin_defconfig | 1 - arch/sh/configs/titan_defconfig | 1 - arch/sh/kernel/syscalls/syscall.tbl | 2 +- arch/sparc/kernel/syscalls/syscall.tbl | 2 +- arch/x86/entry/syscalls/syscall_32.tbl | 2 +- arch/x86/entry/syscalls/syscall_64.tbl | 2 +- arch/xtensa/kernel/syscalls/syscall.tbl | 2 +- include/linux/compat.h | 1 - include/linux/syscalls.h | 2 - include/linux/sysctl.h | 6 +- kernel/Makefile | 2 +- kernel/sys_ni.c | 1 - kernel/sysctl_binary.c | 171 --------------------- tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 2 +- tools/perf/arch/s390/entry/syscalls/syscall.tbl | 2 +- tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 2 +- 52 files changed, 24 insertions(+), 227 deletions(-) delete mode 100644 kernel/sysctl_binary.c (limited to 'include/linux') diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index a28fb211881d..ec8bed9e7b75 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -249,7 +249,7 @@ 316 common mlockall sys_mlockall 317 common munlockall sys_munlockall 318 common sysinfo sys_sysinfo -319 common _sysctl sys_sysctl +319 common _sysctl sys_ni_syscall # 320 was sys_idle 321 common oldumount sys_oldumount 322 common 
swapon sys_swapon diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig index f56ac394caf1..4e49d6cb2f62 100644 --- a/arch/arm/configs/am200epdkit_defconfig +++ b/arch/arm/configs/am200epdkit_defconfig @@ -3,7 +3,6 @@ CONFIG_LOCALVERSION="gum" CONFIG_SYSVIPC=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 7e8ee4adf269..171077cbf419 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -162,7 +162,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 17e81bd9a2d3..734860ac7cf9 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -308,8 +308,8 @@ __SYSCALL(__NR_writev, compat_sys_writev) __SYSCALL(__NR_getsid, sys_getsid) #define __NR_fdatasync 148 __SYSCALL(__NR_fdatasync, sys_fdatasync) -#define __NR__sysctl 149 -__SYSCALL(__NR__sysctl, compat_sys_sysctl) + /* 149 was sys_sysctl */ +__SYSCALL(149, sys_ni_syscall) #define __NR_mlock 150 __SYSCALL(__NR_mlock, sys_mlock) #define __NR_munlock 151 diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index ced9c83e47c9..f52a41f4c340 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -135,7 +135,7 @@ 123 common writev sys_writev 124 common pread64 sys_pread64 125 common pwrite64 sys_pwrite64 -126 common _sysctl sys_sysctl +126 common _sysctl sys_ni_syscall 127 common mmap sys_mmap 128 common munmap sys_munmap 129 common mlock sys_mlock diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 1a4822de7292..81fc799d8392 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index a3f4be8e7238..b4e263916f41 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig index 6b471cdb16cf..e924c817f73d 100644 --- a/arch/mips/configs/cu1000-neo_defconfig +++ b/arch/mips/configs/cu1000-neo_defconfig @@ -17,7 +17,6 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 6b4ee92e3aed..f9df9edb67a4 100644 --- 
a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -159,7 +159,7 @@ 149 n32 munlockall sys_munlockall 150 n32 vhangup sys_vhangup 151 n32 pivot_root sys_pivot_root -152 n32 _sysctl compat_sys_sysctl +152 n32 _sysctl sys_ni_syscall 153 n32 prctl sys_prctl 154 n32 adjtimex sys_adjtimex_time32 155 n32 setrlimit compat_sys_setrlimit diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 391acbf425a0..557f9954a2b9 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -159,7 +159,7 @@ 149 n64 munlockall sys_munlockall 150 n64 vhangup sys_vhangup 151 n64 pivot_root sys_pivot_root -152 n64 _sysctl sys_sysctl +152 n64 _sysctl sys_ni_syscall 153 n64 prctl sys_prctl 154 n64 adjtimex sys_adjtimex 155 n64 setrlimit sys_setrlimit diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 5727c5187508..195b43cf27c8 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -164,7 +164,7 @@ 150 o32 unused150 sys_ni_syscall 151 o32 getsid sys_getsid 152 o32 fdatasync sys_fdatasync -153 o32 _sysctl sys_sysctl compat_sys_sysctl +153 o32 _sysctl sys_ni_syscall 154 o32 mlock sys_mlock 155 o32 munlock sys_munlock 156 o32 mlockall sys_mlockall diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 292baabefade..def64d221cd4 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -163,7 +163,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index be9f74546068..c2d737ff2e7b 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -197,7 +197,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 nospu _sysctl sys_sysctl compat_sys_sysctl +149 nospu _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index f1fda4375526..10456bc936fb 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -138,7 +138,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid sys_getsid 148 common fdatasync sys_fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl - - 150 common mlock sys_mlock sys_mlock 151 common munlock sys_munlock sys_munlock 152 common mlockall sys_mlockall sys_mlockall diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig index ae067e0b15e3..6a82c7b8ff32 100644 --- a/arch/sh/configs/dreamcast_defconfig +++ b/arch/sh/configs/dreamcast_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_MODULES=y diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig index a5b865a75d22..9a988c347e9d 100644 --- a/arch/sh/configs/espt_defconfig 
+++ b/arch/sh/configs/espt_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig index a92db6694ce2..70e6605d7f7e 100644 --- a/arch/sh/configs/hp6xx_defconfig +++ b/arch/sh/configs/hp6xx_defconfig @@ -3,7 +3,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH7709=y diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig index 567af752b1bb..ba6ec042606f 100644 --- a/arch/sh/configs/landisk_defconfig +++ b/arch/sh/configs/landisk_defconfig @@ -1,6 +1,5 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig index 10f6d371ce2c..05e4ac6fed5f 100644 --- a/arch/sh/configs/lboxre2_defconfig +++ b/arch/sh/configs/lboxre2_defconfig @@ -1,6 +1,5 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/microdev_defconfig b/arch/sh/configs/microdev_defconfig index ed84d1303acf..c65667d00313 100644 --- a/arch/sh/configs/microdev_defconfig +++ b/arch/sh/configs/microdev_defconfig @@ -2,7 +2,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set CONFIG_CPU_SUBTYPE_SH4_202=y diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig index 37e9521a99e5..a24cf8cd2cea 100644 --- a/arch/sh/configs/migor_defconfig +++ b/arch/sh/configs/migor_defconfig @@ -4,7 +4,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig index c97ec60cff27..e922659fdadb 100644 --- a/arch/sh/configs/r7780mp_defconfig +++ b/arch/sh/configs/r7780mp_defconfig @@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set CONFIG_SLAB=y diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig index 55fce65eb454..5978866358ec 100644 --- a/arch/sh/configs/r7785rp_defconfig +++ b/arch/sh/configs/r7785rp_defconfig @@ -7,7 +7,6 @@ CONFIG_RCU_TRACE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig index 6a3cfe08295f..fc9c22152b08 100644 --- a/arch/sh/configs/rts7751r2d1_defconfig +++ b/arch/sh/configs/rts7751r2d1_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig index 2b3d7d280672..ff3fd6787fd6 
100644 --- a/arch/sh/configs/rts7751r2dplus_defconfig +++ b/arch/sh/configs/rts7751r2dplus_defconfig @@ -1,7 +1,6 @@ CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig index 21a43f14ffac..ff5bb4489922 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig @@ -18,7 +18,6 @@ CONFIG_USER_NS=y CONFIG_PID_NS=y CONFIG_BLK_DEV_INITRD=y # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_ALL=y # CONFIG_ELF_CORE is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig index 4e794e719a28..5d6c19338ebf 100644 --- a/arch/sh/configs/se7343_defconfig +++ b/arch/sh/configs/se7343_defconfig @@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig index 3264415a5931..71a672c30716 100644 --- a/arch/sh/configs/se7619_defconfig +++ b/arch/sh/configs/se7619_defconfig @@ -1,7 +1,6 @@ # CONFIG_LOCALVERSION_AUTO is not set CONFIG_LOG_BUF_SHIFT=14 # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set # CONFIG_ELF_CORE is not set diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig index 4496b94b7d88..ed00a6eeadf5 100644 --- a/arch/sh/configs/se7705_defconfig +++ b/arch/sh/configs/se7705_defconfig @@ -2,7 +2,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y diff --git a/arch/sh/configs/se7750_defconfig b/arch/sh/configs/se7750_defconfig index b23f67542728..3f1c13799d79 100644 --- a/arch/sh/configs/se7750_defconfig +++ b/arch/sh/configs/se7750_defconfig @@ -5,7 +5,6 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig index 162343683937..4a024065bb75 100644 --- a/arch/sh/configs/se7751_defconfig +++ b/arch/sh/configs/se7751_defconfig @@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/secureedge5410_defconfig b/arch/sh/configs/secureedge5410_defconfig index 360592d63a2f..8422599cfb04 100644 --- a/arch/sh/configs/secureedge5410_defconfig +++ b/arch/sh/configs/secureedge5410_defconfig @@ -1,7 +1,6 @@ # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_HOTPLUG is not set CONFIG_SLAB=y # CONFIG_BLK_DEV_BSG is not set diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index 87db9a84b5ec..f0073ed39947 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -3,7 +3,6 @@ CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set 
CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=m diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig index 08426913c0e3..0d814770b07f 100644 --- a/arch/sh/configs/sh7710voipgw_defconfig +++ b/arch/sh/configs/sh7710voipgw_defconfig @@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_FUTEX is not set # CONFIG_EPOLL is not set # CONFIG_SHMEM is not set diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index d0933a9b9799..a2700ab165af 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y CONFIG_MODULES=y diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig index d0a0aa74cecf..26c5fd02c87a 100644 --- a/arch/sh/configs/sh7763rdp_defconfig +++ b/arch/sh/configs/sh7763rdp_defconfig @@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_IPC_NS=y -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig index a27b129b93c5..c0b6f40d01cc 100644 --- a/arch/sh/configs/shmin_defconfig +++ b/arch/sh/configs/shmin_defconfig @@ -1,7 +1,6 @@ # CONFIG_SWAP is not set CONFIG_LOG_BUF_SHIFT=14 # CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_HOTPLUG is not set # CONFIG_BUG is not set diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig index 4ec961ace688..ba887f1351be 100644 --- a/arch/sh/configs/titan_defconfig +++ b/arch/sh/configs/titan_defconfig @@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 CONFIG_BLK_DEV_INITRD=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -# CONFIG_SYSCTL_SYSCALL is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index 96848db9659e..ae0a00beea5f 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -156,7 +156,7 @@ 146 common writev sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 common _sysctl sys_sysctl +149 common _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 46024e80ee86..4af114e84f20 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -300,7 +300,7 @@ 249 64 nanosleep sys_nanosleep 250 32 mremap sys_mremap 250 64 mremap sys_64_mremap -251 common _sysctl sys_sysctl compat_sys_sysctl +251 common _sysctl sys_ni_syscall 252 common getsid sys_getsid 253 common fdatasync sys_fdatasync 254 32 nfsservctl sys_ni_syscall sys_nis_syscall diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index e31a75262c9c..9d1102873666 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -160,7 +160,7 @@ 146 i386 writev sys_writev compat_sys_writev 147 i386 getsid sys_getsid 148 i386 fdatasync sys_fdatasync -149 i386 _sysctl sys_sysctl compat_sys_sysctl +149 i386 _sysctl sys_ni_syscall 150 i386 mlock sys_mlock 151 i386 
munlock sys_munlock 152 i386 mlockall sys_mlockall diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 9d82078c949a..f30d6ae9a688 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -164,7 +164,7 @@ 153 common vhangup sys_vhangup 154 common modify_ldt sys_modify_ldt 155 common pivot_root sys_pivot_root -156 64 _sysctl sys_sysctl +156 64 _sysctl sys_ni_syscall 157 common prctl sys_prctl 158 common arch_prctl sys_arch_prctl 159 common adjtimex sys_adjtimex diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index d216ccba42f7..6276e3c2d3fc 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -222,7 +222,7 @@ 204 common quotactl sys_quotactl # 205 was old nfsservctl 205 common nfsservctl sys_ni_syscall -206 common _sysctl sys_sysctl +206 common _sysctl sys_ni_syscall 207 common bdflush sys_bdflush 208 common uname sys_newuname 209 common sysinfo sys_sysinfo diff --git a/include/linux/compat.h b/include/linux/compat.h index c4255d8a4a8a..d38c4d7e83bd 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -851,7 +851,6 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, unsigned flags); -asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); /* obsolete: fs/readdir.c */ asmlinkage long compat_sys_old_readdir(unsigned int fd, diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index dc2b827c81e5..75ac7f8ae93c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -47,7 +47,6 @@ struct stat64; struct statfs; struct statfs64; struct statx; -struct __sysctl_args; struct sysinfo; struct timespec; struct __kernel_old_timeval; @@ -1117,7 +1116,6 @@ asmlinkage long sys_send(int, void __user *, size_t, unsigned); asmlinkage long sys_bdflush(int func, long data); asmlinkage long sys_oldumount(char __user *name); asmlinkage long sys_uselib(const char __user *library); -asmlinkage long sys_sysctl(struct __sysctl_args __user *args); asmlinkage long sys_sysfs(int option, unsigned long arg1, unsigned long arg2); asmlinkage long sys_fork(void); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 50bb7f383a1b..51298a4f4623 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -74,15 +74,13 @@ int proc_do_static_key(struct ctl_table *table, int write, void *buffer, * sysctl names can be mirrored automatically under /proc/sys. The * procname supplied controls /proc naming. * - * The table's mode will be honoured both for sys_sysctl(2) and - * proc-fs access. + * The table's mode will be honoured for proc-fs access. * * Leaf nodes in the sysctl tree will be represented by a single file * under /proc; non-leaf nodes will be represented by directories. A * null procname disables /proc mirroring at this node. * - * sysctl(2) can automatically manage read and write requests through - * the sysctl table. The data and maxlen fields of the ctl_table + * The data and maxlen fields of the ctl_table * struct enable minimal validation of the values being written to be * performed, and the mode field allows minimal authentication. 
* diff --git a/kernel/Makefile b/kernel/Makefile index b3da548691c9..9a20016d4900 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -5,7 +5,7 @@ obj-y = fork.o exec_domain.o panic.o \ cpu.o exit.o softirq.o resource.o \ - sysctl.o sysctl_binary.o capability.o ptrace.o user.o \ + sysctl.o capability.o ptrace.o user.o \ signal.o sys.o umh.o workqueue.o pid.o task_work.o \ extable.o params.o \ kthread.o sys_ni.o nsproxy.o \ diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 3b69a560a7ac..4d59775ea79c 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -364,7 +364,6 @@ COND_SYSCALL(socketcall); COND_SYSCALL_COMPAT(socketcall); /* compat syscalls for arm64, x86, ... */ -COND_SYSCALL_COMPAT(sysctl); COND_SYSCALL_COMPAT(fanotify_mark); /* x86 */ diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c deleted file mode 100644 index 7d550cc76a3b..000000000000 --- a/kernel/sysctl_binary.c +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/stat.h> -#include <linux/sysctl.h> -#include "../fs/xfs/xfs_sysctl.h" -#include <linux/sunrpc/debug.h> -#include <linux/string.h> -#include <linux/syscalls.h> -#include <linux/namei.h> -#include <linux/mount.h> -#include <linux/fs.h> -#include <linux/nsproxy.h> -#include <linux/pid_namespace.h> -#include <linux/file.h> -#include <linux/ctype.h> -#include <linux/netdevice.h> -#include <linux/kernel.h> -#include <linux/uuid.h> -#include <linux/slab.h> -#include <linux/compat.h> - -static ssize_t binary_sysctl(const int *name, int nlen, - void __user *oldval, size_t oldlen, void __user *newval, size_t newlen) -{ - return -ENOSYS; -} - -static void deprecated_sysctl_warning(const int *name, int nlen) -{ - int i; - - /* - * CTL_KERN/KERN_VERSION is used by older glibc and cannot - * ever go away. - */ - if (nlen >= 2 && name[0] == CTL_KERN && name[1] == KERN_VERSION) - return; - - if (printk_ratelimit()) { - printk(KERN_INFO - "warning: process `%s' used the deprecated sysctl " - "system call with ", current->comm); - for (i = 0; i < nlen; i++) - printk(KERN_CONT "%d.", name[i]); - printk(KERN_CONT "\n"); - } - return; -} - -#define WARN_ONCE_HASH_BITS 8 -#define WARN_ONCE_HASH_SIZE (1<<WARN_ONCE_HASH_BITS) - -static DECLARE_BITMAP(warn_once_bitmap, WARN_ONCE_HASH_SIZE); - -#define WARN_ONCE_HASH_MASK (WARN_ONCE_HASH_SIZE-1) - -static void warn_on_bintable(const int *name, int nlen) -{ - int hash; - hash = full_name_hash(NULL, (const char *)name, nlen * sizeof(name[0])); - hash &= WARN_ONCE_HASH_MASK; - if (__test_and_set_bit(hash, warn_once_bitmap)) - return; - deprecated_sysctl_warning(name, nlen); -} - -static ssize_t do_sysctl(int __user *args_name, int nlen, - void __user *oldval, size_t oldlen, void __user *newval, size_t newlen) -{ - int name[CTL_MAXNAME]; - int i; - - /* Check args->nlen.
*/ - if (nlen < 0 || nlen > CTL_MAXNAME) - return -ENOTDIR; - /* Read in the sysctl name for simplicity */ - for (i = 0; i < nlen; i++) - if (get_user(name[i], args_name + i)) - return -EFAULT; - - warn_on_bintable(name, nlen); - - return binary_sysctl(name, nlen, oldval, oldlen, newval, newlen); -} - -SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args) -{ - struct __sysctl_args tmp; - size_t oldlen = 0; - ssize_t result; - - if (copy_from_user(&tmp, args, sizeof(tmp))) - return -EFAULT; - - if (tmp.oldval && !tmp.oldlenp) - return -EFAULT; - - if (tmp.oldlenp && get_user(oldlen, tmp.oldlenp)) - return -EFAULT; - - result = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, oldlen, - tmp.newval, tmp.newlen); - - if (result >= 0) { - oldlen = result; - result = 0; - } - - if (tmp.oldlenp && put_user(oldlen, tmp.oldlenp)) - return -EFAULT; - - return result; -} - - -#ifdef CONFIG_COMPAT - -struct compat_sysctl_args { - compat_uptr_t name; - int nlen; - compat_uptr_t oldval; - compat_uptr_t oldlenp; - compat_uptr_t newval; - compat_size_t newlen; - compat_ulong_t __unused[4]; -}; - -COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args) -{ - struct compat_sysctl_args tmp; - compat_size_t __user *compat_oldlenp; - size_t oldlen = 0; - ssize_t result; - - if (copy_from_user(&tmp, args, sizeof(tmp))) - return -EFAULT; - - if (tmp.oldval && !tmp.oldlenp) - return -EFAULT; - - compat_oldlenp = compat_ptr(tmp.oldlenp); - if (compat_oldlenp && get_user(oldlen, compat_oldlenp)) - return -EFAULT; - - result = do_sysctl(compat_ptr(tmp.name), tmp.nlen, - compat_ptr(tmp.oldval), oldlen, - compat_ptr(tmp.newval), tmp.newlen); - - if (result >= 0) { - oldlen = result; - result = 0; - } - - if (compat_oldlenp && put_user(oldlen, compat_oldlenp)) - return -EFAULT; - - return result; -} - -#endif /* CONFIG_COMPAT */ diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index b190f2eb2611..3ca6fe057a0b 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -193,7 +193,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid 148 common fdatasync sys_fdatasync -149 nospu _sysctl sys_sysctl compat_sys_sysctl +149 nospu _sysctl sys_ni_syscall 150 common mlock sys_mlock 151 common munlock sys_munlock 152 common mlockall sys_mlockall diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index 56ae24b6e4be..6a0bbea225db 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -138,7 +138,7 @@ 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid sys_getsid 148 common fdatasync sys_fdatasync sys_fdatasync -149 common _sysctl sys_sysctl compat_sys_sysctl +149 common _sysctl - - 150 common mlock sys_mlock compat_sys_mlock 151 common munlock sys_munlock compat_sys_munlock 152 common mlockall sys_mlockall sys_mlockall diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 9d82078c949a..f30d6ae9a688 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -164,7 +164,7 @@ 153 common vhangup sys_vhangup 154 common modify_ldt sys_modify_ldt 155 common pivot_root sys_pivot_root -156 64 _sysctl sys_sysctl +156 64 _sysctl sys_ni_syscall 157 common prctl sys_prctl 158 common arch_prctl 
sys_arch_prctl 159 common adjtimex sys_adjtimex -- cgit v1.2.3 From e0e3f42fd96cf830c61aa720f0abb4eef41c5ce3 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Fri, 14 Aug 2020 17:31:37 -0700 Subject: mm/memcontrol: fix a data race in scan count struct mem_cgroup_per_node mz.lru_zone_size[zone_idx][lru] could be accessed concurrently as noticed by KCSAN, BUG: KCSAN: data-race in lruvec_lru_size / mem_cgroup_update_lru_size write to 0xffff9c804ca285f8 of 8 bytes by task 50951 on cpu 12: mem_cgroup_update_lru_size+0x11c/0x1d0 mem_cgroup_update_lru_size at mm/memcontrol.c:1266 isolate_lru_pages+0x6a9/0xf30 shrink_active_list+0x123/0xcc0 shrink_lruvec+0x8fd/0x1380 shrink_node+0x317/0xd80 do_try_to_free_pages+0x1f7/0xa10 try_to_free_pages+0x26c/0x5e0 __alloc_pages_slowpath+0x458/0x1290 __alloc_pages_nodemask+0x3bb/0x450 alloc_pages_vma+0x8a/0x2c0 do_anonymous_page+0x170/0x700 __handle_mm_fault+0xc9f/0xd00 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x6f9 page_fault+0x34/0x40 read to 0xffff9c804ca285f8 of 8 bytes by task 50964 on cpu 95: lruvec_lru_size+0xbb/0x270 mem_cgroup_get_zone_lru_size at include/linux/memcontrol.h:536 (inlined by) lruvec_lru_size at mm/vmscan.c:326 shrink_lruvec+0x1d0/0x1380 shrink_node+0x317/0xd80 do_try_to_free_pages+0x1f7/0xa10 try_to_free_pages+0x26c/0x5e0 __alloc_pages_slowpath+0x458/0x1290 __alloc_pages_nodemask+0x3bb/0x450 alloc_pages_current+0xa6/0x120 alloc_slab_page+0x3b1/0x540 allocate_slab+0x70/0x660 new_slab+0x46/0x70 ___slab_alloc+0x4ad/0x7d0 __slab_alloc+0x43/0x70 kmem_cache_alloc+0x2c3/0x420 getname_flags+0x4c/0x230 getname+0x22/0x30 do_sys_openat2+0x205/0x3b0 do_sys_open+0x9a/0xf0 __x64_sys_openat+0x62/0x80 do_syscall_64+0x91/0xb47 entry_SYSCALL_64_after_hwframe+0x49/0xbe Reported by Kernel Concurrency Sanitizer on: CPU: 95 PID: 50964 Comm: cc1 Tainted: G W O L 5.5.0-next-20200204+ #6 Hardware name: HPE ProLiant DL385 Gen10/ProLiant DL385 Gen10, BIOS A40 07/10/2019 The write is under lru_lock, but the read is done locklessly. The scan count is used to determine how aggressively the anon and file LRU lists should be scanned. Load tearing could generate an inefficient heuristic, so fix it by adding READ_ONCE() for the read.
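As a minimal stand-alone sketch of the pattern being applied here (hypothetical code, not taken from this patch), the writer keeps its plain store under the lock while the lockless reader goes through READ_ONCE() so the compiler must emit one untorn load:

#include <linux/compiler.h>
#include <linux/spinlock.h>

/* Sketch only: 'size' stands in for mz->lru_zone_size[zone_idx][lru]. */
static unsigned long size;
static DEFINE_SPINLOCK(size_lock);

static void update_size(long nr)	/* writer: serialized by size_lock */
{
	spin_lock(&size_lock);
	size += nr;			/* plain store is fine under the lock */
	spin_unlock(&size_lock);
}

static unsigned long read_size(void)	/* reader: runs lockless */
{
	return READ_ONCE(size);		/* single load, no tearing or refetching */
}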
Signed-off-by: Qian Cai Signed-off-by: Andrew Morton Cc: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Link: http://lkml.kernel.org/r/20200206034945.2481-1-cai@lca.pw Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 385237e4cb44..d0b036123c6a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -630,7 +630,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, struct mem_cgroup_per_node *mz; mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - return mz->lru_zone_size[zone_idx][lru]; + return READ_ONCE(mz->lru_zone_size[zone_idx][lru]); } void mem_cgroup_handle_over_high(void); -- cgit v1.2.3 From c403f6a3a792a6601185497c12b0bdf4be880439 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Fri, 14 Aug 2020 17:31:53 -0700 Subject: mm: annotate a data race in page_zonenum() BUG: KCSAN: data-race in page_cpupid_xchg_last / put_page write (marked) to 0xfffffc0d48ec1a00 of 8 bytes by task 91442 on cpu 3: page_cpupid_xchg_last+0x51/0x80 page_cpupid_xchg_last at mm/mmzone.c:109 (discriminator 11) wp_page_reuse+0x3e/0xc0 wp_page_reuse at mm/memory.c:2453 do_wp_page+0x472/0x7b0 do_wp_page at mm/memory.c:2798 __handle_mm_fault+0xcb0/0xd00 handle_pte_fault at mm/memory.c:4049 (inlined by) __handle_mm_fault at mm/memory.c:4163 handle_mm_fault+0xfc/0x2f0 handle_mm_fault at mm/memory.c:4200 do_page_fault+0x263/0x6f9 do_user_addr_fault at arch/x86/mm/fault.c:1465 (inlined by) do_page_fault at arch/x86/mm/fault.c:1539 page_fault+0x34/0x40 read to 0xfffffc0d48ec1a00 of 8 bytes by task 94817 on cpu 69: put_page+0x15a/0x1f0 page_zonenum at include/linux/mm.h:923 (inlined by) is_zone_device_page at include/linux/mm.h:929 (inlined by) page_is_devmap_managed at include/linux/mm.h:948 (inlined by) put_page at include/linux/mm.h:1023 wp_page_copy+0x571/0x930 wp_page_copy at mm/memory.c:2615 do_wp_page+0x107/0x7b0 __handle_mm_fault+0xcb0/0xd00 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x6f9 page_fault+0x34/0x40 Reported by Kernel Concurrency Sanitizer on: CPU: 69 PID: 94817 Comm: systemd-udevd Tainted: G W O L 5.5.0-next-20200204+ #6 Hardware name: HPE ProLiant DL385 Gen10/ProLiant DL385 Gen10, BIOS A40 07/10/2019 A page never changes its zone number. The zone number happens to be stored in the same word as other bits which are modified, but the zone number bits will never be modified by any other write, so it can accept a reload of the zone bits after an intervening write and does not need to use READ_ONCE(). Thus, annotate this data race using ASSERT_EXCLUSIVE_BITS() to also assert that there are no concurrent writes to it. Suggested-by: Marco Elver Signed-off-by: Qian Cai Signed-off-by: Andrew Morton Cc: Paul E.
McKenney Cc: David Hildenbrand Cc: Jan Kara Cc: John Hubbard Cc: Ira Weiny Cc: Dan Williams Link: http://lkml.kernel.org/r/1581619089-14472-1-git-send-email-cai@lca.pw Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 2e7ec3ee34cf..1983e08f5906 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1067,6 +1067,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); static inline enum zone_type page_zonenum(const struct page *page) { + ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } -- cgit v1.2.3 From 8f28ca6bd8211214faf717677bbffe375c2a6072 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 14 Aug 2020 17:32:07 -0700 Subject: iomap: constify ioreadX() iomem argument (as in generic implementation) Patch series "iomap: Constify ioreadX() iomem argument", v3. The ioread8/16/32() helpers and others have an inconsistent interface among the architectures: some take the address as const, some do not. It seems there is nothing really stopping all of them from taking a pointer to const. This patch (of 4): The ioreadX() and ioreadX_rep() helpers have an inconsistent interface. On some architectures the void __iomem * address argument is a pointer to const, on others not. Implementations of ioreadX() do not modify the memory under the address, so they can be converted to a "const" version for const-safety and consistency among architectures. [krzk@kernel.org: sh: clk: fix assignment from incompatible pointer type for ioreadX()] Link: http://lkml.kernel.org/r/20200723082017.24053-1-krzk@kernel.org [akpm@linux-foundation.org: fix drivers/mailbox/bcm-pdc-mailbox.c] Link: http://lkml.kernel.org/r/202007132209.Rxmv4QyS%25lkp@intel.com Suggested-by: Geert Uytterhoeven Signed-off-by: Krzysztof Kozlowski Signed-off-by: Andrew Morton Reviewed-by: Geert Uytterhoeven Reviewed-by: Arnd Bergmann Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Cc: "James E.J. Bottomley" Cc: Helge Deller Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Yoshinori Sato Cc: Rich Felker Cc: Kalle Valo Cc: "David S. Miller" Cc: Jakub Kicinski Cc: Dave Jiang Cc: Jon Mason Cc: Allen Hubbe Cc: "Michael S.
Tsirkin" Cc: Jason Wang Link: http://lkml.kernel.org/r/20200709072837.5869-1-krzk@kernel.org Link: http://lkml.kernel.org/r/20200709072837.5869-2-krzk@kernel.org Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/core_apecs.h | 6 +-- arch/alpha/include/asm/core_cia.h | 6 +-- arch/alpha/include/asm/core_lca.h | 6 +-- arch/alpha/include/asm/core_marvel.h | 4 +- arch/alpha/include/asm/core_mcpcia.h | 6 +-- arch/alpha/include/asm/core_t2.h | 2 +- arch/alpha/include/asm/io.h | 12 +++--- arch/alpha/include/asm/io_trivial.h | 16 ++++---- arch/alpha/include/asm/jensen.h | 2 +- arch/alpha/include/asm/machvec.h | 6 +-- arch/alpha/kernel/core_marvel.c | 2 +- arch/alpha/kernel/io.c | 12 +++--- arch/parisc/include/asm/io.h | 4 +- arch/parisc/lib/iomap.c | 72 +++++++++++++++++------------------ arch/powerpc/kernel/iomap.c | 28 +++++++------- arch/sh/kernel/iomap.c | 22 +++++------ drivers/mailbox/bcm-pdc-mailbox.c | 2 +- drivers/sh/clk/cpg.c | 2 +- include/asm-generic/iomap.h | 28 +++++++------- include/linux/io-64-nonatomic-hi-lo.h | 4 +- include/linux/io-64-nonatomic-lo-hi.h | 4 +- lib/iomap.c | 30 +++++++-------- 22 files changed, 138 insertions(+), 138 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/core_apecs.h b/arch/alpha/include/asm/core_apecs.h index 0a07055bc0fe..2d9726fc02ef 100644 --- a/arch/alpha/include/asm/core_apecs.h +++ b/arch/alpha/include/asm/core_apecs.h @@ -384,7 +384,7 @@ struct el_apecs_procdata } \ } while (0) -__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr) +__EXTERN_INLINE unsigned int apecs_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -420,7 +420,7 @@ __EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr) +__EXTERN_INLINE unsigned int apecs_ioread16(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -456,7 +456,7 @@ __EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr) +__EXTERN_INLINE unsigned int apecs_ioread32(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr < APECS_DENSE_MEM) diff --git a/arch/alpha/include/asm/core_cia.h b/arch/alpha/include/asm/core_cia.h index c706a7f2b061..cb22991f6761 100644 --- a/arch/alpha/include/asm/core_cia.h +++ b/arch/alpha/include/asm/core_cia.h @@ -342,7 +342,7 @@ struct el_CIA_sysdata_mcheck { #define vuip volatile unsigned int __force * #define vulp volatile unsigned long __force * -__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr) +__EXTERN_INLINE unsigned int cia_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -374,7 +374,7 @@ __EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr) +__EXTERN_INLINE unsigned int cia_ioread16(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -404,7 +404,7 @@ __EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr) +__EXTERN_INLINE unsigned int cia_ioread32(const void __iomem 
*xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr < CIA_DENSE_MEM) diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h index 84d5e5b84f4f..ec86314418cb 100644 --- a/arch/alpha/include/asm/core_lca.h +++ b/arch/alpha/include/asm/core_lca.h @@ -230,7 +230,7 @@ union el_lca { } while (0) -__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr) +__EXTERN_INLINE unsigned int lca_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -266,7 +266,7 @@ __EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr) +__EXTERN_INLINE unsigned int lca_ioread16(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; unsigned long result, base_and_type; @@ -302,7 +302,7 @@ __EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr) *(vuip) ((addr << 5) + base_and_type) = w; } -__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr) +__EXTERN_INLINE unsigned int lca_ioread32(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr < LCA_DENSE_MEM) diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h index cc6fd92d5fa9..b266e02e284b 100644 --- a/arch/alpha/include/asm/core_marvel.h +++ b/arch/alpha/include/asm/core_marvel.h @@ -332,10 +332,10 @@ struct io7 { #define vucp volatile unsigned char __force * #define vusp volatile unsigned short __force * -extern unsigned int marvel_ioread8(void __iomem *); +extern unsigned int marvel_ioread8(const void __iomem *); extern void marvel_iowrite8(u8 b, void __iomem *); -__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr) +__EXTERN_INLINE unsigned int marvel_ioread16(const void __iomem *addr) { return __kernel_ldwu(*(vusp)addr); } diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h index b30dc128210d..cb24d1bd6141 100644 --- a/arch/alpha/include/asm/core_mcpcia.h +++ b/arch/alpha/include/asm/core_mcpcia.h @@ -267,7 +267,7 @@ extern inline int __mcpcia_is_mmio(unsigned long addr) return (addr & 0x80000000UL) == 0; } -__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr) +__EXTERN_INLINE unsigned int mcpcia_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; @@ -291,7 +291,7 @@ __EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr) *(vuip) ((addr << 5) + hose + 0x00) = w; } -__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr) +__EXTERN_INLINE unsigned int mcpcia_ioread16(const void __iomem *xaddr) { unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; @@ -315,7 +315,7 @@ __EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr) *(vuip) ((addr << 5) + hose + 0x08) = w; } -__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr) +__EXTERN_INLINE unsigned int mcpcia_ioread32(const void __iomem *xaddr) { unsigned long addr = (unsigned long)xaddr; diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h index e0b33d09e93a..12bb7addc789 100644 --- a/arch/alpha/include/asm/core_t2.h +++ b/arch/alpha/include/asm/core_t2.h @@ -572,7 +572,7 @@ __EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr) it doesn't make sense to merge the pio and mmio routines. 
*/ #define IOPORT(OS, NS) \ -__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \ +__EXTERN_INLINE unsigned int t2_ioread##NS(const void __iomem *xaddr) \ { \ if (t2_is_mmio(xaddr)) \ return t2_read##OS(xaddr); \ diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index 640e1a2f57b4..1f6a909d1fa5 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -150,9 +150,9 @@ static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \ alpha_mv.mv_##NAME(b, addr); \ } -REMAP1(unsigned int, ioread8, /**/) -REMAP1(unsigned int, ioread16, /**/) -REMAP1(unsigned int, ioread32, /**/) +REMAP1(unsigned int, ioread8, const) +REMAP1(unsigned int, ioread16, const) +REMAP1(unsigned int, ioread32, const) REMAP1(u8, readb, const volatile) REMAP1(u16, readw, const volatile) REMAP1(u32, readl, const volatile) @@ -307,7 +307,7 @@ static inline int __is_mmio(const volatile void __iomem *addr) */ #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) -extern inline unsigned int ioread8(void __iomem *addr) +extern inline unsigned int ioread8(const void __iomem *addr) { unsigned int ret; mb(); @@ -316,7 +316,7 @@ extern inline unsigned int ioread8(void __iomem *addr) return ret; } -extern inline unsigned int ioread16(void __iomem *addr) +extern inline unsigned int ioread16(const void __iomem *addr) { unsigned int ret; mb(); @@ -359,7 +359,7 @@ extern inline void outw(u16 b, unsigned long port) #endif #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) -extern inline unsigned int ioread32(void __iomem *addr) +extern inline unsigned int ioread32(const void __iomem *addr) { unsigned int ret; mb(); diff --git a/arch/alpha/include/asm/io_trivial.h b/arch/alpha/include/asm/io_trivial.h index ba3d8f0cfe0c..a1a29cbe02fa 100644 --- a/arch/alpha/include/asm/io_trivial.h +++ b/arch/alpha/include/asm/io_trivial.h @@ -7,15 +7,15 @@ #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) __EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a) +IO_CONCAT(__IO_PREFIX,ioread8)(const void __iomem *a) { - return __kernel_ldbu(*(volatile u8 __force *)a); + return __kernel_ldbu(*(const volatile u8 __force *)a); } __EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a) +IO_CONCAT(__IO_PREFIX,ioread16)(const void __iomem *a) { - return __kernel_ldwu(*(volatile u16 __force *)a); + return __kernel_ldwu(*(const volatile u16 __force *)a); } __EXTERN_INLINE void @@ -33,9 +33,9 @@ IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a) #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) __EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a) +IO_CONCAT(__IO_PREFIX,ioread32)(const void __iomem *a) { - return *(volatile u32 __force *)a; + return *(const volatile u32 __force *)a; } __EXTERN_INLINE void @@ -73,14 +73,14 @@ IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) __EXTERN_INLINE u8 IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) { - void __iomem *addr = (void __iomem *)a; + const void __iomem *addr = (const void __iomem *)a; return IO_CONCAT(__IO_PREFIX,ioread8)(addr); } __EXTERN_INLINE u16 IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) { - void __iomem *addr = (void __iomem *)a; + const void __iomem *addr = (const void __iomem *)a; return IO_CONCAT(__IO_PREFIX,ioread16)(addr); } diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h index 436dc905b6ad..916895155a88 100644 --- a/arch/alpha/include/asm/jensen.h +++ b/arch/alpha/include/asm/jensen.h @@ -305,7 +305,7 @@ __EXTERN_INLINE int 
jensen_is_mmio(const volatile void __iomem *addr) that it doesn't make sense to merge them. */ #define IOPORT(OS, NS) \ -__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \ +__EXTERN_INLINE unsigned int jensen_ioread##NS(const void __iomem *xaddr) \ { \ if (jensen_is_mmio(xaddr)) \ return jensen_read##OS(xaddr - 0x100000000ul); \ diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h index a6b73c6d10ee..a4e96e2bec74 100644 --- a/arch/alpha/include/asm/machvec.h +++ b/arch/alpha/include/asm/machvec.h @@ -46,9 +46,9 @@ struct alpha_machine_vector void (*mv_pci_tbi)(struct pci_controller *hose, dma_addr_t start, dma_addr_t end); - unsigned int (*mv_ioread8)(void __iomem *); - unsigned int (*mv_ioread16)(void __iomem *); - unsigned int (*mv_ioread32)(void __iomem *); + unsigned int (*mv_ioread8)(const void __iomem *); + unsigned int (*mv_ioread16)(const void __iomem *); + unsigned int (*mv_ioread32)(const void __iomem *); void (*mv_iowrite8)(u8, void __iomem *); void (*mv_iowrite16)(u16, void __iomem *); diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index 4c80d992a659..4485b77f8658 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -806,7 +806,7 @@ void __iomem *marvel_ioportmap (unsigned long addr) } unsigned int -marvel_ioread8(void __iomem *xaddr) +marvel_ioread8(const void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (__marvel_is_port_kbd(addr)) diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c index 938de13adfbf..838586abb1e0 100644 --- a/arch/alpha/kernel/io.c +++ b/arch/alpha/kernel/io.c @@ -14,7 +14,7 @@ "generic", which bumps through the machine vector. */ unsigned int -ioread8(void __iomem *addr) +ioread8(const void __iomem *addr) { unsigned int ret; mb(); @@ -23,7 +23,7 @@ ioread8(void __iomem *addr) return ret; } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { unsigned int ret; mb(); @@ -32,7 +32,7 @@ unsigned int ioread16(void __iomem *addr) return ret; } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { unsigned int ret; mb(); @@ -257,7 +257,7 @@ EXPORT_SYMBOL(readq_relaxed); /* * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. */ -void ioread8_rep(void __iomem *port, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *port, void *dst, unsigned long count) { while ((unsigned long)dst & 0x3) { if (!count) @@ -300,7 +300,7 @@ EXPORT_SYMBOL(insb); * the interfaces seems to be slow: just using the inlined version * of the inw() breaks things. */ -void ioread16_rep(void __iomem *port, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *port, void *dst, unsigned long count) { if (unlikely((unsigned long)dst & 0x3)) { if (!count) @@ -340,7 +340,7 @@ EXPORT_SYMBOL(insw); * but the interfaces seems to be slow: just using the inlined version * of the inl() breaks things. 
*/ -void ioread32_rep(void __iomem *port, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *port, void *dst, unsigned long count) { if (unlikely((unsigned long)dst & 0x3)) { while (count--) { diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 116effe26143..45e20d38dc59 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -303,8 +303,8 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); #define ioread64be ioread64be #define iowrite64 iowrite64 #define iowrite64be iowrite64be -extern u64 ioread64(void __iomem *addr); -extern u64 ioread64be(void __iomem *addr); +extern u64 ioread64(const void __iomem *addr); +extern u64 ioread64be(const void __iomem *addr); extern void iowrite64(u64 val, void __iomem *addr); extern void iowrite64be(u64 val, void __iomem *addr); diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index 0195aec657e2..ce400417d54e 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -43,13 +43,13 @@ #endif struct iomap_ops { - unsigned int (*read8)(void __iomem *); - unsigned int (*read16)(void __iomem *); - unsigned int (*read16be)(void __iomem *); - unsigned int (*read32)(void __iomem *); - unsigned int (*read32be)(void __iomem *); - u64 (*read64)(void __iomem *); - u64 (*read64be)(void __iomem *); + unsigned int (*read8)(const void __iomem *); + unsigned int (*read16)(const void __iomem *); + unsigned int (*read16be)(const void __iomem *); + unsigned int (*read32)(const void __iomem *); + unsigned int (*read32be)(const void __iomem *); + u64 (*read64)(const void __iomem *); + u64 (*read64be)(const void __iomem *); void (*write8)(u8, void __iomem *); void (*write16)(u16, void __iomem *); void (*write16be)(u16, void __iomem *); @@ -57,9 +57,9 @@ struct iomap_ops { void (*write32be)(u32, void __iomem *); void (*write64)(u64, void __iomem *); void (*write64be)(u64, void __iomem *); - void (*read8r)(void __iomem *, void *, unsigned long); - void (*read16r)(void __iomem *, void *, unsigned long); - void (*read32r)(void __iomem *, void *, unsigned long); + void (*read8r)(const void __iomem *, void *, unsigned long); + void (*read16r)(const void __iomem *, void *, unsigned long); + void (*read32r)(const void __iomem *, void *, unsigned long); void (*write8r)(void __iomem *, const void *, unsigned long); void (*write16r)(void __iomem *, const void *, unsigned long); void (*write32r)(void __iomem *, const void *, unsigned long); @@ -69,17 +69,17 @@ struct iomap_ops { #define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff) -static unsigned int ioport_read8(void __iomem *addr) +static unsigned int ioport_read8(const void __iomem *addr) { return inb(ADDR2PORT(addr)); } -static unsigned int ioport_read16(void __iomem *addr) +static unsigned int ioport_read16(const void __iomem *addr) { return inw(ADDR2PORT(addr)); } -static unsigned int ioport_read32(void __iomem *addr) +static unsigned int ioport_read32(const void __iomem *addr) { return inl(ADDR2PORT(addr)); } @@ -99,17 +99,17 @@ static void ioport_write32(u32 datum, void __iomem *addr) outl(datum, ADDR2PORT(addr)); } -static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read8r(const void __iomem *addr, void *dst, unsigned long count) { insb(ADDR2PORT(addr), dst, count); } -static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read16r(const void __iomem *addr, void *dst, unsigned long count) { 
insw(ADDR2PORT(addr), dst, count); } -static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count) +static void ioport_read32r(const void __iomem *addr, void *dst, unsigned long count) { insl(ADDR2PORT(addr), dst, count); } @@ -150,37 +150,37 @@ static const struct iomap_ops ioport_ops = { /* Legacy I/O memory ops */ -static unsigned int iomem_read8(void __iomem *addr) +static unsigned int iomem_read8(const void __iomem *addr) { return readb(addr); } -static unsigned int iomem_read16(void __iomem *addr) +static unsigned int iomem_read16(const void __iomem *addr) { return readw(addr); } -static unsigned int iomem_read16be(void __iomem *addr) +static unsigned int iomem_read16be(const void __iomem *addr) { return __raw_readw(addr); } -static unsigned int iomem_read32(void __iomem *addr) +static unsigned int iomem_read32(const void __iomem *addr) { return readl(addr); } -static unsigned int iomem_read32be(void __iomem *addr) +static unsigned int iomem_read32be(const void __iomem *addr) { return __raw_readl(addr); } -static u64 iomem_read64(void __iomem *addr) +static u64 iomem_read64(const void __iomem *addr) { return readq(addr); } -static u64 iomem_read64be(void __iomem *addr) +static u64 iomem_read64be(const void __iomem *addr) { return __raw_readq(addr); } @@ -220,7 +220,7 @@ static void iomem_write64be(u64 datum, void __iomem *addr) __raw_writel(datum, addr); } -static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read8r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u8 *)dst = __raw_readb(addr); @@ -228,7 +228,7 @@ static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) } } -static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read16r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u16 *)dst = __raw_readw(addr); @@ -236,7 +236,7 @@ static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count) } } -static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count) +static void iomem_read32r(const void __iomem *addr, void *dst, unsigned long count) { while (count--) { *(u32 *)dst = __raw_readl(addr); @@ -297,49 +297,49 @@ static const struct iomap_ops *iomap_ops[8] = { }; -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr); return *((u8 *)addr); } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr); return le16_to_cpup((u16 *)addr); } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr); return *((u16 *)addr); } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr); return le32_to_cpup((u32 *)addr); } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr); return *((u32 *)addr); } -u64 ioread64(void __iomem *addr) +u64 ioread64(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return 
iomap_ops[ADDR_TO_REGION(addr)]->read64(addr); return le64_to_cpup((u64 *)addr); } -u64 ioread64be(void __iomem *addr) +u64 ioread64be(const void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr); @@ -411,7 +411,7 @@ void iowrite64be(u64 datum, void __iomem *addr) /* Repeating interfaces */ -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count); @@ -423,7 +423,7 @@ void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) } } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count); @@ -435,7 +435,7 @@ void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) } } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { if (unlikely(INDIRECT_ADDR(addr))) { iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count); diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index 5ac84efc6ede..9fe4fb3b08aa 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -15,23 +15,23 @@ * Here comes the ppc64 implementation of the IOMAP * interfaces. */ -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { return readb(addr); } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { return readw(addr); } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { return readw_be(addr); } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { return readl(addr); } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { return readl_be(addr); } @@ -41,27 +41,27 @@ EXPORT_SYMBOL(ioread16be); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); #ifdef __powerpc64__ -u64 ioread64(void __iomem *addr) +u64 ioread64(const void __iomem *addr) { return readq(addr); } -u64 ioread64_lo_hi(void __iomem *addr) +u64 ioread64_lo_hi(const void __iomem *addr) { return readq(addr); } -u64 ioread64_hi_lo(void __iomem *addr) +u64 ioread64_hi_lo(const void __iomem *addr) { return readq(addr); } -u64 ioread64be(void __iomem *addr) +u64 ioread64be(const void __iomem *addr) { return readq_be(addr); } -u64 ioread64be_lo_hi(void __iomem *addr) +u64 ioread64be_lo_hi(const void __iomem *addr) { return readq_be(addr); } -u64 ioread64be_hi_lo(void __iomem *addr) +u64 ioread64be_hi_lo(const void __iomem *addr) { return readq_be(addr); } @@ -139,15 +139,15 @@ EXPORT_SYMBOL(iowrite64be_hi_lo); * FIXME! We could make these do EEH handling if we really * wanted. Not clear if we do. 
*/ -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { readsb(addr, dst, count); } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { readsw(addr, dst, count); } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { readsl(addr, dst, count); } diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c index ef9e2c97cbb7..0a0dff4e66de 100644 --- a/arch/sh/kernel/iomap.c +++ b/arch/sh/kernel/iomap.c @@ -8,31 +8,31 @@ #include <linux/module.h> #include <linux/io.h> -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { return readb(addr); } EXPORT_SYMBOL(ioread8); -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { return readw(addr); } EXPORT_SYMBOL(ioread16); -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { return be16_to_cpu(__raw_readw(addr)); } EXPORT_SYMBOL(ioread16be); -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { return readl(addr); } EXPORT_SYMBOL(ioread32); -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { return be32_to_cpu(__raw_readl(addr)); } @@ -74,7 +74,7 @@ EXPORT_SYMBOL(iowrite32be); * convert to CPU byte order. We write in "IO byte * order" (we also don't have IO barriers). */ -static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) +static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count) { while (--count >= 0) { u8 data = __raw_readb(addr); @@ -83,7 +83,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) } } -static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) +static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count) { while (--count >= 0) { u16 data = __raw_readw(addr); @@ -92,7 +92,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) } } -static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) +static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count) { while (--count >= 0) { u32 data = __raw_readl(addr); @@ -125,19 +125,19 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) } } -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insb(addr, dst, count); } EXPORT_SYMBOL(ioread8_rep); -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insw(addr, dst, count); } EXPORT_SYMBOL(ioread16_rep); -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { mmio_insl(addr, dst, count); } diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c index c10a9318a4b7..53945ca5d785 100644 --- a/drivers/mailbox/bcm-pdc-mailbox.c +++ b/drivers/mailbox/bcm-pdc-mailbox.c @@ -679,7 +679,7 @@ pdc_receive(struct pdc_state *pdcs) /* read last_rx_curr from register once */ pdcs->last_rx_curr = - (ioread32(&pdcs->rxregs_64->status0) & + (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) & CRYPTO_D64_RS0_CD_MASK) /
RING_ENTRY_SIZE; do { diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index a5cacfe24a42..fd72d9088bdc 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c @@ -40,7 +40,7 @@ static int sh_clk_mstp_enable(struct clk *clk) { sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); if (clk->status_reg) { - unsigned int (*read)(void __iomem *addr); + unsigned int (*read)(const void __iomem *addr); int i; void __iomem *mapped_status = (phys_addr_t)clk->status_reg - (phys_addr_t)clk->enable_reg + clk->mapped_reg; diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 9d28a5e82f73..649224664969 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -26,14 +26,14 @@ * in the low address range. Architectures for which this is not * true can't use this generic implementation. */ -extern unsigned int ioread8(void __iomem *); -extern unsigned int ioread16(void __iomem *); -extern unsigned int ioread16be(void __iomem *); -extern unsigned int ioread32(void __iomem *); -extern unsigned int ioread32be(void __iomem *); +extern unsigned int ioread8(const void __iomem *); +extern unsigned int ioread16(const void __iomem *); +extern unsigned int ioread16be(const void __iomem *); +extern unsigned int ioread32(const void __iomem *); +extern unsigned int ioread32be(const void __iomem *); #ifdef CONFIG_64BIT -extern u64 ioread64(void __iomem *); -extern u64 ioread64be(void __iomem *); +extern u64 ioread64(const void __iomem *); +extern u64 ioread64be(const void __iomem *); #endif #ifdef readq @@ -41,10 +41,10 @@ extern u64 ioread64be(void __iomem *); #define ioread64_hi_lo ioread64_hi_lo #define ioread64be_lo_hi ioread64be_lo_hi #define ioread64be_hi_lo ioread64be_hi_lo -extern u64 ioread64_lo_hi(void __iomem *addr); -extern u64 ioread64_hi_lo(void __iomem *addr); -extern u64 ioread64be_lo_hi(void __iomem *addr); -extern u64 ioread64be_hi_lo(void __iomem *addr); +extern u64 ioread64_lo_hi(const void __iomem *addr); +extern u64 ioread64_hi_lo(const void __iomem *addr); +extern u64 ioread64be_lo_hi(const void __iomem *addr); +extern u64 ioread64be_hi_lo(const void __iomem *addr); #endif extern void iowrite8(u8, void __iomem *); @@ -79,9 +79,9 @@ extern void iowrite64be_hi_lo(u64 val, void __iomem *addr); * memory across multiple ports, use "memcpy_toio()" * and friends. 
*/ -extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count); -extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count); -extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count); extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index ae21b72cce85..f32522bb3aa5 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -57,7 +57,7 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) #ifndef ioread64_hi_lo #define ioread64_hi_lo ioread64_hi_lo -static inline u64 ioread64_hi_lo(void __iomem *addr) +static inline u64 ioread64_hi_lo(const void __iomem *addr) { u32 low, high; @@ -79,7 +79,7 @@ static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) #ifndef ioread64be_hi_lo #define ioread64be_hi_lo ioread64be_hi_lo -static inline u64 ioread64be_hi_lo(void __iomem *addr) +static inline u64 ioread64be_hi_lo(const void __iomem *addr) { u32 low, high; diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index faaa842dbdb9..448a21435dba 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -57,7 +57,7 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) #ifndef ioread64_lo_hi #define ioread64_lo_hi ioread64_lo_hi -static inline u64 ioread64_lo_hi(void __iomem *addr) +static inline u64 ioread64_lo_hi(const void __iomem *addr) { u32 low, high; @@ -79,7 +79,7 @@ static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) #ifndef ioread64be_lo_hi #define ioread64be_lo_hi ioread64be_lo_hi -static inline u64 ioread64be_lo_hi(void __iomem *addr) +static inline u64 ioread64be_lo_hi(const void __iomem *addr) { u32 low, high; diff --git a/lib/iomap.c b/lib/iomap.c index e909ab71e995..fbaa3e8f19d6 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -70,27 +70,27 @@ static void bad_io_access(unsigned long port, const char *access) #define mmio_read64be(addr) swab64(readq(addr)) #endif -unsigned int ioread8(void __iomem *addr) +unsigned int ioread8(const void __iomem *addr) { IO_COND(addr, return inb(port), return readb(addr)); return 0xff; } -unsigned int ioread16(void __iomem *addr) +unsigned int ioread16(const void __iomem *addr) { IO_COND(addr, return inw(port), return readw(addr)); return 0xffff; } -unsigned int ioread16be(void __iomem *addr) +unsigned int ioread16be(const void __iomem *addr) { IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr)); return 0xffff; } -unsigned int ioread32(void __iomem *addr) +unsigned int ioread32(const void __iomem *addr) { IO_COND(addr, return inl(port), return readl(addr)); return 0xffffffff; } -unsigned int ioread32be(void __iomem *addr) +unsigned int ioread32be(const void __iomem *addr) { IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr)); return 0xffffffff; @@ -142,26 +142,26 @@ static u64 pio_read64be_hi_lo(unsigned long port) return lo | (hi << 32); } -u64 ioread64_lo_hi(void __iomem *addr) +u64 ioread64_lo_hi(const void __iomem *addr) { 
IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr)); return 0xffffffffffffffffULL; } -u64 ioread64_hi_lo(void __iomem *addr) +u64 ioread64_hi_lo(const void __iomem *addr) { IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr)); return 0xffffffffffffffffULL; } -u64 ioread64be_lo_hi(void __iomem *addr) +u64 ioread64be_lo_hi(const void __iomem *addr) { IO_COND(addr, return pio_read64be_lo_hi(port), return mmio_read64be(addr)); return 0xffffffffffffffffULL; } -u64 ioread64be_hi_lo(void __iomem *addr) +u64 ioread64be_hi_lo(const void __iomem *addr) { IO_COND(addr, return pio_read64be_hi_lo(port), return mmio_read64be(addr)); @@ -275,7 +275,7 @@ EXPORT_SYMBOL(iowrite64be_hi_lo); * order" (we also don't have IO barriers). */ #ifndef mmio_insb -static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) +static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count) { while (--count >= 0) { u8 data = __raw_readb(addr); @@ -283,7 +283,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) dst++; } } -static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) +static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count) { while (--count >= 0) { u16 data = __raw_readw(addr); @@ -291,7 +291,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) dst++; } } -static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) +static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count) { while (--count >= 0) { u32 data = __raw_readl(addr); @@ -325,15 +325,15 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) } #endif -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count)); } -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count)); } -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) { IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count)); } -- cgit v1.2.3 From 4d2d4ce001f283ed8127173543b4cfb65641e357 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 4 Aug 2020 19:06:01 +0200 Subject: KVM: arm64: Drop type input from kvm_put_guest We can use typeof() to avoid the need for the type input. 
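A rough user-space illustration of the typeof() trick (hypothetical example, not the KVM code itself): the macro derives the destination pointer type from the value it is handed, so the caller no longer passes the type explicitly. typeof() is a GNU C extension, which the kernel already relies on:

#include <stdio.h>

/* Old shape: the caller must spell out the type. */
#define store_typed(p, value, type)	(*(type *)(p) = (value))

/* New shape: typeof(v) recovers the type from the value itself. */
#define store(p, v)			(*(typeof(v) *)(p) = (v))

int main(void)
{
	unsigned long long x = 0;

	store(&x, 42ULL);	/* no explicit type argument needed */
	printf("%llu\n", x);	/* prints 42 */
	return 0;
}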
Suggested-by: Marc Zyngier Signed-off-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200804170604.42662-4-drjones@redhat.com --- arch/arm64/kvm/pvtime.c | 2 +- include/linux/kvm_host.h | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c index 95f9580275b1..241ded7ee0ad 100644 --- a/arch/arm64/kvm/pvtime.c +++ b/arch/arm64/kvm/pvtime.c @@ -32,7 +32,7 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu) steal_le = cpu_to_le64(steal); idx = srcu_read_lock(&kvm->srcu); offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time); - kvm_put_guest(kvm, base + offset, steal_le, u64); + kvm_put_guest(kvm, base + offset, steal_le); srcu_read_unlock(&kvm->srcu, idx); } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a23076765b4c..84371fb06209 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -749,25 +749,26 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); -#define __kvm_put_guest(kvm, gfn, offset, value, type) \ +#define __kvm_put_guest(kvm, gfn, offset, v) \ ({ \ unsigned long __addr = gfn_to_hva(kvm, gfn); \ - type __user *__uaddr = (type __user *)(__addr + offset); \ + typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ int __ret = -EFAULT; \ \ if (!kvm_is_error_hva(__addr)) \ - __ret = put_user(value, __uaddr); \ + __ret = put_user(v, __uaddr); \ if (!__ret) \ mark_page_dirty(kvm, gfn); \ __ret; \ }) -#define kvm_put_guest(kvm, gpa, value, type) \ +#define kvm_put_guest(kvm, gpa, v) \ ({ \ gpa_t __gpa = gpa; \ struct kvm *__kvm = kvm; \ + \ __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \ - offset_in_page(__gpa), (value), type); \ + offset_in_page(__gpa), v); \ }) int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); -- cgit v1.2.3 From 53f985584e3c2ebe5f2455530fbf87a001528db8 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 4 Aug 2020 19:06:02 +0200 Subject: KVM: arm64: pvtime: Fix stolen time accounting across migration When updating the stolen time we should always read the current stolen time from the user provided memory, not from a kernel cache. If we use a cache then we'll end up resetting stolen time to zero on the first update after migration. 
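A compact sketch of the failure mode (hypothetical variables, not the actual KVM code): guest memory is migrated with the VM while host-side state is not, so the running total must be re-read from guest memory before each update:

#include <stdint.h>
#include <stdio.h>

static uint64_t guest_mem;	/* guest's stolen-time record; survives migration */
static uint64_t host_cache;	/* host bookkeeping; starts at 0 on the target host */

static void update_buggy(uint64_t delta)
{
	host_cache += delta;	/* restarts from 0 after migration ... */
	guest_mem = host_cache;	/* ... clobbering the guest-visible total */
}

static void update_fixed(uint64_t delta)
{
	uint64_t steal = guest_mem;	/* re-read the current total first */

	guest_mem = steal + delta;	/* total keeps accumulating */
}

int main(void)
{
	update_buggy(100);
	host_cache = 0;		/* simulate a migration */
	update_buggy(50);
	printf("buggy: %llu\n", (unsigned long long)guest_mem);	/* 50: time lost */

	guest_mem = 0;
	update_fixed(100);
	host_cache = 0;		/* migration does not matter on the fixed path */
	update_fixed(50);
	printf("fixed: %llu\n", (unsigned long long)guest_mem);	/* 150 */
	return 0;
}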
Signed-off-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200804170604.42662-5-drjones@redhat.com --- arch/arm64/include/asm/kvm_host.h | 1 - arch/arm64/kvm/pvtime.c | 23 +++++++++-------------- include/linux/kvm_host.h | 20 ++++++++++++++++++++ 3 files changed, 29 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 65568b23868a..dd9c3b25aa1e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -368,7 +368,6 @@ struct kvm_vcpu_arch { /* Guest PV state */ struct { - u64 steal; u64 last_steal; gpa_t base; } steal; diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c index 241ded7ee0ad..75234321d896 100644 --- a/arch/arm64/kvm/pvtime.c +++ b/arch/arm64/kvm/pvtime.c @@ -13,26 +13,22 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; + u64 base = vcpu->arch.steal.base; u64 last_steal = vcpu->arch.steal.last_steal; - u64 steal; - __le64 steal_le; - u64 offset; + u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time); + u64 steal = 0; int idx; - u64 base = vcpu->arch.steal.base; if (base == GPA_INVALID) return; - /* Let's do the local bookkeeping */ - steal = vcpu->arch.steal.steal; - vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay); - steal += vcpu->arch.steal.last_steal - last_steal; - vcpu->arch.steal.steal = steal; - - steal_le = cpu_to_le64(steal); idx = srcu_read_lock(&kvm->srcu); - offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time); - kvm_put_guest(kvm, base + offset, steal_le); + if (!kvm_get_guest(kvm, base + offset, steal)) { + steal = le64_to_cpu(steal); + vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay); + steal += vcpu->arch.steal.last_steal - last_steal; + kvm_put_guest(kvm, base + offset, cpu_to_le64(steal)); + } srcu_read_unlock(&kvm->srcu, idx); } @@ -66,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu) * Start counting stolen time from the time the guest requests * the feature enabled. */ - vcpu->arch.steal.steal = 0; vcpu->arch.steal.last_steal = current->sched_info.run_delay; idx = srcu_read_lock(&kvm->srcu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 84371fb06209..05e3c2fb3ef7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -749,6 +749,26 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); +#define __kvm_get_guest(kvm, gfn, offset, v) \ +({ \ + unsigned long __addr = gfn_to_hva(kvm, gfn); \ + typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ + int __ret = -EFAULT; \ + \ + if (!kvm_is_error_hva(__addr)) \ + __ret = get_user(v, __uaddr); \ + __ret; \ +}) + +#define kvm_get_guest(kvm, gpa, v) \ +({ \ + gpa_t __gpa = gpa; \ + struct kvm *__kvm = kvm; \ + \ + __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \ + offset_in_page(__gpa), v); \ +}) + #define __kvm_put_guest(kvm, gfn, offset, v) \ ({ \ unsigned long __addr = gfn_to_hva(kvm, gfn); \ -- cgit v1.2.3