268 files changed, 10665 insertions, 6989 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 2db2cdf42d54..7eead5f97e02 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -119,6 +119,15 @@ Description: unique to allow association with event codes. Units after application of scale and offset are milliamps. +What: /sys/bus/iio/devices/iio:deviceX/in_powerY_raw +KernelVersion: 4.5 +Contact: linux-iio@vger.kernel.org +Description: + Raw (unscaled no bias removal etc.) power measurement from + channel Y. The number must always be specified and + unique to allow association with event codes. Units after + application of scale and offset are milliwatts. + What: /sys/bus/iio/devices/iio:deviceX/in_capacitanceY_raw KernelVersion: 3.2 Contact: linux-iio@vger.kernel.org diff --git a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt index 3223684a643b..552e7a83951d 100644 --- a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt +++ b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt @@ -11,6 +11,11 @@ Required properties: - atmel,min-sample-rate-hz: Minimum sampling rate, it depends on SoC. - atmel,max-sample-rate-hz: Maximum sampling rate, it depends on SoC. - atmel,startup-time-ms: Startup time expressed in ms, it depends on SoC. + - atmel,trigger-edge-type: One of possible edge types for the ADTRG hardware + trigger pin. When the specific edge type is detected, the conversion will + start. Possible values are rising, falling, or both. + This property uses the IRQ edge types values: IRQ_TYPE_EDGE_RISING , + IRQ_TYPE_EDGE_FALLING or IRQ_TYPE_EDGE_BOTH Example: @@ -25,4 +30,5 @@ adc: adc@fc030000 { atmel,startup-time-ms = <4>; vddana-supply = <&vdd_3v3_lp_reg>; vref-supply = <&vdd_3v3_lp_reg>; + atmel,trigger-edge-type = <IRQ_TYPE_EDGE_BOTH>; } diff --git a/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt b/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt index 68c45cbbe3d9..64dc4843c180 100644 --- a/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt +++ b/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt @@ -12,6 +12,7 @@ for the Thermal Controller which holds a phandle to the AUXADC. Required properties: - compatible: Should be one of: - "mediatek,mt2701-auxadc": For MT2701 family of SoCs + - "mediatek,mt7622-auxadc": For MT7622 family of SoCs - "mediatek,mt8173-auxadc": For MT8173 family of SoCs - reg: Address range of the AUXADC unit. - clocks: Should contain a clock specifier for each entry in clock-names diff --git a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt index bcee71f808d0..bf2925c671c6 100644 --- a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt +++ b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt @@ -10,7 +10,9 @@ current. Contents of a stm32 dac root node: ----------------------------------- Required properties: -- compatible: Must be "st,stm32h7-dac-core". +- compatible: Should be one of: + "st,stm32f4-dac-core" + "st,stm32h7-dac-core" - reg: Offset and length of the device's register set. 
- clocks: Must contain an entry for pclk (which feeds the peripheral bus interface) diff --git a/Documentation/devicetree/bindings/iio/humidity/hdc100x.txt b/Documentation/devicetree/bindings/iio/humidity/hdc100x.txt new file mode 100644 index 000000000000..c52333bdfd19 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/humidity/hdc100x.txt @@ -0,0 +1,17 @@ +* HDC100x temperature + humidity sensors + +Required properties: + - compatible: Should contain one of the following: + ti,hdc1000 + ti,hdc1008 + ti,hdc1010 + ti,hdc1050 + ti,hdc1080 + - reg: i2c address of the sensor + +Example: + +hdc100x@40 { + compatible = "ti,hdc1000"; + reg = <0x40>; +}; diff --git a/Documentation/devicetree/bindings/iio/humidity/hts221.txt b/Documentation/devicetree/bindings/iio/humidity/hts221.txt index b20ab9c12080..10adeb0d703d 100644 --- a/Documentation/devicetree/bindings/iio/humidity/hts221.txt +++ b/Documentation/devicetree/bindings/iio/humidity/hts221.txt @@ -5,9 +5,18 @@ Required properties: - reg: i2c address of the sensor / spi cs line Optional properties: +- drive-open-drain: the interrupt/data ready line will be configured + as open drain, which is useful if several sensors share the same + interrupt line. This is a boolean property. + If the requested interrupt is configured as IRQ_TYPE_LEVEL_HIGH or + IRQ_TYPE_EDGE_RISING a pull-down resistor is needed to drive the line + when it is not active, whereas a pull-up one is needed when interrupt + line is configured as IRQ_TYPE_LEVEL_LOW or IRQ_TYPE_EDGE_FALLING. + Refer to pinctrl/pinctrl-bindings.txt for the property description. - interrupt-parent: should be the phandle for the interrupt controller - interrupts: interrupt mapping for IRQ. It should be configured with - flags IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING. + flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or + IRQ_TYPE_EDGE_FALLING. Refer to interrupt-controller/interrupts.txt for generic interrupt client node bindings. diff --git a/Documentation/devicetree/bindings/iio/humidity/htu21.txt b/Documentation/devicetree/bindings/iio/humidity/htu21.txt new file mode 100644 index 000000000000..97d79636f7ae --- /dev/null +++ b/Documentation/devicetree/bindings/iio/humidity/htu21.txt @@ -0,0 +1,13 @@ +*HTU21 - Measurement-Specialties htu21 temperature & humidity sensor and humidity part of MS8607 sensor + +Required properties: + + - compatible: should be "meas,htu21" or "meas,ms8607-humidity" + - reg: I2C address of the sensor + +Example: + +htu21@40 { + compatible = "meas,htu21"; + reg = <0x40>; +}; diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt index 6f28ff55f3ec..1ff1af799c76 100644 --- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt +++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt @@ -11,6 +11,14 @@ Required properties: Optional properties: - st,drdy-int-pin: the pin on the package that will be used to signal "data ready" (valid values: 1 or 2). +- drive-open-drain: the interrupt/data ready line will be configured + as open drain, which is useful if several sensors share the same + interrupt line. This is a boolean property. + (This binding is taken from pinctrl/pinctrl-bindings.txt) + If the requested interrupt is configured as IRQ_TYPE_LEVEL_HIGH or + IRQ_TYPE_EDGE_RISING a pull-down resistor is needed to drive the line + when it is not active, whereas a pull-up one is needed when interrupt + line is configured as IRQ_TYPE_LEVEL_LOW or IRQ_TYPE_EDGE_FALLING. 
- interrupt-parent: should be the phandle for the interrupt controller - interrupts: interrupt mapping for IRQ. It should be configured with flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or diff --git a/Documentation/devicetree/bindings/iio/pressure/ms5637.txt b/Documentation/devicetree/bindings/iio/pressure/ms5637.txt new file mode 100644 index 000000000000..1f43ffa068ac --- /dev/null +++ b/Documentation/devicetree/bindings/iio/pressure/ms5637.txt @@ -0,0 +1,17 @@ +* MS5637 - Measurement-Specialties MS5637, MS5805, MS5837 and MS8607 pressure & temperature sensor + +Required properties: + + -compatible: should be one of the following + meas,ms5637 + meas,ms5805 + meas,ms5837 + meas,ms8607-temppressure + -reg: I2C address of the sensor + +Example: + +ms5637@76 { + compatible = "meas,ms5637"; + reg = <0x76>; +}; diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt index eaa8fbba34e2..1e305f61f3df 100644 --- a/Documentation/devicetree/bindings/iio/st-sensors.txt +++ b/Documentation/devicetree/bindings/iio/st-sensors.txt @@ -45,6 +45,7 @@ Accelerometers: - st,lis2dh12-accel - st,h3lis331dl-accel - st,lng2dm-accel +- st,lis3l02dq Gyroscopes: - st,l3g4200d-gyro @@ -52,6 +53,7 @@ Gyroscopes: - st,lsm330dl-gyro - st,lsm330dlc-gyro - st,l3gd20-gyro +- st,l3gd20h-gyro - st,l3g4is-gyro - st,lsm330-gyro - st,lsm9ds0-gyro diff --git a/Documentation/devicetree/bindings/iio/temperature/tsys01.txt b/Documentation/devicetree/bindings/iio/temperature/tsys01.txt new file mode 100644 index 000000000000..0d5cc5595d0c --- /dev/null +++ b/Documentation/devicetree/bindings/iio/temperature/tsys01.txt @@ -0,0 +1,19 @@ +* TSYS01 - Measurement Specialties temperature sensor + +Required properties: + + - compatible: should be "meas,tsys01" + - reg: I2C address of the sensor (changeable via CSB pin) + + ------------------------ + | CSB | Device Address | + ------------------------ + 1 0x76 + 0 0x77 + +Example: + +tsys01@76 { + compatible = "meas,tsys01"; + reg = <0x76>; +}; diff --git a/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt b/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt index 55a653d15303..6abc755dbf94 100644 --- a/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt +++ b/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt @@ -14,7 +14,7 @@ Example: compatible = "st,stm32-timers"; reg = <0x40010000 0x400>; clocks = <&rcc 0 160>; - clock-names = "clk_int"; + clock-names = "int"; timer@0 { compatible = "st,stm32-timer-trigger"; diff --git a/Documentation/iio/ep93xx_adc.txt b/Documentation/iio/ep93xx_adc.txt new file mode 100644 index 000000000000..23053e7817bd --- /dev/null +++ b/Documentation/iio/ep93xx_adc.txt @@ -0,0 +1,29 @@ +Cirrus Logic EP93xx ADC driver. + +1. Overview + +The driver is intended to work on both low-end (EP9301, EP9302) devices with +5-channel ADC and high-end (EP9307, EP9312, EP9315) devices with 10-channel +touchscreen/ADC module. + +2. Channel numbering + +Numbering scheme for channels 0..4 is defined in EP9301 and EP9302 datasheets. +EP9307, EP9312 and EP9315 have 3 channels more (total 8), but the numbering is +not defined. So the last three are numbered randomly, let's say. 
+ +Assuming ep93xx_adc is IIO device0, you'd find the following entries under +/sys/bus/iio/devices/iio:device0/: + + +-----------------+---------------+ + | sysfs entry | ball/pin name | + +-----------------+---------------+ + | in_voltage0_raw | YM | + | in_voltage1_raw | SXP | + | in_voltage2_raw | SXM | + | in_voltage3_raw | SYP | + | in_voltage4_raw | SYM | + | in_voltage5_raw | XP | + | in_voltage6_raw | XM | + | in_voltage7_raw | YP | + +-----------------+---------------+ diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c index 8ca8041267ef..f85014fbaa12 100644 --- a/drivers/iio/accel/bmc150-accel-i2c.c +++ b/drivers/iio/accel/bmc150-accel-i2c.c @@ -64,6 +64,7 @@ static const struct acpi_device_id bmc150_accel_acpi_match[] = { {"BMA250E", bma250e}, {"BMA222E", bma222e}, {"BMA0280", bma280}, + {"BOSC0200"}, { }, }; MODULE_DEVICE_TABLE(acpi, bmc150_accel_acpi_match); diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c index 537cfa8b6edf..c0c1620d2a2f 100644 --- a/drivers/iio/accel/da311.c +++ b/drivers/iio/accel/da311.c @@ -139,7 +139,7 @@ static int da311_register_mask_write(struct i2c_client *client, u16 addr, /* Init sequence taken from the android driver */ static int da311_reset(struct i2c_client *client) { - const struct { + static const struct { u16 addr; u8 mask; u8 data; diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index cb1d83fa19a0..39ab210c44f6 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -36,7 +36,7 @@ #define SCA3000_LOCKED BIT(5) #define SCA3000_EEPROM_CS_ERROR BIT(1) #define SCA3000_SPI_FRAME_ERROR BIT(0) - + /* All reads done using register decrement so no need to directly access LSBs */ #define SCA3000_REG_X_MSB_ADDR 0x05 #define SCA3000_REG_Y_MSB_ADDR 0x07 @@ -74,7 +74,7 @@ #define SCA3000_REG_INT_STATUS_ADDR 0x16 #define SCA3000_REG_INT_STATUS_THREE_QUARTERS BIT(7) #define SCA3000_REG_INT_STATUS_HALF BIT(6) - + #define SCA3000_INT_STATUS_FREE_FALL BIT(3) #define SCA3000_INT_STATUS_Y_TRIGGER BIT(2) #define SCA3000_INT_STATUS_X_TRIGGER BIT(1) @@ -124,7 +124,7 @@ #define SCA3000_REG_INT_MASK_ADDR 0x21 #define SCA3000_REG_INT_MASK_PROT_MASK 0x1C - + #define SCA3000_REG_INT_MASK_RING_THREE_QUARTER BIT(7) #define SCA3000_REG_INT_MASK_RING_HALF BIT(6) diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h index 3ad44ce7ae82..0fe521609a3a 100644 --- a/drivers/iio/accel/st_accel.h +++ b/drivers/iio/accel/st_accel.h @@ -29,10 +29,13 @@ enum st_accel_type { LIS2DH12, LIS3L02DQ, LNG2DM, + H3LIS331DL, + LIS331DL, + LIS3LV02DL, ST_ACCEL_MAX, }; -#define H3LIS331DL_DRIVER_NAME "h3lis331dl_accel" +#define H3LIS331DL_ACCEL_DEV_NAME "h3lis331dl_accel" #define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel" #define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel" #define LIS3DH_ACCEL_DEV_NAME "lis3dh" diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index e44f62bf9caa..1e0eea110fa9 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -464,7 +464,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .wai = 0x32, .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, .sensors_supported = { - [0] = H3LIS331DL_DRIVER_NAME, + [0] = H3LIS331DL_ACCEL_DEV_NAME, }, .ch = (struct iio_chan_spec *)st_accel_12bit_channels, .odr = { diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c index 543f0ad7fd7e..18cafb9f2468 100644 --- a/drivers/iio/accel/st_accel_i2c.c +++ 
b/drivers/iio/accel/st_accel_i2c.c @@ -84,7 +84,7 @@ static const struct of_device_id st_accel_of_match[] = { }, { .compatible = "st,h3lis331dl-accel", - .data = H3LIS331DL_DRIVER_NAME, + .data = H3LIS331DL_ACCEL_DEV_NAME, }, { .compatible = "st,lis3l02dq", @@ -126,6 +126,9 @@ static const struct i2c_device_id st_accel_id_table[] = { { LIS2DH12_ACCEL_DEV_NAME, LIS2DH12 }, { LIS3L02DQ_ACCEL_DEV_NAME, LIS3L02DQ }, { LNG2DM_ACCEL_DEV_NAME, LNG2DM }, + { H3LIS331DL_ACCEL_DEV_NAME, H3LIS331DL }, + { LIS331DL_ACCEL_DEV_NAME, LIS331DL }, + { LIS3LV02DL_ACCEL_DEV_NAME, LIS3LV02DL }, {}, }; MODULE_DEVICE_TABLE(i2c, st_accel_id_table); @@ -144,7 +147,8 @@ static int st_accel_i2c_probe(struct i2c_client *client, adata = iio_priv(indio_dev); if (client->dev.of_node) { - st_sensors_of_i2c_probe(client, st_accel_of_match); + st_sensors_of_name_probe(&client->dev, st_accel_of_match, + client->name, sizeof(client->name)); } else if (ACPI_HANDLE(&client->dev)) { ret = st_sensors_match_acpi_device(&client->dev); if ((ret < 0) || (ret >= ST_ACCEL_MAX)) diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c index 1a867f5563a4..915fa49085f7 100644 --- a/drivers/iio/accel/st_accel_spi.c +++ b/drivers/iio/accel/st_accel_spi.c @@ -18,6 +18,77 @@ #include <linux/iio/common/st_sensors_spi.h> #include "st_accel.h" +#ifdef CONFIG_OF +/* + * For new single-chip sensors use <device_name> as compatible string. + * For old single-chip devices keep <device_name>-accel to maintain + * compatibility + */ +static const struct of_device_id st_accel_of_match[] = { + { + /* An older compatible */ + .compatible = "st,lis302dl-spi", + .data = LIS3LV02DL_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lis3lv02dl-accel", + .data = LIS3LV02DL_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lis3dh-accel", + .data = LIS3DH_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lsm330d-accel", + .data = LSM330D_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lsm330dl-accel", + .data = LSM330DL_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lsm330dlc-accel", + .data = LSM330DLC_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lis331dlh-accel", + .data = LIS331DLH_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lsm330-accel", + .data = LSM330_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lsm303agr-accel", + .data = LSM303AGR_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lis2dh12-accel", + .data = LIS2DH12_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lis3l02dq", + .data = LIS3L02DQ_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lng2dm-accel", + .data = LNG2DM_ACCEL_DEV_NAME, + }, + { + .compatible = "st,h3lis331dl-accel", + .data = H3LIS331DL_ACCEL_DEV_NAME, + }, + { + .compatible = "st,lis331dl-accel", + .data = LIS331DL_ACCEL_DEV_NAME, + }, + {} +}; +MODULE_DEVICE_TABLE(of, st_accel_of_match); +#else +#define st_accel_of_match NULL +#endif + static int st_accel_spi_probe(struct spi_device *spi) { struct iio_dev *indio_dev; @@ -30,6 +101,8 @@ static int st_accel_spi_probe(struct spi_device *spi) adata = iio_priv(indio_dev); + st_sensors_of_name_probe(&spi->dev, st_accel_of_match, + spi->modalias, sizeof(spi->modalias)); st_sensors_spi_configure(indio_dev, spi, adata); err = st_accel_common_probe(indio_dev); @@ -57,22 +130,17 @@ static const struct spi_device_id st_accel_id_table[] = { { LIS2DH12_ACCEL_DEV_NAME }, { LIS3L02DQ_ACCEL_DEV_NAME }, { LNG2DM_ACCEL_DEV_NAME }, + { H3LIS331DL_ACCEL_DEV_NAME }, + { LIS331DL_ACCEL_DEV_NAME }, + { LIS3LV02DL_ACCEL_DEV_NAME }, {}, }; MODULE_DEVICE_TABLE(spi, st_accel_id_table); -#ifdef CONFIG_OF -static 
const struct of_device_id lis302dl_spi_dt_ids[] = { - { .compatible = "st,lis302dl-spi" }, - {} -}; -MODULE_DEVICE_TABLE(of, lis302dl_spi_dt_ids); -#endif - static struct spi_driver st_accel_driver = { .driver = { .name = "st-accel-spi", - .of_match_table = of_match_ptr(lis302dl_spi_dt_ids), + .of_match_table = of_match_ptr(st_accel_of_match), }, .probe = st_accel_spi_probe, .remove = st_accel_spi_remove, diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 614fa41559b1..e4eeebac5297 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -239,6 +239,15 @@ config DA9150_GPADC To compile this driver as a module, choose M here: the module will be called berlin2-adc. +config DLN2_ADC + tristate "Diolan DLN-2 ADC driver support" + depends on MFD_DLN2 + help + Say yes here to build support for the Diolan DLN-2 ADC. + + This driver can also be built as a module. If so, the module will be + called dln2-adc. + config ENVELOPE_DETECTOR tristate "Envelope detector using a DAC and a comparator" depends on OF @@ -249,6 +258,17 @@ config ENVELOPE_DETECTOR To compile this driver as a module, choose M here: the module will be called envelope-detector. +config EP93XX_ADC + tristate "Cirrus Logic EP93XX ADC driver" + depends on ARCH_EP93XX + help + Driver for the ADC module on the EP93XX series of SoCs from Cirrus Logic. + It's recommended to switch on the CONFIG_HIGH_RES_TIMERS option; in this + case the driver will reduce its CPU usage by 90% in some use cases. + + To compile this driver as a module, choose M here: the module will be + called ep93xx_adc. + config EXYNOS_ADC tristate "Exynos ADC driver support" depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST) @@ -322,7 +342,7 @@ config INA2XX_ADC This driver is mutually exclusive with the HWMON version. config IMX7D_ADC - tristate "IMX7D ADC driver" + tristate "Freescale IMX7D ADC driver" depends on ARCH_MXC || COMPILE_TEST depends on HAS_IOMEM help @@ -362,6 +382,16 @@ config LPC32XX_ADC activate only one via device tree selection. Provides direct access via sysfs. +config LTC2471 + tristate "Linear Technology LTC2471 and LTC2473 ADC driver" + depends on I2C + help + Say yes here to build support for the Linear Technology LTC2471 and + LTC2473 16-bit I2C ADCs. + + This driver can also be built as a module. If so, the module will + be called ltc2471. 
+ config LTC2485 tristate "Linear Technology LTC2485 ADC driver" depends on I2C diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index b546736a5541..9874e05f52d7 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -24,7 +24,9 @@ obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o obj-$(CONFIG_CPCAP_ADC) += cpcap-adc.o obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o +obj-$(CONFIG_DLN2_ADC) += dln2-adc.o obj-$(CONFIG_ENVELOPE_DETECTOR) += envelope-detector.o +obj-$(CONFIG_EP93XX_ADC) += ep93xx_adc.o obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o obj-$(CONFIG_HI8435) += hi8435.o @@ -34,6 +36,7 @@ obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o +obj-$(CONFIG_LTC2471) += ltc2471.o obj-$(CONFIG_LTC2485) += ltc2485.o obj-$(CONFIG_LTC2497) += ltc2497.o obj-$(CONFIG_MAX1027) += max1027.o diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c index 75cca42b6e70..ce45037295d8 100644 --- a/drivers/iio/adc/ad7766.c +++ b/drivers/iio/adc/ad7766.c @@ -103,8 +103,7 @@ static int ad7766_preenable(struct iio_dev *indio_dev) return ret; } - if (ad7766->pd_gpio) - gpiod_set_value(ad7766->pd_gpio, 0); + gpiod_set_value(ad7766->pd_gpio, 0); return 0; } @@ -113,8 +112,7 @@ static int ad7766_postdisable(struct iio_dev *indio_dev) { struct ad7766 *ad7766 = iio_priv(indio_dev); - if (ad7766->pd_gpio) - gpiod_set_value(ad7766->pd_gpio, 1); + gpiod_set_value(ad7766->pd_gpio, 1); /* * The PD pin is synchronous to the clock, so give it some time to diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index e10dca3ed74b..bc5b38e3a147 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -25,6 +25,11 @@ #include <linux/wait.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> +#include <linux/iio/buffer.h> +#include <linux/iio/trigger.h> +#include <linux/iio/trigger_consumer.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/pinctrl/consumer.h> #include <linux/regulator/consumer.h> /* Control Register */ @@ -132,6 +137,17 @@ #define AT91_SAMA5D2_PRESSR 0xbc /* Trigger Register */ #define AT91_SAMA5D2_TRGR 0xc0 +/* Mask for TRGMOD field of TRGR register */ +#define AT91_SAMA5D2_TRGR_TRGMOD_MASK GENMASK(2, 0) +/* No trigger, only software trigger can start conversions */ +#define AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER 0 +/* Trigger Mode external trigger rising edge */ +#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE 1 +/* Trigger Mode external trigger falling edge */ +#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL 2 +/* Trigger Mode external trigger any edge */ +#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY 3 + /* Correction Select Register */ #define AT91_SAMA5D2_COSR 0xd0 /* Correction Value Register */ @@ -145,14 +161,29 @@ /* Version Register */ #define AT91_SAMA5D2_VERSION 0xfc +#define AT91_SAMA5D2_HW_TRIG_CNT 3 +#define AT91_SAMA5D2_SINGLE_CHAN_CNT 12 +#define AT91_SAMA5D2_DIFF_CHAN_CNT 6 + +/* + * Maximum number of bytes to hold conversion from all channels + * plus the timestamp + */ +#define AT91_BUFFER_MAX_BYTES ((AT91_SAMA5D2_SINGLE_CHAN_CNT + \ + AT91_SAMA5D2_DIFF_CHAN_CNT) * 2 + 8) + +#define AT91_BUFFER_MAX_HWORDS (AT91_BUFFER_MAX_BYTES / 2) + #define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \ { \ .type = IIO_VOLTAGE, \ .channel = num, \ .address = addr, \ + .scan_index = num, \ .scan_type = { \ 
.sign = 'u', \ .realbits = 12, \ + .storagebits = 16, \ }, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ @@ -168,9 +199,11 @@ .channel = num, \ .channel2 = num2, \ .address = addr, \ + .scan_index = num + AT91_SAMA5D2_SINGLE_CHAN_CNT, \ .scan_type = { \ .sign = 's', \ .realbits = 12, \ + .storagebits = 16, \ }, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ @@ -188,6 +221,12 @@ struct at91_adc_soc_info { unsigned max_sample_rate; }; +struct at91_adc_trigger { + char *name; + unsigned int trgmod_value; + unsigned int edge_type; +}; + struct at91_adc_state { void __iomem *base; int irq; @@ -195,11 +234,14 @@ struct at91_adc_state { struct regulator *reg; struct regulator *vref; int vref_uv; + struct iio_trigger *trig; + const struct at91_adc_trigger *selected_trig; const struct iio_chan_spec *chan; bool conversion_done; u32 conversion_value; struct at91_adc_soc_info soc_info; wait_queue_head_t wq_data_available; + u16 buffer[AT91_BUFFER_MAX_HWORDS]; /* * lock to prevent concurrent 'single conversion' requests through * sysfs. @@ -207,6 +249,24 @@ struct at91_adc_state { struct mutex lock; }; +static const struct at91_adc_trigger at91_adc_trigger_list[] = { + { + .name = "external_rising", + .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE, + .edge_type = IRQ_TYPE_EDGE_RISING, + }, + { + .name = "external_falling", + .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL, + .edge_type = IRQ_TYPE_EDGE_FALLING, + }, + { + .name = "external_any", + .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY, + .edge_type = IRQ_TYPE_EDGE_BOTH, + }, +}; + static const struct iio_chan_spec at91_adc_channels[] = { AT91_SAMA5D2_CHAN_SINGLE(0, 0x50), AT91_SAMA5D2_CHAN_SINGLE(1, 0x54), @@ -226,12 +286,132 @@ static const struct iio_chan_spec at91_adc_channels[] = { AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68), AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70), AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78), + IIO_CHAN_SOFT_TIMESTAMP(AT91_SAMA5D2_SINGLE_CHAN_CNT + + AT91_SAMA5D2_DIFF_CHAN_CNT + 1), +}; + +static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) +{ + struct iio_dev *indio = iio_trigger_get_drvdata(trig); + struct at91_adc_state *st = iio_priv(indio); + u32 status = at91_adc_readl(st, AT91_SAMA5D2_TRGR); + u8 bit; + + /* clear TRGMOD */ + status &= ~AT91_SAMA5D2_TRGR_TRGMOD_MASK; + + if (state) + status |= st->selected_trig->trgmod_value; + + /* set/unset hw trigger */ + at91_adc_writel(st, AT91_SAMA5D2_TRGR, status); + + for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) { + struct iio_chan_spec const *chan = indio->channels + bit; + + if (state) { + at91_adc_writel(st, AT91_SAMA5D2_CHER, + BIT(chan->channel)); + at91_adc_writel(st, AT91_SAMA5D2_IER, + BIT(chan->channel)); + } else { + at91_adc_writel(st, AT91_SAMA5D2_IDR, + BIT(chan->channel)); + at91_adc_writel(st, AT91_SAMA5D2_CHDR, + BIT(chan->channel)); + } + } + + return 0; +} + +static int at91_adc_reenable_trigger(struct iio_trigger *trig) +{ + struct iio_dev *indio = iio_trigger_get_drvdata(trig); + struct at91_adc_state *st = iio_priv(indio); + + enable_irq(st->irq); + + /* Needed to ACK the DRDY interruption */ + at91_adc_readl(st, AT91_SAMA5D2_LCDR); + return 0; +} + +static const struct iio_trigger_ops at91_adc_trigger_ops = { + .owner = THIS_MODULE, + .set_trigger_state = &at91_adc_configure_trigger, + .try_reenable = &at91_adc_reenable_trigger, }; +static struct iio_trigger 
*at91_adc_allocate_trigger(struct iio_dev *indio, + char *trigger_name) +{ + struct iio_trigger *trig; + int ret; + + trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name, + indio->id, trigger_name); + if (!trig) + return NULL; + + trig->dev.parent = indio->dev.parent; + iio_trigger_set_drvdata(trig, indio); + trig->ops = &at91_adc_trigger_ops; + + ret = devm_iio_trigger_register(&indio->dev, trig); + if (ret) + return ERR_PTR(ret); + + return trig; +} + +static int at91_adc_trigger_init(struct iio_dev *indio) +{ + struct at91_adc_state *st = iio_priv(indio); + + st->trig = at91_adc_allocate_trigger(indio, st->selected_trig->name); + if (IS_ERR(st->trig)) { + dev_err(&indio->dev, + "could not allocate trigger\n"); + return PTR_ERR(st->trig); + } + + return 0; +} + +static irqreturn_t at91_adc_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio = pf->indio_dev; + struct at91_adc_state *st = iio_priv(indio); + int i = 0; + u8 bit; + + for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) { + struct iio_chan_spec const *chan = indio->channels + bit; + + st->buffer[i] = at91_adc_readl(st, chan->address); + i++; + } + + iio_push_to_buffers_with_timestamp(indio, st->buffer, pf->timestamp); + + iio_trigger_notify_done(indio->trig); + + return IRQ_HANDLED; +} + +static int at91_adc_buffer_init(struct iio_dev *indio) +{ + return devm_iio_triggered_buffer_setup(&indio->dev, indio, + &iio_pollfunc_store_time, + &at91_adc_trigger_handler, NULL); +} + static unsigned at91_adc_startup_time(unsigned startup_time_min, unsigned adc_clk_khz) { - const unsigned startup_lookup[] = { + static const unsigned int startup_lookup[] = { 0, 8, 16, 24, 64, 80, 96, 112, 512, 576, 640, 704, @@ -293,14 +473,18 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private) u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR); u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR); - if (status & imr) { + if (!(status & imr)) + return IRQ_NONE; + + if (iio_buffer_enabled(indio)) { + disable_irq_nosync(irq); + iio_trigger_poll(indio->trig); + } else { st->conversion_value = at91_adc_readl(st, st->chan->address); st->conversion_done = true; wake_up_interruptible(&st->wq_data_available); - return IRQ_HANDLED; } - - return IRQ_NONE; + return IRQ_HANDLED; } static int at91_adc_read_raw(struct iio_dev *indio_dev, @@ -313,6 +497,11 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: + /* we cannot use software trigger if hw trigger enabled */ + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + mutex_lock(&st->lock); st->chan = chan; @@ -344,6 +533,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev, at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel)); mutex_unlock(&st->lock); + + iio_device_release_direct_mode(indio_dev); return ret; case IIO_CHAN_INFO_SCALE: @@ -386,12 +577,27 @@ static const struct iio_info at91_adc_info = { .driver_module = THIS_MODULE, }; +static void at91_adc_hw_init(struct at91_adc_state *st) +{ + at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST); + at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff); + /* + * Transfer field must be set to 2 according to the datasheet and + * allows different analog settings for each channel. 
+ */ + at91_adc_writel(st, AT91_SAMA5D2_MR, + AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH); + + at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate); +} + static int at91_adc_probe(struct platform_device *pdev) { struct iio_dev *indio_dev; struct at91_adc_state *st; struct resource *res; - int ret; + int ret, i; + u32 edge_type; indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st)); if (!indio_dev) @@ -432,6 +638,27 @@ static int at91_adc_probe(struct platform_device *pdev) return ret; } + ret = of_property_read_u32(pdev->dev.of_node, + "atmel,trigger-edge-type", &edge_type); + if (ret) { + dev_err(&pdev->dev, + "invalid or missing value for atmel,trigger-edge-type\n"); + return ret; + } + + st->selected_trig = NULL; + + for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT; i++) + if (at91_adc_trigger_list[i].edge_type == edge_type) { + st->selected_trig = &at91_adc_trigger_list[i]; + break; + } + + if (!st->selected_trig) { + dev_err(&pdev->dev, "invalid external trigger edge value\n"); + return -EINVAL; + } + init_waitqueue_head(&st->wq_data_available); mutex_init(&st->lock); @@ -482,16 +709,7 @@ static int at91_adc_probe(struct platform_device *pdev) goto vref_disable; } - at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST); - at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff); - /* - * Transfer field must be set to 2 according to the datasheet and - * allows different analog settings for each channel. - */ - at91_adc_writel(st, AT91_SAMA5D2_MR, - AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH); - - at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate); + at91_adc_hw_init(st); ret = clk_prepare_enable(st->per_clk); if (ret) @@ -499,10 +717,25 @@ static int at91_adc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, indio_dev); + ret = at91_adc_buffer_init(indio_dev); + if (ret < 0) { + dev_err(&pdev->dev, "couldn't initialize the buffer.\n"); + goto per_clk_disable_unprepare; + } + + ret = at91_adc_trigger_init(indio_dev); + if (ret < 0) { + dev_err(&pdev->dev, "couldn't setup the triggers.\n"); + goto per_clk_disable_unprepare; + } + ret = iio_device_register(indio_dev); if (ret < 0) goto per_clk_disable_unprepare; + dev_info(&pdev->dev, "setting up trigger as %s\n", + st->selected_trig->name); + dev_info(&pdev->dev, "version: %x\n", readl_relaxed(st->base + AT91_SAMA5D2_VERSION)); @@ -532,6 +765,69 @@ static int at91_adc_remove(struct platform_device *pdev) return 0; } +static __maybe_unused int at91_adc_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = + platform_get_drvdata(to_platform_device(dev)); + struct at91_adc_state *st = iio_priv(indio_dev); + + /* + * Do a software reset of the ADC before we go to suspend. + * This will ensure that all pins are free from being muxed by the ADC + * and can be used by other devices. + * Otherwise, the ADC will hog them and we can't go to suspend mode. 
+ */ + at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST); + + clk_disable_unprepare(st->per_clk); + regulator_disable(st->vref); + regulator_disable(st->reg); + + return pinctrl_pm_select_sleep_state(dev); +} + +static __maybe_unused int at91_adc_resume(struct device *dev) +{ + struct iio_dev *indio_dev = + platform_get_drvdata(to_platform_device(dev)); + struct at91_adc_state *st = iio_priv(indio_dev); + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + goto resume_failed; + + ret = regulator_enable(st->reg); + if (ret) + goto resume_failed; + + ret = regulator_enable(st->vref); + if (ret) + goto reg_disable_resume; + + ret = clk_prepare_enable(st->per_clk); + if (ret) + goto vref_disable_resume; + + at91_adc_hw_init(st); + + /* reconfiguring trigger hardware state */ + if (iio_buffer_enabled(indio_dev)) + at91_adc_configure_trigger(st->trig, true); + + return 0; + +vref_disable_resume: + regulator_disable(st->vref); +reg_disable_resume: + regulator_disable(st->reg); +resume_failed: + dev_err(&indio_dev->dev, "failed to resume\n"); + return ret; +} + +static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume); + static const struct of_device_id at91_adc_dt_match[] = { { .compatible = "atmel,sama5d2-adc", @@ -547,6 +843,7 @@ static struct platform_driver at91_adc_driver = { .driver = { .name = "at91-sama5d2_adc", .of_match_table = at91_adc_dt_match, + .pm = &at91_adc_pm_ops, }, }; module_platform_driver(at91_adc_driver) diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 34b928cefeed..15109728cae7 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -799,7 +799,7 @@ static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz) * For sama5d3x and at91sam9x5, the formula changes to: * Startup Time = <lookup_table_value> / ADC Clock */ - const int startup_lookup[] = { + static const int startup_lookup[] = { 0, 8, 16, 24, 64, 80, 96, 112, 512, 576, 640, 704, diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c new file mode 100644 index 000000000000..ab8d6aed5085 --- /dev/null +++ b/drivers/iio/adc/dln2-adc.c @@ -0,0 +1,722 @@ +/* + * Driver for the Diolan DLN-2 USB-ADC adapter + * + * Copyright (c) 2017 Jack Andersen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/mfd/dln2.h> + +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/iio/trigger.h> +#include <linux/iio/trigger_consumer.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/iio/buffer.h> +#include <linux/iio/kfifo_buf.h> + +#define DLN2_ADC_MOD_NAME "dln2-adc" + +#define DLN2_ADC_ID 0x06 + +#define DLN2_ADC_GET_CHANNEL_COUNT DLN2_CMD(0x01, DLN2_ADC_ID) +#define DLN2_ADC_ENABLE DLN2_CMD(0x02, DLN2_ADC_ID) +#define DLN2_ADC_DISABLE DLN2_CMD(0x03, DLN2_ADC_ID) +#define DLN2_ADC_CHANNEL_ENABLE DLN2_CMD(0x05, DLN2_ADC_ID) +#define DLN2_ADC_CHANNEL_DISABLE DLN2_CMD(0x06, DLN2_ADC_ID) +#define DLN2_ADC_SET_RESOLUTION DLN2_CMD(0x08, DLN2_ADC_ID) +#define DLN2_ADC_CHANNEL_GET_VAL DLN2_CMD(0x0A, DLN2_ADC_ID) +#define DLN2_ADC_CHANNEL_GET_ALL_VAL DLN2_CMD(0x0B, DLN2_ADC_ID) +#define DLN2_ADC_CHANNEL_SET_CFG DLN2_CMD(0x0C, DLN2_ADC_ID) +#define DLN2_ADC_CHANNEL_GET_CFG DLN2_CMD(0x0D, DLN2_ADC_ID) +#define DLN2_ADC_CONDITION_MET_EV DLN2_CMD(0x10, DLN2_ADC_ID) + +#define DLN2_ADC_EVENT_NONE 0 +#define DLN2_ADC_EVENT_BELOW 1 +#define DLN2_ADC_EVENT_LEVEL_ABOVE 2 +#define DLN2_ADC_EVENT_OUTSIDE 3 +#define DLN2_ADC_EVENT_INSIDE 4 +#define DLN2_ADC_EVENT_ALWAYS 5 + +#define DLN2_ADC_MAX_CHANNELS 8 +#define DLN2_ADC_DATA_BITS 10 + +/* + * Plays similar role to iio_demux_table in subsystem core; except allocated + * in a fixed 8-element array. + */ +struct dln2_adc_demux_table { + unsigned int from; + unsigned int to; + unsigned int length; +}; + +struct dln2_adc { + struct platform_device *pdev; + struct iio_chan_spec iio_channels[DLN2_ADC_MAX_CHANNELS + 1]; + int port, trigger_chan; + struct iio_trigger *trig; + struct mutex mutex; + /* Cached sample period in milliseconds */ + unsigned int sample_period; + /* Demux table */ + unsigned int demux_count; + struct dln2_adc_demux_table demux[DLN2_ADC_MAX_CHANNELS]; + /* Precomputed timestamp padding offset and length */ + unsigned int ts_pad_offset, ts_pad_length; +}; + +struct dln2_adc_port_chan { + u8 port; + u8 chan; +}; + +struct dln2_adc_get_all_vals { + __le16 channel_mask; + __le16 values[DLN2_ADC_MAX_CHANNELS]; +}; + +static void dln2_adc_add_demux(struct dln2_adc *dln2, + unsigned int in_loc, unsigned int out_loc, + unsigned int length) +{ + struct dln2_adc_demux_table *p = dln2->demux_count ? 
+ &dln2->demux[dln2->demux_count - 1] : NULL; + + if (p && p->from + p->length == in_loc && + p->to + p->length == out_loc) { + p->length += length; + } else if (dln2->demux_count < DLN2_ADC_MAX_CHANNELS) { + p = &dln2->demux[dln2->demux_count++]; + p->from = in_loc; + p->to = out_loc; + p->length = length; + } +} + +static void dln2_adc_update_demux(struct dln2_adc *dln2) +{ + int in_ind = -1, out_ind; + unsigned int in_loc = 0, out_loc = 0; + struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev); + + /* Clear out any old demux */ + dln2->demux_count = 0; + + /* Optimize all 8-channels case */ + if (indio_dev->masklength && + (*indio_dev->active_scan_mask & 0xff) == 0xff) { + dln2_adc_add_demux(dln2, 0, 0, 16); + dln2->ts_pad_offset = 0; + dln2->ts_pad_length = 0; + return; + } + + /* Build demux table from fixed 8-channels to active_scan_mask */ + for_each_set_bit(out_ind, + indio_dev->active_scan_mask, + indio_dev->masklength) { + /* Handle timestamp separately */ + if (out_ind == DLN2_ADC_MAX_CHANNELS) + break; + for (++in_ind; in_ind != out_ind; ++in_ind) + in_loc += 2; + dln2_adc_add_demux(dln2, in_loc, out_loc, 2); + out_loc += 2; + in_loc += 2; + } + + if (indio_dev->scan_timestamp) { + size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1; + + dln2->ts_pad_offset = out_loc; + dln2->ts_pad_length = ts_offset * sizeof(int64_t) - out_loc; + } else { + dln2->ts_pad_offset = 0; + dln2->ts_pad_length = 0; + } +} + +static int dln2_adc_get_chan_count(struct dln2_adc *dln2) +{ + int ret; + u8 port = dln2->port; + u8 count; + int olen = sizeof(count); + + ret = dln2_transfer(dln2->pdev, DLN2_ADC_GET_CHANNEL_COUNT, + &port, sizeof(port), &count, &olen); + if (ret < 0) { + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + return ret; + } + if (olen < sizeof(count)) + return -EPROTO; + + return count; +} + +static int dln2_adc_set_port_resolution(struct dln2_adc *dln2) +{ + int ret; + struct dln2_adc_port_chan port_chan = { + .port = dln2->port, + .chan = DLN2_ADC_DATA_BITS, + }; + + ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_SET_RESOLUTION, + &port_chan, sizeof(port_chan)); + if (ret < 0) + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + + return ret; +} + +static int dln2_adc_set_chan_enabled(struct dln2_adc *dln2, + int channel, bool enable) +{ + int ret; + struct dln2_adc_port_chan port_chan = { + .port = dln2->port, + .chan = channel, + }; + u16 cmd = enable ? DLN2_ADC_CHANNEL_ENABLE : DLN2_ADC_CHANNEL_DISABLE; + + ret = dln2_transfer_tx(dln2->pdev, cmd, &port_chan, sizeof(port_chan)); + if (ret < 0) + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + + return ret; +} + +static int dln2_adc_set_port_enabled(struct dln2_adc *dln2, bool enable, + u16 *conflict_out) +{ + int ret; + u8 port = dln2->port; + __le16 conflict; + int olen = sizeof(conflict); + u16 cmd = enable ? 
DLN2_ADC_ENABLE : DLN2_ADC_DISABLE; + + if (conflict_out) + *conflict_out = 0; + + ret = dln2_transfer(dln2->pdev, cmd, &port, sizeof(port), + &conflict, &olen); + if (ret < 0) { + dev_dbg(&dln2->pdev->dev, "Problem in %s(%d)\n", + __func__, (int)enable); + if (conflict_out && enable && olen >= sizeof(conflict)) + *conflict_out = le16_to_cpu(conflict); + return ret; + } + if (enable && olen < sizeof(conflict)) + return -EPROTO; + + return ret; +} + +static int dln2_adc_set_chan_period(struct dln2_adc *dln2, + unsigned int channel, unsigned int period) +{ + int ret; + struct { + struct dln2_adc_port_chan port_chan; + __u8 type; + __le16 period; + __le16 low; + __le16 high; + } __packed set_cfg = { + .port_chan.port = dln2->port, + .port_chan.chan = channel, + .type = period ? DLN2_ADC_EVENT_ALWAYS : DLN2_ADC_EVENT_NONE, + .period = cpu_to_le16(period) + }; + + ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_CHANNEL_SET_CFG, + &set_cfg, sizeof(set_cfg)); + if (ret < 0) + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + + return ret; +} + +static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel) +{ + int ret, i; + struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev); + u16 conflict; + __le16 value; + int olen = sizeof(value); + struct dln2_adc_port_chan port_chan = { + .port = dln2->port, + .chan = channel, + }; + + ret = iio_device_claim_direct_mode(indio_dev); + if (ret < 0) + return ret; + + ret = dln2_adc_set_chan_enabled(dln2, channel, true); + if (ret < 0) + goto release_direct; + + ret = dln2_adc_set_port_enabled(dln2, true, &conflict); + if (ret < 0) { + if (conflict) { + dev_err(&dln2->pdev->dev, + "ADC pins conflict with mask %04X\n", + (int)conflict); + ret = -EBUSY; + } + goto disable_chan; + } + + /* + * Call GET_VAL twice due to initial zero-return immediately after + * enabling channel. 
+ */ + for (i = 0; i < 2; ++i) { + ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_VAL, + &port_chan, sizeof(port_chan), + &value, &olen); + if (ret < 0) { + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + goto disable_port; + } + if (olen < sizeof(value)) { + ret = -EPROTO; + goto disable_port; + } + } + + ret = le16_to_cpu(value); + +disable_port: + dln2_adc_set_port_enabled(dln2, false, NULL); +disable_chan: + dln2_adc_set_chan_enabled(dln2, channel, false); +release_direct: + iio_device_release_direct_mode(indio_dev); + + return ret; +} + +static int dln2_adc_read_all(struct dln2_adc *dln2, + struct dln2_adc_get_all_vals *get_all_vals) +{ + int ret; + __u8 port = dln2->port; + int olen = sizeof(*get_all_vals); + + ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_ALL_VAL, + &port, sizeof(port), get_all_vals, &olen); + if (ret < 0) { + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + return ret; + } + if (olen < sizeof(*get_all_vals)) + return -EPROTO; + + return ret; +} + +static int dln2_adc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, + int *val2, + long mask) +{ + int ret; + unsigned int microhertz; + struct dln2_adc *dln2 = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&dln2->mutex); + ret = dln2_adc_read(dln2, chan->channel); + mutex_unlock(&dln2->mutex); + + if (ret < 0) + return ret; + + *val = ret; + return IIO_VAL_INT; + + case IIO_CHAN_INFO_SCALE: + /* + * Voltage reference is fixed at 3.3v + * 3.3 / (1 << 10) * 1000000000 + */ + *val = 0; + *val2 = 3222656; + return IIO_VAL_INT_PLUS_NANO; + + case IIO_CHAN_INFO_SAMP_FREQ: + if (dln2->sample_period) { + microhertz = 1000000000 / dln2->sample_period; + *val = microhertz / 1000000; + *val2 = microhertz % 1000000; + } else { + *val = 0; + *val2 = 0; + } + + return IIO_VAL_INT_PLUS_MICRO; + + default: + return -EINVAL; + } +} + +static int dln2_adc_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, + int val2, + long mask) +{ + int ret; + unsigned int microhertz; + struct dln2_adc *dln2 = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + microhertz = 1000000 * val + val2; + + mutex_lock(&dln2->mutex); + + dln2->sample_period = + microhertz ? 1000000000 / microhertz : UINT_MAX; + if (dln2->sample_period > 65535) { + dln2->sample_period = 65535; + dev_warn(&dln2->pdev->dev, + "clamping period to 65535ms\n"); + } + + /* + * The first requested channel is arbitrated as a shared + * trigger source, so only one event is registered with the + * DLN. The event handler will then read all enabled channel + * values using DLN2_ADC_CHANNEL_GET_ALL_VAL to maintain + * synchronization between ADC readings. 
+ */ + if (dln2->trigger_chan != -1) + ret = dln2_adc_set_chan_period(dln2, + dln2->trigger_chan, dln2->sample_period); + else + ret = 0; + + mutex_unlock(&dln2->mutex); + + return ret; + + default: + return -EINVAL; + } +} + +static int dln2_update_scan_mode(struct iio_dev *indio_dev, + const unsigned long *scan_mask) +{ + struct dln2_adc *dln2 = iio_priv(indio_dev); + int chan_count = indio_dev->num_channels - 1; + int ret, i, j; + + mutex_lock(&dln2->mutex); + + for (i = 0; i < chan_count; ++i) { + ret = dln2_adc_set_chan_enabled(dln2, i, + test_bit(i, scan_mask)); + if (ret < 0) { + for (j = 0; j < i; ++j) + dln2_adc_set_chan_enabled(dln2, j, false); + mutex_unlock(&dln2->mutex); + dev_err(&dln2->pdev->dev, + "Unable to enable ADC channel %d\n", i); + return -EBUSY; + } + } + + dln2_adc_update_demux(dln2); + + mutex_unlock(&dln2->mutex); + + return 0; +} + +#define DLN2_ADC_CHAN(lval, idx) { \ + lval.type = IIO_VOLTAGE; \ + lval.channel = idx; \ + lval.indexed = 1; \ + lval.info_mask_separate = BIT(IIO_CHAN_INFO_RAW); \ + lval.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ); \ + lval.scan_index = idx; \ + lval.scan_type.sign = 'u'; \ + lval.scan_type.realbits = DLN2_ADC_DATA_BITS; \ + lval.scan_type.storagebits = 16; \ + lval.scan_type.endianness = IIO_LE; \ +} + +/* Assignment version of IIO_CHAN_SOFT_TIMESTAMP */ +#define IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(lval, _si) { \ + lval.type = IIO_TIMESTAMP; \ + lval.channel = -1; \ + lval.scan_index = _si; \ + lval.scan_type.sign = 's'; \ + lval.scan_type.realbits = 64; \ + lval.scan_type.storagebits = 64; \ +} + +static const struct iio_info dln2_adc_info = { + .read_raw = dln2_adc_read_raw, + .write_raw = dln2_adc_write_raw, + .update_scan_mode = dln2_update_scan_mode, + .driver_module = THIS_MODULE, +}; + +static irqreturn_t dln2_adc_trigger_h(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct { + __le16 values[DLN2_ADC_MAX_CHANNELS]; + int64_t timestamp_space; + } data; + struct dln2_adc_get_all_vals dev_data; + struct dln2_adc *dln2 = iio_priv(indio_dev); + const struct dln2_adc_demux_table *t; + int ret, i; + + mutex_lock(&dln2->mutex); + ret = dln2_adc_read_all(dln2, &dev_data); + mutex_unlock(&dln2->mutex); + if (ret < 0) + goto done; + + /* Demux operation */ + for (i = 0; i < dln2->demux_count; ++i) { + t = &dln2->demux[i]; + memcpy((void *)data.values + t->to, + (void *)dev_data.values + t->from, t->length); + } + + /* Zero padding space between values and timestamp */ + if (dln2->ts_pad_length) + memset((void *)data.values + dln2->ts_pad_offset, + 0, dln2->ts_pad_length); + + iio_push_to_buffers_with_timestamp(indio_dev, &data, + iio_get_time_ns(indio_dev)); + +done: + iio_trigger_notify_done(indio_dev->trig); + return IRQ_HANDLED; +} + +static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev) +{ + int ret; + struct dln2_adc *dln2 = iio_priv(indio_dev); + u16 conflict; + unsigned int trigger_chan; + + mutex_lock(&dln2->mutex); + + /* Enable ADC */ + ret = dln2_adc_set_port_enabled(dln2, true, &conflict); + if (ret < 0) { + mutex_unlock(&dln2->mutex); + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + if (conflict) { + dev_err(&dln2->pdev->dev, + "ADC pins conflict with mask %04X\n", + (int)conflict); + ret = -EBUSY; + } + return ret; + } + + /* Assign trigger channel based on first enabled channel */ + trigger_chan = find_first_bit(indio_dev->active_scan_mask, + indio_dev->masklength); + if (trigger_chan < 
DLN2_ADC_MAX_CHANNELS) { + dln2->trigger_chan = trigger_chan; + ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan, + dln2->sample_period); + mutex_unlock(&dln2->mutex); + if (ret < 0) { + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + return ret; + } + } else { + dln2->trigger_chan = -1; + mutex_unlock(&dln2->mutex); + } + + return iio_triggered_buffer_postenable(indio_dev); +} + +static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev) +{ + int ret; + struct dln2_adc *dln2 = iio_priv(indio_dev); + + mutex_lock(&dln2->mutex); + + /* Disable trigger channel */ + if (dln2->trigger_chan != -1) { + dln2_adc_set_chan_period(dln2, dln2->trigger_chan, 0); + dln2->trigger_chan = -1; + } + + /* Disable ADC */ + ret = dln2_adc_set_port_enabled(dln2, false, NULL); + + mutex_unlock(&dln2->mutex); + if (ret < 0) { + dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__); + return ret; + } + + return iio_triggered_buffer_predisable(indio_dev); +} + +static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = { + .postenable = dln2_adc_triggered_buffer_postenable, + .predisable = dln2_adc_triggered_buffer_predisable, +}; + +static void dln2_adc_event(struct platform_device *pdev, u16 echo, + const void *data, int len) +{ + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct dln2_adc *dln2 = iio_priv(indio_dev); + + /* Called via URB completion handler */ + iio_trigger_poll(dln2->trig); +} + +static const struct iio_trigger_ops dln2_adc_trigger_ops = { + .owner = THIS_MODULE, +}; + +static int dln2_adc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dln2_adc *dln2; + struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct iio_dev *indio_dev; + int i, ret, chans; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*dln2)); + if (!indio_dev) { + dev_err(dev, "failed allocating iio device\n"); + return -ENOMEM; + } + + dln2 = iio_priv(indio_dev); + dln2->pdev = pdev; + dln2->port = pdata->port; + dln2->trigger_chan = -1; + mutex_init(&dln2->mutex); + + platform_set_drvdata(pdev, indio_dev); + + ret = dln2_adc_set_port_resolution(dln2); + if (ret < 0) { + dev_err(dev, "failed to set ADC resolution to 10 bits\n"); + return ret; + } + + chans = dln2_adc_get_chan_count(dln2); + if (chans < 0) { + dev_err(dev, "failed to get channel count: %d\n", chans); + return chans; + } + if (chans > DLN2_ADC_MAX_CHANNELS) { + chans = DLN2_ADC_MAX_CHANNELS; + dev_warn(dev, "clamping channels to %d\n", + DLN2_ADC_MAX_CHANNELS); + } + + for (i = 0; i < chans; ++i) + DLN2_ADC_CHAN(dln2->iio_channels[i], i) + IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(dln2->iio_channels[i], i); + + indio_dev->name = DLN2_ADC_MOD_NAME; + indio_dev->dev.parent = dev; + indio_dev->info = &dln2_adc_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = dln2->iio_channels; + indio_dev->num_channels = chans + 1; + indio_dev->setup_ops = &dln2_adc_buffer_setup_ops; + + dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", + indio_dev->name, indio_dev->id); + if (!dln2->trig) { + dev_err(dev, "failed to allocate trigger\n"); + return -ENOMEM; + } + dln2->trig->ops = &dln2_adc_trigger_ops; + iio_trigger_set_drvdata(dln2->trig, dln2); + devm_iio_trigger_register(dev, dln2->trig); + iio_trigger_set_immutable(indio_dev, dln2->trig); + + ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, + dln2_adc_trigger_h, + &dln2_adc_buffer_setup_ops); + if (ret) { + dev_err(dev, "failed to allocate triggered buffer: %d\n", ret); + return ret; + } + + ret 
= dln2_register_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV, + dln2_adc_event); + if (ret) { + dev_err(dev, "failed to setup DLN2 periodic event: %d\n", ret); + return ret; + } + + ret = iio_device_register(indio_dev); + if (ret) { + dev_err(dev, "failed to register iio device: %d\n", ret); + goto unregister_event; + } + + return ret; + +unregister_event: + dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV); + + return ret; +} + +static int dln2_adc_remove(struct platform_device *pdev) +{ + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + + iio_device_unregister(indio_dev); + dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV); + return 0; +} + +static struct platform_driver dln2_adc_driver = { + .driver.name = DLN2_ADC_MOD_NAME, + .probe = dln2_adc_probe, + .remove = dln2_adc_remove, +}; + +module_platform_driver(dln2_adc_driver); + +MODULE_AUTHOR("Jack Andersen <jackoalan@gmail.com>"); +MODULE_DESCRIPTION("Driver for the Diolan DLN2 ADC interface"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dln2-adc"); diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c new file mode 100644 index 000000000000..a179ac476c6d --- /dev/null +++ b/drivers/iio/adc/ep93xx_adc.c @@ -0,0 +1,255 @@ +/* + * Driver for ADC module on the Cirrus Logic EP93xx series of SoCs + * + * Copyright (C) 2015 Alexander Sverdlin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * The driver uses polling to get the conversion status. According to EP93xx + * datasheets, reading the ADCResult register starts the conversion, but the user is + * also responsible for ensuring that the delay between adjacent conversion triggers + * is long enough so that the maximum allowed conversion rate is not exceeded. This + * basically renders IRQ mode unusable. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/iio/iio.h> +#include <linux/io.h> +#include <linux/irqflags.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> + +/* + * This code could benefit from real HR Timers, but jiffy granularity would + * lower the ADC conversion rate down to CONFIG_HZ, so we fall back to busy waiting + * in such a case. + * + * The HR Timers-based version loads the CPU only up to 10% during back-to-back ADC + * conversions, while the busy-wait-based version consumes the whole CPU power. + */ +#ifdef CONFIG_HIGH_RES_TIMERS +#define ep93xx_adc_delay(usmin, usmax) usleep_range(usmin, usmax) +#else +#define ep93xx_adc_delay(usmin, usmax) udelay(usmin) +#endif + +#define EP93XX_ADC_RESULT 0x08 +#define EP93XX_ADC_SDR BIT(31) +#define EP93XX_ADC_SWITCH 0x18 +#define EP93XX_ADC_SW_LOCK 0x20 + +struct ep93xx_adc_priv { + struct clk *clk; + void __iomem *base; + int lastch; + struct mutex lock; +}; + +#define EP93XX_ADC_CH(index, dname, swcfg) { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .channel = index, \ + .address = swcfg, \ + .datasheet_name = dname, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ +} + +/* + * Numbering scheme for channels 0..4 is defined in EP9301 and EP9302 datasheets. + * EP9307, EP9312 and EP9315 have 3 channels more (total 8), but the numbering is + * not defined. So the last three are numbered randomly, let's say. 
+ */ +static const struct iio_chan_spec ep93xx_adc_channels[8] = { + EP93XX_ADC_CH(0, "YM", 0x608), + EP93XX_ADC_CH(1, "SXP", 0x680), + EP93XX_ADC_CH(2, "SXM", 0x640), + EP93XX_ADC_CH(3, "SYP", 0x620), + EP93XX_ADC_CH(4, "SYM", 0x610), + EP93XX_ADC_CH(5, "XP", 0x601), + EP93XX_ADC_CH(6, "XM", 0x602), + EP93XX_ADC_CH(7, "YP", 0x604), +}; + +static int ep93xx_read_raw(struct iio_dev *iiodev, + struct iio_chan_spec const *channel, int *value, + int *shift, long mask) +{ + struct ep93xx_adc_priv *priv = iio_priv(iiodev); + unsigned long timeout; + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&priv->lock); + if (priv->lastch != channel->channel) { + priv->lastch = channel->channel; + /* + * Switch register is software-locked, unlocking must be + * immediately followed by write + */ + local_irq_disable(); + writel_relaxed(0xAA, priv->base + EP93XX_ADC_SW_LOCK); + writel_relaxed(channel->address, + priv->base + EP93XX_ADC_SWITCH); + local_irq_enable(); + /* + * Settling delay depends on module clock and could be + * 2ms or 500us + */ + ep93xx_adc_delay(2000, 2000); + } + /* Start the conversion, eventually discarding old result */ + readl_relaxed(priv->base + EP93XX_ADC_RESULT); + /* Ensure maximum conversion rate is not exceeded */ + ep93xx_adc_delay(DIV_ROUND_UP(1000000, 925), + DIV_ROUND_UP(1000000, 925)); + /* At this point conversion must be completed, but anyway... */ + ret = IIO_VAL_INT; + timeout = jiffies + msecs_to_jiffies(1) + 1; + while (1) { + u32 t; + + t = readl_relaxed(priv->base + EP93XX_ADC_RESULT); + if (t & EP93XX_ADC_SDR) { + *value = sign_extend32(t, 15); + break; + } + + if (time_after(jiffies, timeout)) { + dev_err(&iiodev->dev, "Conversion timeout\n"); + ret = -ETIMEDOUT; + break; + } + + cpu_relax(); + } + mutex_unlock(&priv->lock); + return ret; + + case IIO_CHAN_INFO_OFFSET: + /* According to datasheet, range is -25000..25000 */ + *value = 25000; + return IIO_VAL_INT; + + case IIO_CHAN_INFO_SCALE: + /* Typical supply voltage is 3.3v */ + *value = (1ULL << 32) * 3300 / 50000; + *shift = 32; + return IIO_VAL_FRACTIONAL_LOG2; + } + + return -EINVAL; +} + +static const struct iio_info ep93xx_adc_info = { + .driver_module = THIS_MODULE, + .read_raw = ep93xx_read_raw, +}; + +static int ep93xx_adc_probe(struct platform_device *pdev) +{ + int ret; + struct iio_dev *iiodev; + struct ep93xx_adc_priv *priv; + struct clk *pclk; + struct resource *res; + + iiodev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv)); + if (!iiodev) + return -ENOMEM; + priv = iio_priv(iiodev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Cannot obtain memory resource\n"); + return -ENXIO; + } + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) { + dev_err(&pdev->dev, "Cannot map memory resource\n"); + return PTR_ERR(priv->base); + } + + iiodev->dev.parent = &pdev->dev; + iiodev->name = dev_name(&pdev->dev); + iiodev->modes = INDIO_DIRECT_MODE; + iiodev->info = &ep93xx_adc_info; + iiodev->num_channels = ARRAY_SIZE(ep93xx_adc_channels); + iiodev->channels = ep93xx_adc_channels; + + priv->lastch = -1; + mutex_init(&priv->lock); + + platform_set_drvdata(pdev, iiodev); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "Cannot obtain clock\n"); + return PTR_ERR(priv->clk); + } + + pclk = clk_get_parent(priv->clk); + if (!pclk) { + dev_warn(&pdev->dev, "Cannot obtain parent clock\n"); + } else { + /* + * This is actually a place for improvement: + * EP93xx ADC 
supports two clock divisors -- 4 and 16, + * resulting in conversion rates 3750 and 925 samples per second + * with 500us or 2ms settling time respectively. + * One might find this interesting enough to be configurable. + */ + ret = clk_set_rate(priv->clk, clk_get_rate(pclk) / 16); + if (ret) + dev_warn(&pdev->dev, "Cannot set clock rate\n"); + /* + * We can tolerate rate setting failure because the module should + * work in any case. + */ + } + + ret = clk_enable(priv->clk); + if (ret) { + dev_err(&pdev->dev, "Cannot enable clock\n"); + return ret; + } + + ret = iio_device_register(iiodev); + if (ret) + clk_disable(priv->clk); + + return ret; +} + +static int ep93xx_adc_remove(struct platform_device *pdev) +{ + struct iio_dev *iiodev = platform_get_drvdata(pdev); + struct ep93xx_adc_priv *priv = iio_priv(iiodev); + + iio_device_unregister(iiodev); + clk_disable(priv->clk); + + return 0; +} + +static struct platform_driver ep93xx_adc_driver = { + .driver = { + .name = "ep93xx-adc", + }, + .probe = ep93xx_adc_probe, + .remove = ep93xx_adc_remove, +}; +module_platform_driver(ep93xx_adc_driver); + +MODULE_AUTHOR("Alexander Sverdlin <alexander.sverdlin@gmail.com>"); +MODULE_DESCRIPTION("Cirrus Logic EP93XX ADC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ep93xx-adc"); diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 232c0b80d658..68884d26b50c 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -44,6 +44,7 @@ #define INA226_MASK_ENABLE 0x06 #define INA226_CVRF BIT(3) +#define INA219_CNVR BIT(1) #define INA2XX_MAX_REGISTERS 8 @@ -592,6 +593,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) int bit, ret, i = 0; s64 time_a, time_b; unsigned int alert; + int cnvr_need_clear = 0; time_a = iio_get_time_ns(indio_dev); @@ -603,22 +605,30 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) * we check the ConVersionReadyFlag. * On hardware that supports using the ALERT pin to toggle a * GPIO a triggered buffer could be used instead. - * For now, we pay for that extra read of the ALERT register + * For now, we do an extra read of the MASK_ENABLE register (INA226) + * resp. the BUS_VOLTAGE register (INA219). */ if (!chip->allow_async_readout) do { - ret = regmap_read(chip->regmap, INA226_MASK_ENABLE, - &alert); + if (chip->config->chip_id == ina226) { + ret = regmap_read(chip->regmap, + INA226_MASK_ENABLE, &alert); + alert &= INA226_CVRF; + } else { + ret = regmap_read(chip->regmap, + INA2XX_BUS_VOLTAGE, &alert); + alert &= INA219_CNVR; + cnvr_need_clear = alert; + } + if (ret < 0) return ret; - alert &= INA226_CVRF; } while (!alert); /* - * Single register reads: bulk_read will not work with ina226 - * as there is no auto-increment of the address register for - * data length longer than 16bits. + * Single register reads: bulk_read will not work with ina226/219 + * as there is no auto-increment of the register pointer. 
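This is why the loop that follows reads each result register with its own transfer: without an auto-incrementing register pointer, a single bulk read cannot step through the shunt/bus/power/current registers. A stand-alone sketch of that pattern, where bus_read_u16() is a hypothetical stand-in for the real regmap/I2C accessor:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical accessor; it fabricates data so the sketch is self-contained. */
static int bus_read_u16(uint8_t reg, uint16_t *val)
{
	*val = (uint16_t)(reg * 0x1111);
	return 0;
}

int main(void)
{
	/* INA2xx-style result registers: shunt, bus, power, current. */
	static const uint8_t regs[] = { 0x01, 0x02, 0x03, 0x04 };
	uint16_t val;
	unsigned int i;

	/* One transfer per register, since the pointer does not auto-increment. */
	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		if (bus_read_u16(regs[i], &val))
			return 1;
		printf("reg 0x%02x = 0x%04x\n", regs[i], val);
	}
	return 0;
}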
*/ for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { @@ -630,6 +640,18 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) return ret; data[i++] = val; + + if (INA2XX_SHUNT_VOLTAGE + bit == INA2XX_POWER) + cnvr_need_clear = 0; + } + + /* Dummy read on INA219 power register to clear CNVR flag */ + if (cnvr_need_clear && chip->config->chip_id == ina219) { + unsigned int val; + + ret = regmap_read(chip->regmap, INA2XX_POWER, &val); + if (ret < 0) + return ret; } time_b = iio_get_time_ns(indio_dev); diff --git a/drivers/iio/adc/ltc2471.c b/drivers/iio/adc/ltc2471.c new file mode 100644 index 000000000000..29b7ed60cdb0 --- /dev/null +++ b/drivers/iio/adc/ltc2471.c @@ -0,0 +1,160 @@ +/* + * Driver for Linear Technology LTC2471 and LTC2473 voltage monitors + * The LTC2473 is identical to the 2471, but reports a differential signal. + * + * Copyright (C) 2017 Topic Embedded Products + * Author: Mike Looijmans <mike.looijmans@topic.nl> + * + * License: GPLv2 + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> + +enum ltc2471_chips { + ltc2471, + ltc2473, +}; + +struct ltc2471_data { + struct i2c_client *client; +}; + +/* Reference voltage is 1.25V */ +#define LTC2471_VREF 1250 + +/* Read two bytes from the I2C bus to obtain the ADC result */ +static int ltc2471_get_value(struct i2c_client *client) +{ + int ret; + __be16 buf; + + ret = i2c_master_recv(client, (char *)&buf, sizeof(buf)); + if (ret < 0) + return ret; + if (ret != sizeof(buf)) + return -EIO; + + /* MSB first */ + return be16_to_cpu(buf); +} + +static int ltc2471_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long info) +{ + struct ltc2471_data *data = iio_priv(indio_dev); + int ret; + + switch (info) { + case IIO_CHAN_INFO_RAW: + ret = ltc2471_get_value(data->client); + if (ret < 0) + return ret; + *val = ret; + return IIO_VAL_INT; + + case IIO_CHAN_INFO_SCALE: + if (chan->differential) + /* Output ranges from -VREF to +VREF */ + *val = 2 * LTC2471_VREF; + else + /* Output ranges from 0 to VREF */ + *val = LTC2471_VREF; + *val2 = 16; /* 16 data bits */ + return IIO_VAL_FRACTIONAL_LOG2; + + case IIO_CHAN_INFO_OFFSET: + /* Only differential chip has this property */ + *val = -LTC2471_VREF; + return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + +static const struct iio_chan_spec ltc2471_channel[] = { + { + .type = IIO_VOLTAGE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + }, +}; + +static const struct iio_chan_spec ltc2473_channel[] = { + { + .type = IIO_VOLTAGE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + .differential = 1, + }, +}; + +static const struct iio_info ltc2471_info = { + .read_raw = ltc2471_read_raw, + .driver_module = THIS_MODULE, +}; + +static int ltc2471_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct iio_dev *indio_dev; + struct ltc2471_data *data; + int ret; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -EOPNOTSUPP; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + data->client = client; + + indio_dev->dev.parent = &client->dev; + indio_dev->name = id->name; + indio_dev->info = <c2471_info; + indio_dev->modes = 
INDIO_DIRECT_MODE; + if (id->driver_data == ltc2473) + indio_dev->channels = ltc2473_channel; + else + indio_dev->channels = ltc2471_channel; + indio_dev->num_channels = 1; + + /* Trigger once to start conversion and check if chip is there */ + ret = ltc2471_get_value(client); + if (ret < 0) { + dev_err(&client->dev, "Cannot read from device.\n"); + return ret; + } + + return devm_iio_device_register(&client->dev, indio_dev); +} + +static const struct i2c_device_id ltc2471_i2c_id[] = { + { "ltc2471", ltc2471 }, + { "ltc2473", ltc2473 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, ltc2471_i2c_id); + +static struct i2c_driver ltc2471_i2c_driver = { + .driver = { + .name = "ltc2471", + }, + .probe = ltc2471_i2c_probe, + .id_table = ltc2471_i2c_id, +}; + +module_i2c_driver(ltc2471_i2c_driver); + +MODULE_DESCRIPTION("LTC2471/LTC2473 ADC driver"); +MODULE_AUTHOR("Topic Embedded Products"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c index 2691b10023f5..5bf8011dcde9 100644 --- a/drivers/iio/adc/ltc2497.c +++ b/drivers/iio/adc/ltc2497.c @@ -11,6 +11,7 @@ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/iio/iio.h> +#include <linux/iio/driver.h> #include <linux/iio/sysfs.h> #include <linux/module.h> #include <linux/of.h> @@ -127,13 +128,14 @@ static int ltc2497_read_raw(struct iio_dev *indio_dev, } } -#define LTC2497_CHAN(_chan, _addr) { \ +#define LTC2497_CHAN(_chan, _addr, _ds_name) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = (_chan), \ .address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .datasheet_name = (_ds_name), \ } #define LTC2497_CHAN_DIFF(_chan, _addr) { \ @@ -148,22 +150,22 @@ static int ltc2497_read_raw(struct iio_dev *indio_dev, } static const struct iio_chan_spec ltc2497_channel[] = { - LTC2497_CHAN(0, LTC2497_SGL), - LTC2497_CHAN(1, LTC2497_SGL), - LTC2497_CHAN(2, LTC2497_SGL), - LTC2497_CHAN(3, LTC2497_SGL), - LTC2497_CHAN(4, LTC2497_SGL), - LTC2497_CHAN(5, LTC2497_SGL), - LTC2497_CHAN(6, LTC2497_SGL), - LTC2497_CHAN(7, LTC2497_SGL), - LTC2497_CHAN(8, LTC2497_SGL), - LTC2497_CHAN(9, LTC2497_SGL), - LTC2497_CHAN(10, LTC2497_SGL), - LTC2497_CHAN(11, LTC2497_SGL), - LTC2497_CHAN(12, LTC2497_SGL), - LTC2497_CHAN(13, LTC2497_SGL), - LTC2497_CHAN(14, LTC2497_SGL), - LTC2497_CHAN(15, LTC2497_SGL), + LTC2497_CHAN(0, LTC2497_SGL, "CH0"), + LTC2497_CHAN(1, LTC2497_SGL, "CH1"), + LTC2497_CHAN(2, LTC2497_SGL, "CH2"), + LTC2497_CHAN(3, LTC2497_SGL, "CH3"), + LTC2497_CHAN(4, LTC2497_SGL, "CH4"), + LTC2497_CHAN(5, LTC2497_SGL, "CH5"), + LTC2497_CHAN(6, LTC2497_SGL, "CH6"), + LTC2497_CHAN(7, LTC2497_SGL, "CH7"), + LTC2497_CHAN(8, LTC2497_SGL, "CH8"), + LTC2497_CHAN(9, LTC2497_SGL, "CH9"), + LTC2497_CHAN(10, LTC2497_SGL, "CH10"), + LTC2497_CHAN(11, LTC2497_SGL, "CH11"), + LTC2497_CHAN(12, LTC2497_SGL, "CH12"), + LTC2497_CHAN(13, LTC2497_SGL, "CH13"), + LTC2497_CHAN(14, LTC2497_SGL, "CH14"), + LTC2497_CHAN(15, LTC2497_SGL, "CH15"), LTC2497_CHAN_DIFF(0, LTC2497_DIFF), LTC2497_CHAN_DIFF(1, LTC2497_DIFF), LTC2497_CHAN_DIFF(2, LTC2497_DIFF), @@ -192,6 +194,7 @@ static int ltc2497_probe(struct i2c_client *client, { struct iio_dev *indio_dev; struct ltc2497_st *st; + struct iio_map *plat_data; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | @@ -221,19 +224,31 @@ static int ltc2497_probe(struct i2c_client *client, if (ret < 0) return ret; + if (client->dev.platform_data) { + plat_data = ((struct 
iio_map *)client->dev.platform_data); + ret = iio_map_array_register(indio_dev, plat_data); + if (ret) { + dev_err(&indio_dev->dev, "iio map err: %d\n", ret); + goto err_regulator_disable; + } + } + ret = i2c_smbus_write_byte(st->client, LTC2497_CONFIG_DEFAULT); if (ret < 0) - goto err_regulator_disable; + goto err_array_unregister; st->addr_prev = LTC2497_CONFIG_DEFAULT; st->time_prev = ktime_get(); ret = iio_device_register(indio_dev); if (ret < 0) - goto err_regulator_disable; + goto err_array_unregister; return 0; +err_array_unregister: + iio_map_array_unregister(indio_dev); + err_regulator_disable: regulator_disable(st->ref); @@ -245,6 +260,7 @@ static int ltc2497_remove(struct i2c_client *client) struct iio_dev *indio_dev = i2c_get_clientdata(client); struct ltc2497_st *st = iio_priv(indio_dev); + iio_map_array_unregister(indio_dev); iio_device_unregister(indio_dev); regulator_disable(st->ref); diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c index b0526e4b9530..b1dd17cbce58 100644 --- a/drivers/iio/adc/max9611.c +++ b/drivers/iio/adc/max9611.c @@ -549,8 +549,8 @@ static int max9611_probe(struct i2c_client *client, ret = of_property_read_u32(of_node, shunt_res_prop, &of_shunt); if (ret) { dev_err(&client->dev, - "Missing %s property for %s node\n", - shunt_res_prop, of_node->full_name); + "Missing %s property for %pOF node\n", + shunt_res_prop, of_node); return ret; } max9611->shunt_resistor_uohm = of_shunt; diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 254135e07792..63de705086ed 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -379,10 +379,12 @@ static int mcp3422_probe(struct i2c_client *client, /* meaningful default configuration */ config = (MCP3422_CONT_SAMPLING - | MCP3422_CHANNEL_VALUE(1) + | MCP3422_CHANNEL_VALUE(0) | MCP3422_PGA_VALUE(MCP3422_PGA_1) | MCP3422_SAMPLE_RATE_VALUE(MCP3422_SRATE_240)); - mcp3422_update_config(adc, config); + err = mcp3422_update_config(adc, config); + if (err < 0) + return err; err = devm_iio_device_register(&client->dev, indio_dev); if (err < 0) diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 83da50ed73ab..2e8dbb89c8c9 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -572,8 +572,8 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev, struct clk_init_data init; const char *clk_parents[1]; - init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div", - of_node_full_name(indio_dev->dev.of_node)); + init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_div", + indio_dev->dev.of_node); init.flags = 0; init.ops = &clk_divider_ops; clk_parents[0] = __clk_get_name(priv->clkin); @@ -591,8 +591,8 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev, if (WARN_ON(IS_ERR(priv->adc_div_clk))) return PTR_ERR(priv->adc_div_clk); - init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en", - of_node_full_name(indio_dev->dev.of_node)); + init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_en", + indio_dev->dev.of_node); init.flags = CLK_SET_RATE_PARENT; init.ops = &clk_gate_ops; clk_parents[0] = __clk_get_name(priv->adc_div_clk); @@ -915,6 +915,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev) init_completion(&priv->done); match = of_match_device(meson_sar_adc_of_match, &pdev->dev); + if (!match) { + dev_err(&pdev->dev, "failed to match device\n"); + return -ENODEV; + } + priv->data = match->data; indio_dev->name = priv->data->name; diff --git 
a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c index 2d104c828041..414cf44bf19d 100644 --- a/drivers/iio/adc/mt6577_auxadc.c +++ b/drivers/iio/adc/mt6577_auxadc.c @@ -184,6 +184,37 @@ static const struct iio_info mt6577_auxadc_info = { .read_raw = &mt6577_auxadc_read_raw, }; +static int __maybe_unused mt6577_auxadc_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev); + int ret; + + ret = clk_prepare_enable(adc_dev->adc_clk); + if (ret) { + pr_err("failed to enable auxadc clock\n"); + return ret; + } + + mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC, + MT6577_AUXADC_PDN_EN, 0); + mdelay(MT6577_AUXADC_POWER_READY_MS); + + return 0; +} + +static int __maybe_unused mt6577_auxadc_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev); + + mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC, + 0, MT6577_AUXADC_PDN_EN); + clk_disable_unprepare(adc_dev->adc_clk); + + return 0; +} + static int mt6577_auxadc_probe(struct platform_device *pdev) { struct mt6577_auxadc_device *adc_dev; @@ -269,8 +300,13 @@ static int mt6577_auxadc_remove(struct platform_device *pdev) return 0; } +static SIMPLE_DEV_PM_OPS(mt6577_auxadc_pm_ops, + mt6577_auxadc_suspend, + mt6577_auxadc_resume); + static const struct of_device_id mt6577_auxadc_of_match[] = { { .compatible = "mediatek,mt2701-auxadc", }, + { .compatible = "mediatek,mt7622-auxadc", }, { .compatible = "mediatek,mt8173-auxadc", }, { } }; @@ -280,6 +316,7 @@ static struct platform_driver mt6577_auxadc_driver = { .driver = { .name = "mt6577-auxadc", .of_match_table = mt6577_auxadc_of_match, + .pm = &mt6577_auxadc_pm_ops, }, .probe = mt6577_auxadc_probe, .remove = mt6577_auxadc_remove, diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index ae6d3324f518..2bf2ed15a870 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -224,6 +224,11 @@ static int rockchip_saradc_probe(struct platform_device *pdev) info = iio_priv(indio_dev); match = of_match_device(rockchip_saradc_match, &pdev->dev); + if (!match) { + dev_err(&pdev->dev, "failed to match device\n"); + return -ENODEV; + } + info->data = match->data; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index e09233b03c05..9d083c2338f9 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -172,7 +172,7 @@ struct stm32h7_adc_ck_spec { int div; }; -const struct stm32h7_adc_ck_spec stm32h7_adc_ckmodes_spec[] = { +static const struct stm32h7_adc_ck_spec stm32h7_adc_ckmodes_spec[] = { /* 00: CK_ADC[1..3]: Asynchronous clock modes */ { 0, 0, 1 }, { 0, 1, 2 }, diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 884b8e461b17..7972845b3823 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -505,24 +505,24 @@ static int ads1015_get_channels_config_of(struct i2c_client *client) unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE; if (of_property_read_u32(node, "reg", &pval)) { - dev_err(&client->dev, "invalid reg on %s\n", - node->full_name); + dev_err(&client->dev, "invalid reg on %pOF\n", + node); continue; } channel = pval; if (channel >= ADS1015_CHANNELS) { dev_err(&client->dev, - "invalid channel index %d on %s\n", - channel, node->full_name); + "invalid channel 
index %d on %pOF\n", + channel, node); continue; } if (!of_property_read_u32(node, "ti,gain", &pval)) { pga = pval; if (pga > 6) { - dev_err(&client->dev, "invalid gain on %s\n", - node->full_name); + dev_err(&client->dev, "invalid gain on %pOF\n", + node); of_node_put(node); return -EINVAL; } @@ -532,8 +532,8 @@ static int ads1015_get_channels_config_of(struct i2c_client *client) data_rate = pval; if (data_rate > 7) { dev_err(&client->dev, - "invalid data_rate on %s\n", - node->full_name); + "invalid data_rate on %pOF\n", + node); of_node_put(node); return -EINVAL; } diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index cea7f9857a1f..4d799b5cceac 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -21,6 +21,13 @@ config ATLAS_PH_SENSOR To compile this driver as module, choose M here: the module will be called atlas-ph-sensor. +config CCS811 + tristate "AMS CCS811 VOC sensor" + depends on I2C + help + Say Y here to build I2C interface support for the AMS + CCS811 VOC (Volatile Organic Compounds) sensor + config IAQCORE tristate "AMS iAQ-Core VOC sensors" depends on I2C diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile index b02202b41289..a629b29d1e0b 100644 --- a/drivers/iio/chemical/Makefile +++ b/drivers/iio/chemical/Makefile @@ -4,5 +4,6 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o +obj-$(CONFIG_CCS811) += ccs811.o obj-$(CONFIG_IAQCORE) += ams-iaq-core.o obj-$(CONFIG_VZ89X) += vz89x.o diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c new file mode 100644 index 000000000000..8dbb5eddeb1f --- /dev/null +++ b/drivers/iio/chemical/ccs811.c @@ -0,0 +1,339 @@ +/* + * ccs811.c - Support for AMS CCS811 VOC Sensor + * + * Copyright (C) 2017 Narcisa Vasile <narcisaanamaria12@gmail.com> + * + * Datasheet: ams.com/content/download/951091/2269479/CCS811_DS000459_3-00.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * IIO driver for AMS CCS811 (I2C address 0x5A/0x5B set by ADDR Low/High) + * + * TODO: + * 1. Make the drive mode selectable form userspace + * 2. Add support for interrupts + * 3. Adjust time to wait for data to be ready based on selected operation mode + * 4. Read error register and put the information in logs + */ + +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/iio/iio.h> +#include <linux/module.h> + +#define CCS811_STATUS 0x00 +#define CCS811_MEAS_MODE 0x01 +#define CCS811_ALG_RESULT_DATA 0x02 +#define CCS811_RAW_DATA 0x03 +#define CCS811_HW_ID 0x20 +#define CCS881_HW_ID_VALUE 0x81 +#define CCS811_HW_VERSION 0x21 +#define CCS811_HW_VERSION_VALUE 0x10 +#define CCS811_HW_VERSION_MASK 0xF0 +#define CCS811_ERR 0xE0 +/* Used to transition from boot to application mode */ +#define CCS811_APP_START 0xF4 + +/* Status register flags */ +#define CCS811_STATUS_ERROR BIT(0) +#define CCS811_STATUS_DATA_READY BIT(3) +#define CCS811_STATUS_APP_VALID_MASK BIT(4) +#define CCS811_STATUS_APP_VALID_LOADED BIT(4) +/* + * Value of FW_MODE bit of STATUS register describes the sensor's state: + * 0: Firmware is in boot mode, this allows new firmware to be loaded + * 1: Firmware is in application mode. 
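The STATUS flags defined above (ERROR, DATA_READY, APP_VALID), together with the FW_MODE bit defined just after this comment, can each be tested independently against the same status byte. A small self-contained decoder using the same bit positions:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define STATUS_ERROR		BIT(0)
#define STATUS_DATA_READY	BIT(3)
#define STATUS_APP_VALID	BIT(4)
#define STATUS_FW_MODE_APP	BIT(7)

static void decode_status(uint8_t status)
{
	printf("error:      %u\n", !!(status & STATUS_ERROR));
	printf("data ready: %u\n", !!(status & STATUS_DATA_READY));
	printf("app valid:  %u\n", !!(status & STATUS_APP_VALID));
	printf("fw mode:    %s\n",
	       (status & STATUS_FW_MODE_APP) ? "application" : "boot");
}

int main(void)
{
	decode_status(0x98);	/* app mode, app valid, data ready, no error */
	return 0;
}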
CCS811 is ready to take ADC measurements + */ +#define CCS811_STATUS_FW_MODE_MASK BIT(7) +#define CCS811_STATUS_FW_MODE_APPLICATION BIT(7) + +/* Measurement modes */ +#define CCS811_MODE_IDLE 0x00 +#define CCS811_MODE_IAQ_1SEC 0x10 +#define CCS811_MODE_IAQ_10SEC 0x20 +#define CCS811_MODE_IAQ_60SEC 0x30 +#define CCS811_MODE_RAW_DATA 0x40 + +#define CCS811_VOLTAGE_MASK 0x3FF + +struct ccs811_reading { + __be16 co2; + __be16 voc; + u8 status; + u8 error; + __be16 resistance; +} __attribute__((__packed__)); + +struct ccs811_data { + struct i2c_client *client; + struct mutex lock; /* Protect readings */ + struct ccs811_reading buffer; +}; + +static const struct iio_chan_spec ccs811_channels[] = { + { + .type = IIO_CURRENT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) + }, { + .type = IIO_VOLTAGE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) + }, { + .type = IIO_CONCENTRATION, + .channel2 = IIO_MOD_CO2, + .modified = 1, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SCALE) + }, { + .type = IIO_CONCENTRATION, + .channel2 = IIO_MOD_VOC, + .modified = 1, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) + }, +}; + +/* + * The CCS811 powers-up in boot mode. A setup write to CCS811_APP_START will + * transition the sensor to application mode. + */ +static int ccs811_start_sensor_application(struct i2c_client *client) +{ + int ret; + + ret = i2c_smbus_read_byte_data(client, CCS811_STATUS); + if (ret < 0) + return ret; + + if ((ret & CCS811_STATUS_APP_VALID_MASK) != + CCS811_STATUS_APP_VALID_LOADED) + return -EIO; + + ret = i2c_smbus_write_byte(client, CCS811_APP_START); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_byte_data(client, CCS811_STATUS); + if (ret < 0) + return ret; + + if ((ret & CCS811_STATUS_FW_MODE_MASK) != + CCS811_STATUS_FW_MODE_APPLICATION) { + dev_err(&client->dev, "Application failed to start. 
Sensor is still in boot mode.\n"); + return -EIO; + } + + return 0; +} + +static int ccs811_setup(struct i2c_client *client) +{ + int ret; + + ret = ccs811_start_sensor_application(client); + if (ret < 0) + return ret; + + return i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE, + CCS811_MODE_IAQ_1SEC); +} + +static int ccs811_get_measurement(struct ccs811_data *data) +{ + int ret, tries = 11; + + /* Maximum waiting time: 1s, as measurements are made every second */ + while (tries-- > 0) { + ret = i2c_smbus_read_byte_data(data->client, CCS811_STATUS); + if (ret < 0) + return ret; + + if ((ret & CCS811_STATUS_DATA_READY) || tries == 0) + break; + msleep(100); + } + if (!(ret & CCS811_STATUS_DATA_READY)) + return -EIO; + + return i2c_smbus_read_i2c_block_data(data->client, + CCS811_ALG_RESULT_DATA, 8, + (char *)&data->buffer); +} + +static int ccs811_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct ccs811_data *data = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&data->lock); + ret = ccs811_get_measurement(data); + if (ret < 0) { + mutex_unlock(&data->lock); + return ret; + } + + switch (chan->type) { + case IIO_VOLTAGE: + *val = be16_to_cpu(data->buffer.resistance) & + CCS811_VOLTAGE_MASK; + ret = IIO_VAL_INT; + break; + case IIO_CURRENT: + *val = be16_to_cpu(data->buffer.resistance) >> 10; + ret = IIO_VAL_INT; + break; + case IIO_CONCENTRATION: + switch (chan->channel2) { + case IIO_MOD_CO2: + *val = be16_to_cpu(data->buffer.co2); + ret = IIO_VAL_INT; + break; + case IIO_MOD_VOC: + *val = be16_to_cpu(data->buffer.voc); + ret = IIO_VAL_INT; + break; + default: + ret = -EINVAL; + } + break; + default: + ret = -EINVAL; + } + mutex_unlock(&data->lock); + + return ret; + + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_VOLTAGE: + *val = 1; + *val2 = 612903; + return IIO_VAL_INT_PLUS_MICRO; + case IIO_CURRENT: + *val = 0; + *val2 = 1000; + return IIO_VAL_INT_PLUS_MICRO; + case IIO_CONCENTRATION: + switch (chan->channel2) { + case IIO_MOD_CO2: + *val = 0; + *val2 = 12834; + return IIO_VAL_INT_PLUS_MICRO; + case IIO_MOD_VOC: + *val = 0; + *val2 = 84246; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } + default: + return -EINVAL; + } + case IIO_CHAN_INFO_OFFSET: + if (!(chan->type == IIO_CONCENTRATION && + chan->channel2 == IIO_MOD_CO2)) + return -EINVAL; + *val = -400; + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static const struct iio_info ccs811_info = { + .read_raw = ccs811_read_raw, + .driver_module = THIS_MODULE, +}; + +static int ccs811_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct iio_dev *indio_dev; + struct ccs811_data *data; + int ret; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE + | I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_READ_I2C_BLOCK)) + return -EOPNOTSUPP; + + /* Check hardware id (should be 0x81 for this family of devices) */ + ret = i2c_smbus_read_byte_data(client, CCS811_HW_ID); + if (ret < 0) + return ret; + + if (ret != CCS881_HW_ID_VALUE) { + dev_err(&client->dev, "hardware id doesn't match CCS81x\n"); + return -ENODEV; + } + + ret = i2c_smbus_read_byte_data(client, CCS811_HW_VERSION); + if (ret < 0) + return ret; + + if ((ret & CCS811_HW_VERSION_MASK) != CCS811_HW_VERSION_VALUE) { + dev_err(&client->dev, "no CCS811 sensor\n"); + return -ENODEV; + } + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return 
-ENOMEM; + + ret = ccs811_setup(client); + if (ret < 0) + return ret; + + data = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); + data->client = client; + + mutex_init(&data->lock); + + indio_dev->dev.parent = &client->dev; + indio_dev->name = id->name; + indio_dev->info = &ccs811_info; + + indio_dev->channels = ccs811_channels; + indio_dev->num_channels = ARRAY_SIZE(ccs811_channels); + + return iio_device_register(indio_dev); +} + +static int ccs811_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + + iio_device_unregister(indio_dev); + + return i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE, + CCS811_MODE_IDLE); +} + +static const struct i2c_device_id ccs811_id[] = { + {"ccs811", 0}, + { } +}; +MODULE_DEVICE_TABLE(i2c, ccs811_id); + +static struct i2c_driver ccs811_driver = { + .driver = { + .name = "ccs811", + }, + .probe = ccs811_probe, + .remove = ccs811_remove, + .id_table = ccs811_id, +}; +module_i2c_driver(ccs811_driver); + +MODULE_AUTHOR("Narcisa Vasile <narcisaanamaria12@gmail.com>"); +MODULE_DESCRIPTION("CCS811 volatile organic compounds sensor"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 6e6a1ecc99dd..d99bb1460fe2 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -15,6 +15,7 @@ #include <linux/iio/iio.h> #include <linux/regulator/consumer.h> #include <linux/of.h> +#include <linux/of_device.h> #include <asm/unaligned.h> #include <linux/iio/common/st_sensors.h> @@ -345,6 +346,36 @@ static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev, return pdata; } + +/** + * st_sensors_of_name_probe() - device tree probe for ST sensor name + * @dev: driver model representation of the device. + * @match: the OF match table for the device, containing compatible strings + * but also a .data field with the corresponding internal kernel name + * used by this sensor. + * @name: device name buffer reference. + * @len: device name buffer length. + * + * In effect this function matches a compatible string to an internal kernel + * name for a certain sensor device, so that the rest of the autodetection can + * rely on that name from this point on. I2C/SPI devices will be renamed + * to match the internal kernel convention. 
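The helper that follows copies the matched name with strncpy() and then terminates the buffer by hand; that last step matters because strncpy() does not append a NUL byte when the source string is at least as long as the destination. A quick stand-alone illustration of the same pattern:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];

	/* Source longer than the buffer: strncpy() fills all 8 bytes, no NUL. */
	strncpy(buf, "lsm330dlc_gyro", sizeof(buf));
	buf[sizeof(buf) - 1] = '\0';	/* explicit termination, as in the helper */

	printf("%s\n", buf);		/* prints "lsm330d" */
	return 0;
}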
+ */ +void st_sensors_of_name_probe(struct device *dev, + const struct of_device_id *match, + char *name, int len) +{ + const struct of_device_id *of_id; + + of_id = of_match_device(match, dev); + if (!of_id || !of_id->data) + return; + + /* The name from the OF match takes precedence if present */ + strncpy(name, of_id->data, len); + name[len - 1] = '\0'; +} +EXPORT_SYMBOL(st_sensors_of_name_probe); #else static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev, struct st_sensors_platform_data *defdata) diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c index c83df4dbfcd7..b81e48e9f27e 100644 --- a/drivers/iio/common/st_sensors/st_sensors_i2c.c +++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c @@ -79,35 +79,6 @@ void st_sensors_i2c_configure(struct iio_dev *indio_dev, } EXPORT_SYMBOL(st_sensors_i2c_configure); -#ifdef CONFIG_OF -/** - * st_sensors_of_i2c_probe() - device tree probe for ST I2C sensors - * @client: the I2C client device for the sensor - * @match: the OF match table for the device, containing compatible strings - * but also a .data field with the corresponding internal kernel name - * used by this sensor. - * - * In effect this function matches a compatible string to an internal kernel - * name for a certain sensor device, so that the rest of the autodetection can - * rely on that name from this point on. I2C client devices will be renamed - * to match the internal kernel convention. - */ -void st_sensors_of_i2c_probe(struct i2c_client *client, - const struct of_device_id *match) -{ - const struct of_device_id *of_id; - - of_id = of_match_device(match, &client->dev); - if (!of_id) - return; - - /* The name from the OF match takes precedence if present */ - strncpy(client->name, of_id->data, sizeof(client->name)); - client->name[sizeof(client->name) - 1] = '\0'; -} -EXPORT_SYMBOL(st_sensors_of_i2c_probe); -#endif - #ifdef CONFIG_ACPI int st_sensors_match_acpi_device(struct device *dev) { diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c index 75e48788c7ea..32701be71cf7 100644 --- a/drivers/iio/dac/stm32-dac-core.c +++ b/drivers/iio/dac/stm32-dac-core.c @@ -42,6 +42,14 @@ struct stm32_dac_priv { struct stm32_dac_common common; }; +/** + * struct stm32_dac_cfg - DAC configuration + * @has_hfsel: DAC has high frequency control + */ +struct stm32_dac_cfg { + bool has_hfsel; +}; + static struct stm32_dac_priv *to_stm32_dac_priv(struct stm32_dac_common *com) { return container_of(com, struct stm32_dac_priv, common); @@ -57,6 +65,7 @@ static const struct regmap_config stm32_dac_regmap_cfg = { static int stm32_dac_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + const struct stm32_dac_cfg *cfg; struct stm32_dac_priv *priv; struct regmap *regmap; struct resource *res; @@ -69,6 +78,8 @@ static int stm32_dac_probe(struct platform_device *pdev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + cfg = (const struct stm32_dac_cfg *) + of_match_device(dev->driver->of_match_table, dev)->data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mmio = devm_ioremap_resource(dev, res); @@ -121,12 +132,16 @@ static int stm32_dac_probe(struct platform_device *pdev) reset_control_deassert(priv->rst); } - /* When clock speed is higher than 80MHz, set HFSEL */ - priv->common.hfsel = (clk_get_rate(priv->pclk) > 80000000UL); - ret = regmap_update_bits(regmap, STM32_DAC_CR, STM32H7_DAC_CR_HFSEL, - priv->common.hfsel ? 
STM32H7_DAC_CR_HFSEL : 0); - if (ret) - goto err_pclk; + if (cfg && cfg->has_hfsel) { + /* When clock speed is higher than 80MHz, set HFSEL */ + priv->common.hfsel = (clk_get_rate(priv->pclk) > 80000000UL); + ret = regmap_update_bits(regmap, STM32_DAC_CR, + STM32H7_DAC_CR_HFSEL, + priv->common.hfsel ? + STM32H7_DAC_CR_HFSEL : 0); + if (ret) + goto err_pclk; + } platform_set_drvdata(pdev, &priv->common); @@ -158,8 +173,17 @@ static int stm32_dac_remove(struct platform_device *pdev) return 0; } +static const struct stm32_dac_cfg stm32h7_dac_cfg = { + .has_hfsel = true, +}; + static const struct of_device_id stm32_dac_of_match[] = { - { .compatible = "st,stm32h7-dac-core", }, + { + .compatible = "st,stm32f4-dac-core", + }, { + .compatible = "st,stm32h7-dac-core", + .data = (void *)&stm32h7_dac_cfg, + }, {}, }; MODULE_DEVICE_TABLE(of, stm32_dac_of_match); diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c index 50f8ec091058..c1864e8aa851 100644 --- a/drivers/iio/dac/stm32-dac.c +++ b/drivers/iio/dac/stm32-dac.c @@ -268,7 +268,7 @@ static int stm32_dac_chan_of_init(struct iio_dev *indio_dev) break; } if (i >= ARRAY_SIZE(stm32_dac_channels)) { - dev_err(&indio_dev->dev, "Invalid st,dac-channel\n"); + dev_err(&indio_dev->dev, "Invalid reg property\n"); return -EINVAL; } diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 2be2a5d287e6..e0d241a9aa30 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -1063,11 +1063,6 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq) case IRQF_TRIGGER_RISING: dev_info(&indio_dev->dev, "pulse interrupts on the rising edge\n"); - if (mpu3050->irq_opendrain) { - dev_info(&indio_dev->dev, - "rising edge incompatible with open drain\n"); - mpu3050->irq_opendrain = false; - } break; case IRQF_TRIGGER_FALLING: mpu3050->irq_actl = true; @@ -1078,11 +1073,6 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq) mpu3050->irq_latch = true; dev_info(&indio_dev->dev, "interrupts active high level\n"); - if (mpu3050->irq_opendrain) { - dev_info(&indio_dev->dev, - "active high incompatible with open drain\n"); - mpu3050->irq_opendrain = false; - } /* * With level IRQs, we mask the IRQ until it is processed, * but with edge IRQs (pulses) we can queue several interrupts diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h index a5c5c4e29add..48923ae6ac3b 100644 --- a/drivers/iio/gyro/st_gyro.h +++ b/drivers/iio/gyro/st_gyro.h @@ -19,6 +19,7 @@ #define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro" #define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro" #define L3GD20_GYRO_DEV_NAME "l3gd20" +#define L3GD20H_GYRO_DEV_NAME "l3gd20h" #define L3G4IS_GYRO_DEV_NAME "l3g4is_ui" #define LSM330_GYRO_DEV_NAME "lsm330_gyro" #define LSM9DS0_GYRO_DEV_NAME "lsm9ds0_gyro" diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 2a42b3d583e8..e366422e8512 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -35,6 +35,7 @@ #define ST_GYRO_DEFAULT_OUT_Z_L_ADDR 0x2c /* FULLSCALE */ +#define ST_GYRO_FS_AVL_245DPS 245 #define ST_GYRO_FS_AVL_250DPS 250 #define ST_GYRO_FS_AVL_500DPS 500 #define ST_GYRO_FS_AVL_2000DPS 2000 @@ -196,17 +197,17 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { .wai = 0xd7, .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS, .sensors_supported = { - [0] = L3GD20_GYRO_DEV_NAME, + [0] = L3GD20H_GYRO_DEV_NAME, }, .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, .odr = { .addr 
= 0x20, .mask = 0xc0, .odr_avl = { - { .hz = 95, .value = 0x00, }, - { .hz = 190, .value = 0x01, }, - { .hz = 380, .value = 0x02, }, - { .hz = 760, .value = 0x03, }, + { .hz = 100, .value = 0x00, }, + { .hz = 200, .value = 0x01, }, + { .hz = 400, .value = 0x02, }, + { .hz = 800, .value = 0x03, }, }, }, .pw = { @@ -224,7 +225,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { .mask = 0x30, .fs_avl = { [0] = { - .num = ST_GYRO_FS_AVL_250DPS, + .num = ST_GYRO_FS_AVL_245DPS, .value = 0x00, .gain = IIO_DEGREE_TO_RAD(8750), }, diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c index 40056b821036..b405b82b9177 100644 --- a/drivers/iio/gyro/st_gyro_i2c.c +++ b/drivers/iio/gyro/st_gyro_i2c.c @@ -41,6 +41,10 @@ static const struct of_device_id st_gyro_of_match[] = { .data = L3GD20_GYRO_DEV_NAME, }, { + .compatible = "st,l3gd20h-gyro", + .data = L3GD20H_GYRO_DEV_NAME, + }, + { .compatible = "st,l3g4is-gyro", .data = L3G4IS_GYRO_DEV_NAME, }, @@ -71,7 +75,8 @@ static int st_gyro_i2c_probe(struct i2c_client *client, return -ENOMEM; gdata = iio_priv(indio_dev); - st_sensors_of_i2c_probe(client, st_gyro_of_match); + st_sensors_of_name_probe(&client->dev, st_gyro_of_match, + client->name, sizeof(client->name)); st_sensors_i2c_configure(indio_dev, client, gdata); @@ -95,6 +100,7 @@ static const struct i2c_device_id st_gyro_id_table[] = { { LSM330DL_GYRO_DEV_NAME }, { LSM330DLC_GYRO_DEV_NAME }, { L3GD20_GYRO_DEV_NAME }, + { L3GD20H_GYRO_DEV_NAME }, { L3G4IS_GYRO_DEV_NAME }, { LSM330_GYRO_DEV_NAME }, { LSM9DS0_GYRO_DEV_NAME }, diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c index fbf2faed501c..0b52ed577dc2 100644 --- a/drivers/iio/gyro/st_gyro_spi.c +++ b/drivers/iio/gyro/st_gyro_spi.c @@ -18,6 +18,56 @@ #include <linux/iio/common/st_sensors_spi.h> #include "st_gyro.h" +#ifdef CONFIG_OF +/* + * For new single-chip sensors use <device_name> as compatible string. 
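In the st_gyro_core hunk above, the new 245 dps range keeps its gain expressed as IIO_DEGREE_TO_RAD(8750), i.e. the familiar 8.75 mdps-per-LSB sensitivity converted into the rad/s units that IIO reports for angular velocity. A quick numeric check of that conversion (the names below are local to the sketch, not driver symbols):

#include <stdio.h>

int main(void)
{
	const double pi = 3.14159265358979323846;
	/* 245 dps range: 8.75 mdps per LSB, written here in micro-degrees/s. */
	const double gain_udps = 8750.0;
	/* The same sensitivity in micro-rad/s. */
	const double gain_urads = gain_udps * pi / 180.0;

	printf("%.1f urad/s per LSB (scale ~%.6f rad/s)\n",
	       gain_urads, gain_urads / 1e6);
	return 0;
}

This prints roughly 152.7 urad/s per LSB, which rounds to the integer gain the kernel macro produces.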
+ * For old single-chip devices keep <device_name>-gyro to maintain + * compatibility + */ +static const struct of_device_id st_gyro_of_match[] = { + { + .compatible = "st,l3g4200d-gyro", + .data = L3G4200D_GYRO_DEV_NAME, + }, + { + .compatible = "st,lsm330d-gyro", + .data = LSM330D_GYRO_DEV_NAME, + }, + { + .compatible = "st,lsm330dl-gyro", + .data = LSM330DL_GYRO_DEV_NAME, + }, + { + .compatible = "st,lsm330dlc-gyro", + .data = LSM330DLC_GYRO_DEV_NAME, + }, + { + .compatible = "st,l3gd20-gyro", + .data = L3GD20_GYRO_DEV_NAME, + }, + { + .compatible = "st,l3gd20h-gyro", + .data = L3GD20H_GYRO_DEV_NAME, + }, + { + .compatible = "st,l3g4is-gyro", + .data = L3G4IS_GYRO_DEV_NAME, + }, + { + .compatible = "st,lsm330-gyro", + .data = LSM330_GYRO_DEV_NAME, + }, + { + .compatible = "st,lsm9ds0-gyro", + .data = LSM9DS0_GYRO_DEV_NAME, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, st_gyro_of_match); +#else +#define st_gyro_of_match NULL +#endif + static int st_gyro_spi_probe(struct spi_device *spi) { struct iio_dev *indio_dev; @@ -30,6 +80,8 @@ static int st_gyro_spi_probe(struct spi_device *spi) gdata = iio_priv(indio_dev); + st_sensors_of_name_probe(&spi->dev, st_gyro_of_match, + spi->modalias, sizeof(spi->modalias)); st_sensors_spi_configure(indio_dev, spi, gdata); err = st_gyro_common_probe(indio_dev); @@ -52,6 +104,7 @@ static const struct spi_device_id st_gyro_id_table[] = { { LSM330DL_GYRO_DEV_NAME }, { LSM330DLC_GYRO_DEV_NAME }, { L3GD20_GYRO_DEV_NAME }, + { L3GD20H_GYRO_DEV_NAME }, { L3G4IS_GYRO_DEV_NAME }, { LSM330_GYRO_DEV_NAME }, { LSM9DS0_GYRO_DEV_NAME }, @@ -62,6 +115,7 @@ MODULE_DEVICE_TABLE(spi, st_gyro_id_table); static struct spi_driver st_gyro_driver = { .driver = { .name = "st-gyro-spi", + .of_match_table = of_match_ptr(st_gyro_of_match), }, .probe = st_gyro_spi_probe, .remove = st_gyro_spi_remove, diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig index 14b9ce453d9d..2c0fc9a400b8 100644 --- a/drivers/iio/humidity/Kconfig +++ b/drivers/iio/humidity/Kconfig @@ -31,7 +31,8 @@ config HDC100X select IIO_TRIGGERED_BUFFER help Say yes here to build support for the Texas Instruments - HDC1000 and HDC1008 relative humidity and temperature sensors. + HDC1000, HDC1008, HDC1010, HDC1050, and HDC1080 relative + humidity and temperature sensors. To compile this driver as a module, choose M here: the module will be called hdc100x. diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index aa17115f54c9..7851bd90ef64 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -13,6 +13,12 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* + * Datasheets: + * http://www.ti.com/product/HDC1000/datasheet + * http://www.ti.com/product/HDC1008/datasheet + * http://www.ti.com/product/HDC1010/datasheet + * http://www.ti.com/product/HDC1050/datasheet + * http://www.ti.com/product/HDC1080/datasheet */ #include <linux/delay.h> @@ -414,13 +420,29 @@ static int hdc100x_remove(struct i2c_client *client) static const struct i2c_device_id hdc100x_id[] = { { "hdc100x", 0 }, + { "hdc1000", 0 }, + { "hdc1008", 0 }, + { "hdc1010", 0 }, + { "hdc1050", 0 }, + { "hdc1080", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, hdc100x_id); +static const struct of_device_id hdc100x_dt_ids[] = { + { .compatible = "ti,hdc1000" }, + { .compatible = "ti,hdc1008" }, + { .compatible = "ti,hdc1010" }, + { .compatible = "ti,hdc1050" }, + { .compatible = "ti,hdc1080" }, + { } +}; +MODULE_DEVICE_TABLE(of, hdc100x_dt_ids); + static struct i2c_driver hdc100x_driver = { .driver = { .name = "hdc100x", + .of_match_table = of_match_ptr(hdc100x_dt_ids), }, .probe = hdc100x_probe, .remove = hdc100x_remove, diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h index 94510266e0a5..51d021966222 100644 --- a/drivers/iio/humidity/hts221.h +++ b/drivers/iio/humidity/hts221.h @@ -30,12 +30,6 @@ struct hts221_transfer_function { int (*write)(struct device *dev, u8 addr, int len, u8 *data); }; -#define HTS221_AVG_DEPTH 8 -struct hts221_avg_avl { - u16 avg; - u8 val; -}; - enum hts221_sensor_type { HTS221_SENSOR_H, HTS221_SENSOR_T, @@ -66,10 +60,9 @@ struct hts221_hw { extern const struct dev_pm_ops hts221_pm_ops; -int hts221_config_drdy(struct hts221_hw *hw, bool enable); +int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val); int hts221_probe(struct iio_dev *iio_dev); -int hts221_power_on(struct hts221_hw *hw); -int hts221_power_off(struct hts221_hw *hw); +int hts221_set_enable(struct hts221_hw *hw, bool enable); int hts221_allocate_buffers(struct hts221_hw *hw); int hts221_allocate_trigger(struct hts221_hw *hw); diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index 7d19a3da7ab7..9690dfe9a844 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -20,8 +20,16 @@ #include <linux/iio/triggered_buffer.h> #include <linux/iio/buffer.h> +#include <linux/platform_data/st_sensors_pdata.h> + #include "hts221.h" +#define HTS221_REG_DRDY_HL_ADDR 0x22 +#define HTS221_REG_DRDY_HL_MASK BIT(7) +#define HTS221_REG_DRDY_PP_OD_ADDR 0x22 +#define HTS221_REG_DRDY_PP_OD_MASK BIT(6) +#define HTS221_REG_DRDY_EN_ADDR 0x22 +#define HTS221_REG_DRDY_EN_MASK BIT(2) #define HTS221_REG_STATUS_ADDR 0x27 #define HTS221_RH_DRDY_MASK BIT(1) #define HTS221_TEMP_DRDY_MASK BIT(0) @@ -30,8 +38,12 @@ static int hts221_trig_set_state(struct iio_trigger *trig, bool state) { struct iio_dev *iio_dev = iio_trigger_get_drvdata(trig); struct hts221_hw *hw = iio_priv(iio_dev); + int err; + + err = hts221_write_with_mask(hw, HTS221_REG_DRDY_EN_ADDR, + HTS221_REG_DRDY_EN_MASK, state); - return hts221_config_drdy(hw, state); + return err < 0 ? 
err : 0; } static const struct iio_trigger_ops hts221_trigger_ops = { @@ -67,6 +79,9 @@ static irqreturn_t hts221_trigger_handler_thread(int irq, void *private) int hts221_allocate_trigger(struct hts221_hw *hw) { struct iio_dev *iio_dev = iio_priv_to_dev(hw); + bool irq_active_low = false, open_drain = false; + struct device_node *np = hw->dev->of_node; + struct st_sensors_platform_data *pdata; unsigned long irq_type; int err; @@ -76,6 +91,10 @@ int hts221_allocate_trigger(struct hts221_hw *hw) case IRQF_TRIGGER_HIGH: case IRQF_TRIGGER_RISING: break; + case IRQF_TRIGGER_LOW: + case IRQF_TRIGGER_FALLING: + irq_active_low = true; + break; default: dev_info(hw->dev, "mode %lx unsupported, using IRQF_TRIGGER_RISING\n", @@ -84,6 +103,24 @@ int hts221_allocate_trigger(struct hts221_hw *hw) break; } + err = hts221_write_with_mask(hw, HTS221_REG_DRDY_HL_ADDR, + HTS221_REG_DRDY_HL_MASK, irq_active_low); + if (err < 0) + return err; + + pdata = (struct st_sensors_platform_data *)hw->dev->platform_data; + if ((np && of_property_read_bool(np, "drive-open-drain")) || + (pdata && pdata->open_drain)) { + irq_type |= IRQF_SHARED; + open_drain = true; + } + + err = hts221_write_with_mask(hw, HTS221_REG_DRDY_PP_OD_ADDR, + HTS221_REG_DRDY_PP_OD_MASK, + open_drain); + if (err < 0) + return err; + err = devm_request_threaded_irq(hw->dev, hw->irq, NULL, hts221_trigger_handler_thread, irq_type | IRQF_ONESHOT, @@ -109,12 +146,12 @@ int hts221_allocate_trigger(struct hts221_hw *hw) static int hts221_buffer_preenable(struct iio_dev *iio_dev) { - return hts221_power_on(iio_priv(iio_dev)); + return hts221_set_enable(iio_priv(iio_dev), true); } static int hts221_buffer_postdisable(struct iio_dev *iio_dev) { - return hts221_power_off(iio_priv(iio_dev)); + return hts221_set_enable(iio_priv(iio_dev), false); } static const struct iio_buffer_setup_ops hts221_buffer_ops = { diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c index a56da3999e00..32524a8dc66f 100644 --- a/drivers/iio/humidity/hts221_core.c +++ b/drivers/iio/humidity/hts221_core.c @@ -23,7 +23,6 @@ #define HTS221_REG_CNTRL1_ADDR 0x20 #define HTS221_REG_CNTRL2_ADDR 0x21 -#define HTS221_REG_CNTRL3_ADDR 0x22 #define HTS221_REG_AVG_ADDR 0x10 #define HTS221_REG_H_OUT_L 0x28 @@ -32,30 +31,9 @@ #define HTS221_HUMIDITY_AVG_MASK 0x07 #define HTS221_TEMP_AVG_MASK 0x38 -#define HTS221_ODR_MASK 0x87 +#define HTS221_ODR_MASK 0x03 #define HTS221_BDU_MASK BIT(2) - -#define HTS221_DRDY_MASK BIT(2) - -#define HTS221_ENABLE_SENSOR BIT(7) - -#define HTS221_HUMIDITY_AVG_4 0x00 /* 0.4 %RH */ -#define HTS221_HUMIDITY_AVG_8 0x01 /* 0.3 %RH */ -#define HTS221_HUMIDITY_AVG_16 0x02 /* 0.2 %RH */ -#define HTS221_HUMIDITY_AVG_32 0x03 /* 0.15 %RH */ -#define HTS221_HUMIDITY_AVG_64 0x04 /* 0.1 %RH */ -#define HTS221_HUMIDITY_AVG_128 0x05 /* 0.07 %RH */ -#define HTS221_HUMIDITY_AVG_256 0x06 /* 0.05 %RH */ -#define HTS221_HUMIDITY_AVG_512 0x07 /* 0.03 %RH */ - -#define HTS221_TEMP_AVG_2 0x00 /* 0.08 degC */ -#define HTS221_TEMP_AVG_4 0x08 /* 0.05 degC */ -#define HTS221_TEMP_AVG_8 0x10 /* 0.04 degC */ -#define HTS221_TEMP_AVG_16 0x18 /* 0.03 degC */ -#define HTS221_TEMP_AVG_32 0x20 /* 0.02 degC */ -#define HTS221_TEMP_AVG_64 0x28 /* 0.015 degC */ -#define HTS221_TEMP_AVG_128 0x30 /* 0.01 degC */ -#define HTS221_TEMP_AVG_256 0x38 /* 0.007 degC */ +#define HTS221_ENABLE_MASK BIT(7) /* calibration registers */ #define HTS221_REG_0RH_CAL_X_H 0x36 @@ -73,10 +51,11 @@ struct hts221_odr { u8 val; }; +#define HTS221_AVG_DEPTH 8 struct hts221_avg { u8 addr; u8 mask; 
- struct hts221_avg_avl avg_avl[HTS221_AVG_DEPTH]; + u16 avg_avl[HTS221_AVG_DEPTH]; }; static const struct hts221_odr hts221_odr_table[] = { @@ -90,28 +69,28 @@ static const struct hts221_avg hts221_avg_list[] = { .addr = HTS221_REG_AVG_ADDR, .mask = HTS221_HUMIDITY_AVG_MASK, .avg_avl = { - { 4, HTS221_HUMIDITY_AVG_4 }, - { 8, HTS221_HUMIDITY_AVG_8 }, - { 16, HTS221_HUMIDITY_AVG_16 }, - { 32, HTS221_HUMIDITY_AVG_32 }, - { 64, HTS221_HUMIDITY_AVG_64 }, - { 128, HTS221_HUMIDITY_AVG_128 }, - { 256, HTS221_HUMIDITY_AVG_256 }, - { 512, HTS221_HUMIDITY_AVG_512 }, + 4, /* 0.4 %RH */ + 8, /* 0.3 %RH */ + 16, /* 0.2 %RH */ + 32, /* 0.15 %RH */ + 64, /* 0.1 %RH */ + 128, /* 0.07 %RH */ + 256, /* 0.05 %RH */ + 512, /* 0.03 %RH */ }, }, { .addr = HTS221_REG_AVG_ADDR, .mask = HTS221_TEMP_AVG_MASK, .avg_avl = { - { 2, HTS221_TEMP_AVG_2 }, - { 4, HTS221_TEMP_AVG_4 }, - { 8, HTS221_TEMP_AVG_8 }, - { 16, HTS221_TEMP_AVG_16 }, - { 32, HTS221_TEMP_AVG_32 }, - { 64, HTS221_TEMP_AVG_64 }, - { 128, HTS221_TEMP_AVG_128 }, - { 256, HTS221_TEMP_AVG_256 }, + 2, /* 0.08 degC */ + 4, /* 0.05 degC */ + 8, /* 0.04 degC */ + 16, /* 0.03 degC */ + 32, /* 0.02 degC */ + 64, /* 0.015 degC */ + 128, /* 0.01 degC */ + 256, /* 0.007 degC */ }, }, }; @@ -152,8 +131,7 @@ static const struct iio_chan_spec hts221_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(2), }; -static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, - u8 val) +int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val) { u8 data; int err; @@ -166,7 +144,7 @@ static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, goto unlock; } - data = (data & ~mask) | (val & mask); + data = (data & ~mask) | ((val << __ffs(mask)) & mask); err = hw->tf->write(hw->dev, addr, sizeof(data), &data); if (err < 0) @@ -199,21 +177,9 @@ static int hts221_check_whoami(struct hts221_hw *hw) return 0; } -int hts221_config_drdy(struct hts221_hw *hw, bool enable) -{ - u8 val = enable ? BIT(2) : 0; - int err; - - err = hts221_write_with_mask(hw, HTS221_REG_CNTRL3_ADDR, - HTS221_DRDY_MASK, val); - - return err < 0 ? 
err : 0; -} - static int hts221_update_odr(struct hts221_hw *hw, u8 odr) { int i, err; - u8 val; for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++) if (hts221_odr_table[i].hz == odr) @@ -222,9 +188,8 @@ static int hts221_update_odr(struct hts221_hw *hw, u8 odr) if (i == ARRAY_SIZE(hts221_odr_table)) return -EINVAL; - val = HTS221_ENABLE_SENSOR | HTS221_BDU_MASK | hts221_odr_table[i].val; err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR, - HTS221_ODR_MASK, val); + HTS221_ODR_MASK, hts221_odr_table[i].val); if (err < 0) return err; @@ -241,14 +206,13 @@ static int hts221_update_avg(struct hts221_hw *hw, const struct hts221_avg *avg = &hts221_avg_list[type]; for (i = 0; i < HTS221_AVG_DEPTH; i++) - if (avg->avg_avl[i].avg == val) + if (avg->avg_avl[i] == val) break; if (i == HTS221_AVG_DEPTH) return -EINVAL; - err = hts221_write_with_mask(hw, avg->addr, avg->mask, - avg->avg_avl[i].val); + err = hts221_write_with_mask(hw, avg->addr, avg->mask, i); if (err < 0) return err; @@ -283,7 +247,7 @@ hts221_sysfs_rh_oversampling_avail(struct device *dev, for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++) len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", - avg->avg_avl[i].avg); + avg->avg_avl[i]); buf[len - 1] = '\n'; return len; @@ -300,36 +264,22 @@ hts221_sysfs_temp_oversampling_avail(struct device *dev, for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++) len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", - avg->avg_avl[i].avg); + avg->avg_avl[i]); buf[len - 1] = '\n'; return len; } -int hts221_power_on(struct hts221_hw *hw) -{ - int err; - - err = hts221_update_odr(hw, hw->odr); - if (err < 0) - return err; - - hw->enabled = true; - - return 0; -} - -int hts221_power_off(struct hts221_hw *hw) +int hts221_set_enable(struct hts221_hw *hw, bool enable) { - __le16 data = 0; int err; - err = hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data), - (u8 *)&data); + err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR, + HTS221_ENABLE_MASK, enable); if (err < 0) return err; - hw->enabled = false; + hw->enabled = enable; return 0; } @@ -484,7 +434,7 @@ static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val) u8 data[HTS221_DATA_SIZE]; int err; - err = hts221_power_on(hw); + err = hts221_set_enable(hw, true); if (err < 0) return err; @@ -494,7 +444,7 @@ static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val) if (err < 0) return err; - hts221_power_off(hw); + hts221_set_enable(hw, false); *val = (s16)get_unaligned_le16(data); @@ -534,13 +484,13 @@ static int hts221_read_raw(struct iio_dev *iio_dev, case IIO_HUMIDITYRELATIVE: avg = &hts221_avg_list[HTS221_SENSOR_H]; idx = hw->sensors[HTS221_SENSOR_H].cur_avg_idx; - *val = avg->avg_avl[idx].avg; + *val = avg->avg_avl[idx]; ret = IIO_VAL_INT; break; case IIO_TEMP: avg = &hts221_avg_list[HTS221_SENSOR_T]; idx = hw->sensors[HTS221_SENSOR_T].cur_avg_idx; - *val = avg->avg_avl[idx].avg; + *val = avg->avg_avl[idx]; ret = IIO_VAL_INT; break; default: @@ -644,8 +594,6 @@ int hts221_probe(struct iio_dev *iio_dev) if (err < 0) return err; - hw->odr = hts221_odr_table[0].hz; - iio_dev->modes = INDIO_DIRECT_MODE; iio_dev->dev.parent = hw->dev; iio_dev->available_scan_masks = hts221_scan_masks; @@ -654,6 +602,16 @@ int hts221_probe(struct iio_dev *iio_dev) iio_dev->name = HTS221_DEV_NAME; iio_dev->info = &hts221_info; + /* enable Block Data Update */ + err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR, + HTS221_BDU_MASK, 1); + if (err < 0) + return err; + + err = hts221_update_odr(hw, hts221_odr_table[0].hz); + if (err < 0) + 
return err; + /* configure humidity sensor */ err = hts221_parse_rh_caldata(hw); if (err < 0) { @@ -661,7 +619,7 @@ int hts221_probe(struct iio_dev *iio_dev) return err; } - data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3].avg; + data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3]; err = hts221_update_avg(hw, HTS221_SENSOR_H, data); if (err < 0) { dev_err(hw->dev, "failed to set rh oversampling ratio\n"); @@ -676,7 +634,7 @@ int hts221_probe(struct iio_dev *iio_dev) return err; } - data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3].avg; + data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3]; err = hts221_update_avg(hw, HTS221_SENSOR_T, data); if (err < 0) { dev_err(hw->dev, @@ -702,11 +660,10 @@ static int __maybe_unused hts221_suspend(struct device *dev) { struct iio_dev *iio_dev = dev_get_drvdata(dev); struct hts221_hw *hw = iio_priv(iio_dev); - __le16 data = 0; int err; - err = hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data), - (u8 *)&data); + err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR, + HTS221_ENABLE_MASK, false); return err < 0 ? err : 0; } @@ -718,7 +675,8 @@ static int __maybe_unused hts221_resume(struct device *dev) int err = 0; if (hw->enabled) - err = hts221_update_odr(hw, hw->odr); + err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR, + HTS221_ENABLE_MASK, true); return err; } diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c index 0fbbd8c40894..2c4b9be85a05 100644 --- a/drivers/iio/humidity/htu21.c +++ b/drivers/iio/humidity/htu21.c @@ -238,11 +238,19 @@ static const struct i2c_device_id htu21_id[] = { }; MODULE_DEVICE_TABLE(i2c, htu21_id); +static const struct of_device_id htu21_of_match[] = { + { .compatible = "meas,htu21", }, + { .compatible = "meas,ms8607-humidity", }, + { }, +}; +MODULE_DEVICE_TABLE(of, htu21_of_match); + static struct i2c_driver htu21_driver = { .probe = htu21_probe, .id_table = htu21_id, .driver = { .name = "htu21", + .of_match_table = of_match_ptr(htu21_of_match), }, }; diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index fb7c0dbed51c..9b697d35dbef 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -217,7 +217,7 @@ static int adis16400_set_freq(struct adis16400_state *st, unsigned int freq) return adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val); } -static const unsigned adis16400_3db_divisors[] = { +static const unsigned int adis16400_3db_divisors[] = { [0] = 2, /* Special case */ [1] = 6, [2] = 12, @@ -890,7 +890,7 @@ static const struct adis_data adis16400_data = { static void adis16400_setup_chan_mask(struct adis16400_state *st) { const struct adis16400_chip_info *chip_info = st->variant; - unsigned i; + unsigned int i; for (i = 0; i < chip_info->num_channels; i++) { const struct iio_chan_spec *ch = &chip_info->channels[i]; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 2a72acc6e049..e2737dc71b67 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -31,6 +31,8 @@ #include <linux/iio/iio.h> #include <linux/iio/buffer.h> +#include <linux/platform_data/st_sensors_pdata.h> + #include "st_lsm6dsx.h" #define ST_LSM6DSX_REG_FIFO_THL_ADDR 0x06 @@ -39,6 +41,8 @@ #define ST_LSM6DSX_REG_FIFO_DEC_GXL_ADDR 0x08 #define ST_LSM6DSX_REG_HLACTIVE_ADDR 0x12 #define ST_LSM6DSX_REG_HLACTIVE_MASK BIT(5) +#define ST_LSM6DSX_REG_PP_OD_ADDR 0x12 +#define ST_LSM6DSX_REG_PP_OD_MASK BIT(4) #define 
ST_LSM6DSX_REG_FIFO_MODE_ADDR 0x0a #define ST_LSM6DSX_FIFO_MODE_MASK GENMASK(2, 0) #define ST_LSM6DSX_FIFO_ODR_MASK GENMASK(6, 3) @@ -417,6 +421,8 @@ static const struct iio_buffer_setup_ops st_lsm6dsx_buffer_ops = { int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw) { + struct device_node *np = hw->dev->of_node; + struct st_sensors_platform_data *pdata; struct iio_buffer *buffer; unsigned long irq_type; bool irq_active_low; @@ -444,6 +450,17 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw) if (err < 0) return err; + pdata = (struct st_sensors_platform_data *)hw->dev->platform_data; + if ((np && of_property_read_bool(np, "drive-open-drain")) || + (pdata && pdata->open_drain)) { + err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_PP_OD_ADDR, + ST_LSM6DSX_REG_PP_OD_MASK, 1); + if (err < 0) + return err; + + irq_type |= IRQF_SHARED; + } + err = devm_request_threaded_irq(hw->dev, hw->irq, st_lsm6dsx_handler_irq, st_lsm6dsx_handler_thread, diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index da3d06b073bb..069defcc6d9b 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -44,7 +44,7 @@ int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps) } mapi->map = &maps[i]; mapi->indio_dev = indio_dev; - list_add(&mapi->l, &iio_map_list); + list_add_tail(&mapi->l, &iio_map_list); i++; } error_ret: @@ -205,8 +205,8 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) break; else if (name && index >= 0) { - pr_err("ERROR: could not get IIO channel %s:%s(%i)\n", - np->full_name, name ? name : "", index); + pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n", + np, name ? name : "", index); return NULL; } diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c index 9d0c2e859bb2..a6efa12613a2 100644 --- a/drivers/iio/light/rpr0521.c +++ b/drivers/iio/light/rpr0521.c @@ -9,7 +9,7 @@ * * IIO driver for RPR-0521RS (7-bit I2C slave address 0x38). 
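The rpr0521 changes below add triggered-buffer support with three 16-bit little-endian channels that the available_scan_masks force to be enabled together, followed by the usual 8-byte-aligned timestamp. Assuming that layout (6 data bytes, 2 padding bytes, then an s64 timestamp, 16 bytes per scan), a consumer could unpack one scan read from the character device roughly like this (struct name and sample bytes are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One 16-byte scan: proximity, both, ir (u16 LE each), padding, timestamp. */
struct rpr0521_scan {
	uint16_t channels[3];
	int64_t timestamp;
};

static void parse_scan(const uint8_t *raw, struct rpr0521_scan *out)
{
	int i;

	for (i = 0; i < 3; i++)		/* little-endian 16-bit words */
		out->channels[i] = (uint16_t)raw[2 * i] |
				   ((uint16_t)raw[2 * i + 1] << 8);
	memcpy(&out->timestamp, raw + 8, sizeof(out->timestamp));	/* skip 2 pad bytes */
}

int main(void)
{
	/* Fabricated scan, just to exercise the parser. */
	const uint8_t raw[16] = {
		0x34, 0x12, 0x78, 0x56, 0xbc, 0x9a,	/* pxs, both, ir */
		0x00, 0x00,				/* padding */
		0x15, 0xcd, 0x5b, 0x07, 0, 0, 0, 0	/* timestamp */
	};
	struct rpr0521_scan scan;

	parse_scan(raw, &scan);
	printf("pxs=0x%04x both=0x%04x ir=0x%04x ts=%lld\n",
	       scan.channels[0], scan.channels[1], scan.channels[2],
	       (long long)scan.timestamp);
	return 0;
}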
* - * TODO: illuminance channel, buffer + * TODO: illuminance channel */ #include <linux/module.h> @@ -20,6 +20,10 @@ #include <linux/acpi.h> #include <linux/iio/iio.h> +#include <linux/iio/buffer.h> +#include <linux/iio/trigger.h> +#include <linux/iio/trigger_consumer.h> +#include <linux/iio/triggered_buffer.h> #include <linux/iio/sysfs.h> #include <linux/pm_runtime.h> @@ -30,6 +34,7 @@ #define RPR0521_REG_PXS_DATA 0x44 /* 16-bit, little endian */ #define RPR0521_REG_ALS_DATA0 0x46 /* 16-bit, little endian */ #define RPR0521_REG_ALS_DATA1 0x48 /* 16-bit, little endian */ +#define RPR0521_REG_INTERRUPT 0x4A #define RPR0521_REG_PS_OFFSET_LSB 0x53 #define RPR0521_REG_ID 0x92 @@ -42,16 +47,31 @@ #define RPR0521_ALS_DATA1_GAIN_SHIFT 2 #define RPR0521_PXS_GAIN_MASK GENMASK(5, 4) #define RPR0521_PXS_GAIN_SHIFT 4 +#define RPR0521_PXS_PERSISTENCE_MASK GENMASK(3, 0) +#define RPR0521_INTERRUPT_INT_TRIG_PS_MASK BIT(0) +#define RPR0521_INTERRUPT_INT_TRIG_ALS_MASK BIT(1) +#define RPR0521_INTERRUPT_INT_REASSERT_MASK BIT(3) +#define RPR0521_INTERRUPT_ALS_INT_STATUS_MASK BIT(6) +#define RPR0521_INTERRUPT_PS_INT_STATUS_MASK BIT(7) #define RPR0521_MODE_ALS_ENABLE BIT(7) #define RPR0521_MODE_ALS_DISABLE 0x00 #define RPR0521_MODE_PXS_ENABLE BIT(6) #define RPR0521_MODE_PXS_DISABLE 0x00 +#define RPR0521_PXS_PERSISTENCE_DRDY 0x00 + +#define RPR0521_INTERRUPT_INT_TRIG_PS_ENABLE BIT(0) +#define RPR0521_INTERRUPT_INT_TRIG_PS_DISABLE 0x00 +#define RPR0521_INTERRUPT_INT_TRIG_ALS_ENABLE BIT(1) +#define RPR0521_INTERRUPT_INT_TRIG_ALS_DISABLE 0x00 +#define RPR0521_INTERRUPT_INT_REASSERT_ENABLE BIT(3) +#define RPR0521_INTERRUPT_INT_REASSERT_DISABLE 0x00 #define RPR0521_MANUFACT_ID 0xE0 #define RPR0521_DEFAULT_MEAS_TIME 0x06 /* ALS - 100ms, PXS - 100ms */ #define RPR0521_DRV_NAME "RPR0521" +#define RPR0521_IRQ_NAME "rpr0521_event" #define RPR0521_REGMAP_NAME "rpr0521_regmap" #define RPR0521_SLEEP_DELAY_MS 2000 @@ -167,6 +187,9 @@ struct rpr0521_data { bool als_dev_en; bool pxs_dev_en; + struct iio_trigger *drdy_trigger0; + s64 irq_timestamp; + /* optimize runtime pm ops - enable/disable device only if needed */ bool als_ps_need_en; bool pxs_ps_need_en; @@ -196,6 +219,19 @@ static const struct attribute_group rpr0521_attribute_group = { .attrs = rpr0521_attributes, }; +/* Order of the channel data in buffer */ +enum rpr0521_scan_index_order { + RPR0521_CHAN_INDEX_PXS, + RPR0521_CHAN_INDEX_BOTH, + RPR0521_CHAN_INDEX_IR, +}; + +static const unsigned long rpr0521_available_scan_masks[] = { + BIT(RPR0521_CHAN_INDEX_PXS) | BIT(RPR0521_CHAN_INDEX_BOTH) | + BIT(RPR0521_CHAN_INDEX_IR), + 0 +}; + static const struct iio_chan_spec rpr0521_channels[] = { { .type = IIO_PROXIMITY, @@ -204,6 +240,13 @@ static const struct iio_chan_spec rpr0521_channels[] = { BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .scan_index = RPR0521_CHAN_INDEX_PXS, + .scan_type = { + .sign = 'u', + .realbits = 16, + .storagebits = 16, + .endianness = IIO_LE, + }, }, { .type = IIO_INTENSITY, @@ -213,6 +256,13 @@ static const struct iio_chan_spec rpr0521_channels[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .scan_index = RPR0521_CHAN_INDEX_BOTH, + .scan_type = { + .sign = 'u', + .realbits = 16, + .storagebits = 16, + .endianness = IIO_LE, + }, }, { .type = IIO_INTENSITY, @@ -222,6 +272,13 @@ static const struct iio_chan_spec rpr0521_channels[] = { .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | 
BIT(IIO_CHAN_INFO_SCALE), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .scan_index = RPR0521_CHAN_INDEX_IR, + .scan_type = { + .sign = 'u', + .realbits = 16, + .storagebits = 16, + .endianness = IIO_LE, + }, }, }; @@ -330,6 +387,198 @@ static int rpr0521_set_power_state(struct rpr0521_data *data, bool on, return 0; } +/* Interrupt register tells if this sensor caused the interrupt or not. */ +static inline bool rpr0521_is_triggered(struct rpr0521_data *data) +{ + int ret; + int reg; + + ret = regmap_read(data->regmap, RPR0521_REG_INTERRUPT, &reg); + if (ret < 0) + return false; /* Reg read failed. */ + if (reg & + (RPR0521_INTERRUPT_ALS_INT_STATUS_MASK | + RPR0521_INTERRUPT_PS_INT_STATUS_MASK)) + return true; + else + return false; /* Int not from this sensor. */ +} + +/* IRQ to trigger handler */ +static irqreturn_t rpr0521_drdy_irq_handler(int irq, void *private) +{ + struct iio_dev *indio_dev = private; + struct rpr0521_data *data = iio_priv(indio_dev); + + data->irq_timestamp = iio_get_time_ns(indio_dev); + /* + * We need to wake the thread to read the interrupt reg. It + * is not possible to do that here because regmap_read takes a + * mutex. + */ + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t rpr0521_drdy_irq_thread(int irq, void *private) +{ + struct iio_dev *indio_dev = private; + struct rpr0521_data *data = iio_priv(indio_dev); + + if (rpr0521_is_triggered(data)) { + iio_trigger_poll_chained(data->drdy_trigger0); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static irqreturn_t rpr0521_trigger_consumer_store_time(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + + /* Other trigger polls store time here. */ + if (!iio_trigger_using_own(indio_dev)) + pf->timestamp = iio_get_time_ns(indio_dev); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct rpr0521_data *data = iio_priv(indio_dev); + int err; + + u8 buffer[16]; /* 3 16-bit channels + padding + ts */ + + /* Use irq timestamp when reasonable. */ + if (iio_trigger_using_own(indio_dev) && data->irq_timestamp) { + pf->timestamp = data->irq_timestamp; + data->irq_timestamp = 0; + } + /* Other chained trigger polls get timestamp only here. */ + if (!pf->timestamp) + pf->timestamp = iio_get_time_ns(indio_dev); + + err = regmap_bulk_read(data->regmap, RPR0521_REG_PXS_DATA, + &buffer, + (3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg.
*/ + if (!err) + iio_push_to_buffers_with_timestamp(indio_dev, + buffer, pf->timestamp); + else + dev_err(&data->client->dev, + "Trigger consumer can't read from sensor.\n"); + pf->timestamp = 0; + + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static int rpr0521_write_int_enable(struct rpr0521_data *data) +{ + int err; + + /* Interrupt after each measurement */ + err = regmap_update_bits(data->regmap, RPR0521_REG_PXS_CTRL, + RPR0521_PXS_PERSISTENCE_MASK, + RPR0521_PXS_PERSISTENCE_DRDY); + if (err) { + dev_err(&data->client->dev, "PS control reg write fail.\n"); + return -EBUSY; + } + + /* Ignore latch and mode because of drdy */ + err = regmap_write(data->regmap, RPR0521_REG_INTERRUPT, + RPR0521_INTERRUPT_INT_REASSERT_DISABLE | + RPR0521_INTERRUPT_INT_TRIG_ALS_DISABLE | + RPR0521_INTERRUPT_INT_TRIG_PS_ENABLE + ); + if (err) { + dev_err(&data->client->dev, "Interrupt setup write fail.\n"); + return -EBUSY; + } + + return 0; +} + +static int rpr0521_write_int_disable(struct rpr0521_data *data) +{ + /* Don't care of clearing mode, assert and latch. */ + return regmap_write(data->regmap, RPR0521_REG_INTERRUPT, + RPR0521_INTERRUPT_INT_TRIG_ALS_DISABLE | + RPR0521_INTERRUPT_INT_TRIG_PS_DISABLE + ); +} + +/* + * Trigger producer enable / disable. Note that there will be trigs only when + * measurement data is ready to be read. + */ +static int rpr0521_pxs_drdy_set_state(struct iio_trigger *trigger, + bool enable_drdy) +{ + struct iio_dev *indio_dev = iio_trigger_get_drvdata(trigger); + struct rpr0521_data *data = iio_priv(indio_dev); + int err; + + if (enable_drdy) + err = rpr0521_write_int_enable(data); + else + err = rpr0521_write_int_disable(data); + if (err) + dev_err(&data->client->dev, "rpr0521_pxs_drdy_set_state failed\n"); + + return err; +} + +static const struct iio_trigger_ops rpr0521_trigger_ops = { + .set_trigger_state = rpr0521_pxs_drdy_set_state, + .owner = THIS_MODULE, + }; + + +static int rpr0521_buffer_preenable(struct iio_dev *indio_dev) +{ + int err; + struct rpr0521_data *data = iio_priv(indio_dev); + + mutex_lock(&data->lock); + err = rpr0521_set_power_state(data, true, + (RPR0521_MODE_PXS_MASK | RPR0521_MODE_ALS_MASK)); + mutex_unlock(&data->lock); + if (err) + dev_err(&data->client->dev, "_buffer_preenable fail\n"); + + return err; +} + +static int rpr0521_buffer_postdisable(struct iio_dev *indio_dev) +{ + int err; + struct rpr0521_data *data = iio_priv(indio_dev); + + mutex_lock(&data->lock); + err = rpr0521_set_power_state(data, false, + (RPR0521_MODE_PXS_MASK | RPR0521_MODE_ALS_MASK)); + mutex_unlock(&data->lock); + if (err) + dev_err(&data->client->dev, "_buffer_postdisable fail\n"); + + return err; +} + +static const struct iio_buffer_setup_ops rpr0521_buffer_setup_ops = { + .preenable = rpr0521_buffer_preenable, + .postenable = iio_triggered_buffer_postenable, + .predisable = iio_triggered_buffer_predisable, + .postdisable = rpr0521_buffer_postdisable, +}; + static int rpr0521_get_gain(struct rpr0521_data *data, int chan, int *val, int *val2) { @@ -473,6 +722,7 @@ static int rpr0521_read_raw(struct iio_dev *indio_dev, { struct rpr0521_data *data = iio_priv(indio_dev); int ret; + int busy; u8 device_mask; __le16 raw_data; @@ -481,26 +731,30 @@ static int rpr0521_read_raw(struct iio_dev *indio_dev, if (chan->type != IIO_INTENSITY && chan->type != IIO_PROXIMITY) return -EINVAL; + busy = iio_device_claim_direct_mode(indio_dev); + if (busy) + return -EBUSY; + device_mask = rpr0521_data_reg[chan->address].device_mask; mutex_lock(&data->lock); 
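/*
 * The single-shot read path must not race the triggered buffer, so it
 * first takes iio_device_claim_direct_mode(), which returns -EBUSY while
 * the buffer is enabled, and every exit path then pairs it with
 * iio_device_release_direct_mode().  Minimal sketch of that shape
 * (read_one_sample() is a made-up placeholder, not a driver function):
 *
 *	ret = iio_device_claim_direct_mode(indio_dev);
 *	if (ret)
 *		return ret;
 *	ret = read_one_sample(data);
 *	iio_device_release_direct_mode(indio_dev);
 *	return ret;
 */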
ret = rpr0521_set_power_state(data, true, device_mask); - if (ret < 0) { - mutex_unlock(&data->lock); - return ret; - } + if (ret < 0) + goto rpr0521_read_raw_out; ret = regmap_bulk_read(data->regmap, rpr0521_data_reg[chan->address].address, &raw_data, sizeof(raw_data)); if (ret < 0) { rpr0521_set_power_state(data, false, device_mask); - mutex_unlock(&data->lock); - return ret; + goto rpr0521_read_raw_out; } ret = rpr0521_set_power_state(data, false, device_mask); + +rpr0521_read_raw_out: mutex_unlock(&data->lock); + iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; @@ -617,12 +871,15 @@ static int rpr0521_init(struct rpr0521_data *data) return ret; #endif + data->irq_timestamp = 0; + return 0; } static int rpr0521_poweroff(struct rpr0521_data *data) { int ret; + int tmp; ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL, RPR0521_MODE_ALS_MASK | @@ -635,6 +892,16 @@ static int rpr0521_poweroff(struct rpr0521_data *data) data->als_dev_en = false; data->pxs_dev_en = false; + /* + * Int pin keeps state after power off. Set pin to high impedance + * mode to prevent power drain. + */ + ret = regmap_read(data->regmap, RPR0521_REG_INTERRUPT, &tmp); + if (ret) { + dev_err(&data->client->dev, "Failed to reset int pin.\n"); + return ret; + } + return 0; } @@ -707,6 +974,61 @@ static int rpr0521_probe(struct i2c_client *client, pm_runtime_set_autosuspend_delay(&client->dev, RPR0521_SLEEP_DELAY_MS); pm_runtime_use_autosuspend(&client->dev); + /* + * If sensor write/read is needed in _probe after _use_autosuspend, + * sensor needs to be _resumed first using rpr0521_set_power_state(). + */ + + /* IRQ to trigger setup */ + if (client->irq) { + /* Trigger0 producer setup */ + data->drdy_trigger0 = devm_iio_trigger_alloc( + indio_dev->dev.parent, + "%s-dev%d", indio_dev->name, indio_dev->id); + if (!data->drdy_trigger0) { + ret = -ENOMEM; + goto err_pm_disable; + } + data->drdy_trigger0->dev.parent = indio_dev->dev.parent; + data->drdy_trigger0->ops = &rpr0521_trigger_ops; + indio_dev->available_scan_masks = rpr0521_available_scan_masks; + iio_trigger_set_drvdata(data->drdy_trigger0, indio_dev); + + /* Ties irq to trigger producer handler. */ + ret = devm_request_threaded_irq(&client->dev, client->irq, + rpr0521_drdy_irq_handler, rpr0521_drdy_irq_thread, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + RPR0521_IRQ_NAME, indio_dev); + if (ret < 0) { + dev_err(&client->dev, "request irq %d for trigger0 failed\n", + client->irq); + goto err_pm_disable; + } + + ret = devm_iio_trigger_register(indio_dev->dev.parent, + data->drdy_trigger0); + if (ret) { + dev_err(&client->dev, "iio trigger register failed\n"); + goto err_pm_disable; + } + + /* + * Now whole pipe from physical interrupt (irq defined by + * devicetree to device) to trigger0 output is set up. 
+ */ + + /* Trigger consumer setup */ + ret = devm_iio_triggered_buffer_setup(indio_dev->dev.parent, + indio_dev, + rpr0521_trigger_consumer_store_time, + rpr0521_trigger_consumer_handler, + &rpr0521_buffer_setup_ops); + if (ret < 0) { + dev_err(&client->dev, "iio triggered buffer setup failed\n"); + goto err_pm_disable; + } + } + ret = iio_device_register(indio_dev); if (ret) goto err_pm_disable; diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c index 3aa71e34ae28..09e6ca5e332e 100644 --- a/drivers/iio/light/tcs3472.c +++ b/drivers/iio/light/tcs3472.c @@ -11,6 +11,8 @@ * 7-bit I2C slave address 0x39 (TCS34721, TCS34723) or 0x29 (TCS34725, * TCS34727) * + * Datasheet: http://ams.com/eng/content/download/319364/1117183/file/TCS3472_Datasheet_EN_v2.pdf + * * TODO: interrupt support, thresholds, wait time */ @@ -169,7 +171,7 @@ static int tcs3472_write_raw(struct iio_dev *indio_dev, for (i = 0; i < 256; i++) { if (val2 == (256 - i) * 2400) { data->atime = i; - return i2c_smbus_write_word_data( + return i2c_smbus_write_byte_data( data->client, TCS3472_ATIME, data->atime); } diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 825369fb1c57..4ff883942f7b 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -784,6 +784,7 @@ static const struct iio_info ak8975_info = { .driver_module = THIS_MODULE, }; +#ifdef CONFIG_ACPI static const struct acpi_device_id ak_acpi_match[] = { {"AK8975", AK8975}, {"AK8963", AK8963}, @@ -793,6 +794,7 @@ static const struct acpi_device_id ak_acpi_match[] = { { }, }; MODULE_DEVICE_TABLE(acpi, ak_acpi_match); +#endif static const char *ak8975_match_acpi_device(struct device *dev, enum asahi_compass_chipset *chipset) diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 8e1b0861fbe4..3573636bad8e 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -315,7 +315,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { }, }, }, - .multi_read_bit = false, + .multi_read_bit = true, .bootime = 2, }, { diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c index 8aa37af306ed..6a6c8121ac2c 100644 --- a/drivers/iio/magnetometer/st_magn_i2c.c +++ b/drivers/iio/magnetometer/st_magn_i2c.c @@ -59,7 +59,8 @@ static int st_magn_i2c_probe(struct i2c_client *client, return -ENOMEM; mdata = iio_priv(indio_dev); - st_sensors_of_i2c_probe(client, st_magn_of_match); + st_sensors_of_name_probe(&client->dev, st_magn_of_match, + client->name, sizeof(client->name)); st_sensors_i2c_configure(indio_dev, client, mdata); diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c index f3cb4dc05391..1ea64dd318aa 100644 --- a/drivers/iio/magnetometer/st_magn_spi.c +++ b/drivers/iio/magnetometer/st_magn_spi.c @@ -18,6 +18,28 @@ #include <linux/iio/common/st_sensors_spi.h> #include "st_magn.h" +#ifdef CONFIG_OF +/* + * For new single-chip sensors use <device_name> as compatible string. 
+ * For old single-chip devices keep <device_name>-magn to maintain + * compatibility + */ +static const struct of_device_id st_magn_of_match[] = { + { + .compatible = "st,lis3mdl-magn", + .data = LIS3MDL_MAGN_DEV_NAME, + }, + { + .compatible = "st,lsm303agr-magn", + .data = LSM303AGR_MAGN_DEV_NAME, + }, + {} +}; +MODULE_DEVICE_TABLE(of, st_magn_of_match); +#else +#define st_magn_of_match NULL +#endif + static int st_magn_spi_probe(struct spi_device *spi) { struct iio_dev *indio_dev; @@ -30,6 +52,8 @@ static int st_magn_spi_probe(struct spi_device *spi) mdata = iio_priv(indio_dev); + st_sensors_of_name_probe(&spi->dev, st_magn_of_match, + spi->modalias, sizeof(spi->modalias)); st_sensors_spi_configure(indio_dev, spi, mdata); err = st_magn_common_probe(indio_dev); @@ -57,6 +81,7 @@ MODULE_DEVICE_TABLE(spi, st_magn_id_table); static struct spi_driver st_magn_driver = { .driver = { .name = "st-magn-spi", + .of_match_table = of_match_ptr(st_magn_of_match), }, .probe = st_magn_spi_probe, .remove = st_magn_spi_remove, diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c index e9fa86c87db5..98fe0c5df380 100644 --- a/drivers/iio/orientation/hid-sensor-rotation.c +++ b/drivers/iio/orientation/hid-sensor-rotation.c @@ -238,7 +238,7 @@ static int dev_rot_parse_report(struct platform_device *pdev, static int hid_dev_rot_probe(struct platform_device *pdev) { int ret; - static char *name; + char *name; struct iio_dev *indio_dev; struct dev_rot_state *rot_state; struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data; diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c index 953ffbc0ef96..c413f8a84a63 100644 --- a/drivers/iio/pressure/ms5637.c +++ b/drivers/iio/pressure/ms5637.c @@ -181,11 +181,21 @@ static const struct i2c_device_id ms5637_id[] = { }; MODULE_DEVICE_TABLE(i2c, ms5637_id); +static const struct of_device_id ms5637_of_match[] = { + { .compatible = "meas,ms5637", }, + { .compatible = "meas,ms5805", }, + { .compatible = "meas,ms5837", }, + { .compatible = "meas,ms8607-temppressure", }, + { }, +}; +MODULE_DEVICE_TABLE(of, ms5637_of_match); + static struct i2c_driver ms5637_driver = { .probe = ms5637_probe, .id_table = ms5637_id, .driver = { - .name = "ms5637" + .name = "ms5637", + .of_match_table = of_match_ptr(ms5637_of_match), }, }; diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c index 17417a4d5a5f..7f15e927fa2b 100644 --- a/drivers/iio/pressure/st_pressure_i2c.c +++ b/drivers/iio/pressure/st_pressure_i2c.c @@ -77,7 +77,8 @@ static int st_press_i2c_probe(struct i2c_client *client, press_data = iio_priv(indio_dev); if (client->dev.of_node) { - st_sensors_of_i2c_probe(client, st_press_of_match); + st_sensors_of_name_probe(&client->dev, st_press_of_match, + client->name, sizeof(client->name)); } else if (ACPI_HANDLE(&client->dev)) { ret = st_sensors_match_acpi_device(&client->dev); if ((ret < 0) || (ret >= ST_PRESS_MAX)) diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c index 550508025af1..f5ebd36bb4bf 100644 --- a/drivers/iio/pressure/st_pressure_spi.c +++ b/drivers/iio/pressure/st_pressure_spi.c @@ -18,6 +18,36 @@ #include <linux/iio/common/st_sensors_spi.h> #include "st_pressure.h" +#ifdef CONFIG_OF +/* + * For new single-chip sensors use <device_name> as compatible string. 
+ * For old single-chip devices keep <device_name>-press to maintain + * compatibility + */ +static const struct of_device_id st_press_of_match[] = { + { + .compatible = "st,lps001wp-press", + .data = LPS001WP_PRESS_DEV_NAME, + }, + { + .compatible = "st,lps25h-press", + .data = LPS25H_PRESS_DEV_NAME, + }, + { + .compatible = "st,lps331ap-press", + .data = LPS331AP_PRESS_DEV_NAME, + }, + { + .compatible = "st,lps22hb-press", + .data = LPS22HB_PRESS_DEV_NAME, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, st_press_of_match); +#else +#define st_press_of_match NULL +#endif + static int st_press_spi_probe(struct spi_device *spi) { struct iio_dev *indio_dev; @@ -30,6 +60,8 @@ static int st_press_spi_probe(struct spi_device *spi) press_data = iio_priv(indio_dev); + st_sensors_of_name_probe(&spi->dev, st_press_of_match, + spi->modalias, sizeof(spi->modalias)); st_sensors_spi_configure(indio_dev, spi, press_data); err = st_press_common_probe(indio_dev); @@ -58,6 +90,7 @@ MODULE_DEVICE_TABLE(spi, st_press_id_table); static struct spi_driver st_press_driver = { .driver = { .name = "st-press-spi", + .of_match_table = of_match_ptr(st_press_of_match), }, .probe = st_press_spi_probe, .remove = st_press_spi_remove, diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index c92a95f9f52c..ebfb1de7377f 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -141,14 +141,14 @@ struct zpa2326_private { struct regulator *vdd; }; -#define zpa2326_err(_idev, _format, _arg...) \ - dev_err(_idev->dev.parent, _format, ##_arg) +#define zpa2326_err(idev, fmt, ...) \ + dev_err(idev->dev.parent, fmt "\n", ##__VA_ARGS__) -#define zpa2326_warn(_idev, _format, _arg...) \ - dev_warn(_idev->dev.parent, _format, ##_arg) +#define zpa2326_warn(idev, fmt, ...) \ + dev_warn(idev->dev.parent, fmt "\n", ##__VA_ARGS__) -#define zpa2326_dbg(_idev, _format, _arg...) \ - dev_dbg(_idev->dev.parent, _format, ##_arg) +#define zpa2326_dbg(idev, fmt, ...) 
\ + dev_dbg(idev->dev.parent, fmt "\n", ##__VA_ARGS__) bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg) { diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c index 3e60c6189d98..d8aa211d76e4 100644 --- a/drivers/iio/temperature/tsys01.c +++ b/drivers/iio/temperature/tsys01.c @@ -214,11 +214,18 @@ static const struct i2c_device_id tsys01_id[] = { }; MODULE_DEVICE_TABLE(i2c, tsys01_id); +static const struct of_device_id tsys01_of_match[] = { + { .compatible = "meas,tsys01", }, + { }, +}; +MODULE_DEVICE_TABLE(of, tsys01_of_match); + static struct i2c_driver tsys01_driver = { .probe = tsys01_i2c_probe, .id_table = tsys01_id, .driver = { .name = "tsys01", + .of_match_table = of_match_ptr(tsys01_of_match), }, }; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index ef28a1cb64ae..e97d72e3bc40 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -112,4 +112,6 @@ source "drivers/staging/typec/Kconfig" source "drivers/staging/vboxvideo/Kconfig" +source "drivers/staging/pi433/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 2918580bdb9e..993ed0c1556c 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -45,3 +45,4 @@ obj-$(CONFIG_GREYBUS) += greybus/ obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ +obj-$(CONFIG_PI433) += pi433/ diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index fa9ed81ab972..621e5f7ceacb 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -135,7 +135,7 @@ struct ion_heap_ops { /** * heap flags - flags between the heaps and core ion code */ -#define ION_HEAP_FLAG_DEFER_FREE (1 << 0) +#define ION_HEAP_FLAG_DEFER_FREE BIT(0) /** * private flags - flags internal to ion @@ -146,7 +146,7 @@ struct ion_heap_ops { * any buffer storage that came from the system allocator will be * returned to the system allocator. */ -#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0) +#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0) /** * struct ion_heap - represents a heap in the system @@ -226,8 +226,8 @@ int ion_heap_buffer_zero(struct ion_buffer *buffer); int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); int ion_alloc(size_t len, - unsigned int heap_id_mask, - unsigned int flags); + unsigned int heap_id_mask, + unsigned int flags); /** * ion_heap_init_shrinker @@ -291,7 +291,7 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); * flag. 
*/ size_t ion_heap_freelist_shrink(struct ion_heap *heap, - size_t size); + size_t size); /** * ion_heap_freelist_size - returns the size of the freelist in bytes @@ -352,7 +352,7 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page); * returns the number of items freed in pages */ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, - int nr_to_scan); + int nr_to_scan); long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index a0949bc0dcf4..dd5545d9990a 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -31,7 +31,6 @@ struct ion_cma_heap { #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) - /* ION CMA heap operations functions */ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, unsigned long len, @@ -46,7 +45,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, if (!pages) return -ENOMEM; - table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) goto err; @@ -106,7 +105,7 @@ static struct ion_heap *__ion_cma_heap_create(struct cma *cma) return &cma_heap->heap; } -int __ion_add_cma_heaps(struct cma *cma, void *data) +static int __ion_add_cma_heaps(struct cma *cma, void *data) { struct ion_heap *heap; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 5964bf21fd80..4dc5d7a589c2 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -98,7 +98,6 @@ static void free_buffer_page(struct ion_system_heap *heap, ion_page_pool_free(pool, page); } - static struct page *alloc_largest_available(struct ion_system_heap *heap, struct ion_buffer *buffer, unsigned long size, @@ -256,7 +255,6 @@ static struct ion_heap_ops system_heap_ops = { static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, void *unused) { - struct ion_system_heap *sys_heap = container_of(heap, struct ion_system_heap, heap); diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig index 36a87c686a2a..0b3092ba2fcb 100644 --- a/drivers/staging/ccree/Kconfig +++ b/drivers/staging/ccree/Kconfig @@ -23,12 +23,3 @@ config CRYPTO_DEV_CCREE Choose this if you wish to use hardware acceleration of cryptographic operations on the system REE. If unsure say Y. - -config CCREE_FIPS_SUPPORT - bool "Turn on CryptoCell 7XX REE FIPS mode support" - depends on CRYPTO_DEV_CCREE - default n - help - Say 'Y' to enable support for FIPS compliant mode by the - CCREE driver. - If unsure say N. 
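The dropped CCREE_FIPS_SUPPORT option gives way to the kernel-wide CONFIG_CRYPTO_FIPS symbol, which the Makefile hunk below now keys ssi_fips.o on. A minimal sketch of how code typically gates FIPS-only behaviour on that symbol, using the generic <linux/fips.h> idiom with a made-up helper name (not code from the ccree driver):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fips.h>	/* fips_enabled is 0 unless CONFIG_CRYPTO_FIPS=y */

/* Hypothetical helper: refuse algorithms that FIPS mode does not allow. */
static int example_fips_gate(bool algo_is_fips_approved)
{
	if (fips_enabled && !algo_is_fips_approved)
		return -EPERM;
	return 0;
}

Building the FIPS objects under CONFIG_CRYPTO_FIPS keeps the driver's FIPS behaviour in step with the rest of the crypto stack rather than behind a driver-private switch.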
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile index 318c2b39acf6..ae702f3b5369 100644 --- a/drivers/staging/ccree/Makefile +++ b/drivers/staging/ccree/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o -ccree-$(CCREE_FIPS_SUPPORT) += ssi_fips.o ssi_fips_ll.o ssi_fips_ext.o ssi_fips_local.o +ccree-$(CONFIG_CRYPTO_FIPS) += ssi_fips.o diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h index e6b8cea3f88d..2ae0f655e7a0 100644 --- a/drivers/staging/ccree/cc_hw_queue_defs.h +++ b/drivers/staging/ccree/cc_hw_queue_defs.h @@ -27,7 +27,8 @@ ******************************************************************************/ #define HW_DESC_SIZE_WORDS 6 -#define HW_QUEUE_SLOTS_MAX 15 /* Max. available slots in HW queue */ +/* Define max. available slots in HW queue */ +#define HW_QUEUE_SLOTS_MAX 15 #define CC_REG_NAME(word, name) DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c index 1fc0b05ea0d5..f5ca0e35c5d3 100644 --- a/drivers/staging/ccree/ssi_aead.c +++ b/drivers/staging/ccree/ssi_aead.c @@ -36,7 +36,6 @@ #include "ssi_hash.h" #include "ssi_sysfs.h" #include "ssi_sram_mgr.h" -#include "ssi_fips_local.h" #define template_aead template_u.aead @@ -57,22 +56,26 @@ struct ssi_aead_handle { struct list_head aead_list; }; +struct cc_hmac_s { + u8 *padded_authkey; + u8 *ipad_opad; /* IPAD, OPAD*/ + dma_addr_t padded_authkey_dma_addr; + dma_addr_t ipad_opad_dma_addr; +}; + +struct cc_xcbc_s { + u8 *xcbc_keys; /* K1,K2,K3 */ + dma_addr_t xcbc_keys_dma_addr; +}; + struct ssi_aead_ctx { struct ssi_drvdata *drvdata; u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */ u8 *enckey; dma_addr_t enckey_dma_addr; union { - struct { - u8 *padded_authkey; - u8 *ipad_opad; /* IPAD, OPAD*/ - dma_addr_t padded_authkey_dma_addr; - dma_addr_t ipad_opad_dma_addr; - } hmac; - struct { - u8 *xcbc_keys; /* K1,K2,K3 */ - dma_addr_t xcbc_keys_dma_addr; - } xcbc; + struct cc_hmac_s hmac; + struct cc_xcbc_s xcbc; } auth_state; unsigned int enc_keylen; unsigned int auth_keylen; @@ -93,46 +96,50 @@ static void ssi_aead_exit(struct crypto_aead *tfm) struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm); SSI_LOG_DEBUG("Clearing context @%p for %s\n", - crypto_aead_ctx(tfm), crypto_tfm_alg_name(&(tfm->base))); + crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base)); dev = &ctx->drvdata->plat_dev->dev; /* Unmap enckey buffer */ if (ctx->enckey) { dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr); - SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n", - (unsigned long long)ctx->enckey_dma_addr); + SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=%pad\n", + ctx->enckey_dma_addr); ctx->enckey_dma_addr = 0; ctx->enckey = NULL; } if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */ - if (ctx->auth_state.xcbc.xcbc_keys) { + struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc; + + if (xcbc->xcbc_keys) { dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3, - ctx->auth_state.xcbc.xcbc_keys, - ctx->auth_state.xcbc.xcbc_keys_dma_addr); + xcbc->xcbc_keys, + xcbc->xcbc_keys_dma_addr); } - SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n", - (unsigned long long)ctx->auth_state.xcbc.xcbc_keys_dma_addr); - ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0; - 
ctx->auth_state.xcbc.xcbc_keys = NULL; + SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n", + xcbc->xcbc_keys_dma_addr); + xcbc->xcbc_keys_dma_addr = 0; + xcbc->xcbc_keys = NULL; } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */ - if (ctx->auth_state.hmac.ipad_opad) { + struct cc_hmac_s *hmac = &ctx->auth_state.hmac; + + if (hmac->ipad_opad) { dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE, - ctx->auth_state.hmac.ipad_opad, - ctx->auth_state.hmac.ipad_opad_dma_addr); - SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=0x%llX\n", - (unsigned long long)ctx->auth_state.hmac.ipad_opad_dma_addr); - ctx->auth_state.hmac.ipad_opad_dma_addr = 0; - ctx->auth_state.hmac.ipad_opad = NULL; + hmac->ipad_opad, + hmac->ipad_opad_dma_addr); + SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n", + hmac->ipad_opad_dma_addr); + hmac->ipad_opad_dma_addr = 0; + hmac->ipad_opad = NULL; } - if (ctx->auth_state.hmac.padded_authkey) { + if (hmac->padded_authkey) { dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE, - ctx->auth_state.hmac.padded_authkey, - ctx->auth_state.hmac.padded_authkey_dma_addr); - SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=0x%llX\n", - (unsigned long long)ctx->auth_state.hmac.padded_authkey_dma_addr); - ctx->auth_state.hmac.padded_authkey_dma_addr = 0; - ctx->auth_state.hmac.padded_authkey = NULL; + hmac->padded_authkey, + hmac->padded_authkey_dma_addr); + SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n", + hmac->padded_authkey_dma_addr); + hmac->padded_authkey_dma_addr = 0; + hmac->padded_authkey = NULL; } } } @@ -144,9 +151,7 @@ static int ssi_aead_init(struct crypto_aead *tfm) struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm); struct ssi_crypto_alg *ssi_alg = container_of(alg, struct ssi_crypto_alg, aead_alg); - SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&(tfm->base))); - - CHECK_AND_RETURN_UPON_FIPS_ERROR(); + SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base)); /* Initialize modes in instance */ ctx->cipher_mode = ssi_alg->cipher_mode; @@ -158,7 +163,7 @@ static int ssi_aead_init(struct crypto_aead *tfm) /* Allocate key buffer, cache line aligned */ ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE, - &ctx->enckey_dma_addr, GFP_KERNEL); + &ctx->enckey_dma_addr, GFP_KERNEL); if (!ctx->enckey) { SSI_LOG_ERR("Failed allocating key buffer\n"); goto init_failed; @@ -168,31 +173,42 @@ static int ssi_aead_init(struct crypto_aead *tfm) /* Set default authlen value */ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */ + struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc; + const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3; + /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */ /* (and temporary for user key - up to 256b) */ - ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev, - CC_AES_128_BIT_KEY_SIZE * 3, - &ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL); - if (!ctx->auth_state.xcbc.xcbc_keys) { + xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size, + &xcbc->xcbc_keys_dma_addr, + GFP_KERNEL); + if (!xcbc->xcbc_keys) { SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n"); goto init_failed; } } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */ + struct cc_hmac_s *hmac = &ctx->auth_state.hmac; + const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE; + dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr; + /* Allocate dma-coherent buffer for IPAD + OPAD */ 
- ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev, - 2 * MAX_HMAC_DIGEST_SIZE, - &ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL); - if (!ctx->auth_state.hmac.ipad_opad) { + hmac->ipad_opad = dma_alloc_coherent(dev, digest_size, + &hmac->ipad_opad_dma_addr, + GFP_KERNEL); + + if (!hmac->ipad_opad) { SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n"); goto init_failed; } + SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n", - ctx->auth_state.hmac.ipad_opad); + hmac->ipad_opad); + + hmac->padded_authkey = dma_alloc_coherent(dev, + MAX_HMAC_BLOCK_SIZE, + pkey_dma, + GFP_KERNEL); - ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev, - MAX_HMAC_BLOCK_SIZE, - &ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL); - if (!ctx->auth_state.hmac.padded_authkey) { + if (!hmac->padded_authkey) { SSI_LOG_ERR("failed to allocate padded_authkey\n"); goto init_failed; } @@ -223,7 +239,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr, - ctx->authsize) != 0) { + ctx->authsize) != 0) { SSI_LOG_DEBUG("Payload authentication failure, " "(auth-size=%d, cipher=%d).\n", ctx->authsize, ctx->cipher_mode); @@ -236,8 +252,8 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c } else { /*ENCRYPT*/ if (unlikely(areq_ctx->is_icv_fragmented)) ssi_buffer_mgr_copy_scatterlist_portion( - areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen + areq_ctx->dstOffset, - areq->cryptlen + areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF); + areq_ctx->mac_buf, areq_ctx->dst_sgl, areq->cryptlen + areq_ctx->dst_offset, + areq->cryptlen + areq_ctx->dst_offset + ctx->authsize, SSI_SG_FROM_BUF); /* If an IV was generated, copy it back to the user provided buffer. */ if (areq_ctx->backup_giv) { @@ -292,12 +308,13 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx) static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx) { - unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; + unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; unsigned int digest_ofs = 0; unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? 
CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE; + struct cc_hmac_s *hmac = &ctx->auth_state.hmac; int idx = 0; int i; @@ -325,7 +342,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx) /* Prepare ipad key */ hw_desc_init(&desc[idx]); - set_xor_val(&desc[idx], hmacPadConst[i]); + set_xor_val(&desc[idx], hmac_pad_const[i]); set_cipher_mode(&desc[idx], hash_mode); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); @@ -334,7 +351,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx) /* Perform HASH update */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, - ctx->auth_state.hmac.padded_authkey_dma_addr, + hmac->padded_authkey_dma_addr, SHA256_BLOCK_SIZE, NS_BIT); set_cipher_mode(&desc[idx], hash_mode); set_xor_active(&desc[idx]); @@ -345,8 +362,8 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx) hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_dout_dlli(&desc[idx], - (ctx->auth_state.hmac.ipad_opad_dma_addr + - digest_ofs), digest_size, NS_BIT, 0); + (hmac->ipad_opad_dma_addr + digest_ofs), + digest_size, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); @@ -361,7 +378,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx) static int validate_keys_sizes(struct ssi_aead_ctx *ctx) { SSI_LOG_DEBUG("enc_keylen=%u authkeylen=%u\n", - ctx->enc_keylen, ctx->auth_keylen); + ctx->enc_keylen, ctx->auth_keylen); switch (ctx->auth_mode) { case DRV_HASH_SHA1: @@ -385,7 +402,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx) if (unlikely(ctx->flow_mode == S_DIN_to_DES)) { if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) { SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n", - ctx->enc_keylen); + ctx->enc_keylen); return -EINVAL; } } else { /* Default assumed to be AES ciphers */ @@ -393,7 +410,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx) (ctx->enc_keylen != AES_KEYSIZE_192) && (ctx->enc_keylen != AES_KEYSIZE_256)) { SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n", - ctx->enc_keylen); + ctx->enc_keylen); return -EINVAL; } } @@ -536,9 +553,9 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) int seq_len = 0, rc = -EINVAL; SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n", - ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); + ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), + key, keylen); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); /* STAT_PHASE_0: Init and sanity checks */ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ @@ -654,7 +671,6 @@ static int ssi_aead_setauthsize( { struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); /* Unsupported auth. sizes */ if ((authsize == 0) || (authsize > crypto_aead_maxauthsize(authenc))) { @@ -669,7 +685,7 @@ static int ssi_aead_setauthsize( #if SSI_CC_HAS_AES_CCM static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc, - unsigned int authsize) + unsigned int authsize) { switch (authsize) { case 8: @@ -684,7 +700,7 @@ static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc, } static int ssi_ccm_setauthsize(struct crypto_aead *authenc, - unsigned int authsize) + unsigned int authsize) { switch (authsize) { case 4: @@ -762,11 +778,11 @@ ssi_aead_process_authenc_data_desc( { struct scatterlist *cipher = (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
- areq_ctx->dstSgl : areq_ctx->srcSgl; + areq_ctx->dst_sgl : areq_ctx->src_sgl; unsigned int offset = (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? - areq_ctx->dstOffset : areq_ctx->srcOffset; + areq_ctx->dst_offset : areq_ctx->src_offset; SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, @@ -828,11 +844,11 @@ ssi_aead_process_cipher_data_desc( SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, - (sg_dma_address(areq_ctx->srcSgl) + - areq_ctx->srcOffset), areq_ctx->cryptlen, NS_BIT); + (sg_dma_address(areq_ctx->src_sgl) + + areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT); set_dout_dlli(&desc[idx], - (sg_dma_address(areq_ctx->dstSgl) + - areq_ctx->dstOffset), + (sg_dma_address(areq_ctx->dst_sgl) + + areq_ctx->dst_offset), areq_ctx->cryptlen, NS_BIT, 0); set_flow_mode(&desc[idx], flow_mode); break; @@ -1168,8 +1184,8 @@ static inline void ssi_aead_load_mlli_to_sram( (req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) || !req_ctx->is_single_pass)) { SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n", - (unsigned int)ctx->drvdata->mlli_sram_addr, - req_ctx->mlli_params.mlli_len); + (unsigned int)ctx->drvdata->mlli_sram_addr, + req_ctx->mlli_params.mlli_len); /* Copy MLLI table host-to-sram */ hw_desc_init(&desc[*seq_size]); set_din_type(&desc[*seq_size], DMA_DLLI, @@ -1313,7 +1329,8 @@ ssi_aead_xcbc_authenc( } static int validate_data_size(struct ssi_aead_ctx *ctx, - enum drv_crypto_direction direct, struct aead_request *req) + enum drv_crypto_direction direct, + struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx(req); unsigned int assoclen = req->assoclen; @@ -1321,7 +1338,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx, (req->cryptlen - ctx->authsize) : req->cryptlen; if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) && - (req->cryptlen < ctx->authsize))) + (req->cryptlen < ctx->authsize))) goto data_size_err; areq_ctx->is_single_pass = true; /*defaulted to fast flow*/ @@ -1329,7 +1346,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx, switch (ctx->flow_mode) { case S_DIN_to_AES: if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) && - !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))) + !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))) goto data_size_err; if (ctx->cipher_mode == DRV_CIPHER_CCM) break; @@ -1365,27 +1382,27 @@ data_size_err: } #if SSI_CC_HAS_AES_CCM -static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize) +static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size) { unsigned int len = 0; - if (headerSize == 0) + if (header_size == 0) return 0; - if (headerSize < ((1UL << 16) - (1UL << 8))) { + if (header_size < ((1UL << 16) - (1UL << 8))) { len = 2; - pA0Buff[0] = (headerSize >> 8) & 0xFF; - pA0Buff[1] = headerSize & 0xFF; + pa0_buff[0] = (header_size >> 8) & 0xFF; + pa0_buff[1] = header_size & 0xFF; } else { len = 6; - pA0Buff[0] = 0xFF; - pA0Buff[1] = 0xFE; - pA0Buff[2] = (headerSize >> 24) & 0xFF; - pA0Buff[3] = (headerSize >> 16) & 0xFF; - pA0Buff[4] = (headerSize >> 8) & 0xFF; - pA0Buff[5] = headerSize & 0xFF; + pa0_buff[0] = 0xFF; + pa0_buff[1] = 0xFE; + pa0_buff[2] = (header_size >> 24) & 0xFF; + pa0_buff[3] = (header_size >> 16) & 0xFF; + pa0_buff[4] = (header_size >> 8) & 0xFF; + pa0_buff[5] = header_size & 0xFF; } return len; @@ -1557,7 +1574,7 @@ static int config_ccm_adata(struct aead_request *req) /* taken from crypto/ccm.c */ /* 2 <= L <= 8, so 1 <= L' <= 7. 
*/ - if (2 > l || l > 8) { + if (l < 2 || l > 8) { SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]); return -EINVAL; } @@ -1848,8 +1865,9 @@ static inline void ssi_aead_dump_gcm( SSI_LOG_DEBUG("%s\n", title); } - SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", \ - ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen); + SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", + ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, + req->assoclen, req_ctx->cryptlen); if (ctx->enckey) dump_byte_array("mac key", ctx->enckey, 16); @@ -1864,7 +1882,7 @@ static inline void ssi_aead_dump_gcm( dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE); - dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE); + dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE); if (req->src && req->cryptlen) dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen); @@ -1886,7 +1904,7 @@ static int config_gcm_context(struct aead_request *req) (req->cryptlen - ctx->authsize); __be32 counter = cpu_to_be32(2); - SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", cryptlen, req->assoclen, ctx->authsize); + SSI_LOG_DEBUG("%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req->assoclen, ctx->authsize); memset(req_ctx->hkey, 0, AES_BLOCK_SIZE); @@ -1903,16 +1921,16 @@ static int config_gcm_context(struct aead_request *req) __be64 temp64; temp64 = cpu_to_be64(req->assoclen * 8); - memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64)); + memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); temp64 = cpu_to_be64(cryptlen * 8); - memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8); + memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); } else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted. __be64 temp64; temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8); - memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64)); + memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); temp64 = 0; - memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8); + memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); } return 0; @@ -1944,16 +1962,16 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction struct ssi_crypto_req ssi_req = {}; SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n", - ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv, - sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); + ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
"Encrypt" : "Decrypt"), + ctx, req, req->iv, sg_virt(req->src), req->src->offset, + sg_virt(req->dst), req->dst->offset, req->cryptlen); /* STAT_PHASE_0: Init and sanity checks */ /* Check data length according to mode */ if (unlikely(validate_data_size(ctx, direct, req) != 0)) { SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n", - req->cryptlen, req->assoclen); + req->cryptlen, req->assoclen); crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN); return -EINVAL; } @@ -1976,7 +1994,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE); if (!areq_ctx->backup_giv) /*User none-generated IV*/ memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, - req->iv, CTR_RFC3686_IV_SIZE); + req->iv, CTR_RFC3686_IV_SIZE); /* Initialize counter portion of counter block */ *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); @@ -2198,7 +2216,7 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm); int rc = 0; - SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p\n", keylen, key); + SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key); if (keylen < 4) return -EINVAL; @@ -2216,7 +2234,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm); int rc = 0; - SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p\n", keylen, key); + SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key); if (keylen < 4) return -EINVAL; @@ -2230,7 +2248,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign } static int ssi_gcm_setauthsize(struct crypto_aead *authenc, - unsigned int authsize) + unsigned int authsize) { switch (authsize) { case 4: @@ -2249,9 +2267,9 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc, } static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc, - unsigned int authsize) + unsigned int authsize) { - SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize() authsize %d\n", authsize); + SSI_LOG_DEBUG("authsize %d\n", authsize); switch (authsize) { case 8: @@ -2268,7 +2286,7 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc, static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { - SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize() authsize %d\n", authsize); + SSI_LOG_DEBUG("authsize %d\n", authsize); if (authsize != 16) return -EINVAL; @@ -2720,14 +2738,14 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata) if (IS_ERR(t_alg)) { rc = PTR_ERR(t_alg); SSI_LOG_ERR("%s alg allocation failed\n", - aead_algs[alg].driver_name); + aead_algs[alg].driver_name); goto fail1; } t_alg->drvdata = drvdata; rc = crypto_register_aead(&t_alg->aead_alg); if (unlikely(rc != 0)) { SSI_LOG_ERR("%s alg registration failed\n", - t_alg->aead_alg.base.cra_driver_name); + t_alg->aead_alg.base.cra_driver_name); goto fail2; } else { list_add_tail(&t_alg->entry, &aead_handle->aead_list); diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/ssi_aead.h index 39cc633a3ffa..e85bcd917e7b 100644 --- a/drivers/staging/ccree/ssi_aead.h +++ b/drivers/staging/ccree/ssi_aead.h @@ -69,8 +69,8 @@ struct aead_req_ctx { u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned; u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned; struct { - u8 lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned; - u8 lenC[GCM_BLOCK_LEN_SIZE]; + u8 
len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned; + u8 len_c[GCM_BLOCK_LEN_SIZE]; } gcm_len_block; u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned; @@ -94,10 +94,10 @@ struct aead_req_ctx { struct ssi_mlli assoc; struct ssi_mlli src; struct ssi_mlli dst; - struct scatterlist *srcSgl; - struct scatterlist *dstSgl; - unsigned int srcOffset; - unsigned int dstOffset; + struct scatterlist *src_sgl; + struct scatterlist *dst_sgl; + unsigned int src_offset; + unsigned int dst_offset; enum ssi_req_dma_buf_type assoc_buff_type; enum ssi_req_dma_buf_type data_buff_type; struct mlli_params mlli_params; diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c index b35871eeabd1..63936091d524 100644 --- a/drivers/staging/ccree/ssi_buffer_mgr.c +++ b/drivers/staging/ccree/ssi_buffer_mgr.c @@ -150,7 +150,7 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli( u32 **mlli_entry_pp) { u32 *mlli_entry_p = *mlli_entry_pp; - u32 new_nents;; + u32 new_nents; /* Verify there is no memory overflow*/ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1); @@ -162,8 +162,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli( cc_lli_set_addr(mlli_entry_p, buff_dma); cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE); SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents, - mlli_entry_p[LLI_WORD0_OFFSET], - mlli_entry_p[LLI_WORD1_OFFSET]); + mlli_entry_p[LLI_WORD0_OFFSET], + mlli_entry_p[LLI_WORD1_OFFSET]); buff_dma += CC_MAX_MLLI_ENTRY_SIZE; buff_size -= CC_MAX_MLLI_ENTRY_SIZE; mlli_entry_p = mlli_entry_p + 2; @@ -173,8 +173,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli( cc_lli_set_addr(mlli_entry_p, buff_dma); cc_lli_set_size(mlli_entry_p, buff_size); SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents, - mlli_entry_p[LLI_WORD0_OFFSET], - mlli_entry_p[LLI_WORD1_OFFSET]); + mlli_entry_p[LLI_WORD0_OFFSET], + mlli_entry_p[LLI_WORD1_OFFSET]); mlli_entry_p = mlli_entry_p + 2; *mlli_entry_pp = mlli_entry_p; (*curr_nents)++; @@ -182,8 +182,8 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli( } static inline int ssi_buffer_mgr_render_scatterlist_to_mlli( - struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents, - u32 **mlli_entry_pp) + struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset, + u32 *curr_nents, u32 **mlli_entry_pp) { struct scatterlist *curr_sgl = sgl; u32 *mlli_entry_p = *mlli_entry_pp; @@ -192,16 +192,17 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli( for ( ; (curr_sgl) && (sgl_data_len != 0); curr_sgl = sg_next(curr_sgl)) { u32 entry_data_len = - (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ? - sg_dma_len(curr_sgl) - sglOffset : sgl_data_len; + (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ? 
+ sg_dma_len(curr_sgl) - sgl_offset : + sgl_data_len; sgl_data_len -= entry_data_len; rc = ssi_buffer_mgr_render_buff_to_mlli( - sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents, - &mlli_entry_p); + sg_dma_address(curr_sgl) + sgl_offset, entry_data_len, + curr_nents, &mlli_entry_p); if (rc != 0) return rc; - sglOffset = 0; + sgl_offset = 0; } *mlli_entry_pp = mlli_entry_p; return 0; @@ -221,7 +222,7 @@ static int ssi_buffer_mgr_generate_mlli( /* Allocate memory from the pointed pool */ mlli_params->mlli_virt_addr = dma_pool_alloc( mlli_params->curr_pool, GFP_KERNEL, - &(mlli_params->mlli_dma_addr)); + &mlli_params->mlli_dma_addr); if (unlikely(!mlli_params->mlli_virt_addr)) { SSI_LOG_ERR("dma_pool_alloc() failed\n"); rc = -ENOMEM; @@ -249,7 +250,7 @@ static int ssi_buffer_mgr_generate_mlli( /*Calculate the current MLLI table length for the *length field in the descriptor */ - *(sg_data->mlli_nents[i]) += + *sg_data->mlli_nents[i] += (total_nents - prev_total_nents); prev_total_nents = total_nents; } @@ -259,9 +260,9 @@ static int ssi_buffer_mgr_generate_mlli( mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE); SSI_LOG_DEBUG("MLLI params: " - "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n", + "virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n", mlli_params->mlli_virt_addr, - (unsigned long long)mlli_params->mlli_dma_addr, + mlli_params->mlli_dma_addr, mlli_params->mlli_len); build_mlli_exit: @@ -275,9 +276,9 @@ static inline void ssi_buffer_mgr_add_buffer_entry( { unsigned int index = sgl_data->num_of_buffers; - SSI_LOG_DEBUG("index=%u single_buff=0x%llX " + SSI_LOG_DEBUG("index=%u single_buff=%pad " "buffer_len=0x%08X is_last=%d\n", - index, (unsigned long long)buffer_dma, buffer_len, is_last_entry); + index, buffer_dma, buffer_len, is_last_entry); sgl_data->nents[index] = 1; sgl_data->entry[index].buffer_dma = buffer_dma; sgl_data->offset[index] = 0; @@ -302,7 +303,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry( unsigned int index = sgl_data->num_of_buffers; SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n", - index, nents, sgl, data_len, is_last_table); + index, nents, sgl, data_len, is_last_table); sgl_data->nents[index] = nents; sgl_data->entry[index].sgl = sgl; sgl_data->offset[index] = data_offset; @@ -317,7 +318,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry( static int ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents, - enum dma_data_direction direction) + enum dma_data_direction direction) { u32 i, j; struct scatterlist *l_sg = sg; @@ -358,10 +359,10 @@ static int ssi_buffer_mgr_map_scatterlist( SSI_LOG_ERR("dma_map_sg() single buffer failed\n"); return -ENOMEM; } - SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX " + SSI_LOG_DEBUG("Mapped sg: dma_address=%pad " "page=%p addr=%pK offset=%u " "length=%u\n", - (unsigned long long)sg_dma_address(sg), + sg_dma_address(sg), sg_page(sg), sg_virt(sg), sg->offset, sg->length); @@ -370,11 +371,11 @@ static int ssi_buffer_mgr_map_scatterlist( *mapped_nents = 1; } else { /*sg_is_last*/ *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes, - &is_chained); + &is_chained); if (*nents > max_sg_nents) { *nents = 0; SSI_LOG_ERR("Too many fragments. 
current %d max %d\n", - *nents, max_sg_nents); + *nents, max_sg_nents); return -ENOMEM; } if (!is_chained) { @@ -392,9 +393,9 @@ static int ssi_buffer_mgr_map_scatterlist( * must have the same nents before and after map */ *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev, - sg, - *nents, - direction); + sg, + *nents, + direction); if (unlikely(*mapped_nents != *nents)) { *nents = *mapped_nents; SSI_LOG_ERR("dma_map_sg() sg buffer failed\n"); @@ -408,10 +409,10 @@ static int ssi_buffer_mgr_map_scatterlist( static inline int ssi_aead_handle_config_buf(struct device *dev, - struct aead_req_ctx *areq_ctx, - u8 *config_data, - struct buffer_array *sg_data, - unsigned int assoclen) + struct aead_req_ctx *areq_ctx, + u8 *config_data, + struct buffer_array *sg_data, + unsigned int assoclen) { SSI_LOG_DEBUG(" handle additional data config set to DLLI\n"); /* create sg for the current buffer */ @@ -422,10 +423,10 @@ ssi_aead_handle_config_buf(struct device *dev, "config buffer failed\n"); return -ENOMEM; } - SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX " + SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad " "page=%p addr=%pK " "offset=%u length=%u\n", - (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg), + sg_dma_address(&areq_ctx->ccm_adata_sg), sg_page(&areq_ctx->ccm_adata_sg), sg_virt(&areq_ctx->ccm_adata_sg), areq_ctx->ccm_adata_sg.offset, @@ -433,19 +434,18 @@ ssi_aead_handle_config_buf(struct device *dev, /* prepare for case of MLLI */ if (assoclen > 0) { ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, - &areq_ctx->ccm_adata_sg, - (AES_BLOCK_SIZE + - areq_ctx->ccm_hdr_size), 0, - false, NULL); + &areq_ctx->ccm_adata_sg, + (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size), + 0, false, NULL); } return 0; } static inline int ssi_ahash_handle_curr_buf(struct device *dev, - struct ahash_req_ctx *areq_ctx, - u8 *curr_buff, - u32 curr_buff_cnt, - struct buffer_array *sg_data) + struct ahash_req_ctx *areq_ctx, + u8 *curr_buff, + u32 curr_buff_cnt, + struct buffer_array *sg_data) { SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt); /* create sg for the current buffer */ @@ -456,10 +456,10 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev, "src buffer failed\n"); return -ENOMEM; } - SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX " + SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad " "page=%p addr=%pK " "offset=%u length=%u\n", - (unsigned long long)sg_dma_address(areq_ctx->buff_sg), + sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg), sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset, @@ -469,7 +469,7 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev, areq_ctx->in_nents = 0; /* prepare for case of MLLI */ ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg, - curr_buff_cnt, 0, false, NULL); + curr_buff_cnt, 0, false, NULL); return 0; } @@ -483,9 +483,9 @@ void ssi_buffer_mgr_unmap_blkcipher_request( struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx; if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) { - SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n", - (unsigned long long)req_ctx->gen_ctx.iv_dma_addr, - ivsize); + SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=%pad iv_size=%u\n", + req_ctx->gen_ctx.iv_dma_addr, + ivsize); dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr, ivsize, req_ctx->is_giv ? 
DMA_BIDIRECTIONAL : @@ -498,16 +498,12 @@ void ssi_buffer_mgr_unmap_blkcipher_request( req_ctx->mlli_params.mlli_dma_addr); } - dma_unmap_sg(dev, src, req_ctx->in_nents, - DMA_BIDIRECTIONAL); - SSI_LOG_DEBUG("Unmapped req->src=%pK\n", - sg_virt(src)); + dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); + SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src)); if (src != dst) { - dma_unmap_sg(dev, dst, req_ctx->out_nents, - DMA_BIDIRECTIONAL); - SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", - sg_virt(dst)); + dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL); + SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst)); } } @@ -542,22 +538,24 @@ int ssi_buffer_mgr_map_blkcipher_request( req_ctx->is_giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, - req_ctx->gen_ctx.iv_dma_addr))) { + req_ctx->gen_ctx.iv_dma_addr))) { SSI_LOG_ERR("Mapping iv %u B at va=%pK " "for DMA failed\n", ivsize, info); return -ENOMEM; } - SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n", - ivsize, info, - (unsigned long long)req_ctx->gen_ctx.iv_dma_addr); + SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n", + ivsize, info, + req_ctx->gen_ctx.iv_dma_addr); } else { req_ctx->gen_ctx.iv_dma_addr = 0; } /* Map the src SGL */ rc = ssi_buffer_mgr_map_scatterlist(dev, src, - nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents, - LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); + nbytes, DMA_BIDIRECTIONAL, + &req_ctx->in_nents, + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, + &mapped_nents); if (unlikely(rc != 0)) { rc = -ENOMEM; goto ablkcipher_exit; @@ -570,8 +568,10 @@ int ssi_buffer_mgr_map_blkcipher_request( if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) { req_ctx->out_nents = 0; ssi_buffer_mgr_add_scatterlist_entry(&sg_data, - req_ctx->in_nents, src, - nbytes, 0, true, &req_ctx->in_mlli_nents); + req_ctx->in_nents, + src, nbytes, 0, + true, + &req_ctx->in_mlli_nents); } } else { /* Map the dst sg */ @@ -588,13 +588,15 @@ int ssi_buffer_mgr_map_blkcipher_request( if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) { ssi_buffer_mgr_add_scatterlist_entry(&sg_data, - req_ctx->in_nents, src, - nbytes, 0, true, - &req_ctx->in_mlli_nents); + req_ctx->in_nents, + src, nbytes, 0, + true, + &req_ctx->in_mlli_nents); ssi_buffer_mgr_add_scatterlist_entry(&sg_data, - req_ctx->out_nents, dst, - nbytes, 0, true, - &req_ctx->out_mlli_nents); + req_ctx->out_nents, + dst, nbytes, 0, + true, + &req_ctx->out_mlli_nents); } } @@ -606,7 +608,7 @@ int ssi_buffer_mgr_map_blkcipher_request( } SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n", - GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type)); + GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type)); return 0; @@ -628,7 +630,7 @@ void ssi_buffer_mgr_unmap_aead_request( if (areq_ctx->mac_buf_dma_addr != 0) { dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, - MAX_MAC_SIZE, DMA_BIDIRECTIONAL); + MAX_MAC_SIZE, DMA_BIDIRECTIONAL); } #if SSI_CC_HAS_AES_GCM @@ -645,12 +647,12 @@ void ssi_buffer_mgr_unmap_aead_request( if (areq_ctx->gcm_iv_inc1_dma_addr != 0) { dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, - AES_BLOCK_SIZE, DMA_TO_DEVICE); + AES_BLOCK_SIZE, DMA_TO_DEVICE); } if (areq_ctx->gcm_iv_inc2_dma_addr != 0) { dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, - AES_BLOCK_SIZE, DMA_TO_DEVICE); + AES_BLOCK_SIZE, DMA_TO_DEVICE); } } #endif @@ -658,7 +660,7 @@ void ssi_buffer_mgr_unmap_aead_request( if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { if (areq_ctx->ccm_iv0_dma_addr != 0) { dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, - 
AES_BLOCK_SIZE, DMA_TO_DEVICE); + AES_BLOCK_SIZE, DMA_TO_DEVICE); } dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE); @@ -672,9 +674,9 @@ void ssi_buffer_mgr_unmap_aead_request( *allocated and should be released */ if (areq_ctx->mlli_params.curr_pool) { - SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n", - (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr, - areq_ctx->mlli_params.mlli_virt_addr); + SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n", + areq_ctx->mlli_params.mlli_dma_addr, + areq_ctx->mlli_params.mlli_virt_addr); dma_pool_free(areq_ctx->mlli_params.curr_pool, areq_ctx->mlli_params.mlli_virt_addr, areq_ctx->mlli_params.mlli_dma_addr); @@ -690,14 +692,17 @@ void ssi_buffer_mgr_unmap_aead_request( dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL); if (unlikely(req->src != req->dst)) { SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n", - sg_virt(req->dst)); - dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained), - DMA_BIDIRECTIONAL); + sg_virt(req->dst)); + dma_unmap_sg(dev, req->dst, + ssi_buffer_mgr_get_sgl_nents(req->dst, + size_to_unmap, + &dummy, + &chained), + DMA_BIDIRECTIONAL); } if (drvdata->coherent && (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) && - likely(req->src == req->dst)) - { + likely(req->src == req->dst)) { u32 size_to_skip = req->assoclen; if (areq_ctx->is_gcm4543) @@ -753,11 +758,11 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents( *is_icv_fragmented = true; } else { SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n", - MAX_ICV_NENTS_SUPPORTED); + MAX_ICV_NENTS_SUPPORTED); nents = -1; /*unsupported*/ } SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n", - (*is_icv_fragmented ? "true" : "false"), nents); + (*is_icv_fragmented ? "true" : "false"), nents); return nents; } @@ -778,18 +783,18 @@ static inline int ssi_buffer_mgr_aead_chain_iv( goto chain_iv_exit; } - areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, - hw_iv_size, DMA_BIDIRECTIONAL); + areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size, + DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) { SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n", - hw_iv_size, req->iv); + hw_iv_size, req->iv); rc = -ENOMEM; goto chain_iv_exit; } - SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n", - hw_iv_size, req->iv, - (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr); + SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n", + hw_iv_size, req->iv, + areq_ctx->gen_ctx.iv_dma_addr); if (do_chain && areq_ctx->plaintext_authenticate_only) { // TODO: what about CTR?? ask Ron struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm); @@ -833,8 +838,8 @@ static inline int ssi_buffer_mgr_aead_chain_assoc( areq_ctx->assoc.nents = 0; areq_ctx->assoc.mlli_nents = 0; SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n", - GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type), - areq_ctx->assoc.nents); + GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type), + areq_ctx->assoc.nents); goto chain_assoc_exit; } @@ -868,10 +873,9 @@ static inline int ssi_buffer_mgr_aead_chain_assoc( if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { if (unlikely((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) { - SSI_LOG_ERR("CCM case.Too many fragments. 
" - "Current %d max %d\n", - (areq_ctx->assoc.nents + 1), - LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); + SSI_LOG_ERR("CCM case.Too many fragments. Current %d max %d\n", + (areq_ctx->assoc.nents + 1), + LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); rc = -ENOMEM; goto chain_assoc_exit; } @@ -884,10 +888,10 @@ static inline int ssi_buffer_mgr_aead_chain_assoc( areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI; if (unlikely((do_chain) || - (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) { + (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) { SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n", - GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type), - areq_ctx->assoc.nents); + GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type), + areq_ctx->assoc.nents); ssi_buffer_mgr_add_scatterlist_entry( sg_data, areq_ctx->assoc.nents, req->src, req->assoclen, 0, is_last, @@ -911,26 +915,26 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli( if (likely(req->src == req->dst)) { /*INPLACE*/ areq_ctx->icv_dma_addr = sg_dma_address( - areq_ctx->srcSgl) + + areq_ctx->src_sgl) + (*src_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt( - areq_ctx->srcSgl) + + areq_ctx->src_sgl) + (*src_last_bytes - authsize); } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { /*NON-INPLACE and DECRYPT*/ areq_ctx->icv_dma_addr = sg_dma_address( - areq_ctx->srcSgl) + + areq_ctx->src_sgl) + (*src_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt( - areq_ctx->srcSgl) + + areq_ctx->src_sgl) + (*src_last_bytes - authsize); } else { /*NON-INPLACE and ENCRYPT*/ areq_ctx->icv_dma_addr = sg_dma_address( - areq_ctx->dstSgl) + + areq_ctx->dst_sgl) + (*dst_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt( - areq_ctx->dstSgl) + + areq_ctx->dst_sgl) + (*dst_last_bytes - authsize); } } @@ -951,13 +955,18 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli( if (likely(req->src == req->dst)) { /*INPLACE*/ ssi_buffer_mgr_add_scatterlist_entry(sg_data, - areq_ctx->src.nents, areq_ctx->srcSgl, - areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table, - &areq_ctx->src.mlli_nents); - - icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl, - areq_ctx->src.nents, authsize, *src_last_bytes, - &areq_ctx->is_icv_fragmented); + areq_ctx->src.nents, + areq_ctx->src_sgl, + areq_ctx->cryptlen, + areq_ctx->src_offset, + is_last_table, + &areq_ctx->src.mlli_nents); + + icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl, + areq_ctx->src.nents, + authsize, + *src_last_bytes, + &areq_ctx->is_icv_fragmented); if (unlikely(icv_nents < 0)) { rc = -ENOTSUPP; goto prepare_data_mlli_exit; @@ -995,27 +1004,35 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli( } else { /* Contig. 
ICV */ /*Should hanlde if the sg is not contig.*/ areq_ctx->icv_dma_addr = sg_dma_address( - &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) + + &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) + (*src_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt( - &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) + + &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) + (*src_last_bytes - authsize); } } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { /*NON-INPLACE and DECRYPT*/ ssi_buffer_mgr_add_scatterlist_entry(sg_data, - areq_ctx->src.nents, areq_ctx->srcSgl, - areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table, - &areq_ctx->src.mlli_nents); + areq_ctx->src.nents, + areq_ctx->src_sgl, + areq_ctx->cryptlen, + areq_ctx->src_offset, + is_last_table, + &areq_ctx->src.mlli_nents); ssi_buffer_mgr_add_scatterlist_entry(sg_data, - areq_ctx->dst.nents, areq_ctx->dstSgl, - areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table, - &areq_ctx->dst.mlli_nents); - - icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl, - areq_ctx->src.nents, authsize, *src_last_bytes, - &areq_ctx->is_icv_fragmented); + areq_ctx->dst.nents, + areq_ctx->dst_sgl, + areq_ctx->cryptlen, + areq_ctx->dst_offset, + is_last_table, + &areq_ctx->dst.mlli_nents); + + icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl, + areq_ctx->src.nents, + authsize, + *src_last_bytes, + &areq_ctx->is_icv_fragmented); if (unlikely(icv_nents < 0)) { rc = -ENOTSUPP; goto prepare_data_mlli_exit; @@ -1039,26 +1056,34 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli( } else { /* Contig. ICV */ /*Should hanlde if the sg is not contig.*/ areq_ctx->icv_dma_addr = sg_dma_address( - &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) + + &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) + (*src_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt( - &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) + + &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) + (*src_last_bytes - authsize); } } else { /*NON-INPLACE and ENCRYPT*/ ssi_buffer_mgr_add_scatterlist_entry(sg_data, - areq_ctx->dst.nents, areq_ctx->dstSgl, - areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table, - &areq_ctx->dst.mlli_nents); + areq_ctx->dst.nents, + areq_ctx->dst_sgl, + areq_ctx->cryptlen, + areq_ctx->dst_offset, + is_last_table, + &areq_ctx->dst.mlli_nents); ssi_buffer_mgr_add_scatterlist_entry(sg_data, - areq_ctx->src.nents, areq_ctx->srcSgl, - areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table, - &areq_ctx->src.mlli_nents); - - icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl, - areq_ctx->dst.nents, authsize, *dst_last_bytes, + areq_ctx->src.nents, + areq_ctx->src_sgl, + areq_ctx->cryptlen, + areq_ctx->src_offset, + is_last_table, + &areq_ctx->src.mlli_nents); + + icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dst_sgl, + areq_ctx->dst.nents, + authsize, + *dst_last_bytes, &areq_ctx->is_icv_fragmented); if (unlikely(icv_nents < 0)) { rc = -ENOTSUPP; @@ -1068,10 +1093,10 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli( if (likely(!areq_ctx->is_icv_fragmented)) { /* Contig. 
ICV */ areq_ctx->icv_dma_addr = sg_dma_address( - &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) + + &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) + (*dst_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt( - &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) + + &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) + (*dst_last_bytes - authsize); } else { areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; @@ -1113,37 +1138,36 @@ static inline int ssi_buffer_mgr_aead_chain_data( rc = -EINVAL; goto chain_data_exit; } - areq_ctx->srcSgl = req->src; - areq_ctx->dstSgl = req->dst; + areq_ctx->src_sgl = req->src; + areq_ctx->dst_sgl = req->dst; if (is_gcm4543) size_for_map += crypto_aead_ivsize(tfm); size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0; src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained); - sg_index = areq_ctx->srcSgl->length; + sg_index = areq_ctx->src_sgl->length; //check where the data starts while (sg_index <= size_to_skip) { - offset -= areq_ctx->srcSgl->length; - areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl); + offset -= areq_ctx->src_sgl->length; + areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl); //if have reached the end of the sgl, then this is unexpected - if (!areq_ctx->srcSgl) { + if (!areq_ctx->src_sgl) { SSI_LOG_ERR("reached end of sg list. unexpected\n"); BUG(); } - sg_index += areq_ctx->srcSgl->length; + sg_index += areq_ctx->src_sgl->length; src_mapped_nents--; } - if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) - { + if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) { SSI_LOG_ERR("Too many fragments. current %d max %d\n", - src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); + src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); return -ENOMEM; } areq_ctx->src.nents = src_mapped_nents; - areq_ctx->srcOffset = offset; + areq_ctx->src_offset = offset; if (req->src != req->dst) { size_for_map = req->assoclen + req->cryptlen; @@ -1152,9 +1176,11 @@ static inline int ssi_buffer_mgr_aead_chain_data( size_for_map += crypto_aead_ivsize(tfm); rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map, - DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents), - LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, - &dst_mapped_nents); + DMA_BIDIRECTIONAL, + &areq_ctx->dst.nents, + LLI_MAX_NUM_OF_DATA_ENTRIES, + &dst_last_bytes, + &dst_mapped_nents); if (unlikely(rc != 0)) { rc = -ENOMEM; goto chain_data_exit; @@ -1162,35 +1188,37 @@ static inline int ssi_buffer_mgr_aead_chain_data( } dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained); - sg_index = areq_ctx->dstSgl->length; + sg_index = areq_ctx->dst_sgl->length; offset = size_to_skip; //check where the data starts while (sg_index <= size_to_skip) { - offset -= areq_ctx->dstSgl->length; - areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl); + offset -= areq_ctx->dst_sgl->length; + areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl); //if have reached the end of the sgl, then this is unexpected - if (!areq_ctx->dstSgl) { + if (!areq_ctx->dst_sgl) { SSI_LOG_ERR("reached end of sg list. unexpected\n"); BUG(); } - sg_index += areq_ctx->dstSgl->length; + sg_index += areq_ctx->dst_sgl->length; dst_mapped_nents--; } - if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) - { + if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) { SSI_LOG_ERR("Too many fragments. 
current %d max %d\n", dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); return -ENOMEM; } areq_ctx->dst.nents = dst_mapped_nents; - areq_ctx->dstOffset = offset; + areq_ctx->dst_offset = offset; if ((src_mapped_nents > 1) || (dst_mapped_nents > 1) || do_chain) { areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI; - rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data, - &src_last_bytes, &dst_last_bytes, is_last_table); + rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, + sg_data, + &src_last_bytes, + &dst_last_bytes, + is_last_table); } else { areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI; ssi_buffer_mgr_prepare_aead_data_dlli( @@ -1202,7 +1230,7 @@ chain_data_exit: } static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata, - struct aead_request *req) + struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx(req); u32 curr_mlli_size = 0; @@ -1274,8 +1302,7 @@ int ssi_buffer_mgr_map_aead_request( if (drvdata->coherent && (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) && - likely(req->src == req->dst)) - { + likely(req->src == req->dst)) { u32 size_to_skip = req->assoclen; if (is_gcm4543) @@ -1296,19 +1323,21 @@ int ssi_buffer_mgr_map_aead_request( req->cryptlen : (req->cryptlen - authsize); - areq_ctx->mac_buf_dma_addr = dma_map_single(dev, - areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL); + areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf, + MAX_MAC_SIZE, + DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) { SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n", - MAX_MAC_SIZE, areq_ctx->mac_buf); + MAX_MAC_SIZE, areq_ctx->mac_buf); rc = -ENOMEM; goto aead_map_failure; } if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev, - (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET), - AES_BLOCK_SIZE, DMA_TO_DEVICE); + (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET), + AES_BLOCK_SIZE, + DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) { SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK " @@ -1319,7 +1348,8 @@ int ssi_buffer_mgr_map_aead_request( goto aead_map_failure; } if (ssi_aead_handle_config_buf(dev, areq_ctx, - areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) { + areq_ctx->ccm_config, &sg_data, + req->assoclen) != 0) { rc = -ENOMEM; goto aead_map_failure; } @@ -1328,26 +1358,31 @@ int ssi_buffer_mgr_map_aead_request( #if SSI_CC_HAS_AES_GCM if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { areq_ctx->hkey_dma_addr = dma_map_single(dev, - areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); + areq_ctx->hkey, + AES_BLOCK_SIZE, + DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) { SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n", - AES_BLOCK_SIZE, areq_ctx->hkey); + AES_BLOCK_SIZE, areq_ctx->hkey); rc = -ENOMEM; goto aead_map_failure; } areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev, - &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE); + &areq_ctx->gcm_len_block, + AES_BLOCK_SIZE, + DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) { SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n", - AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); + AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); rc = -ENOMEM; goto aead_map_failure; } areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev, - areq_ctx->gcm_iv_inc1, - AES_BLOCK_SIZE, DMA_TO_DEVICE); + areq_ctx->gcm_iv_inc1, + AES_BLOCK_SIZE, + DMA_TO_DEVICE); if 
(unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) { SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK " @@ -1359,8 +1394,9 @@ int ssi_buffer_mgr_map_aead_request( } areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev, - areq_ctx->gcm_iv_inc2, - AES_BLOCK_SIZE, DMA_TO_DEVICE); + areq_ctx->gcm_iv_inc2, + AES_BLOCK_SIZE, + DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) { SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK " @@ -1380,7 +1416,7 @@ int ssi_buffer_mgr_map_aead_request( if (is_gcm4543) size_to_map += crypto_aead_ivsize(tfm); rc = ssi_buffer_mgr_map_scatterlist(dev, req->src, - size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents), + size_to_map, DMA_BIDIRECTIONAL, &areq_ctx->src.nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (unlikely(rc != 0)) { rc = -ENOMEM; @@ -1491,18 +1527,18 @@ int ssi_buffer_mgr_map_hash_request_final( /* map the previous buffer */ if (*curr_buff_cnt != 0) { if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff, - *curr_buff_cnt, &sg_data) != 0) { + *curr_buff_cnt, &sg_data) != 0) { return -ENOMEM; } } if (src && (nbytes > 0) && do_update) { - if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, - nbytes, - DMA_TO_DEVICE, - &areq_ctx->in_nents, - LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents))){ + if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes, + DMA_TO_DEVICE, + &areq_ctx->in_nents, + LLI_MAX_NUM_OF_DATA_ENTRIES, + &dummy, + &mapped_nents))){ goto unmap_curr_buff; } if (src && (mapped_nents == 1) @@ -1522,19 +1558,18 @@ int ssi_buffer_mgr_map_hash_request_final( mlli_params->curr_pool = buff_mgr->mlli_buffs_pool; /* add the src data to the sg_data */ ssi_buffer_mgr_add_scatterlist_entry(&sg_data, - areq_ctx->in_nents, - src, - nbytes, 0, - true, &areq_ctx->mlli_nents); + areq_ctx->in_nents, + src, nbytes, 0, true, + &areq_ctx->mlli_nents); if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data, - mlli_params) != 0)) { + mlli_params) != 0)) { goto fail_unmap_din; } } /* change the buffer index for the unmap function */ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1); SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n", - GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type)); + GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type)); return 0; fail_unmap_din: @@ -1588,8 +1623,8 @@ int ssi_buffer_mgr_map_hash_request_update( &curr_buff[*curr_buff_cnt]); areq_ctx->in_nents = ssi_buffer_mgr_get_sgl_nents(src, - nbytes, - &dummy, NULL); + nbytes, + &dummy, NULL); sg_copy_to_buffer(src, areq_ctx->in_nents, &curr_buff[*curr_buff_cnt], nbytes); *curr_buff_cnt += nbytes; @@ -1612,15 +1647,15 @@ int ssi_buffer_mgr_map_hash_request_update( (update_data_len - *curr_buff_cnt), *next_buff_cnt); ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src, - (update_data_len - *curr_buff_cnt), - nbytes, SSI_SG_TO_BUF); + (update_data_len - *curr_buff_cnt), + nbytes, SSI_SG_TO_BUF); /* change the buffer index for next operation */ swap_index = 1; } if (*curr_buff_cnt != 0) { if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff, - *curr_buff_cnt, &sg_data) != 0) { + *curr_buff_cnt, &sg_data) != 0) { return -ENOMEM; } /* change the buffer index for next operation */ @@ -1629,11 +1664,12 @@ int ssi_buffer_mgr_map_hash_request_update( if (update_data_len > *curr_buff_cnt) { if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, - (update_data_len - *curr_buff_cnt), - DMA_TO_DEVICE, - &areq_ctx->in_nents, - LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents))){ + 
(update_data_len - *curr_buff_cnt), + DMA_TO_DEVICE, + &areq_ctx->in_nents, + LLI_MAX_NUM_OF_DATA_ENTRIES, + &dummy, + &mapped_nents))){ goto unmap_curr_buff; } if ((mapped_nents == 1) @@ -1653,12 +1689,14 @@ int ssi_buffer_mgr_map_hash_request_update( mlli_params->curr_pool = buff_mgr->mlli_buffs_pool; /* add the src data to the sg_data */ ssi_buffer_mgr_add_scatterlist_entry(&sg_data, - areq_ctx->in_nents, - src, - (update_data_len - *curr_buff_cnt), 0, - true, &areq_ctx->mlli_nents); + areq_ctx->in_nents, + src, + (update_data_len - *curr_buff_cnt), + 0, + true, + &areq_ctx->mlli_nents); if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data, - mlli_params) != 0)) { + mlli_params) != 0)) { goto fail_unmap_din; } } @@ -1687,28 +1725,28 @@ void ssi_buffer_mgr_unmap_hash_request( *allocated and should be released */ if (areq_ctx->mlli_params.curr_pool) { - SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n", - (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr, - areq_ctx->mlli_params.mlli_virt_addr); + SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n", + areq_ctx->mlli_params.mlli_dma_addr, + areq_ctx->mlli_params.mlli_virt_addr); dma_pool_free(areq_ctx->mlli_params.curr_pool, areq_ctx->mlli_params.mlli_virt_addr, areq_ctx->mlli_params.mlli_dma_addr); } if ((src) && likely(areq_ctx->in_nents != 0)) { - SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n", - sg_virt(src), - (unsigned long long)sg_dma_address(src), - sg_dma_len(src)); + SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=%pad len=0x%X\n", + sg_virt(src), + sg_dma_address(src), + sg_dma_len(src)); dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); } if (*prev_len != 0) { SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK" - " dma=0x%llX len 0x%X\n", + " dma=%pad len 0x%X\n", sg_virt(areq_ctx->buff_sg), - (unsigned long long)sg_dma_address(areq_ctx->buff_sg), + sg_dma_address(areq_ctx->buff_sg), sg_dma_len(areq_ctx->buff_sg)); dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); if (!do_revert) { @@ -1725,8 +1763,7 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata) struct buff_mgr_handle *buff_mgr_handle; struct device *dev = &drvdata->plat_dev->dev; - buff_mgr_handle = (struct buff_mgr_handle *) - kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL); + buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL); if (!buff_mgr_handle) return -ENOMEM; diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c index cd2eafc04232..6219a92184aa 100644 --- a/drivers/staging/ccree/ssi_cipher.c +++ b/drivers/staging/ccree/ssi_cipher.c @@ -23,6 +23,7 @@ #include <crypto/aes.h> #include <crypto/ctr.h> #include <crypto/des.h> +#include <crypto/xts.h> #include "ssi_config.h" #include "ssi_driver.h" @@ -31,7 +32,6 @@ #include "ssi_cipher.h" #include "ssi_request_mgr.h" #include "ssi_sysfs.h" -#include "ssi_fips_local.h" #define MAX_ABLKCIPHER_SEQ_LEN 6 @@ -68,7 +68,8 @@ struct ssi_ablkcipher_ctx { static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base); -static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) { +static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) +{ switch (ctx_p->flow_mode) { case S_DIN_to_AES: switch (size) { @@ -92,8 +93,7 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) { break; } case S_DIN_to_DES: - if (likely(size == DES3_EDE_KEY_SIZE || - size == DES_KEY_SIZE)) + if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)) return 0; break; 
#if SSI_CC_HAS_MULTI2 @@ -108,7 +108,8 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) { return -EINVAL; } -static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) { +static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) +{ switch (ctx_p->flow_mode) { case S_DIN_to_AES: switch (ctx_p->cipher_mode) { @@ -183,10 +184,9 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm) int rc = 0; unsigned int max_key_buf_size = get_max_keysize(tfm); - SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p, - crypto_tfm_alg_name(tfm)); + SSI_LOG_DEBUG("Initializing context @%p for %s\n", + ctx_p, crypto_tfm_alg_name(tfm)); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); ctx_p->cipher_mode = ssi_alg->cipher_mode; ctx_p->flow_mode = ssi_alg->flow_mode; ctx_p->drvdata = ssi_alg->drvdata; @@ -203,15 +203,16 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm) /* Map key buffer */ ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key, - max_key_buf_size, DMA_TO_DEVICE); + max_key_buf_size, + DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { SSI_LOG_ERR("Mapping Key %u B at va=%pK for DMA failed\n", - max_key_buf_size, ctx_p->user.key); + max_key_buf_size, ctx_p->user.key); return -ENOMEM; } - SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=0x%llX\n", - max_key_buf_size, ctx_p->user.key, - (unsigned long long)ctx_p->user.key_dma_addr); + SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=%pad\n", + max_key_buf_size, ctx_p->user.key, + ctx_p->user.key_dma_addr); if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { /* Alloc hash tfm for essiv */ @@ -232,7 +233,7 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm) unsigned int max_key_buf_size = get_max_keysize(tfm); SSI_LOG_DEBUG("Clearing context @%p for %s\n", - crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm)); + crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm)); if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { /* Free hash tfm for essiv */ @@ -242,9 +243,9 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm) /* Unmap key buffer */ dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size, - DMA_TO_DEVICE); - SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n", - (unsigned long long)ctx_p->user.key_dma_addr); + DMA_TO_DEVICE); + SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=%pad\n", + ctx_p->user.key_dma_addr); /* Free key buffer in context */ kfree(ctx_p->user.key); @@ -263,31 +264,15 @@ static const u8 zero_buff[] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; /* The function verifies that tdes keys are not weak.*/ -static int ssi_fips_verify_3des_keys(const u8 *key, unsigned int keylen) +static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen) { -#ifdef CCREE_FIPS_SUPPORT struct tdes_keys *tdes_key = (struct tdes_keys *)key; /* verify key1 != key2 and key3 != key2*/ if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2, sizeof(tdes_key->key1)) == 0) || - (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) { + (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) { return -ENOEXEC; } -#endif /* CCREE_FIPS_SUPPORT */ - - return 0; -} - -/* The function verifies that xts keys are not weak.*/ -static int ssi_fips_verify_xts_keys(const u8 *key, unsigned int keylen) -{ -#ifdef CCREE_FIPS_SUPPORT - /* Weak key is define as key that its first half (128/256 lsb) equals its second half (128/256 msb) */ - int 
singleKeySize = keylen >> 1; - - if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0)) - return -ENOEXEC; -#endif /* CCREE_FIPS_SUPPORT */ return 0; } @@ -317,12 +302,10 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm, unsigned int max_key_buf_size = get_max_keysize(tfm); SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n", - ctx_p, crypto_tfm_alg_name(tfm), keylen); + ctx_p, crypto_tfm_alg_name(tfm), keylen); dump_byte_array("key", (u8 *)key, keylen); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); - - SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check"); + SSI_LOG_DEBUG("after FIPS check"); /* STAT_PHASE_0: Init and sanity checks */ @@ -368,7 +351,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm, } ctx_p->keylen = keylen; - SSI_LOG_DEBUG("ssi_blkcipher_setkey: ssi_is_hw_key ret 0"); + SSI_LOG_DEBUG("ssi_is_hw_key ret 0"); return 0; } @@ -378,25 +361,25 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm, if (unlikely(!des_ekey(tmp, key)) && (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; - SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak DES key"); + SSI_LOG_DEBUG("weak DES key"); return -EINVAL; } } if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) && - ssi_fips_verify_xts_keys(key, keylen) != 0) { - SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak XTS key"); + xts_check_key(tfm, key, keylen) != 0) { + SSI_LOG_DEBUG("weak XTS key"); return -EINVAL; } if ((ctx_p->flow_mode == S_DIN_to_DES) && (keylen == DES3_EDE_KEY_SIZE) && - ssi_fips_verify_3des_keys(key, keylen) != 0) { - SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak 3DES key"); + ssi_verify_3des_keys(key, keylen) != 0) { + SSI_LOG_DEBUG("weak 3DES key"); return -EINVAL; } /* STAT_PHASE_1: Copy key to ctx */ dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr, - max_key_buf_size, DMA_TO_DEVICE); + max_key_buf_size, DMA_TO_DEVICE); if (ctx_p->flow_mode == S_DIN_to_MULTI2) { #if SSI_CC_HAS_MULTI2 @@ -405,7 +388,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm, if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS || ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) { crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - SSI_LOG_DEBUG("ssi_blkcipher_setkey: SSI_CC_HAS_MULTI2 einval"); + SSI_LOG_DEBUG("SSI_CC_HAS_MULTI2 einval"); return -EINVAL; #endif /*SSI_CC_HAS_MULTI2*/ } else { @@ -429,10 +412,10 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm, } } dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr, - max_key_buf_size, DMA_TO_DEVICE); + max_key_buf_size, DMA_TO_DEVICE); ctx_p->keylen = keylen; - SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely"); + SSI_LOG_DEBUG("return safely"); return 0; } @@ -632,17 +615,15 @@ ssi_blkcipher_create_data_desc( break; #endif /*SSI_CC_HAS_MULTI2*/ default: - SSI_LOG_ERR("invalid flow mode, flow_mode = %d \n", flow_mode); + SSI_LOG_ERR("invalid flow mode, flow_mode = %d\n", flow_mode); return; } /* Process */ if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) { - SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n", - (unsigned long long)sg_dma_address(src), - nbytes); - SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n", - (unsigned long long)sg_dma_address(dst), - nbytes); + SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n", + sg_dma_address(src), nbytes); + SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n", + sg_dma_address(dst), nbytes); hw_desc_init(&desc[*seq_size]); set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src), nbytes, NS_BIT); @@ -655,9 +636,9 @@ 
ssi_blkcipher_create_data_desc( (*seq_size)++; } else { /* bypass */ - SSI_LOG_DEBUG(" bypass params addr 0x%llX " + SSI_LOG_DEBUG(" bypass params addr %pad " "length 0x%X addr 0x%08X\n", - (unsigned long long)req_ctx->mlli_params.mlli_dma_addr, + req_ctx->mlli_params.mlli_dma_addr, req_ctx->mlli_params.mlli_len, (unsigned int)ctx_p->drvdata->mlli_sram_addr); hw_desc_init(&desc[*seq_size]); @@ -706,13 +687,13 @@ ssi_blkcipher_create_data_desc( } static int ssi_blkcipher_complete(struct device *dev, - struct ssi_ablkcipher_ctx *ctx_p, - struct blkcipher_req_ctx *req_ctx, - struct scatterlist *dst, - struct scatterlist *src, - unsigned int ivsize, - void *areq, - void __iomem *cc_base) + struct ssi_ablkcipher_ctx *ctx_p, + struct blkcipher_req_ctx *req_ctx, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int ivsize, + void *areq, + void __iomem *cc_base) { int completion_error = 0; u32 inflight_counter; @@ -749,10 +730,9 @@ static int ssi_blkcipher_process( int rc, seq_len = 0, cts_restore_flag = 0; SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n", - ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), - areq, info, nbytes); + ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), + areq, info, nbytes); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); /* STAT_PHASE_0: Init and sanity checks */ /* TODO: check data length according to mode */ @@ -804,12 +784,8 @@ static int ssi_blkcipher_process( ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len); /* Data processing */ - ssi_blkcipher_create_data_desc(tfm, - req_ctx, - dst, src, - nbytes, - areq, - desc, &seq_len); + ssi_blkcipher_create_data_desc(tfm, req_ctx, dst, src, nbytes, areq, + desc, &seq_len); /* do we need to generate IV? */ if (req_ctx->is_giv) { @@ -853,8 +829,6 @@ static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __io struct ssi_ablkcipher_ctx *ctx_p = crypto_ablkcipher_ctx(tfm); unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); - CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR(); - ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src, ivsize, areq, cc_base); } @@ -871,8 +845,8 @@ static int ssi_ablkcipher_init(struct crypto_tfm *tfm) } static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm, - const u8 *key, - unsigned int keylen) + const u8 *key, + unsigned int keylen) { return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen); } @@ -1286,7 +1260,7 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata) if (blkcipher_handle) { /* Remove registered algs */ list_for_each_entry_safe(t_alg, n, - &blkcipher_handle->blkcipher_alg_list, + &blkcipher_handle->blkcipher_alg_list, entry) { crypto_unregister_alg(&t_alg->crypto_alg); list_del(&t_alg->entry); @@ -1306,7 +1280,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata) int alg; ablkcipher_handle = kmalloc(sizeof(struct ssi_blkcipher_handle), - GFP_KERNEL); + GFP_KERNEL); if (!ablkcipher_handle) return -ENOMEM; @@ -1322,7 +1296,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata) if (IS_ERR(t_alg)) { rc = PTR_ERR(t_alg); SSI_LOG_ERR("%s alg allocation failed\n", - blkcipher_algs[alg].driver_name); + blkcipher_algs[alg].driver_name); goto fail0; } t_alg->drvdata = drvdata; @@ -1330,17 +1304,17 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata) SSI_LOG_DEBUG("registering %s\n", blkcipher_algs[alg].driver_name); rc = crypto_register_alg(&t_alg->crypto_alg); SSI_LOG_DEBUG("%s alg registration rc = %x\n", - t_alg->crypto_alg.cra_driver_name, rc); + 
t_alg->crypto_alg.cra_driver_name, rc); if (unlikely(rc != 0)) { SSI_LOG_ERR("%s alg registration failed\n", - t_alg->crypto_alg.cra_driver_name); + t_alg->crypto_alg.cra_driver_name); kfree(t_alg); goto fail0; } else { list_add_tail(&t_alg->entry, &ablkcipher_handle->blkcipher_alg_list); SSI_LOG_DEBUG("Registered %s\n", - t_alg->crypto_alg.cra_driver_name); + t_alg->crypto_alg.cra_driver_name); } } return 0; diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c index 78709b92736d..a4ab9ef4baf7 100644 --- a/drivers/staging/ccree/ssi_driver.c +++ b/drivers/staging/ccree/ssi_driver.c @@ -71,7 +71,7 @@ #include "ssi_ivgen.h" #include "ssi_sram_mgr.h" #include "ssi_pm.h" -#include "ssi_fips_local.h" +#include "ssi_fips.h" #ifdef DX_DUMP_BYTES void dump_byte_array(const char *name, const u8 *the_array, unsigned long size) @@ -81,12 +81,11 @@ void dump_byte_array(const char *name, const u8 *the_array, unsigned long size) char line_buf[80]; if (!the_array) { - SSI_LOG_ERR("cannot dump_byte_array - NULL pointer\n"); + SSI_LOG_ERR("cannot dump array - NULL pointer\n"); return; } - ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ", - name, size); + ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ", name, size); if (ret < 0) { SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret); return; @@ -95,8 +94,8 @@ void dump_byte_array(const char *name, const u8 *the_array, unsigned long size) for (i = 0, cur_byte = the_array; (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) { ret = snprintf(line_buf + line_offset, - sizeof(line_buf) - line_offset, - "0x%02X ", *cur_byte); + sizeof(line_buf) - line_offset, + "0x%02X ", *cur_byte); if (ret < 0) { SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret); return; @@ -193,11 +192,11 @@ int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe) #ifdef DX_IRQ_DELAY /* Set CC IRQ delay */ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL), - DX_IRQ_DELAY); + DX_IRQ_DELAY); #endif if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) { SSI_LOG_DEBUG("irq_delay=%d CC cycles\n", - CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL))); + CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL))); } #endif @@ -251,10 +250,10 @@ static int init_cc_resources(struct platform_device *plat_dev) rc = -ENODEV; goto init_cc_res_err; } - SSI_LOG_DEBUG("Got MEM resource (%s): start=0x%llX end=0x%llX\n", - new_drvdata->res_mem->name, - (unsigned long long)new_drvdata->res_mem->start, - (unsigned long long)new_drvdata->res_mem->end); + SSI_LOG_DEBUG("Got MEM resource (%s): start=%pad end=%pad\n", + new_drvdata->res_mem->name, + new_drvdata->res_mem->start, + new_drvdata->res_mem->end); /* Map registers space */ req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs"); if (unlikely(!req_mem_cc_regs)) { @@ -266,7 +265,8 @@ static int init_cc_resources(struct platform_device *plat_dev) cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem)); if (unlikely(!cc_base)) { SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n", - (unsigned int)new_drvdata->res_mem->start, (unsigned int)resource_size(new_drvdata->res_mem)); + (unsigned int)new_drvdata->res_mem->start, + (unsigned int)resource_size(new_drvdata->res_mem)); rc = -ENOMEM; goto init_cc_res_err; } @@ -284,15 +284,15 @@ static int init_cc_resources(struct platform_device *plat_dev) 
IRQF_SHARED, "arm_cc7x", new_drvdata); if (unlikely(rc != 0)) { SSI_LOG_ERR("Could not register to interrupt %llu\n", - (unsigned long long)new_drvdata->res_irq->start); + (unsigned long long)new_drvdata->res_irq->start); goto init_cc_res_err; } init_completion(&new_drvdata->icache_setup_completion); irq_registered = true; SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n", - new_drvdata->res_irq->name, - (unsigned long long)new_drvdata->res_irq->start); + new_drvdata->res_irq->name, + (unsigned long long)new_drvdata->res_irq->start); new_drvdata->plat_dev = plat_dev; @@ -301,19 +301,16 @@ static int init_cc_resources(struct platform_device *plat_dev) goto init_cc_res_err; if (!new_drvdata->plat_dev->dev.dma_mask) - { new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask; - } + if (!new_drvdata->plat_dev->dev.coherent_dma_mask) - { new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN); - } /* Verify correct mapping */ signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE)); if (signature_val != DX_DEV_SIGNATURE) { SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n", - signature_val, (u32)DX_DEV_SIGNATURE); + signature_val, (u32)DX_DEV_SIGNATURE); rc = -EINVAL; goto init_cc_res_err; } @@ -330,7 +327,7 @@ static int init_cc_resources(struct platform_device *plat_dev) } #ifdef ENABLE_CC_SYSFS - rc = ssi_sysfs_init(&(plat_dev->dev.kobj), new_drvdata); + rc = ssi_sysfs_init(&plat_dev->dev.kobj, new_drvdata); if (unlikely(rc != 0)) { SSI_LOG_ERR("init_stat_db failed\n"); goto init_cc_res_err; @@ -401,6 +398,12 @@ static int init_cc_resources(struct platform_device *plat_dev) goto init_cc_res_err; } + /* If we got here and FIPS mode is enabled + * it means all FIPS test passed, so let TEE + * know we're good. 
+ */ + cc_set_ree_fips_status(new_drvdata, true); + return 0; init_cc_res_err: @@ -428,7 +431,7 @@ init_cc_res_err: new_drvdata->cc_base = NULL; } release_mem_region(new_drvdata->res_mem->start, - resource_size(new_drvdata->res_mem)); + resource_size(new_drvdata->res_mem)); new_drvdata->res_mem = NULL; } kfree(new_drvdata); @@ -471,7 +474,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev) if (drvdata->cc_base) { iounmap(drvdata->cc_base); release_mem_region(drvdata->res_mem->start, - resource_size(drvdata->res_mem)); + resource_size(drvdata->res_mem)); drvdata->cc_base = NULL; drvdata->res_mem = NULL; } @@ -516,12 +519,12 @@ static int cc7x_probe(struct platform_device *plat_dev) asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr)); cacheline_size = 4 << ((ctr >> 16) & 0xf); SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n", - cacheline_size, L1_CACHE_BYTES); + cacheline_size, L1_CACHE_BYTES); asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr)); - SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X," - " Part 0x%03X, Rev r%dp%d\n", - (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF, (ctr >> 20) & 0xF, ctr & 0xF); + SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n", + (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF, + (ctr >> 20) & 0xF, ctr & 0xF); #endif /* Map registers space */ diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h index c1ed61f1a202..b6ad89ae9bee 100644 --- a/drivers/staging/ccree/ssi_driver.h +++ b/drivers/staging/ccree/ssi_driver.h @@ -48,7 +48,6 @@ #include "cc_crypto_ctx.h" #include "ssi_sysfs.h" #include "hash_defs.h" -#include "ssi_fips_local.h" #include "cc_hw_queue_defs.h" #include "ssi_sram_mgr.h" diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/ssi_fips.c index fdc40f38332a..33d53d64603d 100644 --- a/drivers/staging/ccree/ssi_fips.c +++ b/drivers/staging/ccree/ssi_fips.c @@ -14,48 +14,115 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. */ -/************************************************************** - * This file defines the driver FIPS APIs * - **************************************************************/ +#include <linux/kernel.h> +#include <linux/fips.h> -#include <linux/module.h> +#include "ssi_config.h" +#include "ssi_driver.h" +#include "cc_hal.h" #include "ssi_fips.h" -extern int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state); -extern int ssi_fips_ext_get_error(enum cc_fips_error *p_err); +static void fips_dsr(unsigned long devarg); + +struct ssi_fips_handle { + struct tasklet_struct tasklet; +}; + +/* The function called once at driver entry point to check + * whether TEE FIPS error occurred. + */ +static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata) +{ + u32 reg; + void __iomem *cc_base = drvdata->cc_base; + + reg = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST)); + return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)); +} /* - * This function returns the REE FIPS state. - * It should be called by kernel module. + * This function should push the FIPS REE library status towards the TEE library + * by writing the error state to HOST_GPR0 register. */ -int ssi_fips_get_state(enum cc_fips_state_t *p_state) +void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status) +{ + void __iomem *cc_base = drvdata->cc_base; + int val = CC_FIPS_SYNC_REE_STATUS; + + val |= (status ? 
CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR); + + CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), val); +} + +void ssi_fips_fini(struct ssi_drvdata *drvdata) { - int rc = 0; + struct ssi_fips_handle *fips_h = drvdata->fips_handle; - if (!p_state) - return -EINVAL; + if (!fips_h) + return; /* Not allocated */ - rc = ssi_fips_ext_get_state(p_state); + /* Kill tasklet */ + tasklet_kill(&fips_h->tasklet); - return rc; + kfree(fips_h); + drvdata->fips_handle = NULL; } -EXPORT_SYMBOL(ssi_fips_get_state); +void fips_handler(struct ssi_drvdata *drvdata) +{ + struct ssi_fips_handle *fips_handle_ptr = + drvdata->fips_handle; -/* - * This function returns the REE FIPS error. - * It should be called by kernel module. - */ -int ssi_fips_get_error(enum cc_fips_error *p_err) + tasklet_schedule(&fips_handle_ptr->tasklet); +} + +static inline void tee_fips_error(void) { - int rc = 0; + if (fips_enabled) + panic("ccree: TEE reported cryptographic error in fips mode!\n"); + else + SSI_LOG_ERR("TEE reported error!\n"); +} - if (!p_err) - return -EINVAL; +/* Deferred service handler, run as interrupt-fired tasklet */ +static void fips_dsr(unsigned long devarg) +{ + struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg; + void __iomem *cc_base = drvdata->cc_base; + u32 irq, state, val; - rc = ssi_fips_ext_get_error(p_err); + irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK)); - return rc; + if (irq) { + state = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST)); + + if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) + tee_fips_error(); + } + + /* after verifing that there is nothing to do, + * unmask AXI completion interrupt. + */ + val = (CC_REG_OFFSET(HOST_RGF, HOST_IMR) & ~irq); + CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val); } -EXPORT_SYMBOL(ssi_fips_get_error); +/* The function called once at driver entry point .*/ +int ssi_fips_init(struct ssi_drvdata *p_drvdata) +{ + struct ssi_fips_handle *fips_h; + + fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL); + if (!fips_h) + return -ENOMEM; + + p_drvdata->fips_handle = fips_h; + + SSI_LOG_DEBUG("Initializing fips tasklet\n"); + tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); + + if (!cc_get_tee_fips_status(p_drvdata)) + tee_fips_error(); + + return 0; +} diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h index 4f5c6a9a8363..369ddf9478e7 100644 --- a/drivers/staging/ccree/ssi_fips.h +++ b/drivers/staging/ccree/ssi_fips.h @@ -17,45 +17,33 @@ #ifndef __SSI_FIPS_H__ #define __SSI_FIPS_H__ -/*! - * @file - * @brief This file contains FIPS related defintions and APIs. 
- */ +#ifdef CONFIG_CRYPTO_FIPS -enum cc_fips_state { - CC_FIPS_STATE_NOT_SUPPORTED = 0, - CC_FIPS_STATE_SUPPORTED, - CC_FIPS_STATE_ERROR, - CC_FIPS_STATE_RESERVE32B = S32_MAX +enum cc_fips_status { + CC_FIPS_SYNC_MODULE_OK = 0x0, + CC_FIPS_SYNC_MODULE_ERROR = 0x1, + CC_FIPS_SYNC_REE_STATUS = 0x4, + CC_FIPS_SYNC_TEE_STATUS = 0x8, + CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX }; -enum cc_fips_error { - CC_REE_FIPS_ERROR_OK = 0, - CC_REE_FIPS_ERROR_GENERAL, - CC_REE_FIPS_ERROR_FROM_TEE, - CC_REE_FIPS_ERROR_AES_ECB_PUT, - CC_REE_FIPS_ERROR_AES_CBC_PUT, - CC_REE_FIPS_ERROR_AES_OFB_PUT, - CC_REE_FIPS_ERROR_AES_CTR_PUT, - CC_REE_FIPS_ERROR_AES_CBC_CTS_PUT, - CC_REE_FIPS_ERROR_AES_XTS_PUT, - CC_REE_FIPS_ERROR_AES_CMAC_PUT, - CC_REE_FIPS_ERROR_AESCCM_PUT, - CC_REE_FIPS_ERROR_AESGCM_PUT, - CC_REE_FIPS_ERROR_DES_ECB_PUT, - CC_REE_FIPS_ERROR_DES_CBC_PUT, - CC_REE_FIPS_ERROR_SHA1_PUT, - CC_REE_FIPS_ERROR_SHA256_PUT, - CC_REE_FIPS_ERROR_SHA512_PUT, - CC_REE_FIPS_ERROR_HMAC_SHA1_PUT, - CC_REE_FIPS_ERROR_HMAC_SHA256_PUT, - CC_REE_FIPS_ERROR_HMAC_SHA512_PUT, - CC_REE_FIPS_ERROR_ROM_CHECKSUM, - CC_REE_FIPS_ERROR_RESERVE32B = S32_MAX -}; +int ssi_fips_init(struct ssi_drvdata *p_drvdata); +void ssi_fips_fini(struct ssi_drvdata *drvdata); +void fips_handler(struct ssi_drvdata *drvdata); +void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok); + +#else /* CONFIG_CRYPTO_FIPS */ + +static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata) +{ + return 0; +} + +static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {} +void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok) {} +void fips_handler(struct ssi_drvdata *drvdata) {} -int ssi_fips_get_state(enum cc_fips_state *p_state); -int ssi_fips_get_error(enum cc_fips_error *p_err); +#endif /* CONFIG_CRYPTO_FIPS */ #endif /*__SSI_FIPS_H__*/ diff --git a/drivers/staging/ccree/ssi_fips_data.h b/drivers/staging/ccree/ssi_fips_data.h deleted file mode 100644 index c41671dbee40..000000000000 --- a/drivers/staging/ccree/ssi_fips_data.h +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (C) 2012-2017 ARM Limited or its affiliates. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -/* - * The test vectors were taken from: - * - * * AES - * NIST Special Publication 800-38A 2001 Edition - * Recommendation for Block Cipher Modes of Operation - * http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf - * Appendix F: Example Vectors for Modes of Operation of the AES - * - * * AES CTS - * Advanced Encryption Standard (AES) Encryption for Kerberos 5 - * February 2005 - * https://tools.ietf.org/html/rfc3962#appendix-B - * B. 
Sample Test Vectors - * - * * AES XTS - * http://csrc.nist.gov/groups/STM/cavp/#08 - * http://csrc.nist.gov/groups/STM/cavp/documents/aes/XTSTestVectors.zip - * - * * AES CMAC - * http://csrc.nist.gov/groups/STM/cavp/index.html#07 - * http://csrc.nist.gov/groups/STM/cavp/documents/mac/cmactestvectors.zip - * - * * AES-CCM - * http://csrc.nist.gov/groups/STM/cavp/#07 - * http://csrc.nist.gov/groups/STM/cavp/documents/mac/ccmtestvectors.zip - * - * * AES-GCM - * http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip - * - * * Triple-DES - * NIST Special Publication 800-67 January 2012 - * Recommendation for the Triple Data Encryption Algorithm (TDEA) Block Cipher - * http://csrc.nist.gov/publications/nistpubs/800-67-Rev1/SP-800-67-Rev1.pdf - * APPENDIX B: EXAMPLE OF TDEA FORWARD AND INVERSE CIPHER OPERATIONS - * and - * http://csrc.nist.gov/groups/STM/cavp/#01 - * http://csrc.nist.gov/groups/STM/cavp/documents/des/tdesmct_intermediate.zip - * - * * HASH - * http://csrc.nist.gov/groups/STM/cavp/#03 - * http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip - * - * * HMAC - * http://csrc.nist.gov/groups/STM/cavp/#07 - * http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip - */ - -/* NIST AES */ -#define AES_128_BIT_KEY_SIZE 16 -#define AES_192_BIT_KEY_SIZE 24 -#define AES_256_BIT_KEY_SIZE 32 -#define AES_512_BIT_KEY_SIZE 64 - -#define NIST_AES_IV_SIZE 16 - -#define NIST_AES_128_KEY { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c } -#define NIST_AES_192_KEY { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5, \ - 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b } -#define NIST_AES_256_KEY { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, \ - 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 } -#define NIST_AES_VECTOR_SIZE 16 -#define NIST_AES_PLAIN_DATA { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a } - -#define NIST_AES_ECB_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define NIST_AES_128_ECB_CIPHER { 0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60, 0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97 } -#define NIST_AES_192_ECB_CIPHER { 0xbd, 0x33, 0x4f, 0x1d, 0x6e, 0x45, 0xf2, 0x5f, 0xf7, 0x12, 0xa2, 0x14, 0x57, 0x1f, 0xa5, 0xcc } -#define NIST_AES_256_ECB_CIPHER { 0xf3, 0xee, 0xd1, 0xbd, 0xb5, 0xd2, 0xa0, 0x3c, 0x06, 0x4b, 0x5a, 0x7e, 0x3d, 0xb1, 0x81, 0xf8 } - -#define NIST_AES_CBC_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f } -#define NIST_AES_128_CBC_CIPHER { 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46, 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d } -#define NIST_AES_192_CBC_CIPHER { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8 } -#define NIST_AES_256_CBC_CIPHER { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6 } - -#define NIST_AES_OFB_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f } -#define NIST_AES_128_OFB_CIPHER { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20, 0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a } -#define NIST_AES_192_OFB_CIPHER { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab, 0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74 } 
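The rewritten ssi_fips.c above moves the TEE FIPS notification into a deferred handler: the interrupt path only schedules a tasklet, and the tasklet body reads the GPR status and escalates (panic under fips_enabled, otherwise a log). A minimal sketch of that tasklet shape follows; it is an illustration under assumed names (demo_*), not the driver's implementation.

    #include <linux/interrupt.h>
    #include <linux/slab.h>

    struct demo_fips_handle {
        struct tasklet_struct tasklet;
    };

    static void demo_fips_dsr(unsigned long data)
    {
        /* runs in softirq context: read status registers, report errors,
         * then re-unmask the interrupt source
         */
    }

    static struct demo_fips_handle *demo_fips_init(void)
    {
        struct demo_fips_handle *h = kzalloc(sizeof(*h), GFP_KERNEL);

        if (!h)
            return NULL;
        tasklet_init(&h->tasklet, demo_fips_dsr, (unsigned long)h);
        return h;
    }

    static void demo_fips_irq(struct demo_fips_handle *h)
    {
        /* hard-IRQ context: defer the real work */
        tasklet_schedule(&h->tasklet);
    }

    static void demo_fips_fini(struct demo_fips_handle *h)
    {
        tasklet_kill(&h->tasklet);
        kfree(h);
    }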
-#define NIST_AES_256_OFB_CIPHER { 0xdc, 0x7e, 0x84, 0xbf, 0xda, 0x79, 0x16, 0x4b, 0x7e, 0xcd, 0x84, 0x86, 0x98, 0x5d, 0x38, 0x60 } - -#define NIST_AES_CTR_IV { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff } -#define NIST_AES_128_CTR_CIPHER { 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce } -#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b } -#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 } - -#define RFC3962_AES_128_KEY { 0x63, 0x68, 0x69, 0x63, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x65, 0x72, 0x69, 0x79, 0x61, 0x6b, 0x69 } -#define RFC3962_AES_VECTOR_SIZE 17 -#define RFC3962_AES_PLAIN_DATA { 0x49, 0x20, 0x77, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20 } -#define RFC3962_AES_CBC_CTS_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } -#define RFC3962_AES_128_CBC_CTS_CIPHER { 0xc6, 0x35, 0x35, 0x68, 0xf2, 0xbf, 0x8c, 0xb4, 0xd8, 0xa5, 0x80, 0x36, 0x2d, 0xa7, 0xff, 0x7f, 0x97 } - -#define NIST_AES_256_XTS_KEY { 0xa1, 0xb9, 0x0c, 0xba, 0x3f, 0x06, 0xac, 0x35, 0x3b, 0x2c, 0x34, 0x38, 0x76, 0x08, 0x17, 0x62, \ - 0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18, 0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f } -#define NIST_AES_256_XTS_IV { 0x4f, 0xae, 0xf7, 0x11, 0x7c, 0xda, 0x59, 0xc6, 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 } -#define NIST_AES_256_XTS_VECTOR_SIZE 16 -#define NIST_AES_256_XTS_PLAIN { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c } -#define NIST_AES_256_XTS_CIPHER { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 } - -#define NIST_AES_512_XTS_KEY { 0x1e, 0xa6, 0x61, 0xc5, 0x8d, 0x94, 0x3a, 0x0e, 0x48, 0x01, 0xe4, 0x2f, 0x4b, 0x09, 0x47, 0x14, \ - 0x9e, 0x7f, 0x9f, 0x8e, 0x3e, 0x68, 0xd0, 0xc7, 0x50, 0x52, 0x10, 0xbd, 0x31, 0x1a, 0x0e, 0x7c, \ - 0xd6, 0xe1, 0x3f, 0xfd, 0xf2, 0x41, 0x8d, 0x8d, 0x19, 0x11, 0xc0, 0x04, 0xcd, 0xa5, 0x8d, 0xa3, \ - 0xd6, 0x19, 0xb7, 0xe2, 0xb9, 0x14, 0x1e, 0x58, 0x31, 0x8e, 0xea, 0x39, 0x2c, 0xf4, 0x1b, 0x08 } -#define NIST_AES_512_XTS_IV { 0xad, 0xf8, 0xd9, 0x26, 0x27, 0x46, 0x4a, 0xd2, 0xf0, 0x42, 0x8e, 0x84, 0xa9, 0xf8, 0x75, 0x64, } -#define NIST_AES_512_XTS_VECTOR_SIZE 32 -#define NIST_AES_512_XTS_PLAIN { 0x2e, 0xed, 0xea, 0x52, 0xcd, 0x82, 0x15, 0xe1, 0xac, 0xc6, 0x47, 0xe8, 0x10, 0xbb, 0xc3, 0x64, \ - 0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e } -#define NIST_AES_512_XTS_CIPHER { 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5, 0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13, \ - 0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb } - -/* NIST AES-CMAC */ -#define NIST_AES_128_CMAC_KEY { 0x67, 0x08, 0xc9, 0x88, 0x7b, 0x84, 0x70, 0x84, 0xf1, 0x23, 0xd3, 0xdd, 0x9c, 0x3a, 0x81, 0x36 } -#define NIST_AES_128_CMAC_PLAIN_DATA { 0xa8, 0xde, 0x55, 0x17, 0x0c, 0x6d, 0xc0, 0xd8, 0x0d, 0xe3, 0x2f, 0x50, 0x8b, 0xf4, 0x9b, 0x70 } -#define NIST_AES_128_CMAC_MAC { 0xcf, 0xef, 0x9b, 0x78, 0x39, 0x84, 0x1f, 0xdb, 0xcc, 0xbb, 0x6c, 0x2c, 0xf2, 0x38, 0xf7 } -#define NIST_AES_128_CMAC_VECTOR_SIZE 16 -#define NIST_AES_128_CMAC_OUTPUT_SIZE 15 - -#define NIST_AES_192_CMAC_KEY { 0x20, 0x51, 0xaf, 0x34, 0x76, 0x2e, 0xbe, 0x55, 0x6f, 0x72, 
0xa5, 0xc6, 0xed, 0xc7, 0x77, 0x1e, \ - 0xb9, 0x24, 0x5f, 0xad, 0x76, 0xf0, 0x34, 0xbe } -#define NIST_AES_192_CMAC_PLAIN_DATA { 0xae, 0x8e, 0x93, 0xc9, 0xc9, 0x91, 0xcf, 0x89, 0x6a, 0x49, 0x1a, 0x89, 0x07, 0xdf, 0x4e, 0x4b, \ - 0xe5, 0x18, 0x6a, 0xe4, 0x96, 0xcd, 0x34, 0x0d, 0xc1, 0x9b, 0x23, 0x78, 0x21, 0xdb, 0x7b, 0x60 } -#define NIST_AES_192_CMAC_MAC { 0x74, 0xf7, 0x46, 0x08, 0xc0, 0x4f, 0x0f, 0x4e, 0x47, 0xfa, 0x64, 0x04, 0x33, 0xb6, 0xe6, 0xfb } -#define NIST_AES_192_CMAC_VECTOR_SIZE 32 -#define NIST_AES_192_CMAC_OUTPUT_SIZE 16 - -#define NIST_AES_256_CMAC_KEY { 0x3a, 0x75, 0xa9, 0xd2, 0xbd, 0xb8, 0xc8, 0x04, 0xba, 0x4a, 0xb4, 0x98, 0x35, 0x73, 0xa6, 0xb2, \ - 0x53, 0x16, 0x0d, 0xd9, 0x0f, 0x8e, 0xdd, 0xfb, 0x2f, 0xdc, 0x2a, 0xb1, 0x76, 0x04, 0xf5, 0xc5 } -#define NIST_AES_256_CMAC_PLAIN_DATA { 0x42, 0xf3, 0x5d, 0x5a, 0xa5, 0x33, 0xa7, 0xa0, 0xa5, 0xf7, 0x4e, 0x14, 0x4f, 0x2a, 0x5f, 0x20 } -#define NIST_AES_256_CMAC_MAC { 0xf1, 0x53, 0x2f, 0x87, 0x32, 0xd9, 0xf5, 0x90, 0x30, 0x07 } -#define NIST_AES_256_CMAC_VECTOR_SIZE 16 -#define NIST_AES_256_CMAC_OUTPUT_SIZE 10 - -/* NIST TDES */ -#define TDES_NUM_OF_KEYS 3 -#define NIST_TDES_VECTOR_SIZE 8 -#define NIST_TDES_IV_SIZE 8 - -#define NIST_TDES_ECB_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } - -#define NIST_TDES_ECB3_KEY { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, \ - 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, \ - 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23 } -#define NIST_TDES_ECB3_PLAIN_DATA { 0x54, 0x68, 0x65, 0x20, 0x71, 0x75, 0x66, 0x63 } -#define NIST_TDES_ECB3_CIPHER { 0xa8, 0x26, 0xfd, 0x8c, 0xe5, 0x3b, 0x85, 0x5f } - -#define NIST_TDES_CBC3_IV { 0xf8, 0xee, 0xe1, 0x35, 0x9c, 0x6e, 0x54, 0x40 } -#define NIST_TDES_CBC3_KEY { 0xe9, 0xda, 0x37, 0xf8, 0xdc, 0x97, 0x6d, 0x5b, \ - 0xb6, 0x8c, 0x04, 0xe3, 0xec, 0x98, 0x20, 0x15, \ - 0xf4, 0x0e, 0x08, 0xb5, 0x97, 0x29, 0xf2, 0x8f } -#define NIST_TDES_CBC3_PLAIN_DATA { 0x3b, 0xb7, 0xa7, 0xdb, 0xa3, 0xd5, 0x92, 0x91 } -#define NIST_TDES_CBC3_CIPHER { 0x5b, 0x84, 0x24, 0xd2, 0x39, 0x3e, 0x55, 0xa2 } - -/* NIST AES-CCM */ -#define NIST_AESCCM_128_BIT_KEY_SIZE 16 -#define NIST_AESCCM_192_BIT_KEY_SIZE 24 -#define NIST_AESCCM_256_BIT_KEY_SIZE 32 - -#define NIST_AESCCM_B0_VAL 0x79 /* L'[0:2]=1 , M'[3-5]=7 , Adata[6]=1, reserved[7]=0 */ -#define NIST_AESCCM_NONCE_SIZE 13 -#define NIST_AESCCM_IV_SIZE 16 -#define NIST_AESCCM_ADATA_SIZE 32 -#define NIST_AESCCM_TEXT_SIZE 16 -#define NIST_AESCCM_TAG_SIZE 16 - -#define NIST_AESCCM_128_KEY { 0x70, 0x01, 0x0e, 0xd9, 0x0e, 0x61, 0x86, 0xec, 0xad, 0x41, 0xf0, 0xd3, 0xc7, 0xc4, 0x2f, 0xf8 } -#define NIST_AESCCM_128_NONCE { 0xa5, 0xf4, 0xf4, 0x98, 0x6e, 0x98, 0x47, 0x29, 0x65, 0xf5, 0xab, 0xcc, 0x4b } -#define NIST_AESCCM_128_ADATA { 0x3f, 0xec, 0x0e, 0x5c, 0xc2, 0x4d, 0x67, 0x13, 0x94, 0x37, 0xcb, 0xc8, 0x11, 0x24, 0x14, 0xfc, \ - 0x8d, 0xac, 0xcd, 0x1a, 0x94, 0xb4, 0x9a, 0x4c, 0x76, 0xe2, 0xd3, 0x93, 0x03, 0x54, 0x73, 0x17 } -#define NIST_AESCCM_128_PLAIN_TEXT { 0xbe, 0x32, 0x2f, 0x58, 0xef, 0xa7, 0xf8, 0xc6, 0x8a, 0x63, 0x5e, 0x0b, 0x9c, 0xce, 0x77, 0xf2 } -#define NIST_AESCCM_128_CIPHER { 0x8e, 0x44, 0x25, 0xae, 0x57, 0x39, 0x74, 0xf0, 0xf0, 0x69, 0x3a, 0x18, 0x8b, 0x52, 0x58, 0x12 } -#define NIST_AESCCM_128_MAC { 0xee, 0xf0, 0x8e, 0x3f, 0xb1, 0x5f, 0x42, 0x27, 0xe0, 0xd9, 0x89, 0xa4, 0xd5, 0x87, 0xa8, 0xcf } - -#define NIST_AESCCM_192_KEY { 0x68, 0x73, 0xf1, 0xc6, 0xc3, 0x09, 0x75, 0xaf, 0xf6, 0xf0, 0x84, 0x70, 0x26, 0x43, 0x21, 0x13, \ - 0x0a, 0x6e, 0x59, 0x84, 0xad, 0xe3, 0x24, 0xe9 } -#define NIST_AESCCM_192_NONCE { 0x7c, 
0x4d, 0x2f, 0x7c, 0xec, 0x04, 0x36, 0x1f, 0x18, 0x7f, 0x07, 0x26, 0xd5 } -#define NIST_AESCCM_192_ADATA { 0x77, 0x74, 0x3b, 0x5d, 0x83, 0xa0, 0x0d, 0x2c, 0x8d, 0x5f, 0x7e, 0x10, 0x78, 0x15, 0x31, 0xb4, \ - 0x96, 0xe0, 0x9f, 0x3b, 0xc9, 0x29, 0x5d, 0x7a, 0xe9, 0x79, 0x9e, 0x64, 0x66, 0x8e, 0xf8, 0xc5 } -#define NIST_AESCCM_192_PLAIN_TEXT { 0x50, 0x51, 0xa0, 0xb0, 0xb6, 0x76, 0x6c, 0xd6, 0xea, 0x29, 0xa6, 0x72, 0x76, 0x9d, 0x40, 0xfe } -#define NIST_AESCCM_192_CIPHER { 0x0c, 0xe5, 0xac, 0x8d, 0x6b, 0x25, 0x6f, 0xb7, 0x58, 0x0b, 0xf6, 0xac, 0xc7, 0x64, 0x26, 0xaf } -#define NIST_AESCCM_192_MAC { 0x40, 0xbc, 0xe5, 0x8f, 0xd4, 0xcd, 0x65, 0x48, 0xdf, 0x90, 0xa0, 0x33, 0x7c, 0x84, 0x20, 0x04 } - -#define NIST_AESCCM_256_KEY { 0xee, 0x8c, 0xe1, 0x87, 0x16, 0x97, 0x79, 0xd1, 0x3e, 0x44, 0x3d, 0x64, 0x28, 0xe3, 0x8b, 0x38, \ - 0xb5, 0x5d, 0xfb, 0x90, 0xf0, 0x22, 0x8a, 0x8a, 0x4e, 0x62, 0xf8, 0xf5, 0x35, 0x80, 0x6e, 0x62 } -#define NIST_AESCCM_256_NONCE { 0x12, 0x16, 0x42, 0xc4, 0x21, 0x8b, 0x39, 0x1c, 0x98, 0xe6, 0x26, 0x9c, 0x8a } -#define NIST_AESCCM_256_ADATA { 0x71, 0x8d, 0x13, 0xe4, 0x75, 0x22, 0xac, 0x4c, 0xdf, 0x3f, 0x82, 0x80, 0x63, 0x98, 0x0b, 0x6d, \ - 0x45, 0x2f, 0xcd, 0xcd, 0x6e, 0x1a, 0x19, 0x04, 0xbf, 0x87, 0xf5, 0x48, 0xa5, 0xfd, 0x5a, 0x05 } -#define NIST_AESCCM_256_PLAIN_TEXT { 0xd1, 0x5f, 0x98, 0xf2, 0xc6, 0xd6, 0x70, 0xf5, 0x5c, 0x78, 0xa0, 0x66, 0x48, 0x33, 0x2b, 0xc9 } -#define NIST_AESCCM_256_CIPHER { 0xcc, 0x17, 0xbf, 0x87, 0x94, 0xc8, 0x43, 0x45, 0x7d, 0x89, 0x93, 0x91, 0x89, 0x8e, 0xd2, 0x2a } -#define NIST_AESCCM_256_MAC { 0x6f, 0x9d, 0x28, 0xfc, 0xb6, 0x42, 0x34, 0xe1, 0xcd, 0x79, 0x3c, 0x41, 0x44, 0xf1, 0xda, 0x50 } - -/* NIST AES-GCM */ -#define NIST_AESGCM_128_BIT_KEY_SIZE 16 -#define NIST_AESGCM_192_BIT_KEY_SIZE 24 -#define NIST_AESGCM_256_BIT_KEY_SIZE 32 - -#define NIST_AESGCM_IV_SIZE 12 -#define NIST_AESGCM_ADATA_SIZE 16 -#define NIST_AESGCM_TEXT_SIZE 16 -#define NIST_AESGCM_TAG_SIZE 16 - -#define NIST_AESGCM_128_KEY { 0x81, 0x6e, 0x39, 0x07, 0x04, 0x10, 0xcf, 0x21, 0x84, 0x90, 0x4d, 0xa0, 0x3e, 0xa5, 0x07, 0x5a } -#define NIST_AESGCM_128_IV { 0x32, 0xc3, 0x67, 0xa3, 0x36, 0x26, 0x13, 0xb2, 0x7f, 0xc3, 0xe6, 0x7e } -#define NIST_AESGCM_128_ADATA { 0xf2, 0xa3, 0x07, 0x28, 0xed, 0x87, 0x4e, 0xe0, 0x29, 0x83, 0xc2, 0x94, 0x43, 0x5d, 0x3c, 0x16 } -#define NIST_AESGCM_128_PLAIN_TEXT { 0xec, 0xaf, 0xe9, 0x6c, 0x67, 0xa1, 0x64, 0x67, 0x44, 0xf1, 0xc8, 0x91, 0xf5, 0xe6, 0x94, 0x27 } -#define NIST_AESGCM_128_CIPHER { 0x55, 0x2e, 0xbe, 0x01, 0x2e, 0x7b, 0xcf, 0x90, 0xfc, 0xef, 0x71, 0x2f, 0x83, 0x44, 0xe8, 0xf1 } -#define NIST_AESGCM_128_MAC { 0xec, 0xaa, 0xe9, 0xfc, 0x68, 0x27, 0x6a, 0x45, 0xab, 0x0c, 0xa3, 0xcb, 0x9d, 0xd9, 0x53, 0x9f } - -#define NIST_AESGCM_192_KEY { 0x0c, 0x44, 0xd6, 0xc9, 0x28, 0xee, 0x11, 0x2c, 0xe6, 0x65, 0xfe, 0x54, 0x7e, 0xbd, 0x38, 0x72, \ - 0x98, 0xa9, 0x54, 0xb4, 0x62, 0xf6, 0x95, 0xd8 } -#define NIST_AESGCM_192_IV { 0x18, 0xb8, 0xf3, 0x20, 0xfe, 0xf4, 0xae, 0x8c, 0xcb, 0xe8, 0xf9, 0x52 } -#define NIST_AESGCM_192_ADATA { 0x73, 0x41, 0xd4, 0x3f, 0x98, 0xcf, 0x38, 0x82, 0x21, 0x18, 0x09, 0x41, 0x97, 0x03, 0x76, 0xe8 } -#define NIST_AESGCM_192_PLAIN_TEXT { 0x96, 0xad, 0x07, 0xf9, 0xb6, 0x28, 0xb6, 0x52, 0xcf, 0x86, 0xcb, 0x73, 0x17, 0x88, 0x6f, 0x51 } -#define NIST_AESGCM_192_CIPHER { 0xa6, 0x64, 0x07, 0x81, 0x33, 0x40, 0x5e, 0xb9, 0x09, 0x4d, 0x36, 0xf7, 0xe0, 0x70, 0x19, 0x1f } -#define NIST_AESGCM_192_MAC { 0xe8, 0xf9, 0xc3, 0x17, 0x84, 0x7c, 0xe3, 0xf3, 0xc2, 0x39, 0x94, 0xa4, 0x02, 0xf0, 0x65, 0x81 } - -#define NIST_AESGCM_256_KEY { 0x54, 0xe3, 
0x52, 0xea, 0x1d, 0x84, 0xbf, 0xe6, 0x4a, 0x10, 0x11, 0x09, 0x61, 0x11, 0xfb, 0xe7, \ - 0x66, 0x8a, 0xd2, 0x20, 0x3d, 0x90, 0x2a, 0x01, 0x45, 0x8c, 0x3b, 0xbd, 0x85, 0xbf, 0xce, 0x14 } -#define NIST_AESGCM_256_IV { 0xdf, 0x7c, 0x3b, 0xca, 0x00, 0x39, 0x6d, 0x0c, 0x01, 0x84, 0x95, 0xd9 } -#define NIST_AESGCM_256_ADATA { 0x7e, 0x96, 0x8d, 0x71, 0xb5, 0x0c, 0x1f, 0x11, 0xfd, 0x00, 0x1f, 0x3f, 0xef, 0x49, 0xd0, 0x45 } -#define NIST_AESGCM_256_PLAIN_TEXT { 0x85, 0xfc, 0x3d, 0xfa, 0xd9, 0xb5, 0xa8, 0xd3, 0x25, 0x8e, 0x4f, 0xc4, 0x45, 0x71, 0xbd, 0x3b } -#define NIST_AESGCM_256_CIPHER { 0x42, 0x6e, 0x0e, 0xfc, 0x69, 0x3b, 0x7b, 0xe1, 0xf3, 0x01, 0x8d, 0xb7, 0xdd, 0xbb, 0x7e, 0x4d } -#define NIST_AESGCM_256_MAC { 0xee, 0x82, 0x57, 0x79, 0x5b, 0xe6, 0xa1, 0x16, 0x4d, 0x7e, 0x1d, 0x2d, 0x6c, 0xac, 0x77, 0xa7 } - -/* NIST HASH */ -#define NIST_SHA_MSG_SIZE 16 - -#define NIST_SHA_1_MSG { 0x35, 0x52, 0x69, 0x4c, 0xdf, 0x66, 0x3f, 0xd9, 0x4b, 0x22, 0x47, 0x47, 0xac, 0x40, 0x6a, 0xaf } -#define NIST_SHA_1_MD { 0xa1, 0x50, 0xde, 0x92, 0x74, 0x54, 0x20, 0x2d, 0x94, 0xe6, 0x56, 0xde, 0x4c, 0x7c, 0x0c, 0xa6, \ - 0x91, 0xde, 0x95, 0x5d } - -#define NIST_SHA_256_MSG { 0x0a, 0x27, 0x84, 0x7c, 0xdc, 0x98, 0xbd, 0x6f, 0x62, 0x22, 0x0b, 0x04, 0x6e, 0xdd, 0x76, 0x2b } -#define NIST_SHA_256_MD { 0x80, 0xc2, 0x5e, 0xc1, 0x60, 0x05, 0x87, 0xe7, 0xf2, 0x8b, 0x18, 0xb1, 0xb1, 0x8e, 0x3c, 0xdc, \ - 0x89, 0x92, 0x8e, 0x39, 0xca, 0xb3, 0xbc, 0x25, 0xe4, 0xd4, 0xa4, 0xc1, 0x39, 0xbc, 0xed, 0xc4 } - -#define NIST_SHA_512_MSG { 0xcd, 0x67, 0xbd, 0x40, 0x54, 0xaa, 0xa3, 0xba, 0xa0, 0xdb, 0x17, 0x8c, 0xe2, 0x32, 0xfd, 0x5a } -#define NIST_SHA_512_MD { 0x0d, 0x85, 0x21, 0xf8, 0xf2, 0xf3, 0x90, 0x03, 0x32, 0xd1, 0xa1, 0xa5, 0x5c, 0x60, 0xba, 0x81, \ - 0xd0, 0x4d, 0x28, 0xdf, 0xe8, 0xc5, 0x04, 0xb6, 0x32, 0x8a, 0xe7, 0x87, 0x92, 0x5f, 0xe0, 0x18, \ - 0x8f, 0x2b, 0xa9, 0x1c, 0x3a, 0x9f, 0x0c, 0x16, 0x53, 0xc4, 0xbf, 0x0a, 0xda, 0x35, 0x64, 0x55, \ - 0xea, 0x36, 0xfd, 0x31, 0xf8, 0xe7, 0x3e, 0x39, 0x51, 0xca, 0xd4, 0xeb, 0xba, 0x8c, 0x6e, 0x04 } - -/* NIST HMAC */ -#define NIST_HMAC_MSG_SIZE 128 - -#define NIST_HMAC_SHA1_KEY_SIZE 10 -#define NIST_HMAC_SHA1_KEY { 0x59, 0x78, 0x59, 0x28, 0xd7, 0x25, 0x16, 0xe3, 0x12, 0x72 } -#define NIST_HMAC_SHA1_MSG { 0xa3, 0xce, 0x88, 0x99, 0xdf, 0x10, 0x22, 0xe8, 0xd2, 0xd5, 0x39, 0xb4, 0x7b, 0xf0, 0xe3, 0x09, \ - 0xc6, 0x6f, 0x84, 0x09, 0x5e, 0x21, 0x43, 0x8e, 0xc3, 0x55, 0xbf, 0x11, 0x9c, 0xe5, 0xfd, 0xcb, \ - 0x4e, 0x73, 0xa6, 0x19, 0xcd, 0xf3, 0x6f, 0x25, 0xb3, 0x69, 0xd8, 0xc3, 0x8f, 0xf4, 0x19, 0x99, \ - 0x7f, 0x0c, 0x59, 0x83, 0x01, 0x08, 0x22, 0x36, 0x06, 0xe3, 0x12, 0x23, 0x48, 0x3f, 0xd3, 0x9e, \ - 0xde, 0xaa, 0x4d, 0x3f, 0x0d, 0x21, 0x19, 0x88, 0x62, 0xd2, 0x39, 0xc9, 0xfd, 0x26, 0x07, 0x41, \ - 0x30, 0xff, 0x6c, 0x86, 0x49, 0x3f, 0x52, 0x27, 0xab, 0x89, 0x5c, 0x8f, 0x24, 0x4b, 0xd4, 0x2c, \ - 0x7a, 0xfc, 0xe5, 0xd1, 0x47, 0xa2, 0x0a, 0x59, 0x07, 0x98, 0xc6, 0x8e, 0x70, 0x8e, 0x96, 0x49, \ - 0x02, 0xd1, 0x24, 0xda, 0xde, 0xcd, 0xbd, 0xa9, 0xdb, 0xd0, 0x05, 0x1e, 0xd7, 0x10, 0xe9, 0xbf } -#define NIST_HMAC_SHA1_MD { 0x3c, 0x81, 0x62, 0x58, 0x9a, 0xaf, 0xae, 0xe0, 0x24, 0xfc, 0x9a, 0x5c, 0xa5, 0x0d, 0xd2, 0x33, \ - 0x6f, 0xe3, 0xeb, 0x28 } - -#define NIST_HMAC_SHA256_KEY_SIZE 40 -#define NIST_HMAC_SHA256_KEY { 0x97, 0x79, 0xd9, 0x12, 0x06, 0x42, 0x79, 0x7f, 0x17, 0x47, 0x02, 0x5d, 0x5b, 0x22, 0xb7, 0xac, \ - 0x60, 0x7c, 0xab, 0x08, 0xe1, 0x75, 0x8f, 0x2f, 0x3a, 0x46, 0xc8, 0xbe, 0x1e, 0x25, 0xc5, 0x3b, \ - 0x8c, 0x6a, 0x8f, 0x58, 0xff, 0xef, 0xa1, 0x76 } -#define NIST_HMAC_SHA256_MSG { 
0xb1, 0x68, 0x9c, 0x25, 0x91, 0xea, 0xf3, 0xc9, 0xe6, 0x60, 0x70, 0xf8, 0xa7, 0x79, 0x54, 0xff, \ - 0xb8, 0x17, 0x49, 0xf1, 0xb0, 0x03, 0x46, 0xf9, 0xdf, 0xe0, 0xb2, 0xee, 0x90, 0x5d, 0xcc, 0x28, \ - 0x8b, 0xaf, 0x4a, 0x92, 0xde, 0x3f, 0x40, 0x01, 0xdd, 0x9f, 0x44, 0xc4, 0x68, 0xc3, 0xd0, 0x7d, \ - 0x6c, 0x6e, 0xe8, 0x2f, 0xac, 0xea, 0xfc, 0x97, 0xc2, 0xfc, 0x0f, 0xc0, 0x60, 0x17, 0x19, 0xd2, \ - 0xdc, 0xd0, 0xaa, 0x2a, 0xec, 0x92, 0xd1, 0xb0, 0xae, 0x93, 0x3c, 0x65, 0xeb, 0x06, 0xa0, 0x3c, \ - 0x9c, 0x93, 0x5c, 0x2b, 0xad, 0x04, 0x59, 0x81, 0x02, 0x41, 0x34, 0x7a, 0xb8, 0x7e, 0x9f, 0x11, \ - 0xad, 0xb3, 0x04, 0x15, 0x42, 0x4c, 0x6c, 0x7f, 0x5f, 0x22, 0xa0, 0x03, 0xb8, 0xab, 0x8d, 0xe5, \ - 0x4f, 0x6d, 0xed, 0x0e, 0x3a, 0xb9, 0x24, 0x5f, 0xa7, 0x95, 0x68, 0x45, 0x1d, 0xfa, 0x25, 0x8e } -#define NIST_HMAC_SHA256_MD { 0x76, 0x9f, 0x00, 0xd3, 0xe6, 0xa6, 0xcc, 0x1f, 0xb4, 0x26, 0xa1, 0x4a, 0x4f, 0x76, 0xc6, 0x46, \ - 0x2e, 0x61, 0x49, 0x72, 0x6e, 0x0d, 0xee, 0x0e, 0xc0, 0xcf, 0x97, 0xa1, 0x66, 0x05, 0xac, 0x8b } - -#define NIST_HMAC_SHA512_KEY_SIZE 100 -#define NIST_HMAC_SHA512_KEY { 0x57, 0xc2, 0xeb, 0x67, 0x7b, 0x50, 0x93, 0xb9, 0xe8, 0x29, 0xea, 0x4b, 0xab, 0xb5, 0x0b, 0xde, \ - 0x55, 0xd0, 0xad, 0x59, 0xfe, 0xc3, 0x4a, 0x61, 0x89, 0x73, 0x80, 0x2b, 0x2a, 0xd9, 0xb7, 0x8e, \ - 0x26, 0xb2, 0x04, 0x5d, 0xda, 0x78, 0x4d, 0xf3, 0xff, 0x90, 0xae, 0x0f, 0x2c, 0xc5, 0x1c, 0xe3, \ - 0x9c, 0xf5, 0x48, 0x67, 0x32, 0x0a, 0xc6, 0xf3, 0xba, 0x2c, 0x6f, 0x0d, 0x72, 0x36, 0x04, 0x80, \ - 0xc9, 0x66, 0x14, 0xae, 0x66, 0x58, 0x1f, 0x26, 0x6c, 0x35, 0xfb, 0x79, 0xfd, 0x28, 0x77, 0x4a, \ - 0xfd, 0x11, 0x3f, 0xa5, 0x18, 0x7e, 0xff, 0x92, 0x06, 0xd7, 0xcb, 0xe9, 0x0d, 0xd8, 0xbf, 0x67, \ - 0xc8, 0x44, 0xe2, 0x02 } -#define NIST_HMAC_SHA512_MSG { 0x24, 0x23, 0xdf, 0xf4, 0x8b, 0x31, 0x2b, 0xe8, 0x64, 0xcb, 0x34, 0x90, 0x64, 0x1f, 0x79, 0x3d, \ - 0x2b, 0x9f, 0xb6, 0x8a, 0x77, 0x63, 0xb8, 0xe2, 0x98, 0xc8, 0x6f, 0x42, 0x24, 0x5e, 0x45, 0x40, \ - 0xeb, 0x01, 0xae, 0x4d, 0x2d, 0x45, 0x00, 0x37, 0x0b, 0x18, 0x86, 0xf2, 0x3c, 0xa2, 0xcf, 0x97, \ - 0x01, 0x70, 0x4c, 0xad, 0x5b, 0xd2, 0x1b, 0xa8, 0x7b, 0x81, 0x1d, 0xaf, 0x7a, 0x85, 0x4e, 0xa2, \ - 0x4a, 0x56, 0x56, 0x5c, 0xed, 0x42, 0x5b, 0x35, 0xe4, 0x0e, 0x1a, 0xcb, 0xeb, 0xe0, 0x36, 0x03, \ - 0xe3, 0x5d, 0xcf, 0x4a, 0x10, 0x0e, 0x57, 0x21, 0x84, 0x08, 0xa1, 0xd8, 0xdb, 0xcc, 0x3b, 0x99, \ - 0x29, 0x6c, 0xfe, 0xa9, 0x31, 0xef, 0xe3, 0xeb, 0xd8, 0xf7, 0x19, 0xa6, 0xd9, 0xa1, 0x54, 0x87, \ - 0xb9, 0xad, 0x67, 0xea, 0xfe, 0xdf, 0x15, 0x55, 0x9c, 0xa4, 0x24, 0x45, 0xb0, 0xf9, 0xb4, 0x2e } -#define NIST_HMAC_SHA512_MD { 0x33, 0xc5, 0x11, 0xe9, 0xbc, 0x23, 0x07, 0xc6, 0x27, 0x58, 0xdf, 0x61, 0x12, 0x5a, 0x98, 0x0e, \ - 0xe6, 0x4c, 0xef, 0xeb, 0xd9, 0x09, 0x31, 0xcb, 0x91, 0xc1, 0x37, 0x42, 0xd4, 0x71, 0x4c, 0x06, \ - 0xde, 0x40, 0x03, 0xfa, 0xf3, 0xc4, 0x1c, 0x06, 0xae, 0xfc, 0x63, 0x8a, 0xd4, 0x7b, 0x21, 0x90, \ - 0x6e, 0x6b, 0x10, 0x48, 0x16, 0xb7, 0x2d, 0xe6, 0x26, 0x9e, 0x04, 0x5a, 0x1f, 0x44, 0x29, 0xd4 } - diff --git a/drivers/staging/ccree/ssi_fips_ext.c b/drivers/staging/ccree/ssi_fips_ext.c deleted file mode 100644 index e7bf1843f60c..000000000000 --- a/drivers/staging/ccree/ssi_fips_ext.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (C) 2012-2017 ARM Limited or its affiliates. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -/************************************************************** - * This file defines the driver FIPS functions that should be - * implemented by the driver user. Current implementation is sample code only. - ***************************************************************/ - -#include <linux/module.h> -#include "ssi_fips_local.h" -#include "ssi_driver.h" - -static bool tee_error; -module_param(tee_error, bool, 0644); -MODULE_PARM_DESC(tee_error, "Simulate TEE library failure flag: 0 - no error (default), 1 - TEE error occured "); - -static enum cc_fips_state_t fips_state = CC_FIPS_STATE_NOT_SUPPORTED; -static enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK; - -/* - * This function returns the FIPS REE state. - * The function should be implemented by the driver user, depends on where - * the state value is stored. - * The reference code uses global variable. - */ -int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state) -{ - int rc = 0; - - if (!p_state) - return -EINVAL; - - *p_state = fips_state; - - return rc; -} - -/* - * This function returns the FIPS REE error. - * The function should be implemented by the driver user, depends on where - * the error value is stored. - * The reference code uses global variable. - */ -int ssi_fips_ext_get_error(enum cc_fips_error *p_err) -{ - int rc = 0; - - if (!p_err) - return -EINVAL; - - *p_err = fips_error; - - return rc; -} - -/* - * This function sets the FIPS REE state. - * The function should be implemented by the driver user, depends on where - * the state value is stored. - * The reference code uses global variable. - */ -int ssi_fips_ext_set_state(enum cc_fips_state_t state) -{ - fips_state = state; - return 0; -} - -/* - * This function sets the FIPS REE error. - * The function should be implemented by the driver user, depends on where - * the error value is stored. - * The reference code uses global variable. - */ -int ssi_fips_ext_set_error(enum cc_fips_error err) -{ - fips_error = err; - return 0; -} - diff --git a/drivers/staging/ccree/ssi_fips_ll.c b/drivers/staging/ccree/ssi_fips_ll.c deleted file mode 100644 index 3557e20c9e36..000000000000 --- a/drivers/staging/ccree/ssi_fips_ll.c +++ /dev/null @@ -1,1649 +0,0 @@ -/* - * Copyright (C) 2012-2017 ARM Limited or its affiliates. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -/************************************************************** - * This file defines the driver FIPS Low Level implmentaion functions, - * that executes the KAT. 
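 * Each entry in the test-vector tables below is pushed through the CryptoCell
 * hardware by the descriptor flows in this file, and the result is compared
 * against the published NIST output with memcmp(); any mismatch is reported
 * as a dedicated CC_REE_FIPS_ERROR_* value.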
- ***************************************************************/ -#include <linux/kernel.h> - -#include "ssi_driver.h" -#include "ssi_fips_local.h" -#include "ssi_fips_data.h" -#include "cc_crypto_ctx.h" -#include "ssi_hash.h" -#include "ssi_request_mgr.h" - -static const u32 digest_len_init[] = { - 0x00000040, 0x00000000, 0x00000000, 0x00000000 }; -static const u32 sha1_init[] = { - SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; -static const u32 sha256_init[] = { - SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4, - SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 }; -#if (CC_SUPPORT_SHA > 256) -static const u32 digest_len_sha512_init[] = { - 0x00000080, 0x00000000, 0x00000000, 0x00000000 }; -static const u64 sha512_init[] = { - SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, - SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; -#endif - -#define NIST_CIPHER_AES_MAX_VECTOR_SIZE 32 - -struct fips_cipher_ctx { - u8 iv[CC_AES_IV_SIZE]; - u8 key[AES_512_BIT_KEY_SIZE]; - u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE]; - u8 dout[NIST_CIPHER_AES_MAX_VECTOR_SIZE]; -}; - -typedef struct _FipsCipherData { - u8 isAes; - u8 key[AES_512_BIT_KEY_SIZE]; - size_t keySize; - u8 iv[CC_AES_IV_SIZE]; - enum drv_crypto_direction direction; - enum drv_cipher_mode oprMode; - u8 dataIn[NIST_CIPHER_AES_MAX_VECTOR_SIZE]; - u8 dataOut[NIST_CIPHER_AES_MAX_VECTOR_SIZE]; - size_t dataInSize; -} FipsCipherData; - -struct fips_cmac_ctx { - u8 key[AES_256_BIT_KEY_SIZE]; - u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE]; - u8 mac_res[CC_DIGEST_SIZE_MAX]; -}; - -typedef struct _FipsCmacData { - enum drv_crypto_direction direction; - u8 key[AES_256_BIT_KEY_SIZE]; - size_t key_size; - u8 data_in[NIST_CIPHER_AES_MAX_VECTOR_SIZE]; - size_t data_in_size; - u8 mac_res[CC_DIGEST_SIZE_MAX]; - size_t mac_res_size; -} FipsCmacData; - -struct fips_hash_ctx { - u8 initial_digest[CC_DIGEST_SIZE_MAX]; - u8 din[NIST_SHA_MSG_SIZE]; - u8 mac_res[CC_DIGEST_SIZE_MAX]; -}; - -typedef struct _FipsHashData { - enum drv_hash_mode hash_mode; - u8 data_in[NIST_SHA_MSG_SIZE]; - size_t data_in_size; - u8 mac_res[CC_DIGEST_SIZE_MAX]; -} FipsHashData; - -/* note that the hmac key length must be equal or less than block size (block size is 64 up to sha256 and 128 for sha384/512) */ -struct fips_hmac_ctx { - u8 initial_digest[CC_DIGEST_SIZE_MAX]; - u8 key[CC_HMAC_BLOCK_SIZE_MAX]; - u8 k0[CC_HMAC_BLOCK_SIZE_MAX]; - u8 digest_bytes_len[HASH_LEN_SIZE]; - u8 tmp_digest[CC_DIGEST_SIZE_MAX]; - u8 din[NIST_HMAC_MSG_SIZE]; - u8 mac_res[CC_DIGEST_SIZE_MAX]; -}; - -typedef struct _FipsHmacData { - enum drv_hash_mode hash_mode; - u8 key[CC_HMAC_BLOCK_SIZE_MAX]; - size_t key_size; - u8 data_in[NIST_HMAC_MSG_SIZE]; - size_t data_in_size; - u8 mac_res[CC_DIGEST_SIZE_MAX]; -} FipsHmacData; - -#define FIPS_CCM_B0_A0_ADATA_SIZE (NIST_AESCCM_IV_SIZE + NIST_AESCCM_IV_SIZE + NIST_AESCCM_ADATA_SIZE) - -struct fips_ccm_ctx { - u8 b0_a0_adata[FIPS_CCM_B0_A0_ADATA_SIZE]; - u8 iv[NIST_AESCCM_IV_SIZE]; - u8 ctr_cnt_0[NIST_AESCCM_IV_SIZE]; - u8 key[CC_AES_KEY_SIZE_MAX]; - u8 din[NIST_AESCCM_TEXT_SIZE]; - u8 dout[NIST_AESCCM_TEXT_SIZE]; - u8 mac_res[NIST_AESCCM_TAG_SIZE]; -}; - -typedef struct _FipsCcmData { - enum drv_crypto_direction direction; - u8 key[CC_AES_KEY_SIZE_MAX]; - size_t keySize; - u8 nonce[NIST_AESCCM_NONCE_SIZE]; - u8 adata[NIST_AESCCM_ADATA_SIZE]; - size_t adataSize; - u8 dataIn[NIST_AESCCM_TEXT_SIZE]; - size_t dataInSize; - u8 dataOut[NIST_AESCCM_TEXT_SIZE]; - u8 tagSize; - u8 macResOut[NIST_AESCCM_TAG_SIZE]; -} FipsCcmData; - -struct fips_gcm_ctx { - u8 adata[NIST_AESGCM_ADATA_SIZE]; - u8 
key[CC_AES_KEY_SIZE_MAX]; - u8 hkey[CC_AES_KEY_SIZE_MAX]; - u8 din[NIST_AESGCM_TEXT_SIZE]; - u8 dout[NIST_AESGCM_TEXT_SIZE]; - u8 mac_res[NIST_AESGCM_TAG_SIZE]; - u8 len_block[AES_BLOCK_SIZE]; - u8 iv_inc1[AES_BLOCK_SIZE]; - u8 iv_inc2[AES_BLOCK_SIZE]; -}; - -typedef struct _FipsGcmData { - enum drv_crypto_direction direction; - u8 key[CC_AES_KEY_SIZE_MAX]; - size_t keySize; - u8 iv[NIST_AESGCM_IV_SIZE]; - u8 adata[NIST_AESGCM_ADATA_SIZE]; - size_t adataSize; - u8 dataIn[NIST_AESGCM_TEXT_SIZE]; - size_t dataInSize; - u8 dataOut[NIST_AESGCM_TEXT_SIZE]; - u8 tagSize; - u8 macResOut[NIST_AESGCM_TAG_SIZE]; -} FipsGcmData; - -typedef union _fips_ctx { - struct fips_cipher_ctx cipher; - struct fips_cmac_ctx cmac; - struct fips_hash_ctx hash; - struct fips_hmac_ctx hmac; - struct fips_ccm_ctx ccm; - struct fips_gcm_ctx gcm; -} fips_ctx; - -/* test data tables */ -static const FipsCipherData FipsCipherDataTable[] = { - /* AES */ - { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_128_ECB_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_128_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_192_ECB_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_192_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_256_ECB_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_256_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_128_CBC_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_128_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_192_CBC_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_192_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_256_CBC_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_256_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_128_OFB_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_128_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, 
NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_192_OFB_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_OFB, NIST_AES_192_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_256_OFB_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_OFB, NIST_AES_256_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_128_CTR_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_128_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_192_CTR_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_192_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_256_CTR_CIPHER, NIST_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_256_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE }, - { 1, RFC3962_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, RFC3962_AES_CBC_CTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC_CTS, RFC3962_AES_PLAIN_DATA, RFC3962_AES_128_CBC_CTS_CIPHER, RFC3962_AES_VECTOR_SIZE }, - { 1, RFC3962_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, RFC3962_AES_CBC_CTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC_CTS, RFC3962_AES_128_CBC_CTS_CIPHER, RFC3962_AES_PLAIN_DATA, RFC3962_AES_VECTOR_SIZE }, - { 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_256_XTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_VECTOR_SIZE }, - { 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_256_XTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_VECTOR_SIZE }, -#if (CC_SUPPORT_SHA > 256) - { 1, NIST_AES_512_XTS_KEY, 2 * CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_VECTOR_SIZE }, - { 1, NIST_AES_512_XTS_KEY, 2 * CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_VECTOR_SIZE }, -#endif - /* DES */ - { 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_ECB3_CIPHER, NIST_TDES_VECTOR_SIZE }, - { 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_CIPHER, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE }, - { 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, 
DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_CBC3_CIPHER, NIST_TDES_VECTOR_SIZE }, - { 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_CIPHER, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE }, -}; - -#define FIPS_CIPHER_NUM_OF_TESTS (sizeof(FipsCipherDataTable) / sizeof(FipsCipherData)) - -static const FipsCmacData FipsCmacDataTable[] = { - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_128_CMAC_KEY, AES_128_BIT_KEY_SIZE, NIST_AES_128_CMAC_PLAIN_DATA, NIST_AES_128_CMAC_VECTOR_SIZE, NIST_AES_128_CMAC_MAC, NIST_AES_128_CMAC_OUTPUT_SIZE }, - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_192_CMAC_KEY, AES_192_BIT_KEY_SIZE, NIST_AES_192_CMAC_PLAIN_DATA, NIST_AES_192_CMAC_VECTOR_SIZE, NIST_AES_192_CMAC_MAC, NIST_AES_192_CMAC_OUTPUT_SIZE }, - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_256_CMAC_KEY, AES_256_BIT_KEY_SIZE, NIST_AES_256_CMAC_PLAIN_DATA, NIST_AES_256_CMAC_VECTOR_SIZE, NIST_AES_256_CMAC_MAC, NIST_AES_256_CMAC_OUTPUT_SIZE }, -}; - -#define FIPS_CMAC_NUM_OF_TESTS (sizeof(FipsCmacDataTable) / sizeof(FipsCmacData)) - -static const FipsHashData FipsHashDataTable[] = { - { DRV_HASH_SHA1, NIST_SHA_1_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_1_MD }, - { DRV_HASH_SHA256, NIST_SHA_256_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_256_MD }, -#if (CC_SUPPORT_SHA > 256) -// { DRV_HASH_SHA512, NIST_SHA_512_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_512_MD }, -#endif -}; - -#define FIPS_HASH_NUM_OF_TESTS (sizeof(FipsHashDataTable) / sizeof(FipsHashData)) - -static const FipsHmacData FipsHmacDataTable[] = { - { DRV_HASH_SHA1, NIST_HMAC_SHA1_KEY, NIST_HMAC_SHA1_KEY_SIZE, NIST_HMAC_SHA1_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA1_MD }, - { DRV_HASH_SHA256, NIST_HMAC_SHA256_KEY, NIST_HMAC_SHA256_KEY_SIZE, NIST_HMAC_SHA256_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA256_MD }, -#if (CC_SUPPORT_SHA > 256) -// { DRV_HASH_SHA512, NIST_HMAC_SHA512_KEY, NIST_HMAC_SHA512_KEY_SIZE, NIST_HMAC_SHA512_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA512_MD }, -#endif -}; - -#define FIPS_HMAC_NUM_OF_TESTS (sizeof(FipsHmacDataTable) / sizeof(FipsHmacData)) - -static const FipsCcmData FipsCcmDataTable[] = { - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC }, - { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC }, - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC }, - { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC }, - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC }, - { 
DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC }, -}; - -#define FIPS_CCM_NUM_OF_TESTS (sizeof(FipsCcmDataTable) / sizeof(FipsCcmData)) - -static const FipsGcmData FipsGcmDataTable[] = { - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC }, - { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC }, - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC }, - { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC }, - { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC }, - { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC }, -}; - -#define FIPS_GCM_NUM_OF_TESTS (sizeof(FipsGcmDataTable) / sizeof(FipsGcmData)) - -static inline enum cc_fips_error -FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes) -{ - switch (mode) - { - case DRV_CIPHER_ECB: - return is_aes ? CC_REE_FIPS_ERROR_AES_ECB_PUT : CC_REE_FIPS_ERROR_DES_ECB_PUT; - case DRV_CIPHER_CBC: - return is_aes ? CC_REE_FIPS_ERROR_AES_CBC_PUT : CC_REE_FIPS_ERROR_DES_CBC_PUT; - case DRV_CIPHER_OFB: - return CC_REE_FIPS_ERROR_AES_OFB_PUT; - case DRV_CIPHER_CTR: - return CC_REE_FIPS_ERROR_AES_CTR_PUT; - case DRV_CIPHER_CBC_CTS: - return CC_REE_FIPS_ERROR_AES_CBC_CTS_PUT; - case DRV_CIPHER_XTS: - return CC_REE_FIPS_ERROR_AES_XTS_PUT; - default: - return CC_REE_FIPS_ERROR_GENERAL; - } - - return CC_REE_FIPS_ERROR_GENERAL; -} - -static inline int -ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata, - bool is_aes, - int cipher_mode, - int direction, - dma_addr_t key_dma_addr, - size_t key_len, - dma_addr_t iv_dma_addr, - size_t iv_len, - dma_addr_t din_dma_addr, - dma_addr_t dout_dma_addr, - size_t data_size) -{ - /* max number of descriptors used for the flow */ - #define FIPS_CIPHER_MAX_SEQ_LEN 6 - - int rc; - struct ssi_crypto_req ssi_req = {0}; - struct cc_hw_desc desc[FIPS_CIPHER_MAX_SEQ_LEN]; - int idx = 0; - int s_flow_mode = is_aes ? 
S_DIN_to_AES : S_DIN_to_DES; - - /* create setup descriptors */ - switch (cipher_mode) { - case DRV_CIPHER_CBC: - case DRV_CIPHER_CBC_CTS: - case DRV_CIPHER_CTR: - case DRV_CIPHER_OFB: - /* Load cipher state */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, - iv_dma_addr, iv_len, NS_BIT); - set_cipher_config0(&desc[idx], direction); - set_flow_mode(&desc[idx], s_flow_mode); - set_cipher_mode(&desc[idx], cipher_mode); - if ((cipher_mode == DRV_CIPHER_CTR) || - (cipher_mode == DRV_CIPHER_OFB)) { - set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); - } else { - set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); - } - idx++; - /*FALLTHROUGH*/ - case DRV_CIPHER_ECB: - /* Load key */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], cipher_mode); - set_cipher_config0(&desc[idx], direction); - if (is_aes) { - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, - ((key_len == 24) ? AES_MAX_KEY_SIZE : - key_len), NS_BIT); - set_key_size_aes(&desc[idx], key_len); - } else {/*des*/ - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, - key_len, NS_BIT); - set_key_size_des(&desc[idx], key_len); - } - set_flow_mode(&desc[idx], s_flow_mode); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - idx++; - break; - case DRV_CIPHER_XTS: - /* Load AES key */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], cipher_mode); - set_cipher_config0(&desc[idx], direction); - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, (key_len / 2), - NS_BIT); - set_key_size_aes(&desc[idx], (key_len / 2)); - set_flow_mode(&desc[idx], s_flow_mode); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - idx++; - - /* load XEX key */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], cipher_mode); - set_cipher_config0(&desc[idx], direction); - set_din_type(&desc[idx], DMA_DLLI, - (key_dma_addr + (key_len / 2)), - (key_len / 2), NS_BIT); - set_xex_data_unit_size(&desc[idx], data_size); - set_flow_mode(&desc[idx], s_flow_mode); - set_key_size_aes(&desc[idx], (key_len / 2)); - set_setup_mode(&desc[idx], SETUP_LOAD_XEX_KEY); - idx++; - - /* Set state */ - hw_desc_init(&desc[idx]); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); - set_cipher_mode(&desc[idx], cipher_mode); - set_cipher_config0(&desc[idx], direction); - set_key_size_aes(&desc[idx], (key_len / 2)); - set_flow_mode(&desc[idx], s_flow_mode); - set_din_type(&desc[idx], DMA_DLLI, iv_dma_addr, - CC_AES_BLOCK_SIZE, NS_BIT); - idx++; - break; - default: - FIPS_LOG("Unsupported cipher mode (%d)\n", cipher_mode); - BUG(); - } - - /* create data descriptor */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_size, NS_BIT); - set_dout_dlli(&desc[idx], dout_dma_addr, data_size, NS_BIT, 0); - set_flow_mode(&desc[idx], is_aes ? DIN_AES_DOUT : DIN_DES_DOUT); - idx++; - - /* perform the operation - Lock HW and push sequence */ - BUG_ON(idx > FIPS_CIPHER_MAX_SEQ_LEN); - rc = send_request(drvdata, &ssi_req, desc, idx, false); - - // send_request returns error just in some corner cases which should not appear in this flow. 
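	/* The sequence built above is the generic pattern used by these
	 * self-tests: SETUP descriptors load the cipher state (IV) and key(s)
	 * into the AES/DES engine (XTS additionally loads the XEX key half),
	 * a single data descriptor streams din_dma_addr to dout_dma_addr, and
	 * send_request() hands the completed sequence to the request manager.
	 */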
- return rc; -} - -enum cc_fips_error -ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer) -{ - enum cc_fips_error error = CC_REE_FIPS_ERROR_OK; - size_t i; - struct fips_cipher_ctx *virt_ctx = (struct fips_cipher_ctx *)cpu_addr_buffer; - - /* set the phisical pointers for iv, key, din, dout */ - dma_addr_t iv_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, iv); - dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, key); - dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, din); - dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, dout); - - for (i = 0; i < FIPS_CIPHER_NUM_OF_TESTS; ++i) - { - FipsCipherData *cipherData = (FipsCipherData *)&FipsCipherDataTable[i]; - int rc = 0; - size_t iv_size = cipherData->isAes ? NIST_AES_IV_SIZE : NIST_TDES_IV_SIZE; - - memset(cpu_addr_buffer, 0, sizeof(struct fips_cipher_ctx)); - - /* copy into the allocated buffer */ - memcpy(virt_ctx->iv, cipherData->iv, iv_size); - memcpy(virt_ctx->key, cipherData->key, cipherData->keySize); - memcpy(virt_ctx->din, cipherData->dataIn, cipherData->dataInSize); - - FIPS_DBG("ssi_cipher_fips_run_test - (i = %d) \n", i); - rc = ssi_cipher_fips_run_test(drvdata, - cipherData->isAes, - cipherData->oprMode, - cipherData->direction, - key_dma_addr, - cipherData->keySize, - iv_dma_addr, - iv_size, - din_dma_addr, - dout_dma_addr, - cipherData->dataInSize); - if (rc != 0) - { - FIPS_LOG("ssi_cipher_fips_run_test %d returned error - rc = %d \n", i, rc); - error = FIPS_CipherToFipsError(cipherData->oprMode, cipherData->isAes); - break; - } - - /* compare actual dout to expected */ - if (memcmp(virt_ctx->dout, cipherData->dataOut, cipherData->dataInSize) != 0) - { - FIPS_LOG("dout comparison error %d - oprMode=%d, isAes=%d\n", i, cipherData->oprMode, cipherData->isAes); - FIPS_LOG(" i expected received \n"); - FIPS_LOG(" i 0x%08x 0x%08x (size=%d) \n", (size_t)cipherData->dataOut, (size_t)virt_ctx->dout, cipherData->dataInSize); - for (i = 0; i < cipherData->dataInSize; ++i) - { - FIPS_LOG(" %d 0x%02x 0x%02x \n", i, cipherData->dataOut[i], virt_ctx->dout[i]); - } - - error = FIPS_CipherToFipsError(cipherData->oprMode, cipherData->isAes); - break; - } - } - - return error; -} - -static inline int -ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata, - dma_addr_t key_dma_addr, - size_t key_len, - dma_addr_t din_dma_addr, - size_t din_len, - dma_addr_t digest_dma_addr, - size_t digest_len) -{ - /* max number of descriptors used for the flow */ - #define FIPS_CMAC_MAX_SEQ_LEN 4 - - int rc; - struct ssi_crypto_req ssi_req = {0}; - struct cc_hw_desc desc[FIPS_CMAC_MAX_SEQ_LEN]; - int idx = 0; - - /* Setup CMAC Key */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, - ((key_len == 24) ? 
AES_MAX_KEY_SIZE : key_len), NS_BIT); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC); - set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_key_size_aes(&desc[idx], key_len); - set_flow_mode(&desc[idx], S_DIN_to_AES); - idx++; - - /* Load MAC state */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, digest_dma_addr, CC_AES_BLOCK_SIZE, - NS_BIT); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); - set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC); - set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_key_size_aes(&desc[idx], key_len); - set_flow_mode(&desc[idx], S_DIN_to_AES); - idx++; - - //ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx); - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_len, NS_BIT); - set_flow_mode(&desc[idx], DIN_AES_DOUT); - idx++; - - /* Get final MAC result */ - hw_desc_init(&desc[idx]); - set_dout_dlli(&desc[idx], digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, - 0); - set_flow_mode(&desc[idx], S_AES_to_DOUT); - set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); - set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC); - idx++; - - /* perform the operation - Lock HW and push sequence */ - BUG_ON(idx > FIPS_CMAC_MAX_SEQ_LEN); - rc = send_request(drvdata, &ssi_req, desc, idx, false); - - // send_request returns error just in some corner cases which should not appear in this flow. - return rc; -} - -enum cc_fips_error -ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer) -{ - enum cc_fips_error error = CC_REE_FIPS_ERROR_OK; - size_t i; - struct fips_cmac_ctx *virt_ctx = (struct fips_cmac_ctx *)cpu_addr_buffer; - - /* set the phisical pointers for key, din, dout */ - dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, key); - dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, din); - dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, mac_res); - - for (i = 0; i < FIPS_CMAC_NUM_OF_TESTS; ++i) - { - FipsCmacData *cmac_data = (FipsCmacData *)&FipsCmacDataTable[i]; - int rc = 0; - - memset(cpu_addr_buffer, 0, sizeof(struct fips_cmac_ctx)); - - /* copy into the allocated buffer */ - memcpy(virt_ctx->key, cmac_data->key, cmac_data->key_size); - memcpy(virt_ctx->din, cmac_data->data_in, cmac_data->data_in_size); - - BUG_ON(cmac_data->direction != DRV_CRYPTO_DIRECTION_ENCRYPT); - - FIPS_DBG("ssi_cmac_fips_run_test - (i = %d) \n", i); - rc = ssi_cmac_fips_run_test(drvdata, - key_dma_addr, - cmac_data->key_size, - din_dma_addr, - cmac_data->data_in_size, - mac_res_dma_addr, - cmac_data->mac_res_size); - if (rc != 0) - { - FIPS_LOG("ssi_cmac_fips_run_test %d returned error - rc = %d \n", i, rc); - error = CC_REE_FIPS_ERROR_AES_CMAC_PUT; - break; - } - - /* compare actual mac result to expected */ - if (memcmp(virt_ctx->mac_res, cmac_data->mac_res, cmac_data->mac_res_size) != 0) - { - FIPS_LOG("comparison error %d - digest_size=%d \n", i, cmac_data->mac_res_size); - FIPS_LOG(" i expected received \n"); - FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)cmac_data->mac_res, (size_t)virt_ctx->mac_res); - for (i = 0; i < cmac_data->mac_res_size; ++i) - { - FIPS_LOG(" %d 0x%02x 0x%02x \n", i, cmac_data->mac_res[i], virt_ctx->mac_res[i]); - } - - error = CC_REE_FIPS_ERROR_AES_CMAC_PUT; - break; - } - } - - return error; -} - -static inline enum cc_fips_error 
-FIPS_HashToFipsError(enum drv_hash_mode hash_mode) -{ - switch (hash_mode) { - case DRV_HASH_SHA1: - return CC_REE_FIPS_ERROR_SHA1_PUT; - case DRV_HASH_SHA256: - return CC_REE_FIPS_ERROR_SHA256_PUT; -#if (CC_SUPPORT_SHA > 256) - case DRV_HASH_SHA512: - return CC_REE_FIPS_ERROR_SHA512_PUT; -#endif - default: - return CC_REE_FIPS_ERROR_GENERAL; - } - - return CC_REE_FIPS_ERROR_GENERAL; -} - -static inline int -ssi_hash_fips_run_test(struct ssi_drvdata *drvdata, - dma_addr_t initial_digest_dma_addr, - dma_addr_t din_dma_addr, - size_t data_in_size, - dma_addr_t mac_res_dma_addr, - enum drv_hash_mode hash_mode, - enum drv_hash_hw_mode hw_mode, - int digest_size, - int inter_digestsize) -{ - /* max number of descriptors used for the flow */ - #define FIPS_HASH_MAX_SEQ_LEN 4 - - int rc; - struct ssi_crypto_req ssi_req = {0}; - struct cc_hw_desc desc[FIPS_HASH_MAX_SEQ_LEN]; - int idx = 0; - - /* Load initial digest */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_din_type(&desc[idx], DMA_DLLI, initial_digest_dma_addr, - inter_digestsize, NS_BIT); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); - idx++; - - /* Load the hash current length */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_din_const(&desc[idx], 0, HASH_LEN_SIZE); - set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - idx++; - - /* data descriptor */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT); - set_flow_mode(&desc[idx], DIN_HASH); - idx++; - - /* Get final MAC result */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_dout_dlli(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0); - set_flow_mode(&desc[idx], S_HASH_to_DOUT); - set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); - set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); - if (unlikely((hash_mode == DRV_HASH_MD5) || - (hash_mode == DRV_HASH_SHA384) || - (hash_mode == DRV_HASH_SHA512))) { - set_bytes_swap(&desc[idx], 1); - } else { - set_cipher_config0(&desc[idx], - HASH_DIGEST_RESULT_LITTLE_ENDIAN); - } - idx++; - - /* perform the operation - Lock HW and push sequence */ - BUG_ON(idx > FIPS_HASH_MAX_SEQ_LEN); - rc = send_request(drvdata, &ssi_req, desc, idx, false); - - return rc; -} - -enum cc_fips_error -ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer) -{ - enum cc_fips_error error = CC_REE_FIPS_ERROR_OK; - size_t i; - struct fips_hash_ctx *virt_ctx = (struct fips_hash_ctx *)cpu_addr_buffer; - - /* set the phisical pointers for initial_digest, din, mac_res */ - dma_addr_t initial_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, initial_digest); - dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, din); - dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, mac_res); - - for (i = 0; i < FIPS_HASH_NUM_OF_TESTS; ++i) - { - FipsHashData *hash_data = (FipsHashData *)&FipsHashDataTable[i]; - int rc = 0; - enum drv_hash_hw_mode hw_mode = 0; - int digest_size = 0; - int inter_digestsize = 0; - - memset(cpu_addr_buffer, 0, sizeof(struct fips_hash_ctx)); - - switch (hash_data->hash_mode) { - case DRV_HASH_SHA1: - hw_mode = DRV_HASH_HW_SHA1; - digest_size = CC_SHA1_DIGEST_SIZE; - inter_digestsize = CC_SHA1_DIGEST_SIZE; - /* copy the initial digest into the allocated 
cache coherent buffer */ - memcpy(virt_ctx->initial_digest, (void *)sha1_init, CC_SHA1_DIGEST_SIZE); - break; - case DRV_HASH_SHA256: - hw_mode = DRV_HASH_HW_SHA256; - digest_size = CC_SHA256_DIGEST_SIZE; - inter_digestsize = CC_SHA256_DIGEST_SIZE; - memcpy(virt_ctx->initial_digest, (void *)sha256_init, CC_SHA256_DIGEST_SIZE); - break; -#if (CC_SUPPORT_SHA > 256) - case DRV_HASH_SHA512: - hw_mode = DRV_HASH_HW_SHA512; - digest_size = CC_SHA512_DIGEST_SIZE; - inter_digestsize = CC_SHA512_DIGEST_SIZE; - memcpy(virt_ctx->initial_digest, (void *)sha512_init, CC_SHA512_DIGEST_SIZE); - break; -#endif - default: - error = FIPS_HashToFipsError(hash_data->hash_mode); - break; - } - - /* copy the din data into the allocated buffer */ - memcpy(virt_ctx->din, hash_data->data_in, hash_data->data_in_size); - - /* run the test on HW */ - FIPS_DBG("ssi_hash_fips_run_test - (i = %d) \n", i); - rc = ssi_hash_fips_run_test(drvdata, - initial_digest_dma_addr, - din_dma_addr, - hash_data->data_in_size, - mac_res_dma_addr, - hash_data->hash_mode, - hw_mode, - digest_size, - inter_digestsize); - if (rc != 0) - { - FIPS_LOG("ssi_hash_fips_run_test %d returned error - rc = %d \n", i, rc); - error = FIPS_HashToFipsError(hash_data->hash_mode); - break; - } - - /* compare actual mac result to expected */ - if (memcmp(virt_ctx->mac_res, hash_data->mac_res, digest_size) != 0) - { - FIPS_LOG("comparison error %d - hash_mode=%d digest_size=%d \n", i, hash_data->hash_mode, digest_size); - FIPS_LOG(" i expected received \n"); - FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)hash_data->mac_res, (size_t)virt_ctx->mac_res); - for (i = 0; i < digest_size; ++i) - { - FIPS_LOG(" %d 0x%02x 0x%02x \n", i, hash_data->mac_res[i], virt_ctx->mac_res[i]); - } - - error = FIPS_HashToFipsError(hash_data->hash_mode); - break; - } - } - - return error; -} - -static inline enum cc_fips_error -FIPS_HmacToFipsError(enum drv_hash_mode hash_mode) -{ - switch (hash_mode) { - case DRV_HASH_SHA1: - return CC_REE_FIPS_ERROR_HMAC_SHA1_PUT; - case DRV_HASH_SHA256: - return CC_REE_FIPS_ERROR_HMAC_SHA256_PUT; -#if (CC_SUPPORT_SHA > 256) - case DRV_HASH_SHA512: - return CC_REE_FIPS_ERROR_HMAC_SHA512_PUT; -#endif - default: - return CC_REE_FIPS_ERROR_GENERAL; - } - - return CC_REE_FIPS_ERROR_GENERAL; -} - -static inline int -ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata, - dma_addr_t initial_digest_dma_addr, - dma_addr_t key_dma_addr, - size_t key_size, - dma_addr_t din_dma_addr, - size_t data_in_size, - dma_addr_t mac_res_dma_addr, - enum drv_hash_mode hash_mode, - enum drv_hash_hw_mode hw_mode, - size_t digest_size, - size_t inter_digestsize, - size_t block_size, - dma_addr_t k0_dma_addr, - dma_addr_t tmp_digest_dma_addr, - dma_addr_t digest_bytes_len_dma_addr) -{ - /* The implemented flow is not the same as the one implemented in ssi_hash.c (setkey + digest flows). - * In this flow, there is no need to store and reload some of the intermidiate results. 
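 * For reference, the computation performed below is standard HMAC (FIPS 198-1):
 *
 *   K0   = key zero-padded up to the hash block size
 *   HMAC = H((K0 xor opad) || H((K0 xor ipad) || m))
 *
 * The H(K0 xor opad) state is kept in tmp_digest and the inner digest
 * H((K0 xor ipad) || m) in k0, before the outer hash is finalized into
 * mac_res_dma_addr.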
- */ - - /* max number of descriptors used for the flow */ - #define FIPS_HMAC_MAX_SEQ_LEN 12 - - int rc; - struct ssi_crypto_req ssi_req = {0}; - struct cc_hw_desc desc[FIPS_HMAC_MAX_SEQ_LEN]; - int idx = 0; - int i; - /* calc the hash opad first and ipad only afterwards (unlike the flow in ssi_hash.c) */ - unsigned int hmacPadConst[2] = { HMAC_OPAD_CONST, HMAC_IPAD_CONST }; - - // assume (key_size <= block_size) - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT); - set_flow_mode(&desc[idx], BYPASS); - set_dout_dlli(&desc[idx], k0_dma_addr, key_size, NS_BIT, 0); - idx++; - - // if needed, append Key with zeros to create K0 - if ((block_size - key_size) != 0) { - hw_desc_init(&desc[idx]); - set_din_const(&desc[idx], 0, (block_size - key_size)); - set_flow_mode(&desc[idx], BYPASS); - set_dout_dlli(&desc[idx], (k0_dma_addr + key_size), - (block_size - key_size), NS_BIT, 0); - idx++; - } - - BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN); - rc = send_request(drvdata, &ssi_req, desc, idx, 0); - if (unlikely(rc != 0)) { - SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); - return rc; - } - idx = 0; - - /* calc derived HMAC key */ - for (i = 0; i < 2; i++) { - /* Load hash initial state */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_din_type(&desc[idx], DMA_DLLI, initial_digest_dma_addr, - inter_digestsize, NS_BIT); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); - idx++; - - /* Load the hash current length*/ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_din_const(&desc[idx], 0, HASH_LEN_SIZE); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - idx++; - - /* Prepare opad/ipad key */ - hw_desc_init(&desc[idx]); - set_xor_val(&desc[idx], hmacPadConst[i]); - set_cipher_mode(&desc[idx], hw_mode); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); - idx++; - - /* Perform HASH update */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, k0_dma_addr, block_size, - NS_BIT); - set_cipher_mode(&desc[idx], hw_mode); - set_xor_active(&desc[idx]); - set_flow_mode(&desc[idx], DIN_HASH); - idx++; - - if (i == 0) { - /* First iteration - calc H(K0^opad) into tmp_digest_dma_addr */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_dout_dlli(&desc[idx], tmp_digest_dma_addr, - inter_digestsize, NS_BIT, 0); - set_flow_mode(&desc[idx], S_HASH_to_DOUT); - set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); - idx++; - - // is this needed?? or continue with current descriptors?? - BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN); - rc = send_request(drvdata, &ssi_req, desc, idx, 0); - if (unlikely(rc != 0)) { - SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); - return rc; - } - idx = 0; - } - } - - /* data descriptor */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT); - set_flow_mode(&desc[idx], DIN_HASH); - idx++; - - /* HW last hash block padding (aka. 
"DO_PAD") */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_dout_dlli(&desc[idx], k0_dma_addr, HASH_LEN_SIZE, NS_BIT, 0); - set_flow_mode(&desc[idx], S_HASH_to_DOUT); - set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); - set_cipher_do(&desc[idx], DO_PAD); - idx++; - - /* store the hash digest result in the context */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_dout_dlli(&desc[idx], k0_dma_addr, digest_size, NS_BIT, 0); - set_flow_mode(&desc[idx], S_HASH_to_DOUT); - if (unlikely((hash_mode == DRV_HASH_MD5) || - (hash_mode == DRV_HASH_SHA384) || - (hash_mode == DRV_HASH_SHA512))) { - set_bytes_swap(&desc[idx], 1); - } else { - set_cipher_config0(&desc[idx], - HASH_DIGEST_RESULT_LITTLE_ENDIAN); - } - set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); - idx++; - - /* at this point: - * tmp_digest = H(o_key_pad) - * k0 = H(i_key_pad || m) - */ - - /* Loading hash opad xor key state */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_din_type(&desc[idx], DMA_DLLI, tmp_digest_dma_addr, - inter_digestsize, NS_BIT); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); - idx++; - - /* Load the hash current length */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_din_type(&desc[idx], DMA_DLLI, digest_bytes_len_dma_addr, - HASH_LEN_SIZE, NS_BIT); - set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - idx++; - - /* Memory Barrier: wait for IPAD/OPAD axi write to complete */ - hw_desc_init(&desc[idx]); - set_din_no_dma(&desc[idx], 0, 0xfffff0); - set_dout_no_dma(&desc[idx], 0, 0, 1); - idx++; - - /* Perform HASH update */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, k0_dma_addr, digest_size, NS_BIT); - set_flow_mode(&desc[idx], DIN_HASH); - idx++; - - /* Get final MAC result */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], hw_mode); - set_dout_dlli(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0); - set_flow_mode(&desc[idx], S_HASH_to_DOUT); - set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); - set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); - if (unlikely((hash_mode == DRV_HASH_MD5) || - (hash_mode == DRV_HASH_SHA384) || - (hash_mode == DRV_HASH_SHA512))) { - set_bytes_swap(&desc[idx], 1); - } else { - set_cipher_config0(&desc[idx], - HASH_DIGEST_RESULT_LITTLE_ENDIAN); - } - idx++; - - /* perform the operation - Lock HW and push sequence */ - BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN); - rc = send_request(drvdata, &ssi_req, desc, idx, false); - - return rc; -} - -enum cc_fips_error -ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer) -{ - enum cc_fips_error error = CC_REE_FIPS_ERROR_OK; - size_t i; - struct fips_hmac_ctx *virt_ctx = (struct fips_hmac_ctx *)cpu_addr_buffer; - - /* set the phisical pointers */ - dma_addr_t initial_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, initial_digest); - dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, key); - dma_addr_t k0_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, k0); - dma_addr_t tmp_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, tmp_digest); - dma_addr_t digest_bytes_len_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, digest_bytes_len); - dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, 
din); - dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, mac_res); - - for (i = 0; i < FIPS_HMAC_NUM_OF_TESTS; ++i) - { - FipsHmacData *hmac_data = (FipsHmacData *)&FipsHmacDataTable[i]; - int rc = 0; - enum drv_hash_hw_mode hw_mode = 0; - int digest_size = 0; - int block_size = 0; - int inter_digestsize = 0; - - memset(cpu_addr_buffer, 0, sizeof(struct fips_hmac_ctx)); - - switch (hmac_data->hash_mode) { - case DRV_HASH_SHA1: - hw_mode = DRV_HASH_HW_SHA1; - digest_size = CC_SHA1_DIGEST_SIZE; - block_size = CC_SHA1_BLOCK_SIZE; - inter_digestsize = CC_SHA1_DIGEST_SIZE; - memcpy(virt_ctx->initial_digest, (void *)sha1_init, CC_SHA1_DIGEST_SIZE); - memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE); - break; - case DRV_HASH_SHA256: - hw_mode = DRV_HASH_HW_SHA256; - digest_size = CC_SHA256_DIGEST_SIZE; - block_size = CC_SHA256_BLOCK_SIZE; - inter_digestsize = CC_SHA256_DIGEST_SIZE; - memcpy(virt_ctx->initial_digest, (void *)sha256_init, CC_SHA256_DIGEST_SIZE); - memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE); - break; -#if (CC_SUPPORT_SHA > 256) - case DRV_HASH_SHA512: - hw_mode = DRV_HASH_HW_SHA512; - digest_size = CC_SHA512_DIGEST_SIZE; - block_size = CC_SHA512_BLOCK_SIZE; - inter_digestsize = CC_SHA512_DIGEST_SIZE; - memcpy(virt_ctx->initial_digest, (void *)sha512_init, CC_SHA512_DIGEST_SIZE); - memcpy(virt_ctx->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE); - break; -#endif - default: - error = FIPS_HmacToFipsError(hmac_data->hash_mode); - break; - } - - /* copy into the allocated buffer */ - memcpy(virt_ctx->key, hmac_data->key, hmac_data->key_size); - memcpy(virt_ctx->din, hmac_data->data_in, hmac_data->data_in_size); - - /* run the test on HW */ - FIPS_DBG("ssi_hmac_fips_run_test - (i = %d) \n", i); - rc = ssi_hmac_fips_run_test(drvdata, - initial_digest_dma_addr, - key_dma_addr, - hmac_data->key_size, - din_dma_addr, - hmac_data->data_in_size, - mac_res_dma_addr, - hmac_data->hash_mode, - hw_mode, - digest_size, - inter_digestsize, - block_size, - k0_dma_addr, - tmp_digest_dma_addr, - digest_bytes_len_dma_addr); - if (rc != 0) - { - FIPS_LOG("ssi_hmac_fips_run_test %d returned error - rc = %d \n", i, rc); - error = FIPS_HmacToFipsError(hmac_data->hash_mode); - break; - } - - /* compare actual mac result to expected */ - if (memcmp(virt_ctx->mac_res, hmac_data->mac_res, digest_size) != 0) - { - FIPS_LOG("comparison error %d - hash_mode=%d digest_size=%d \n", i, hmac_data->hash_mode, digest_size); - FIPS_LOG(" i expected received \n"); - FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)hmac_data->mac_res, (size_t)virt_ctx->mac_res); - for (i = 0; i < digest_size; ++i) - { - FIPS_LOG(" %d 0x%02x 0x%02x \n", i, hmac_data->mac_res[i], virt_ctx->mac_res[i]); - } - - error = FIPS_HmacToFipsError(hmac_data->hash_mode); - break; - } - } - - return error; -} - -static inline int -ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata, - enum drv_crypto_direction direction, - dma_addr_t key_dma_addr, - size_t key_size, - dma_addr_t iv_dma_addr, - dma_addr_t ctr_cnt_0_dma_addr, - dma_addr_t b0_a0_adata_dma_addr, - size_t b0_a0_adata_size, - dma_addr_t din_dma_addr, - size_t din_size, - dma_addr_t dout_dma_addr, - dma_addr_t mac_res_dma_addr) -{ - /* max number of descriptors used for the flow */ - #define FIPS_CCM_MAX_SEQ_LEN 10 - - int rc; - struct ssi_crypto_req ssi_req = {0}; - struct cc_hw_desc desc[FIPS_CCM_MAX_SEQ_LEN]; - unsigned int idx = 0; - unsigned int cipher_flow_mode; - - if (direction == DRV_CRYPTO_DIRECTION_DECRYPT) 
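Each of these power-up tests boils down to the same known-answer check: drive the operation through the engine, then byte-compare the device's output against the published NIST vector and latch an error on mismatch. Distilled into a trivial helper (sketch only; the driver's loops inline this logic rather than calling such a function):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/errno.h>

/* Generic shape of a known-answer test comparison (illustrative helper). */
static int kat_check(const u8 *got, const u8 *want, size_t len,
		     const char *name)
{
	if (!memcmp(got, want, len))
		return 0;

	pr_err("FIPS KAT %s failed: %zu-byte result mismatch\n", name, len);
	return -EIO;
}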
{ - cipher_flow_mode = AES_to_HASH_and_DOUT; - } else { /* Encrypt */ - cipher_flow_mode = AES_and_HASH; - } - - /* load key */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, - ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ? - CC_AES_KEY_SIZE_MAX : key_size), NS_BIT) - set_key_size_aes(&desc[idx], key_size); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_flow_mode(&desc[idx], S_DIN_to_AES); - idx++; - - /* load ctr state */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); - set_key_size_aes(&desc[idx], key_size); - set_din_type(&desc[idx], DMA_DLLI, iv_dma_addr, AES_BLOCK_SIZE, - NS_BIT); - set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); - set_flow_mode(&desc[idx], S_DIN_to_AES); - idx++; - - /* load MAC key */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, - ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ? - CC_AES_KEY_SIZE_MAX : key_size), NS_BIT); - set_key_size_aes(&desc[idx], key_size); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_aes_not_hash_mode(&desc[idx]); - idx++; - - /* load MAC state */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); - set_key_size_aes(&desc[idx], key_size); - set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr, - NIST_AESCCM_TAG_SIZE, NS_BIT); - set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_aes_not_hash_mode(&desc[idx]); - idx++; - - /* prcess assoc data */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, b0_a0_adata_dma_addr, - b0_a0_adata_size, NS_BIT); - set_flow_mode(&desc[idx], DIN_HASH); - idx++; - - /* process the cipher */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT); - set_dout_dlli(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0); - set_flow_mode(&desc[idx], cipher_flow_mode); - idx++; - - /* Read temporal MAC */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); - set_dout_dlli(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, - NS_BIT, 0); - set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); - set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN); - set_flow_mode(&desc[idx], S_HASH_to_DOUT); - set_aes_not_hash_mode(&desc[idx]); - idx++; - - /* load AES-CTR state (for last MAC calculation)*/ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); - set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); - set_din_type(&desc[idx], DMA_DLLI, ctr_cnt_0_dma_addr, AES_BLOCK_SIZE, - NS_BIT); - set_key_size_aes(&desc[idx], key_size); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); - set_flow_mode(&desc[idx], S_DIN_to_AES); - idx++; - - /* Memory Barrier */ - hw_desc_init(&desc[idx]); - set_din_no_dma(&desc[idx], 0, 0xfffff0); - set_dout_no_dma(&desc[idx], 0, 0, 1); - idx++; - - /* encrypt the "T" value and store MAC inplace */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr, - NIST_AESCCM_TAG_SIZE, NS_BIT); - set_dout_dlli(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, - NS_BIT, 0); - set_flow_mode(&desc[idx], 
DIN_AES_DOUT); - idx++; - - /* perform the operation - Lock HW and push sequence */ - BUG_ON(idx > FIPS_CCM_MAX_SEQ_LEN); - rc = send_request(drvdata, &ssi_req, desc, idx, false); - - return rc; -} - -enum cc_fips_error -ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer) -{ - enum cc_fips_error error = CC_REE_FIPS_ERROR_OK; - size_t i; - struct fips_ccm_ctx *virt_ctx = (struct fips_ccm_ctx *)cpu_addr_buffer; - - /* set the phisical pointers */ - dma_addr_t b0_a0_adata_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, b0_a0_adata); - dma_addr_t iv_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, iv); - dma_addr_t ctr_cnt_0_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, ctr_cnt_0); - dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, key); - dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, din); - dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, dout); - dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, mac_res); - - for (i = 0; i < FIPS_CCM_NUM_OF_TESTS; ++i) - { - FipsCcmData *ccmData = (FipsCcmData *)&FipsCcmDataTable[i]; - int rc = 0; - - memset(cpu_addr_buffer, 0, sizeof(struct fips_ccm_ctx)); - - /* copy the nonce, key, adata, din data into the allocated buffer */ - memcpy(virt_ctx->key, ccmData->key, ccmData->keySize); - memcpy(virt_ctx->din, ccmData->dataIn, ccmData->dataInSize); - { - /* build B0 -- B0, nonce, l(m) */ - __be16 data = cpu_to_be16(NIST_AESCCM_TEXT_SIZE); - - virt_ctx->b0_a0_adata[0] = NIST_AESCCM_B0_VAL; - memcpy(virt_ctx->b0_a0_adata + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE); - memcpy(virt_ctx->b0_a0_adata + 14, (u8 *)&data, sizeof(__be16)); - /* build A0+ADATA */ - virt_ctx->b0_a0_adata[NIST_AESCCM_IV_SIZE + 0] = (ccmData->adataSize >> 8) & 0xFF; - virt_ctx->b0_a0_adata[NIST_AESCCM_IV_SIZE + 1] = ccmData->adataSize & 0xFF; - memcpy(virt_ctx->b0_a0_adata + NIST_AESCCM_IV_SIZE + 2, ccmData->adata, ccmData->adataSize); - /* iv */ - virt_ctx->iv[0] = 1; /* L' */ - memcpy(virt_ctx->iv + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE); - virt_ctx->iv[15] = 1; - /* ctr_count_0 */ - memcpy(virt_ctx->ctr_cnt_0, virt_ctx->iv, NIST_AESCCM_IV_SIZE); - virt_ctx->ctr_cnt_0[15] = 0; - } - - FIPS_DBG("ssi_ccm_fips_run_test - (i = %d) \n", i); - rc = ssi_ccm_fips_run_test(drvdata, - ccmData->direction, - key_dma_addr, - ccmData->keySize, - iv_dma_addr, - ctr_cnt_0_dma_addr, - b0_a0_adata_dma_addr, - FIPS_CCM_B0_A0_ADATA_SIZE, - din_dma_addr, - ccmData->dataInSize, - dout_dma_addr, - mac_res_dma_addr); - if (rc != 0) - { - FIPS_LOG("ssi_ccm_fips_run_test %d returned error - rc = %d \n", i, rc); - error = CC_REE_FIPS_ERROR_AESCCM_PUT; - break; - } - - /* compare actual dout to expected */ - if (memcmp(virt_ctx->dout, ccmData->dataOut, ccmData->dataInSize) != 0) - { - FIPS_LOG("dout comparison error %d - size=%d \n", i, ccmData->dataInSize); - error = CC_REE_FIPS_ERROR_AESCCM_PUT; - break; - } - - /* compare actual mac result to expected */ - if (memcmp(virt_ctx->mac_res, ccmData->macResOut, ccmData->tagSize) != 0) - { - FIPS_LOG("mac_res comparison error %d - mac_size=%d \n", i, ccmData->tagSize); - FIPS_LOG(" i expected received \n"); - FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)ccmData->macResOut, (size_t)virt_ctx->mac_res); - for (i = 0; i < ccmData->tagSize; ++i) - { - FIPS_LOG(" %d 0x%02x 0x%02x \n", i, ccmData->macResOut[i], virt_ctx->mac_res[i]); - } - - error = 
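The block assembly just above follows the CCM formatting rules from RFC 3610 / NIST SP 800-38C for these vectors: B0 is one AES block holding a flags byte, the nonce and the big-endian message length (L = 2 here), and the next block starts with the associated-data length encoded on two big-endian bytes followed by the data itself. A sketch of the same layout (the flags value and the 13-byte nonce length are assumptions implied by these vectors; the helper is illustrative):

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Reference layout of B0 and the A-data header for a 13-byte nonce (L = 2).
 * 'flags' encodes Adata/M/L per the CCM spec; the exact value is
 * vector-dependent, so it is taken as a parameter here. */
static void ccm_build_b0_a0(u8 *buf, u8 flags, const u8 *nonce,
			    u16 payload_len, u16 adata_len, const u8 *adata)
{
	__be16 mlen = cpu_to_be16(payload_len);

	/* B0: flags, nonce, l(m) big-endian in the last two bytes */
	buf[0] = flags;
	memcpy(buf + 1, nonce, 13);
	memcpy(buf + 14, &mlen, sizeof(mlen));

	/* A-data header (next block): 2-byte big-endian length, then adata */
	buf[16] = (adata_len >> 8) & 0xff;
	buf[17] = adata_len & 0xff;
	memcpy(buf + 18, adata, adata_len);
}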
CC_REE_FIPS_ERROR_AESCCM_PUT; - break; - } - } - - return error; -} - -static inline int -ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata, - enum drv_crypto_direction direction, - dma_addr_t key_dma_addr, - size_t key_size, - dma_addr_t hkey_dma_addr, - dma_addr_t block_len_dma_addr, - dma_addr_t iv_inc1_dma_addr, - dma_addr_t iv_inc2_dma_addr, - dma_addr_t adata_dma_addr, - size_t adata_size, - dma_addr_t din_dma_addr, - size_t din_size, - dma_addr_t dout_dma_addr, - dma_addr_t mac_res_dma_addr) -{ - /* max number of descriptors used for the flow */ - #define FIPS_GCM_MAX_SEQ_LEN 15 - - int rc; - struct ssi_crypto_req ssi_req = {0}; - struct cc_hw_desc desc[FIPS_GCM_MAX_SEQ_LEN]; - unsigned int idx = 0; - unsigned int cipher_flow_mode; - - if (direction == DRV_CRYPTO_DIRECTION_DECRYPT) { - cipher_flow_mode = AES_and_HASH; - } else { /* Encrypt */ - cipher_flow_mode = AES_to_HASH_and_DOUT; - } - -///////////////////////////////// 1 //////////////////////////////////// -// ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size); -///////////////////////////////// 1 //////////////////////////////////// - - /* load key to AES */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_ECB); - set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT); - set_key_size_aes(&desc[idx], key_size); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - set_flow_mode(&desc[idx], S_DIN_to_AES); - idx++; - - /* process one zero block to generate hkey */ - hw_desc_init(&desc[idx]); - set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE); - set_dout_dlli(&desc[idx], hkey_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0); - set_flow_mode(&desc[idx], DIN_AES_DOUT); - idx++; - - /* Memory Barrier */ - hw_desc_init(&desc[idx]); - set_din_no_dma(&desc[idx], 0, 0xfffff0); - set_dout_no_dma(&desc[idx], 0, 0, 1); - idx++; - - /* Load GHASH subkey */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, hkey_dma_addr, AES_BLOCK_SIZE, - NS_BIT); - set_dout_no_dma(&desc[idx], 0, 0, 1); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_aes_not_hash_mode(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); - set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - idx++; - - /* Configure Hash Engine to work with GHASH. - * Since it was not possible to extend HASH submodes to add GHASH, - * The following command is necessary in order to - * select GHASH (according to HW designers) - */ - hw_desc_init(&desc[idx]); - set_din_no_dma(&desc[idx], 0, 0xfffff0); - set_dout_no_dma(&desc[idx], 0, 0, 1); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_aes_not_hash_mode(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); - set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK - set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); - set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - idx++; - - /* Load GHASH initial STATE (which is 0). 
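The descriptors above derive the GHASH subkey exactly as GCM defines it: H is the block cipher applied to the all-zero block, and GHASH then starts from an all-zero state Y0. The same derivation in software, using the kernel's single-block cipher API as a stand-in for the AES engine (sketch only):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/types.h>

/* H = E_K(0^128), the GHASH subkey; the GHASH running state also starts at 0. */
static int gcm_derive_hkey(const u8 *key, unsigned int keylen, u8 h[16])
{
	static const u8 zero[16];
	struct crypto_cipher *aes;
	int rc;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	rc = crypto_cipher_setkey(aes, key, keylen);
	if (!rc)
		crypto_cipher_encrypt_one(aes, h, zero);

	crypto_free_cipher(aes);
	return rc;
}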
(for any hash there is an initial state) */ - hw_desc_init(&desc[idx]); - set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE); - set_dout_no_dma(&desc[idx], 0, 0, 1); - set_flow_mode(&desc[idx], S_DIN_to_HASH); - set_aes_not_hash_mode(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); - set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); - idx++; - -///////////////////////////////// 2 //////////////////////////////////// - /* prcess(ghash) assoc data */ -// if (req->assoclen > 0) -// ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size); -///////////////////////////////// 2 //////////////////////////////////// - - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, adata_dma_addr, adata_size, NS_BIT); - set_flow_mode(&desc[idx], DIN_HASH); - idx++; - -///////////////////////////////// 3 //////////////////////////////////// -// ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size); -///////////////////////////////// 3 //////////////////////////////////// - - /* load key to AES*/ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); - set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); - set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT); - set_key_size_aes(&desc[idx], key_size); - set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); - set_flow_mode(&desc[idx], S_DIN_to_AES); - idx++; - - /* load AES/CTR initial CTR value inc by 2*/ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); - set_key_size_aes(&desc[idx], key_size); - set_din_type(&desc[idx], DMA_DLLI, iv_inc2_dma_addr, AES_BLOCK_SIZE, - NS_BIT); - set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); - set_flow_mode(&desc[idx], S_DIN_to_AES); - idx++; - -///////////////////////////////// 4 //////////////////////////////////// - /* process(gctr+ghash) */ -// if (req_ctx->cryptlen != 0) -// ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size); -///////////////////////////////// 4 //////////////////////////////////// - - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT); - set_dout_dlli(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0); - set_flow_mode(&desc[idx], cipher_flow_mode); - idx++; - -///////////////////////////////// 5 //////////////////////////////////// -// ssi_aead_process_gcm_result_desc(req, desc, seq_size); -///////////////////////////////// 5 //////////////////////////////////// - - /* prcess(ghash) gcm_block_len */ - hw_desc_init(&desc[idx]); - set_din_type(&desc[idx], DMA_DLLI, block_len_dma_addr, AES_BLOCK_SIZE, - NS_BIT); - set_flow_mode(&desc[idx], DIN_HASH); - idx++; - - /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); - set_din_no_dma(&desc[idx], 0, 0xfffff0); - set_dout_dlli(&desc[idx], mac_res_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0); - set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); - set_flow_mode(&desc[idx], S_HASH_to_DOUT); - set_aes_not_hash_mode(&desc[idx]); - idx++; - - /* load AES/CTR initial CTR value inc by 1*/ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); - set_key_size_aes(&desc[idx], key_size); - set_din_type(&desc[idx], DMA_DLLI, iv_inc1_dma_addr, AES_BLOCK_SIZE, - NS_BIT); - set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); - set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); - set_flow_mode(&desc[idx], 
S_DIN_to_AES); - idx++; - - /* Memory Barrier */ - hw_desc_init(&desc[idx]); - set_din_no_dma(&desc[idx], 0, 0xfffff0); - set_dout_no_dma(&desc[idx], 0, 0, 1); - idx++; - - /* process GCTR on stored GHASH and store MAC inplace */ - hw_desc_init(&desc[idx]); - set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); - set_din_type(&desc[idx], DMA_DLLI, mac_res_dma_addr, AES_BLOCK_SIZE, - NS_BIT); - set_dout_dlli(&desc[idx], mac_res_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0); - set_flow_mode(&desc[idx], DIN_AES_DOUT); - idx++; - - /* perform the operation - Lock HW and push sequence */ - BUG_ON(idx > FIPS_GCM_MAX_SEQ_LEN); - rc = send_request(drvdata, &ssi_req, desc, idx, false); - - return rc; -} - -enum cc_fips_error -ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer) -{ - enum cc_fips_error error = CC_REE_FIPS_ERROR_OK; - size_t i; - struct fips_gcm_ctx *virt_ctx = (struct fips_gcm_ctx *)cpu_addr_buffer; - - /* set the phisical pointers */ - dma_addr_t adata_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, adata); - dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, key); - dma_addr_t hkey_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, hkey); - dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, din); - dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, dout); - dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, mac_res); - dma_addr_t len_block_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, len_block); - dma_addr_t iv_inc1_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, iv_inc1); - dma_addr_t iv_inc2_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, iv_inc2); - - for (i = 0; i < FIPS_GCM_NUM_OF_TESTS; ++i) - { - FipsGcmData *gcmData = (FipsGcmData *)&FipsGcmDataTable[i]; - int rc = 0; - - memset(cpu_addr_buffer, 0, sizeof(struct fips_gcm_ctx)); - - /* copy the key, adata, din data - into the allocated buffer */ - memcpy(virt_ctx->key, gcmData->key, gcmData->keySize); - memcpy(virt_ctx->adata, gcmData->adata, gcmData->adataSize); - memcpy(virt_ctx->din, gcmData->dataIn, gcmData->dataInSize); - - /* len_block */ - { - __be64 len_bits; - - len_bits = cpu_to_be64(gcmData->adataSize * 8); - memcpy(virt_ctx->len_block, &len_bits, sizeof(len_bits)); - len_bits = cpu_to_be64(gcmData->dataInSize * 8); - memcpy(virt_ctx->len_block + 8, &len_bits, sizeof(len_bits)); - } - /* iv_inc1, iv_inc2 */ - { - __be32 counter = cpu_to_be32(1); - - memcpy(virt_ctx->iv_inc1, gcmData->iv, NIST_AESGCM_IV_SIZE); - memcpy(virt_ctx->iv_inc1 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter)); - counter = cpu_to_be32(2); - memcpy(virt_ctx->iv_inc2, gcmData->iv, NIST_AESGCM_IV_SIZE); - memcpy(virt_ctx->iv_inc2 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter)); - } - - FIPS_DBG("ssi_gcm_fips_run_test - (i = %d) \n", i); - rc = ssi_gcm_fips_run_test(drvdata, - gcmData->direction, - key_dma_addr, - gcmData->keySize, - hkey_dma_addr, - len_block_dma_addr, - iv_inc1_dma_addr, - iv_inc2_dma_addr, - adata_dma_addr, - gcmData->adataSize, - din_dma_addr, - gcmData->dataInSize, - dout_dma_addr, - mac_res_dma_addr); - if (rc != 0) - { - FIPS_LOG("ssi_gcm_fips_run_test %d returned error - rc = %d \n", i, rc); - error = CC_REE_FIPS_ERROR_AESGCM_PUT; - break; - } - - if (gcmData->direction == DRV_CRYPTO_DIRECTION_ENCRYPT) { - /* compare actual dout to expected */ - if (memcmp(virt_ctx->dout, gcmData->dataOut, 
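The len_block and the two counter blocks prepared above are the standard GCM bookkeeping values for a 96-bit IV: the length block carries len(A) and len(C) in bits as two big-endian 64-bit fields, J0 = IV || 0x00000001 encrypts the GHASH result into the tag, and J0 + 1 starts the CTR keystream for the payload. The same construction as plain code (illustrative helper):

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void gcm_aux_blocks(const u8 iv[12], u64 aad_len, u64 text_len,
			   u8 len_block[16], u8 j0[16], u8 j0_plus1[16])
{
	__be64 bits;
	__be32 ctr;

	bits = cpu_to_be64(aad_len * 8);	/* len(A) in bits */
	memcpy(len_block, &bits, 8);
	bits = cpu_to_be64(text_len * 8);	/* len(C) in bits */
	memcpy(len_block + 8, &bits, 8);

	memcpy(j0, iv, 12);
	ctr = cpu_to_be32(1);			/* J0: used for the tag */
	memcpy(j0 + 12, &ctr, 4);

	memcpy(j0_plus1, iv, 12);
	ctr = cpu_to_be32(2);			/* J0 + 1: starts the data CTR */
	memcpy(j0_plus1 + 12, &ctr, 4);
}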
gcmData->dataInSize) != 0) - { - FIPS_LOG("dout comparison error %d - size=%d \n", i, gcmData->dataInSize); - FIPS_LOG(" i expected received \n"); - FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)gcmData->dataOut, (size_t)virt_ctx->dout); - for (i = 0; i < gcmData->dataInSize; ++i) - { - FIPS_LOG(" %d 0x%02x 0x%02x \n", i, gcmData->dataOut[i], virt_ctx->dout[i]); - } - - error = CC_REE_FIPS_ERROR_AESGCM_PUT; - break; - } - } - - /* compare actual mac result to expected */ - if (memcmp(virt_ctx->mac_res, gcmData->macResOut, gcmData->tagSize) != 0) - { - FIPS_LOG("mac_res comparison error %d - mac_size=%d \n", i, gcmData->tagSize); - FIPS_LOG(" i expected received \n"); - FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)gcmData->macResOut, (size_t)virt_ctx->mac_res); - for (i = 0; i < gcmData->tagSize; ++i) - { - FIPS_LOG(" %d 0x%02x 0x%02x \n", i, gcmData->macResOut[i], virt_ctx->mac_res[i]); - } - - error = CC_REE_FIPS_ERROR_AESGCM_PUT; - break; - } - } - return error; -} - -size_t ssi_fips_max_mem_alloc_size(void) -{ - FIPS_DBG("sizeof(struct fips_cipher_ctx) %d \n", sizeof(struct fips_cipher_ctx)); - FIPS_DBG("sizeof(struct fips_cmac_ctx) %d \n", sizeof(struct fips_cmac_ctx)); - FIPS_DBG("sizeof(struct fips_hash_ctx) %d \n", sizeof(struct fips_hash_ctx)); - FIPS_DBG("sizeof(struct fips_hmac_ctx) %d \n", sizeof(struct fips_hmac_ctx)); - FIPS_DBG("sizeof(struct fips_ccm_ctx) %d \n", sizeof(struct fips_ccm_ctx)); - FIPS_DBG("sizeof(struct fips_gcm_ctx) %d \n", sizeof(struct fips_gcm_ctx)); - - return sizeof(fips_ctx); -} - diff --git a/drivers/staging/ccree/ssi_fips_local.c b/drivers/staging/ccree/ssi_fips_local.c deleted file mode 100644 index aefb71dc9e9a..000000000000 --- a/drivers/staging/ccree/ssi_fips_local.c +++ /dev/null @@ -1,357 +0,0 @@ -/* - * Copyright (C) 2012-2017 ARM Limited or its affiliates. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -/************************************************************** - * This file defines the driver FIPS internal function, used by the driver itself. 
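ssi_fips_max_mem_alloc_size() above can simply return sizeof(fips_ctx) because fips_ctx is, presumably, a union of all the per-test context structs, so one coherent allocation of that size fits whichever test runs (the definition is not shown in this hunk, so the exact layout is an assumption). The idea in miniature, with illustrative members:

#include <linux/types.h>

/* One allocation sized for the largest member: a union yields the maximum
 * of all per-test context sizes at compile time (illustrative layout). */
struct kat_aes_ctx  { u8 key[32]; u8 din[64]; u8 dout[64]; };
struct kat_hash_ctx { u8 din[128]; u8 digest[64]; };

union kat_ctx {
	struct kat_aes_ctx aes;
	struct kat_hash_ctx hash;
};

/* sizeof(union kat_ctx) == max of the member sizes, so one buffer fits all. */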
- ***************************************************************/ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <crypto/des.h> - -#include "ssi_config.h" -#include "ssi_driver.h" -#include "cc_hal.h" - -#define FIPS_POWER_UP_TEST_CIPHER 1 -#define FIPS_POWER_UP_TEST_CMAC 1 -#define FIPS_POWER_UP_TEST_HASH 1 -#define FIPS_POWER_UP_TEST_HMAC 1 -#define FIPS_POWER_UP_TEST_CCM 1 -#define FIPS_POWER_UP_TEST_GCM 1 - -static bool ssi_fips_support = 1; -module_param(ssi_fips_support, bool, 0644); -MODULE_PARM_DESC(ssi_fips_support, "FIPS supported flag: 0 - off , 1 - on (default)"); - -static void fips_dsr(unsigned long devarg); - -struct ssi_fips_handle { -#ifdef COMP_IN_WQ - struct workqueue_struct *workq; - struct delayed_work fipswork; -#else - struct tasklet_struct fipstask; -#endif -}; - -extern int ssi_fips_get_state(enum cc_fips_state_t *p_state); -extern int ssi_fips_get_error(enum cc_fips_error *p_err); -extern int ssi_fips_ext_set_state(enum cc_fips_state_t state); -extern int ssi_fips_ext_set_error(enum cc_fips_error err); - -/* FIPS power-up tests */ -extern enum cc_fips_error ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer); -extern enum cc_fips_error ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer); -extern enum cc_fips_error ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer); -extern enum cc_fips_error ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer); -extern enum cc_fips_error ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer); -extern enum cc_fips_error ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer); -extern size_t ssi_fips_max_mem_alloc_size(void); - -/* The function called once at driver entry point to check whether TEE FIPS error occured.*/ -static enum ssi_fips_error ssi_fips_get_tee_error(struct ssi_drvdata *drvdata) -{ - u32 regVal; - void __iomem *cc_base = drvdata->cc_base; - - regVal = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST)); - if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) - return CC_REE_FIPS_ERROR_OK; - - return CC_REE_FIPS_ERROR_FROM_TEE; -} - -/* - * This function should push the FIPS REE library status towards the TEE library. - * By writing the error state to HOST_GPR0 register. The function is called from - * driver entry point so no need to protect by mutex. 
- */ -static void ssi_fips_update_tee_upon_ree_status(struct ssi_drvdata *drvdata, enum cc_fips_error err) -{ - void __iomem *cc_base = drvdata->cc_base; - - if (err == CC_REE_FIPS_ERROR_OK) - CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_OK)); - else - CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_ERROR)); -} - -void ssi_fips_fini(struct ssi_drvdata *drvdata) -{ - struct ssi_fips_handle *fips_h = drvdata->fips_handle; - - if (!fips_h) - return; /* Not allocated */ - -#ifdef COMP_IN_WQ - if (fips_h->workq) { - flush_workqueue(fips_h->workq); - destroy_workqueue(fips_h->workq); - } -#else - /* Kill tasklet */ - tasklet_kill(&fips_h->fipstask); -#endif - memset(fips_h, 0, sizeof(struct ssi_fips_handle)); - kfree(fips_h); - drvdata->fips_handle = NULL; -} - -void fips_handler(struct ssi_drvdata *drvdata) -{ - struct ssi_fips_handle *fips_handle_ptr = - drvdata->fips_handle; -#ifdef COMP_IN_WQ - queue_delayed_work(fips_handle_ptr->workq, &fips_handle_ptr->fipswork, 0); -#else - tasklet_schedule(&fips_handle_ptr->fipstask); -#endif -} - -#ifdef COMP_IN_WQ -static void fips_wq_handler(struct work_struct *work) -{ - struct ssi_drvdata *drvdata = - container_of(work, struct ssi_drvdata, fipswork.work); - - fips_dsr((unsigned long)drvdata); -} -#endif - -/* Deferred service handler, run as interrupt-fired tasklet */ -static void fips_dsr(unsigned long devarg) -{ - struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg; - void __iomem *cc_base = drvdata->cc_base; - u32 irq; - u32 teeFipsError = 0; - - irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK)); - - if (irq & SSI_GPR0_IRQ_MASK) { - teeFipsError = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST)); - if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) - ssi_fips_set_error(drvdata, CC_REE_FIPS_ERROR_FROM_TEE); - } - - /* after verifing that there is nothing to do, Unmask AXI completion interrupt */ - CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), - CC_HAL_READ_REGISTER( - CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq); -} - -enum cc_fips_error cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata) -{ - enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK; - void *cpu_addr_buffer = NULL; - dma_addr_t dma_handle; - size_t alloc_buff_size = ssi_fips_max_mem_alloc_size(); - struct device *dev = &drvdata->plat_dev->dev; - - // allocate memory using dma_alloc_coherent - for phisical, consecutive and cache coherent buffer (memory map is not needed) - // the return value is the virtual address - use it to copy data into the buffer - // the dma_handle is the returned phy address - use it in the HW descriptor - FIPS_DBG("dma_alloc_coherent \n"); - cpu_addr_buffer = dma_alloc_coherent(dev, alloc_buff_size, &dma_handle, GFP_KERNEL); - if (!cpu_addr_buffer) - return CC_REE_FIPS_ERROR_GENERAL; - - FIPS_DBG("allocated coherent buffer - addr 0x%08X , size = %d \n", (size_t)cpu_addr_buffer, alloc_buff_size); - -#if FIPS_POWER_UP_TEST_CIPHER - FIPS_DBG("ssi_cipher_fips_power_up_tests ...\n"); - fips_error = ssi_cipher_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle); - FIPS_DBG("ssi_cipher_fips_power_up_tests - done. 
(fips_error = %d) \n", fips_error); -#endif -#if FIPS_POWER_UP_TEST_CMAC - if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) { - FIPS_DBG("ssi_cmac_fips_power_up_tests ...\n"); - fips_error = ssi_cmac_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle); - FIPS_DBG("ssi_cmac_fips_power_up_tests - done. (fips_error = %d) \n", fips_error); - } -#endif -#if FIPS_POWER_UP_TEST_HASH - if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) { - FIPS_DBG("ssi_hash_fips_power_up_tests ...\n"); - fips_error = ssi_hash_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle); - FIPS_DBG("ssi_hash_fips_power_up_tests - done. (fips_error = %d) \n", fips_error); - } -#endif -#if FIPS_POWER_UP_TEST_HMAC - if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) { - FIPS_DBG("ssi_hmac_fips_power_up_tests ...\n"); - fips_error = ssi_hmac_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle); - FIPS_DBG("ssi_hmac_fips_power_up_tests - done. (fips_error = %d) \n", fips_error); - } -#endif -#if FIPS_POWER_UP_TEST_CCM - if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) { - FIPS_DBG("ssi_ccm_fips_power_up_tests ...\n"); - fips_error = ssi_ccm_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle); - FIPS_DBG("ssi_ccm_fips_power_up_tests - done. (fips_error = %d) \n", fips_error); - } -#endif -#if FIPS_POWER_UP_TEST_GCM - if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) { - FIPS_DBG("ssi_gcm_fips_power_up_tests ...\n"); - fips_error = ssi_gcm_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle); - FIPS_DBG("ssi_gcm_fips_power_up_tests - done. (fips_error = %d) \n", fips_error); - } -#endif - /* deallocate the buffer when all tests are done... */ - FIPS_DBG("dma_free_coherent \n"); - dma_free_coherent(dev, alloc_buff_size, cpu_addr_buffer, dma_handle); - - return fips_error; -} - -/* The function checks if FIPS supported and FIPS error exists.* - * It should be used in every driver API. - */ -int ssi_fips_check_fips_error(void) -{ - enum cc_fips_state_t fips_state; - - if (ssi_fips_get_state(&fips_state) != 0) { - FIPS_LOG("ssi_fips_get_state FAILED, returning.. \n"); - return -ENOEXEC; - } - if (fips_state == CC_FIPS_STATE_ERROR) { - FIPS_LOG("ssi_fips_get_state: fips_state is %d, returning.. \n", fips_state); - return -ENOEXEC; - } - return 0; -} - -/* The function sets the REE FIPS state.* - * It should be used while driver is being loaded. - */ -int ssi_fips_set_state(enum cc_fips_state_t state) -{ - return ssi_fips_ext_set_state(state); -} - -/* The function sets the REE FIPS error, and pushes the error to TEE library. * - * It should be used when any of the KAT tests fails. 
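The error handling that follows treats the REE FIPS error as a sticky latch: it may be set once, never cleared and never overwritten by a later error. The rule in isolation looks like this (the names and the enum are illustrative, not the driver's):

#include <linux/errno.h>

enum example_fips_error { EXAMPLE_FIPS_OK = 0, EXAMPLE_FIPS_ERR };

static enum example_fips_error example_err = EXAMPLE_FIPS_OK;

static int example_fips_set_error(enum example_fips_error err)
{
	if (err == EXAMPLE_FIPS_OK)		/* clearing is not allowed */
		return -EINVAL;
	if (example_err != EXAMPLE_FIPS_OK)	/* the first error wins */
		return -EINVAL;

	example_err = err;
	return 0;
}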
- */ -int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, enum cc_fips_error err) -{ - int rc = 0; - enum cc_fips_error current_err; - - FIPS_LOG("ssi_fips_set_error - fips_error = %d \n", err); - - // setting no error is not allowed - if (err == CC_REE_FIPS_ERROR_OK) - return -ENOEXEC; - - // If error exists, do not set new error - if (ssi_fips_get_error(¤t_err) != 0) - return -ENOEXEC; - - if (current_err != CC_REE_FIPS_ERROR_OK) - return -ENOEXEC; - - // set REE internal error and state - rc = ssi_fips_ext_set_error(err); - if (rc != 0) - return -ENOEXEC; - - rc = ssi_fips_ext_set_state(CC_FIPS_STATE_ERROR); - if (rc != 0) - return -ENOEXEC; - - // push error towards TEE libraray, if it's not TEE error - if (err != CC_REE_FIPS_ERROR_FROM_TEE) - ssi_fips_update_tee_upon_ree_status(p_drvdata, err); - - return rc; -} - -/* The function called once at driver entry point .*/ -int ssi_fips_init(struct ssi_drvdata *p_drvdata) -{ - enum cc_fips_error rc = CC_REE_FIPS_ERROR_OK; - struct ssi_fips_handle *fips_h; - - FIPS_DBG("CC FIPS code .. (fips=%d) \n", ssi_fips_support); - - fips_h = kzalloc(sizeof(struct ssi_fips_handle), GFP_KERNEL); - if (!fips_h) { - ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL); - return -ENOMEM; - } - - p_drvdata->fips_handle = fips_h; - -#ifdef COMP_IN_WQ - SSI_LOG_DEBUG("Initializing fips workqueue\n"); - fips_h->workq = create_singlethread_workqueue("arm_cc7x_fips_wq"); - if (unlikely(!fips_h->workq)) { - SSI_LOG_ERR("Failed creating fips work queue\n"); - ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL); - rc = -ENOMEM; - goto ssi_fips_init_err; - } - INIT_DELAYED_WORK(&fips_h->fipswork, fips_wq_handler); -#else - SSI_LOG_DEBUG("Initializing fips tasklet\n"); - tasklet_init(&fips_h->fipstask, fips_dsr, (unsigned long)p_drvdata); -#endif - - /* init fips driver data */ - rc = ssi_fips_set_state((ssi_fips_support == 0) ? CC_FIPS_STATE_NOT_SUPPORTED : CC_FIPS_STATE_SUPPORTED); - if (unlikely(rc != 0)) { - ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL); - rc = -EAGAIN; - goto ssi_fips_init_err; - } - - /* Run power up tests (before registration and operating the HW engines) */ - FIPS_DBG("ssi_fips_get_tee_error \n"); - rc = ssi_fips_get_tee_error(p_drvdata); - if (unlikely(rc != CC_REE_FIPS_ERROR_OK)) { - ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_FROM_TEE); - rc = -EAGAIN; - goto ssi_fips_init_err; - } - - FIPS_DBG("cc_fips_run_power_up_tests \n"); - rc = cc_fips_run_power_up_tests(p_drvdata); - if (unlikely(rc != CC_REE_FIPS_ERROR_OK)) { - ssi_fips_set_error(p_drvdata, rc); - rc = -EAGAIN; - goto ssi_fips_init_err; - } - FIPS_LOG("cc_fips_run_power_up_tests - done ... fips_error = %d \n", rc); - - /* when all tests passed, update TEE with fips OK status after power up tests */ - ssi_fips_update_tee_upon_ree_status(p_drvdata, CC_REE_FIPS_ERROR_OK); - - if (unlikely(rc != 0)) { - rc = -EAGAIN; - ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL); - goto ssi_fips_init_err; - } - - return 0; - -ssi_fips_init_err: - ssi_fips_fini(p_drvdata); - return rc; -} - diff --git a/drivers/staging/ccree/ssi_fips_local.h b/drivers/staging/ccree/ssi_fips_local.h deleted file mode 100644 index 8c7994fe9fae..000000000000 --- a/drivers/staging/ccree/ssi_fips_local.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (C) 2012-2017 ARM Limited or its affiliates. 
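The header removed below follows the usual kernel pattern for optional features: when the FIPS config option is not set, the entry points collapse to empty static inline stubs so callers never need #ifdefs of their own. The general shape of that pattern (CONFIG_EXAMPLE_FEATURE and the names are illustrative):

struct example_dev;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_init(struct example_dev *dev);
void example_feature_fini(struct example_dev *dev);
#else
static inline int example_feature_init(struct example_dev *dev)
{
	return 0;
}

static inline void example_feature_fini(struct example_dev *dev) {}
#endif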
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef __SSI_FIPS_LOCAL_H__ -#define __SSI_FIPS_LOCAL_H__ - -#ifdef CONFIG_CCX7REE_FIPS_SUPPORT - -#include "ssi_fips.h" -struct ssi_drvdata; - -#define CHECK_AND_RETURN_UPON_FIPS_ERROR() {\ - if (ssi_fips_check_fips_error() != 0) {\ - return -ENOEXEC;\ - } \ -} - -#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR() {\ - if (ssi_fips_check_fips_error() != 0) {\ - return;\ - } \ -} - -#define SSI_FIPS_INIT(p_drvData) (ssi_fips_init(p_drvData)) -#define SSI_FIPS_FINI(p_drvData) (ssi_fips_fini(p_drvData)) - -#define FIPS_LOG(...) SSI_LOG(KERN_INFO, __VA_ARGS__) -#define FIPS_DBG(...) //SSI_LOG(KERN_INFO, __VA_ARGS__) - -/* FIPS functions */ -int ssi_fips_init(struct ssi_drvdata *p_drvdata); -void ssi_fips_fini(struct ssi_drvdata *drvdata); -int ssi_fips_check_fips_error(void); -int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, enum cc_fips_error err); -void fips_handler(struct ssi_drvdata *drvdata); - -#else /* CONFIG_CC7XXREE_FIPS_SUPPORT */ - -#define CHECK_AND_RETURN_UPON_FIPS_ERROR() -#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR() - -static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata) -{ - return 0; -} - -static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {} - -void fips_handler(struct ssi_drvdata *drvdata); - -#endif /* CONFIG_CC7XXREE_FIPS_SUPPORT */ - -#endif /*__SSI_FIPS_LOCAL_H__*/ - diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c index ae8f36af3837..1a405bbadf6d 100644 --- a/drivers/staging/ccree/ssi_hash.c +++ b/drivers/staging/ccree/ssi_hash.c @@ -30,7 +30,6 @@ #include "ssi_sysfs.h" #include "ssi_hash.h" #include "ssi_sram_mgr.h" -#include "ssi_fips_local.h" #define SSI_MAX_AHASH_SEQ_LEN 12 #define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE) @@ -71,8 +70,8 @@ static void ssi_hash_create_xcbc_setup( unsigned int *seq_size); static void ssi_hash_create_cmac_setup(struct ahash_request *areq, - struct cc_hw_desc desc[], - unsigned int *seq_size); + struct cc_hw_desc desc[], + unsigned int *seq_size); struct ssi_hash_alg { struct list_head entry; @@ -118,8 +117,8 @@ static void ssi_hash_create_data_desc( static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc) { if (unlikely((mode == DRV_HASH_MD5) || - (mode == DRV_HASH_SHA384) || - (mode == DRV_HASH_SHA512))) { + (mode == DRV_HASH_SHA384) || + (mode == DRV_HASH_SHA512))) { set_bytes_swap(desc, 1); } else { set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN); @@ -136,13 +135,13 @@ static int ssi_hash_map_result(struct device *dev, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) { SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n", - digestsize); + digestsize); return -ENOMEM; } SSI_LOG_DEBUG("Mapped digest result buffer %u B " - "at va=%pK to dma=0x%llX\n", + "at va=%pK to dma=%pad\n", digestsize, state->digest_result_buff, - (unsigned long 
long)state->digest_result_dma_addr); + state->digest_result_dma_addr); return 0; } @@ -201,12 +200,12 @@ static int ssi_hash_map_request(struct device *dev, state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->digest_buff_dma_addr)) { SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n", - ctx->inter_digestsize, state->digest_buff); + ctx->inter_digestsize, state->digest_buff); goto fail3; } - SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n", - ctx->inter_digestsize, state->digest_buff, - (unsigned long long)state->digest_buff_dma_addr); + SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n", + ctx->inter_digestsize, state->digest_buff, + state->digest_buff_dma_addr); if (is_hmac) { dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); @@ -250,12 +249,12 @@ static int ssi_hash_map_request(struct device *dev, state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) { SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n", - HASH_LEN_SIZE, state->digest_bytes_len); + HASH_LEN_SIZE, state->digest_bytes_len); goto fail4; } - SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=0x%llX\n", - HASH_LEN_SIZE, state->digest_bytes_len, - (unsigned long long)state->digest_bytes_len_dma_addr); + SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n", + HASH_LEN_SIZE, state->digest_bytes_len, + state->digest_bytes_len_dma_addr); } else { state->digest_bytes_len_dma_addr = 0; } @@ -264,12 +263,13 @@ static int ssi_hash_map_request(struct device *dev, state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->opad_digest_dma_addr)) { SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n", - ctx->inter_digestsize, state->opad_digest_buff); + ctx->inter_digestsize, + state->opad_digest_buff); goto fail5; } - SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=0x%llX\n", - ctx->inter_digestsize, state->opad_digest_buff, - (unsigned long long)state->opad_digest_dma_addr); + SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n", + ctx->inter_digestsize, state->opad_digest_buff, + state->opad_digest_dma_addr); } else { state->opad_digest_dma_addr = 0; } @@ -297,20 +297,14 @@ fail2: fail1: kfree(state->digest_buff); fail_digest_result_buff: - if (state->digest_result_buff) { - kfree(state->digest_result_buff); - state->digest_result_buff = NULL; - } + kfree(state->digest_result_buff); + state->digest_result_buff = NULL; fail_buff1: - if (state->buff1) { - kfree(state->buff1); - state->buff1 = NULL; - } + kfree(state->buff1); + state->buff1 = NULL; fail_buff0: - if (state->buff0) { - kfree(state->buff0); - state->buff0 = NULL; - } + kfree(state->buff0); + state->buff0 = NULL; fail0: return rc; } @@ -322,22 +316,22 @@ static void ssi_hash_unmap_request(struct device *dev, if (state->digest_buff_dma_addr != 0) { dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); - SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=0x%llX\n", - (unsigned long long)state->digest_buff_dma_addr); + SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=%pad\n", + state->digest_buff_dma_addr); state->digest_buff_dma_addr = 0; } if 
(state->digest_bytes_len_dma_addr != 0) { dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL); - SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=0x%llX\n", - (unsigned long long)state->digest_bytes_len_dma_addr); + SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n", + state->digest_bytes_len_dma_addr); state->digest_bytes_len_dma_addr = 0; } if (state->opad_digest_dma_addr != 0) { dma_unmap_single(dev, state->opad_digest_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); - SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=0x%llX\n", - (unsigned long long)state->opad_digest_dma_addr); + SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=%pad\n", + state->opad_digest_dma_addr); state->opad_digest_dma_addr = 0; } @@ -359,9 +353,9 @@ static void ssi_hash_unmap_result(struct device *dev, digestsize, DMA_BIDIRECTIONAL); SSI_LOG_DEBUG("unmpa digest result buffer " - "va (%pK) pa (%llx) len %u\n", + "va (%pK) pa (%pad) len %u\n", state->digest_result_buff, - (unsigned long long)state->digest_result_dma_addr, + state->digest_result_dma_addr, digestsize); memcpy(result, state->digest_result_buff, @@ -431,8 +425,6 @@ static int ssi_hash_digest(struct ahash_req_ctx *state, SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); - if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) { SSI_LOG_ERR("map_ahash_source() failed\n"); return -ENOMEM; @@ -596,16 +588,16 @@ static int ssi_hash_update(struct ahash_req_ctx *state, SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ? "hmac" : "hash", nbytes); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); if (nbytes == 0) { /* no real updates required */ return 0; } - if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size))) { + rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size); + if (unlikely(rc)) { if (rc == 1) { SSI_LOG_DEBUG(" data size not require HW update %x\n", - nbytes); + nbytes); /* No hardware updates are required */ return 0; } @@ -693,8 +685,6 @@ static int ssi_hash_finup(struct ahash_req_ctx *state, SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); - if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) { SSI_LOG_ERR("map_ahash_request_final() failed\n"); return -ENOMEM; @@ -829,8 +819,6 @@ static int ssi_hash_final(struct ahash_req_ctx *state, SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? 
"hmac" : "hash", nbytes); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); - if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) { SSI_LOG_ERR("map_ahash_request_final() failed\n"); return -ENOMEM; @@ -964,7 +952,6 @@ static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx) state->xcbc_count = 0; - CHECK_AND_RETURN_UPON_FIPS_ERROR(); ssi_hash_map_request(dev, state, ctx); return 0; @@ -975,7 +962,7 @@ static int ssi_hash_setkey(void *hash, unsigned int keylen, bool synchronize) { - unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; + unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; struct ssi_crypto_req ssi_req = {}; struct ssi_hash_ctx *ctx = NULL; int blocksize = 0; @@ -984,9 +971,8 @@ static int ssi_hash_setkey(void *hash, struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN]; ssi_sram_addr_t larval_addr; - SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen); + SSI_LOG_DEBUG("start keylen: %d", keylen); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash)); blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base); digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash)); @@ -1012,9 +998,8 @@ static int ssi_hash_setkey(void *hash, " DMA failed\n", key, keylen); return -ENOMEM; } - SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX " - "keylen=%u\n", - (unsigned long long)ctx->key_params.key_dma_addr, + SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad " + "keylen=%u\n", ctx->key_params.key_dma_addr, ctx->key_params.keylen); if (keylen > blocksize) { @@ -1118,7 +1103,7 @@ static int ssi_hash_setkey(void *hash, /* Prepare ipad key */ hw_desc_init(&desc[idx]); - set_xor_val(&desc[idx], hmacPadConst[i]); + set_xor_val(&desc[idx], hmac_pad_const[i]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); @@ -1155,17 +1140,17 @@ out: if (ctx->key_params.key_dma_addr) { dma_unmap_single(&ctx->drvdata->plat_dev->dev, - ctx->key_params.key_dma_addr, - ctx->key_params.keylen, DMA_TO_DEVICE); - SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n", - (unsigned long long)ctx->key_params.key_dma_addr, - ctx->key_params.keylen); + ctx->key_params.key_dma_addr, + ctx->key_params.keylen, DMA_TO_DEVICE); + SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", + ctx->key_params.key_dma_addr, + ctx->key_params.keylen); } return rc; } static int ssi_xcbc_setkey(struct crypto_ahash *ahash, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { struct ssi_crypto_req ssi_req = {}; struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash); @@ -1173,15 +1158,14 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash, struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN]; SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); switch (keylen) { - case AES_KEYSIZE_128: - case AES_KEYSIZE_192: - case AES_KEYSIZE_256: - break; - default: - return -EINVAL; + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + case AES_KEYSIZE_256: + break; + default: + return -EINVAL; } ctx->key_params.keylen = keylen; @@ -1196,9 +1180,9 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash, " DMA failed\n", key, keylen); return -ENOMEM; } - SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX " + SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad " "keylen=%u\n", - (unsigned long long)ctx->key_params.key_dma_addr, + 
ctx->key_params.key_dma_addr, ctx->key_params.keylen); ctx->is_hmac = true; @@ -1243,33 +1227,32 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash, crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); dma_unmap_single(&ctx->drvdata->plat_dev->dev, - ctx->key_params.key_dma_addr, - ctx->key_params.keylen, DMA_TO_DEVICE); - SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n", - (unsigned long long)ctx->key_params.key_dma_addr, - ctx->key_params.keylen); + ctx->key_params.key_dma_addr, + ctx->key_params.keylen, DMA_TO_DEVICE); + SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", + ctx->key_params.key_dma_addr, + ctx->key_params.keylen); return rc; } #if SSI_CC_HAS_CMAC static int ssi_cmac_setkey(struct crypto_ahash *ahash, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash); SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); ctx->is_hmac = true; switch (keylen) { - case AES_KEYSIZE_128: - case AES_KEYSIZE_192: - case AES_KEYSIZE_256: - break; - default: - return -EINVAL; + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + case AES_KEYSIZE_256: + break; + default: + return -EINVAL; } ctx->key_params.keylen = keylen; @@ -1302,8 +1285,8 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx) dma_unmap_single(dev, ctx->digest_buff_dma_addr, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL); SSI_LOG_DEBUG("Unmapped digest-buffer: " - "digest_buff_dma_addr=0x%llX\n", - (unsigned long long)ctx->digest_buff_dma_addr); + "digest_buff_dma_addr=%pad\n", + ctx->digest_buff_dma_addr); ctx->digest_buff_dma_addr = 0; } if (ctx->opad_tmp_keys_dma_addr != 0) { @@ -1311,8 +1294,8 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx) sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL); SSI_LOG_DEBUG("Unmapped opad-digest: " - "opad_tmp_keys_dma_addr=0x%llX\n", - (unsigned long long)ctx->opad_tmp_keys_dma_addr); + "opad_tmp_keys_dma_addr=%pad\n", + ctx->opad_tmp_keys_dma_addr); ctx->opad_tmp_keys_dma_addr = 0; } @@ -1328,23 +1311,23 @@ static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx) ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) { SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n", - sizeof(ctx->digest_buff), ctx->digest_buff); + sizeof(ctx->digest_buff), ctx->digest_buff); goto fail; } - SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=0x%llX\n", - sizeof(ctx->digest_buff), ctx->digest_buff, - (unsigned long long)ctx->digest_buff_dma_addr); + SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n", + sizeof(ctx->digest_buff), ctx->digest_buff, + ctx->digest_buff_dma_addr); ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) { SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n", - sizeof(ctx->opad_tmp_keys_buff), - ctx->opad_tmp_keys_buff); + sizeof(ctx->opad_tmp_keys_buff), + ctx->opad_tmp_keys_buff); goto fail; } - SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=0x%llX\n", - sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff, - (unsigned long long)ctx->opad_tmp_keys_dma_addr); + SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n", + sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff, + 
ctx->opad_tmp_keys_dma_addr); ctx->is_hmac = false; return 0; @@ -1364,9 +1347,8 @@ static int ssi_ahash_cra_init(struct crypto_tfm *tfm) struct ssi_hash_alg *ssi_alg = container_of(ahash_alg, struct ssi_hash_alg, ahash_alg); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_req_ctx)); + sizeof(struct ahash_req_ctx)); ctx->hash_mode = ssi_alg->hash_mode; ctx->hw_mode = ssi_alg->hw_mode; @@ -1396,7 +1378,6 @@ static int ssi_mac_update(struct ahash_request *req) int rc; u32 idx = 0; - CHECK_AND_RETURN_UPON_FIPS_ERROR(); if (req->nbytes == 0) { /* no real updates required */ return 0; @@ -1404,10 +1385,11 @@ static int ssi_mac_update(struct ahash_request *req) state->xcbc_count++; - if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size))) { + rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size); + if (unlikely(rc)) { if (rc == 1) { SSI_LOG_DEBUG(" data size not require HW update %x\n", - req->nbytes); + req->nbytes); /* No hardware updates are required */ return 0; } @@ -1454,19 +1436,19 @@ static int ssi_mac_final(struct ahash_request *req) struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN]; int idx = 0; int rc = 0; - u32 keySize, keyLen; + u32 key_size, key_len; u32 digestsize = crypto_ahash_digestsize(tfm); u32 rem_cnt = state->buff_index ? state->buff1_cnt : state->buff0_cnt; - CHECK_AND_RETURN_UPON_FIPS_ERROR(); if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { - keySize = CC_AES_128_BIT_KEY_SIZE; - keyLen = CC_AES_128_BIT_KEY_SIZE; + key_size = CC_AES_128_BIT_KEY_SIZE; + key_len = CC_AES_128_BIT_KEY_SIZE; } else { - keySize = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen; - keyLen = ctx->key_params.keylen; + key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : + ctx->key_params.keylen; + key_len = ctx->key_params.keylen; } SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt); @@ -1492,8 +1474,8 @@ static int ssi_mac_final(struct ahash_request *req) set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT); set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr + - XCBC_MAC_K1_OFFSET), keySize, NS_BIT); - set_key_size_aes(&desc[idx], keyLen); + XCBC_MAC_K1_OFFSET), key_size, NS_BIT); + set_key_size_aes(&desc[idx], key_len); set_flow_mode(&desc[idx], S_DIN_to_AES); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; @@ -1522,7 +1504,7 @@ static int ssi_mac_final(struct ahash_request *req) if (state->xcbc_count == 0) { hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); - set_key_size_aes(&desc[idx], keyLen); + set_key_size_aes(&desc[idx], key_len); set_cmac_size0_mode(&desc[idx]); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; @@ -1569,9 +1551,8 @@ static int ssi_mac_finup(struct ahash_request *req) u32 digestsize = crypto_ahash_digestsize(tfm); SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); if (state->xcbc_count > 0 && req->nbytes == 0) { - SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final \n"); + SSI_LOG_DEBUG("No data to update. 
Call to fdx_mac_final\n"); return ssi_mac_final(req); } @@ -1636,12 +1617,11 @@ static int ssi_mac_digest(struct ahash_request *req) u32 digestsize = crypto_ahash_digestsize(tfm); struct ssi_crypto_req ssi_req = {}; struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN]; - u32 keyLen; + u32 key_len; int idx = 0; int rc; SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes); - CHECK_AND_RETURN_UPON_FIPS_ERROR(); if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) { SSI_LOG_ERR("map_ahash_source() failed\n"); @@ -1662,17 +1642,17 @@ static int ssi_mac_digest(struct ahash_request *req) ssi_req.user_arg = (void *)req; if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { - keyLen = CC_AES_128_BIT_KEY_SIZE; + key_len = CC_AES_128_BIT_KEY_SIZE; ssi_hash_create_xcbc_setup(req, desc, &idx); } else { - keyLen = ctx->key_params.keylen; + key_len = ctx->key_params.keylen; ssi_hash_create_cmac_setup(req, desc, &idx); } if (req->nbytes == 0) { hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); - set_key_size_aes(&desc[idx], keyLen); + set_key_size_aes(&desc[idx], key_len); set_cmac_size0_mode(&desc[idx]); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; @@ -1764,8 +1744,6 @@ static int ssi_ahash_export(struct ahash_request *req, void *out) state->buff0_cnt; const u32 tmp = CC_EXPORT_MAGIC; - CHECK_AND_RETURN_UPON_FIPS_ERROR(); - memcpy(out, &tmp, sizeof(u32)); out += sizeof(u32); @@ -1805,8 +1783,6 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in) u32 tmp; int rc; - CHECK_AND_RETURN_UPON_FIPS_ERROR(); - memcpy(&tmp, in, sizeof(u32)); if (tmp != CC_EXPORT_MAGIC) { rc = -EINVAL; @@ -1856,7 +1832,7 @@ out: } static int ssi_ahash_setkey(struct crypto_ahash *ahash, - const u8 *key, unsigned int keylen) + const u8 *key, unsigned int keylen) { return ssi_hash_setkey((void *)ahash, key, keylen, false); } @@ -2138,7 +2114,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) /* Copy-to-sram digest-len */ ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs, - ARRAY_SIZE(digest_len_init), larval_seq, &larval_seq_len); + ARRAY_SIZE(digest_len_init), + larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (unlikely(rc != 0)) goto init_digest_const_err; @@ -2149,7 +2126,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) #if (DX_DEV_SHA_MAX > 256) /* Copy-to-sram digest-len for sha384/512 */ ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs, - ARRAY_SIZE(digest_len_sha512_init), larval_seq, &larval_seq_len); + ARRAY_SIZE(digest_len_sha512_init), + larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (unlikely(rc != 0)) goto init_digest_const_err; @@ -2163,7 +2141,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) /* Copy-to-sram initial SHA* digests */ ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs, - ARRAY_SIZE(md5_init), larval_seq, &larval_seq_len); + ARRAY_SIZE(md5_init), larval_seq, + &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (unlikely(rc != 0)) goto init_digest_const_err; @@ -2171,7 +2150,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) larval_seq_len = 0; ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs, - ARRAY_SIZE(sha1_init), larval_seq, &larval_seq_len); + ARRAY_SIZE(sha1_init), larval_seq, + &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (unlikely(rc != 0)) goto init_digest_const_err; @@ -2179,7 +2159,8 @@ 
int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) larval_seq_len = 0; ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs, - ARRAY_SIZE(sha224_init), larval_seq, &larval_seq_len); + ARRAY_SIZE(sha224_init), larval_seq, + &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (unlikely(rc != 0)) goto init_digest_const_err; @@ -2187,7 +2168,8 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) larval_seq_len = 0; ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs, - ARRAY_SIZE(sha256_init), larval_seq, &larval_seq_len); + ARRAY_SIZE(sha256_init), larval_seq, + &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (unlikely(rc != 0)) goto init_digest_const_err; @@ -2201,10 +2183,10 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0]; ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1, - larval_seq, &larval_seq_len); + larval_seq, &larval_seq_len); sram_buff_ofs += sizeof(u32); ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1, - larval_seq, &larval_seq_len); + larval_seq, &larval_seq_len); sram_buff_ofs += sizeof(u32); } rc = send_request_init(drvdata, larval_seq, larval_seq_len); @@ -2219,10 +2201,10 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0]; ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1, - larval_seq, &larval_seq_len); + larval_seq, &larval_seq_len); sram_buff_ofs += sizeof(u32); ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1, - larval_seq, &larval_seq_len); + larval_seq, &larval_seq_len); sram_buff_ofs += sizeof(u32); } rc = send_request_init(drvdata, larval_seq, larval_seq_len); @@ -2247,7 +2229,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata) hash_handle = kzalloc(sizeof(struct ssi_hash_handle), GFP_KERNEL); if (!hash_handle) { SSI_LOG_ERR("kzalloc failed to allocate %zu B\n", - sizeof(struct ssi_hash_handle)); + sizeof(struct ssi_hash_handle)); rc = -ENOMEM; goto fail; } @@ -2319,7 +2301,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata) if (IS_ERR(t_alg)) { rc = PTR_ERR(t_alg); SSI_LOG_ERR("%s alg allocation failed\n", - driver_hash[alg].driver_name); + driver_hash[alg].driver_name); goto fail; } t_alg->drvdata = drvdata; @@ -2338,11 +2320,8 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata) return 0; fail: - - if (drvdata->hash_handle) { - kfree(drvdata->hash_handle); - drvdata->hash_handle = NULL; - } + kfree(drvdata->hash_handle); + drvdata->hash_handle = NULL; return rc; } @@ -2365,8 +2344,9 @@ int ssi_hash_free(struct ssi_drvdata *drvdata) } static void ssi_hash_create_xcbc_setup(struct ahash_request *areq, - struct cc_hw_desc desc[], - unsigned int *seq_size) { + struct cc_hw_desc desc[], + unsigned int *seq_size) +{ unsigned int idx = *seq_size; struct ahash_req_ctx *state = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); @@ -2422,8 +2402,8 @@ static void ssi_hash_create_xcbc_setup(struct ahash_request *areq, } static void ssi_hash_create_cmac_setup(struct ahash_request *areq, - struct cc_hw_desc desc[], - unsigned int *seq_size) + struct cc_hw_desc desc[], + unsigned int *seq_size) { unsigned int idx = *seq_size; struct ahash_req_ctx *state = ahash_request_ctx(areq); diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c index 5ff3368c04d9..86364f81acab 100644 --- a/drivers/staging/ccree/ssi_ivgen.c +++ b/drivers/staging/ccree/ssi_ivgen.c 
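Editor's note: one of the cleanups folded into ssi_hash_alloc() above drops the NULL test around kfree(); kfree(NULL) is defined to be a no-op, so error paths can free unconditionally. A minimal sketch of the resulting pattern (struct foo_handle, foo_alloc() and foo_release() are hypothetical, not part of the driver):

#include <linux/errno.h>
#include <linux/slab.h>

struct foo_handle {
        void *priv;
};

static int foo_alloc(struct foo_handle **out)
{
        struct foo_handle *h = kzalloc(sizeof(*h), GFP_KERNEL);

        if (!h)
                return -ENOMEM;

        *out = h;
        return 0;
}

static void foo_release(struct foo_handle **out)
{
        /* No "if (*out)" guard needed: kfree(NULL) does nothing. */
        kfree(*out);
        *out = NULL;
}
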
@@ -158,7 +158,7 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata) void ssi_ivgen_fini(struct ssi_drvdata *drvdata) { struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle; - struct device *device = &(drvdata->plat_dev->dev); + struct device *device = &drvdata->plat_dev->dev; if (!ivgen_ctx) return; @@ -166,7 +166,8 @@ void ssi_ivgen_fini(struct ssi_drvdata *drvdata) if (ivgen_ctx->pool_meta) { memset(ivgen_ctx->pool_meta, 0, SSI_IVPOOL_META_SIZE); dma_free_coherent(device, SSI_IVPOOL_META_SIZE, - ivgen_ctx->pool_meta, ivgen_ctx->pool_meta_dma); + ivgen_ctx->pool_meta, + ivgen_ctx->pool_meta_dma); } ivgen_ctx->pool = NULL_SRAM_ADDR; @@ -201,7 +202,8 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata) /* Allocate pool's header for intial enc. key/IV */ ivgen_ctx->pool_meta = dma_alloc_coherent(device, SSI_IVPOOL_META_SIZE, - &ivgen_ctx->pool_meta_dma, GFP_KERNEL); + &ivgen_ctx->pool_meta_dma, + GFP_KERNEL); if (!ivgen_ctx->pool_meta) { SSI_LOG_ERR("Not enough memory to allocate DMA of pool_meta " "(%u B)\n", SSI_IVPOOL_META_SIZE); diff --git a/drivers/staging/ccree/ssi_pm.c b/drivers/staging/ccree/ssi_pm.c index 52a8ed579177..31325e6cd4b4 100644 --- a/drivers/staging/ccree/ssi_pm.c +++ b/drivers/staging/ccree/ssi_pm.c @@ -40,7 +40,7 @@ int ssi_power_mgr_runtime_suspend(struct device *dev) (struct ssi_drvdata *)dev_get_drvdata(dev); int rc; - SSI_LOG_DEBUG("ssi_power_mgr_runtime_suspend: set HOST_POWER_DOWN_EN\n"); + SSI_LOG_DEBUG("set HOST_POWER_DOWN_EN\n"); WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); rc = ssi_request_mgr_runtime_suspend_queue(drvdata); if (rc != 0) { @@ -58,7 +58,7 @@ int ssi_power_mgr_runtime_resume(struct device *dev) struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_get_drvdata(dev); - SSI_LOG_DEBUG("ssi_power_mgr_runtime_resume , unset HOST_POWER_DOWN_EN\n"); + SSI_LOG_DEBUG("unset HOST_POWER_DOWN_EN\n"); WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); rc = cc_clk_on(drvdata); diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c index 46d9396f9ff9..2eda82f317d2 100644 --- a/drivers/staging/ccree/ssi_request_mgr.c +++ b/drivers/staging/ccree/ssi_request_mgr.c @@ -30,8 +30,6 @@ #include "ssi_sysfs.h" #include "ssi_ivgen.h" #include "ssi_pm.h" -#include "ssi_fips.h" -#include "ssi_fips_local.h" #define SSI_MAX_POLL_ITER 10 @@ -129,7 +127,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata) SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size); if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) { SSI_LOG_ERR("Invalid HW queue size = %u (Min. 
required is %u)\n", - req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE); + req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE); rc = -ENOMEM; goto req_mgr_init_err; } @@ -138,7 +136,9 @@ int request_mgr_init(struct ssi_drvdata *drvdata) /* Allocate DMA word for "dummy" completion descriptor use */ req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev, - sizeof(u32), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL); + sizeof(u32), + &req_mgr_h->dummy_comp_buff_dma, + GFP_KERNEL); if (!req_mgr_h->dummy_comp_buff) { SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped " "buffer\n", sizeof(u32)); @@ -177,7 +177,8 @@ static inline void enqueue_seq( writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0))); #ifdef DX_DUMP_DESCS SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i, - seq[i].word[0], seq[i].word[1], seq[i].word[2], seq[i].word[3], seq[i].word[4], seq[i].word[5]); + seq[i].word[0], seq[i].word[1], seq[i].word[2], + seq[i].word[3], seq[i].word[4], seq[i].word[5]); #endif } } @@ -211,7 +212,7 @@ static inline int request_mgr_queues_status_check( (MAX_REQUEST_QUEUE_SIZE - 1)) == req_mgr_h->req_queue_tail)) { SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n", - req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE); + req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE); return -EBUSY; } @@ -221,9 +222,8 @@ static inline int request_mgr_queues_status_check( /* Wait for space in HW queue. Poll constant num of iterations. */ for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) { req_mgr_h->q_free_slots = - CC_HAL_READ_REGISTER( - CC_REG_OFFSET(CRY_KERNEL, - DSCRPTR_QUEUE_CONTENT)); + CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, + DSCRPTR_QUEUE_CONTENT)); if (unlikely(req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)) { req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots; @@ -235,7 +235,7 @@ static inline int request_mgr_queues_status_check( } SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n", - req_mgr_h->q_free_slots, total_seq_len); + req_mgr_h->q_free_slots, total_seq_len); } /* No room in the HW queue try again later */ SSI_LOG_DEBUG("HW FIFO full, timeout. 
req_queue_head=%d " @@ -291,9 +291,8 @@ int send_request( * in case iv gen add the max size and in case of no dout add 1 * for the internal completion descriptor */ - rc = request_mgr_queues_status_check(req_mgr_h, - cc_base, - max_required_seq_len); + rc = request_mgr_queues_status_check(req_mgr_h, cc_base, + max_required_seq_len); if (likely(rc == 0)) /* There is enough place in the queue */ break; @@ -320,21 +319,22 @@ int send_request( if (!is_dout) { init_completion(&ssi_req->seq_compl); ssi_req->user_cb = request_mgr_complete; - ssi_req->user_arg = &(ssi_req->seq_compl); + ssi_req->user_arg = &ssi_req->seq_compl; total_seq_len++; } if (ssi_req->ivgen_dma_addr_len > 0) { - SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses 0x%llX, 0x%llX, 0x%llX, IV-size=%u\n", - ssi_req->ivgen_dma_addr_len, - (unsigned long long)ssi_req->ivgen_dma_addr[0], - (unsigned long long)ssi_req->ivgen_dma_addr[1], - (unsigned long long)ssi_req->ivgen_dma_addr[2], - ssi_req->ivgen_size); + SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n", + ssi_req->ivgen_dma_addr_len, + ssi_req->ivgen_dma_addr[0], + ssi_req->ivgen_dma_addr[1], + ssi_req->ivgen_dma_addr[2], + ssi_req->ivgen_size); /* Acquire IV from pool */ - rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr, ssi_req->ivgen_dma_addr_len, - ssi_req->ivgen_size, iv_seq, &iv_seq_len); + rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr, + ssi_req->ivgen_dma_addr_len, + ssi_req->ivgen_size, iv_seq, &iv_seq_len); if (unlikely(rc != 0)) { SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc); @@ -418,9 +418,8 @@ int send_request_init( enqueue_seq(cc_base, desc, len); /* Update the free slots in HW queue */ - req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER( - CC_REG_OFFSET(CRY_KERNEL, - DSCRPTR_QUEUE_CONTENT)); + req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, + DSCRPTR_QUEUE_CONTENT)); return 0; } @@ -545,8 +544,7 @@ static void comp_handler(unsigned long devarg) } /* after verifing that there is nothing to do, Unmask AXI completion interrupt */ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), - CC_HAL_READ_REGISTER( - CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq); + CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq); } /* diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/ssi_sram_mgr.c index e05c0c13c2eb..f11116afe89a 100644 --- a/drivers/staging/ccree/ssi_sram_mgr.c +++ b/drivers/staging/ccree/ssi_sram_mgr.c @@ -58,7 +58,7 @@ int ssi_sram_mgr_init(struct ssi_drvdata *drvdata) sizeof(struct ssi_sram_mgr_ctx), GFP_KERNEL); if (!drvdata->sram_mgr_handle) { SSI_LOG_ERR("Not enough memory to allocate SRAM_MGR ctx (%zu)\n", - sizeof(struct ssi_sram_mgr_ctx)); + sizeof(struct ssi_sram_mgr_ctx)); rc = -ENOMEM; goto out; } @@ -90,12 +90,12 @@ ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size) if (unlikely((size & 0x3) != 0)) { SSI_LOG_ERR("Requested buffer size (%u) is not multiple of 4", - size); + size); return NULL_SRAM_ADDR; } if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) { SSI_LOG_ERR("Not enough space to allocate %u B (at offset %llu)\n", - size, smgr_ctx->sram_free_offset); + size, smgr_ctx->sram_free_offset); return NULL_SRAM_ADDR; } diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c index dbcd1634aad1..0655658bba4d 100644 --- a/drivers/staging/ccree/ssi_sysfs.c +++ b/drivers/staging/ccree/ssi_sysfs.c @@ -40,8 +40,7 @@ struct stat_name { const char 
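Editor's note: the send_request() change above replaces "(unsigned long long)" casts plus %llX with the dedicated %pad specifier, which takes a pointer to a dma_addr_t and prints it correctly on both 32-bit and 64-bit builds. A small sketch (demo_log_mapping() is hypothetical):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void demo_log_mapping(struct device *dev, dma_addr_t addr, size_t len)
{
        /* %pad dereferences its argument, so pass &addr, not addr. */
        dev_dbg(dev, "mapped %zu bytes at %pad\n", len, &addr);
}
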
*stat_phase_name[MAX_STAT_PHASES]; }; -static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] = -{ +static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] = { { /* STAT_OP_TYPE_NULL */ .op_type_name = "NULL", @@ -144,8 +143,12 @@ static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES] avg = (u64)item[i][j].sum; do_div(avg, item[i][j].count); SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n", - stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j], - item[i][j].min, (int)avg, item[i][j].max, (long long)item[i][j].sum, item[i][j].count); + stat_name_db[i].op_type_name, + stat_name_db[i].stat_phase_name[j], + item[i][j].min, (int)avg, + item[i][j].max, + (long long)item[i][j].sum, + item[i][j].count); } } } @@ -156,21 +159,23 @@ static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES] **************************************/ static ssize_t ssi_sys_stats_host_db_clear(struct kobject *kobj, - struct kobj_attribute *attr, const char *buf, size_t count) + struct kobj_attribute *attr, + const char *buf, size_t count) { init_db(stat_host_db); return count; } static ssize_t ssi_sys_stats_cc_db_clear(struct kobject *kobj, - struct kobj_attribute *attr, const char *buf, size_t count) + struct kobj_attribute *attr, + const char *buf, size_t count) { init_db(stat_cc_db); return count; } static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { int i, j; char line[512]; @@ -179,7 +184,7 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj, ssize_t buf_len, tmp_len = 0; buf_len = scnprintf(buf, PAGE_SIZE, - "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n"); + "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n"); if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/ return buf_len; for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) { @@ -193,11 +198,11 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj, avg = min_cyc = max_cyc = 0; } tmp_len = scnprintf(line, 512, - "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n", - stat_name_db[i].op_type_name, - stat_name_db[i].stat_phase_name[j], - min_cyc, (unsigned int)avg, max_cyc, - stat_host_db[i][j].count); + "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n", + stat_name_db[i].op_type_name, + stat_name_db[i].stat_phase_name[j], + min_cyc, (unsigned int)avg, max_cyc, + stat_host_db[i][j].count); if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/ return buf_len; if (buf_len + tmp_len >= PAGE_SIZE) @@ -210,7 +215,7 @@ static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj, } static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { int i; char line[256]; @@ -219,7 +224,7 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj, ssize_t buf_len, tmp_len = 0; buf_len = scnprintf(buf, PAGE_SIZE, - "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n"); + "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n"); if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/ return buf_len; for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) { @@ -231,13 +236,10 @@ static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj, } else { avg = min_cyc = max_cyc = 0; } - tmp_len = scnprintf(line, 256, - "%s\t%6u\t%6u\t%6u\t%7u\n", - stat_name_db[i].op_type_name, - min_cyc, - 
(unsigned int)avg, - max_cyc, - stat_cc_db[i][STAT_PHASE_6].count); + tmp_len = scnprintf(line, 256, "%s\t%6u\t%6u\t%6u\t%7u\n", + stat_name_db[i].op_type_name, min_cyc, + (unsigned int)avg, max_cyc, + stat_cc_db[i][STAT_PHASE_6].count); if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/ return buf_len; @@ -255,7 +257,7 @@ void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result) unsigned long flags; spin_lock_irqsave(&stat_lock, flags); - update_db(&(stat_host_db[op_type][phase]), (unsigned int)result); + update_db(&stat_host_db[op_type][phase], (unsigned int)result); spin_unlock_irqrestore(&stat_lock, flags); } @@ -264,7 +266,7 @@ void update_cc_stat( unsigned int phase, unsigned int elapsed_cycles) { - update_db(&(stat_cc_db[op_type][phase]), elapsed_cycles); + update_db(&stat_cc_db[op_type][phase], elapsed_cycles); } void display_all_stat_db(void) @@ -277,7 +279,7 @@ void display_all_stat_db(void) #endif /*CC_CYCLE_COUNT*/ static ssize_t ssi_sys_regdump_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct ssi_drvdata *drvdata = sys_get_drvdata(); u32 register_value; @@ -285,20 +287,20 @@ static ssize_t ssi_sys_regdump_show(struct kobject *kobj, int offset = 0; register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE)); - offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value); register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR)); - offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value); register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN)); - offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value); register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR)); - offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value); register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT)); - offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value); return offset; } static ssize_t ssi_sys_help_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { char *help_str[] = { "cat reg_dump ", "Print several of CC register values", @@ -357,8 +359,8 @@ static struct ssi_drvdata *sys_get_drvdata(void) } static int 
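Editor's note: the sysfs show routines above build their output by repeatedly appending with scnprintf(buf + offset, PAGE_SIZE - offset, ...), which cannot overrun the one-page sysfs buffer and returns the number of characters actually written. A compact sketch of the idiom (demo_stats_show() and the values it prints are made up):

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/mm.h>

static ssize_t demo_stats_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        int i, offset = 0;

        offset += scnprintf(buf + offset, PAGE_SIZE - offset,
                            "index\tvalue\n");
        for (i = 0; i < 4; i++)
                offset += scnprintf(buf + offset, PAGE_SIZE - offset,
                                    "%d\t%d\n", i, i * i);
        return offset;
}
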
sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata, - struct kobject *parent_dir_kobj, const char *dir_name, - struct kobj_attribute *attrs, u32 num_of_attrs) + struct kobject *parent_dir_kobj, const char *dir_name, + struct kobj_attribute *attrs, u32 num_of_attrs) { int i; @@ -375,7 +377,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata, /* allocate memory for directory's attributes list */ sys_dir->sys_dir_attr_list = kzalloc(sizeof(struct attribute *) * (num_of_attrs + 1), - GFP_KERNEL); + GFP_KERNEL); if (!(sys_dir->sys_dir_attr_list)) { kobject_put(sys_dir->sys_dir_kobj); @@ -386,7 +388,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata, /* initialize attributes list */ for (i = 0; i < num_of_attrs; ++i) - sys_dir->sys_dir_attr_list[i] = &(attrs[i].attr); + sys_dir->sys_dir_attr_list[i] = &attrs[i].attr; /* last list entry should be NULL */ sys_dir->sys_dir_attr_list[num_of_attrs] = NULL; @@ -394,7 +396,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata, sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list; return sysfs_create_group(sys_dir->sys_dir_kobj, - &(sys_dir->sys_dir_attr_group)); + &sys_dir->sys_dir_attr_group); } static void sys_free_dir(struct sys_dir *sys_dir) @@ -421,9 +423,9 @@ int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata) SSI_LOG_ERR("setup sysfs under %s\n", sys_dev_obj->name); /* Initialize top directory */ - retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, - "cc_info", ssi_sys_top_level_attrs, - ARRAY_SIZE(ssi_sys_top_level_attrs)); + retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, "cc_info", + ssi_sys_top_level_attrs, + ARRAY_SIZE(ssi_sys_top_level_attrs)); return retval; } diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c index 8e9b30b26810..b455ff6714eb 100644 --- a/drivers/staging/comedi/comedi_buf.c +++ b/drivers/staging/comedi/comedi_buf.c @@ -165,7 +165,7 @@ int comedi_buf_map_put(struct comedi_buf_map *bm) int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset, void *buf, int len, int write) { - unsigned int pgoff = offset & ~PAGE_MASK; + unsigned int pgoff = offset_in_page(offset); unsigned long pg = offset >> PAGE_SHIFT; int done = 0; diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 2f7bfc1c59e5..398347fedc47 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -1962,7 +1962,8 @@ static unsigned int ni_timer_to_ns(const struct comedi_device *dev, int timer) static void ni_cmd_set_mite_transfer(struct mite_ring *ring, struct comedi_subdevice *sdev, const struct comedi_cmd *cmd, - unsigned int max_count) { + unsigned int max_count) +{ #ifdef PCIDMA unsigned int nbytes = max_count; diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c index 8935a97ec048..a5d7c87557f8 100644 --- a/drivers/staging/fbtft/fb_st7789v.c +++ b/drivers/staging/fbtft/fb_st7789v.c @@ -189,7 +189,7 @@ static int set_gamma(struct fbtft_par *par, u32 *curves) * The masks are the same for both positive and negative voltage * gamma curves. 
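Editor's note: comedi_buf_map_access() above swaps the open-coded "offset & ~PAGE_MASK" for offset_in_page() from <linux/mm.h>, which expresses the same computation by name. A one-function sketch (demo_split_offset() is hypothetical):

#include <linux/mm.h>

static void demo_split_offset(unsigned long offset,
                              unsigned long *page, unsigned int *in_page)
{
        *page = offset >> PAGE_SHIFT;        /* page index */
        *in_page = offset_in_page(offset);   /* same as offset & ~PAGE_MASK */
}
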
*/ - const u8 gamma_par_mask[] = { + static const u8 gamma_par_mask[] = { 0xFF, /* V63[3:0], V0[3:0]*/ 0x3F, /* V1[5:0] */ 0x3F, /* V2[5:0] */ diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index b742ee786615..6d0363deba61 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -84,7 +84,7 @@ static unsigned long fbtft_request_gpios_match(struct fbtft_par *par, const struct fbtft_gpio *gpio) { int ret; - long val; + unsigned int val; fbtft_par_dbg(DEBUG_REQUEST_GPIOS_MATCH, par, "%s('%s')\n", __func__, gpio->name); @@ -108,7 +108,7 @@ static unsigned long fbtft_request_gpios_match(struct fbtft_par *par, par->gpio.latch = gpio->gpio; return GPIOF_OUT_INIT_LOW; } else if (gpio->name[0] == 'd' && gpio->name[1] == 'b') { - ret = kstrtol(&gpio->name[2], 10, &val); + ret = kstrtouint(&gpio->name[2], 10, &val); if (ret == 0 && val < 16) { par->gpio.db[val] = gpio->gpio; return GPIOF_OUT_INIT_LOW; diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig index 730fd6d4db33..dfff675b3055 100644 --- a/drivers/staging/fsl-dpaa2/Kconfig +++ b/drivers/staging/fsl-dpaa2/Kconfig @@ -4,7 +4,7 @@ config FSL_DPAA2 bool "Freescale DPAA2 devices" - depends on FSL_MC_BUS + depends on FSL_MC_BUS && ARCH_LAYERSCAPE ---help--- Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs. diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c index b9a0a315e6fb..26017fe9df93 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c @@ -616,7 +616,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) free_tx_fd(priv, &fd, NULL); } else { percpu_stats->tx_packets++; - percpu_stats->tx_bytes += skb->len; + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); } return NETDEV_TX_OK; @@ -656,7 +656,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) && !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV); if (net_ratelimit()) - netdev_dbg(priv->net_dev, "TX frame FD error: %x08\n", + netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", fd_errors); } @@ -670,7 +670,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, percpu_stats->tx_errors++; if (has_fas_errors && net_ratelimit()) - netdev_dbg(priv->net_dev, "TX frame FAS error: %x08\n", + netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n", status & DPAA2_FAS_TX_ERR_MASK); } diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c index 5312edc26f01..031179ab3a22 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c @@ -217,8 +217,6 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, case 2: num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64); break; - default: - break; } for (k = 0; k < num_cnt; k++) *(data + i++) = dpni_stats.raw.counter[k]; diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig index a10aaf03f314..1af8d1d23fd9 100644 --- a/drivers/staging/fsl-mc/bus/Kconfig +++ b/drivers/staging/fsl-mc/bus/Kconfig @@ -8,7 +8,7 @@ config FSL_MC_BUS bool "QorIQ DPAA2 fsl-mc bus driver" - depends on OF && ARCH_LAYERSCAPE + depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST) select GENERIC_MSI_IRQ_DOMAIN help Driver to enable the bus infrastructure for the QorIQ DPAA2 @@ 
-18,7 +18,7 @@ config FSL_MC_BUS config FSL_MC_DPIO tristate "QorIQ DPAA2 DPIO driver" - depends on FSL_MC_BUS + depends on FSL_MC_BUS && ARCH_LAYERSCAPE help Driver for the DPAA2 DPIO object. A DPIO provides queue and buffer management facilities for software to interact with diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c index 7988612aaecf..163bdac6b051 100644 --- a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c +++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c @@ -136,18 +136,18 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm, u8 epm, int sd, int sp, int se, int dp, int de, int ep) { - return cpu_to_le32 (max_fill << SWP_CFG_DQRR_MF_SHIFT | - est << SWP_CFG_EST_SHIFT | - wn << SWP_CFG_WN_SHIFT | - rpm << SWP_CFG_RPM_SHIFT | - dcm << SWP_CFG_DCM_SHIFT | - epm << SWP_CFG_EPM_SHIFT | - sd << SWP_CFG_SD_SHIFT | - sp << SWP_CFG_SP_SHIFT | - se << SWP_CFG_SE_SHIFT | - dp << SWP_CFG_DP_SHIFT | - de << SWP_CFG_DE_SHIFT | - ep << SWP_CFG_EP_SHIFT); + return (max_fill << SWP_CFG_DQRR_MF_SHIFT | + est << SWP_CFG_EST_SHIFT | + wn << SWP_CFG_WN_SHIFT | + rpm << SWP_CFG_RPM_SHIFT | + dcm << SWP_CFG_DCM_SHIFT | + epm << SWP_CFG_EPM_SHIFT | + sd << SWP_CFG_SD_SHIFT | + sp << SWP_CFG_SP_SHIFT | + se << SWP_CFG_SE_SHIFT | + dp << SWP_CFG_DP_SHIFT | + de << SWP_CFG_DE_SHIFT | + ep << SWP_CFG_EP_SHIFT); } /** diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c index 19606e8d25dd..409f2b9e70ff 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c @@ -757,8 +757,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) error = of_address_to_resource(pdev->dev.of_node, 0, &res); if (error < 0) { dev_err(&pdev->dev, - "of_address_to_resource() failed for %s\n", - pdev->dev.of_node->full_name); + "of_address_to_resource() failed for %pOF\n", + pdev->dev.of_node); return error; } diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c index c04a2f2b3409..038da4d1ebd0 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c @@ -11,13 +11,13 @@ #include <linux/of_device.h> #include <linux/of_address.h> -#include <linux/irqchip/arm-gic-v3.h> #include <linux/of_irq.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include "fsl-mc-private.h" +#ifdef GENERIC_MSI_DOMAIN_OPS /* * Generate a unique ID identifying the interrupt (only used within the MSI * irqdomain. Combine the icid with the interrupt index. 
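Editor's note: fsl_mc_bus_probe() above moves from printing np->full_name to the %pOF format specifier, which lets printk render the device-tree node's path itself (full_name was being phased out of struct device_node). A hedged sketch (demo_report_node() is hypothetical):

#include <linux/of.h>
#include <linux/printk.h>

static void demo_report_node(struct device_node *np, int err)
{
        /* %pOF expands to the node's full device-tree path. */
        pr_err("failed to probe %pOF: %d\n", np, err);
}
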
@@ -39,6 +39,9 @@ static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg, arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev), desc); } +#else +#define fsl_mc_msi_set_desc NULL +#endif static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info) { @@ -183,8 +186,8 @@ int fsl_mc_find_msi_domain(struct device *mc_platform_dev, msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node, DOMAIN_BUS_FSL_MC_MSI); if (!msi_domain) { - pr_err("Unable to find fsl-mc MSI domain for %s\n", - mc_of_node->full_name); + pr_err("Unable to find fsl-mc MSI domain for %pOF\n", + mc_of_node); return -ENOENT; } diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c index 865d38517508..123e4af58408 100644 --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c @@ -11,7 +11,6 @@ #include <linux/of_device.h> #include <linux/of_address.h> -#include <linux/irqchip/arm-gic-v3.h> #include <linux/irq.h> #include <linux/msi.h> #include <linux/of.h> @@ -46,7 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain, * NOTE: This device id corresponds to the IOMMU stream ID * associated with the DPRC object (ICID). */ +#ifdef GENERIC_MSI_DOMAIN_OPS info->scratchpad[0].ul = mc_bus_dev->icid; +#endif msi_info = msi_get_domain_info(msi_domain->parent); return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info); } @@ -79,8 +80,7 @@ int __init its_fsl_mc_msi_init(void) parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS); if (!parent || !msi_get_domain_info(parent)) { - pr_err("%s: unable to locate ITS domain\n", - np->full_name); + pr_err("%pOF: unable to locate ITS domain\n", np); continue; } @@ -89,15 +89,14 @@ int __init its_fsl_mc_msi_init(void) &its_fsl_mc_msi_domain_info, parent); if (!mc_msi_domain) { - pr_err("%s: unable to create fsl-mc domain\n", - np->full_name); + pr_err("%pOF: unable to create fsl-mc domain\n", np); continue; } WARN_ON(mc_msi_domain->host_data != &its_fsl_mc_msi_domain_info); - pr_info("fsl-mc MSI: %s domain created\n", np->full_name); + pr_info("fsl-mc MSI: %pOF domain created\n", np); } return 0; diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c index 35221a17858b..f65c23ce83f1 100644 --- a/drivers/staging/fsl-mc/bus/mc-io.c +++ b/drivers/staging/fsl-mc/bus/mc-io.c @@ -129,8 +129,8 @@ int __must_check fsl_create_mc_io(struct device *dev, "mc_portal"); if (!res) { dev_err(dev, - "devm_request_mem_region failed for MC portal %#llx\n", - mc_portal_phys_addr); + "devm_request_mem_region failed for MC portal %pa\n", + &mc_portal_phys_addr); return -EBUSY; } @@ -139,8 +139,8 @@ int __must_check fsl_create_mc_io(struct device *dev, mc_portal_size); if (!mc_portal_virt_addr) { dev_err(dev, - "devm_ioremap_nocache failed for MC portal %#llx\n", - mc_portal_phys_addr); + "devm_ioremap_nocache failed for MC portal %pa\n", + &mc_portal_phys_addr); return -ENXIO; } @@ -242,8 +242,7 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, goto error_cleanup_resource; mc_portal_phys_addr = dpmcp_dev->regions[0].start; - mc_portal_size = dpmcp_dev->regions[0].end - - dpmcp_dev->regions[0].start + 1; + mc_portal_size = resource_size(dpmcp_dev->regions); if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size)) goto error_cleanup_resource; diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c index a1704c3a6a78..7ce105bd3977 100644 --- 
a/drivers/staging/fsl-mc/bus/mc-sys.c +++ b/drivers/staging/fsl-mc/bus/mc-sys.c @@ -37,6 +37,7 @@ #include <linux/ioport.h> #include <linux/device.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include "../include/mc.h" #include "dpmcp.h" @@ -126,11 +127,15 @@ static inline void mc_write_command(struct mc_command __iomem *portal, /* copy command parameters into the portal */ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) - __raw_writeq(cmd->params[i], &portal->params[i]); - __iowmb(); + /* + * Data is already in the expected LE byte-order. Do an + * extra LE -> CPU conversion so that the CPU -> LE done in + * the device io write api puts it back in the right order. + */ + writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]); /* submit the command by writing the header */ - __raw_writeq(cmd->header, &portal->header); + writeq(le64_to_cpu(cmd->header), &portal->header); } /** @@ -150,17 +155,20 @@ static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem * enum mc_cmd_status status; /* Copy command response header from MC portal: */ - __iormb(); - resp->header = __raw_readq(&portal->header); - __iormb(); + resp->header = cpu_to_le64(readq_relaxed(&portal->header)); status = mc_cmd_hdr_read_status(resp); if (status != MC_CMD_STATUS_OK) return status; /* Copy command response data from MC portal: */ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) - resp->params[i] = __raw_readq(&portal->params[i]); - __iormb(); + /* + * Data is expected to be in LE byte-order. Do an + * extra CPU -> LE to revert the LE -> CPU done in + * the device io read api. + */ + resp->params[i] = + cpu_to_le64(readq_relaxed(&portal->params[i])); return status; } @@ -198,8 +206,8 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io, if (time_after_eq(jiffies, jiffies_until_timeout)) { dev_dbg(mc_io->dev, - "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n", - mc_io->portal_phys_addr, + "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n", + &mc_io->portal_phys_addr, (unsigned int)mc_cmd_hdr_read_token(cmd), (unsigned int)mc_cmd_hdr_read_cmdid(cmd)); @@ -238,8 +246,8 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io, timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; if (timeout_usecs == 0) { dev_dbg(mc_io->dev, - "MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n", - mc_io->portal_phys_addr, + "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n", + &mc_io->portal_phys_addr, (unsigned int)mc_cmd_hdr_read_token(cmd), (unsigned int)mc_cmd_hdr_read_cmdid(cmd)); @@ -292,8 +300,8 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) if (status != MC_CMD_STATUS_OK) { dev_dbg(mc_io->dev, - "MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n", - mc_io->portal_phys_addr, + "MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n", + &mc_io->portal_phys_addr, (unsigned int)mc_cmd_hdr_read_token(cmd), (unsigned int)mc_cmd_hdr_read_cmdid(cmd), mc_status_to_string(status), diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h index 002829cecd75..c5646096c5d4 100644 --- a/drivers/staging/fsl-mc/include/dpaa2-io.h +++ b/drivers/staging/fsl-mc/include/dpaa2-io.h @@ -34,6 +34,7 @@ #include <linux/types.h> #include <linux/cpumask.h> +#include <linux/irqreturn.h> #include "dpaa2-fd.h" #include "dpaa2-global.h" diff --git a/drivers/staging/goldfish/goldfish_nand.c 
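Editor's note: the mc-sys.c rework above is worth unpacking. The command words already sit in memory in little-endian order, but writeq()/readq() themselves perform a CPU-to-LE swap on the way to the device, so the code feeds them le64_to_cpu()/cpu_to_le64() to cancel the extra conversion, and it includes io-64-nonatomic-hi-lo.h so 64-bit accessors exist on 32-bit builds. A condensed sketch of the write side under those assumptions (struct demo_portal_cmd and demo_write_cmd() are invented for illustration):

#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct demo_portal_cmd {
        __le64 header;
        __le64 params[7];
};

static void demo_write_cmd(struct demo_portal_cmd __iomem *portal,
                           const struct demo_portal_cmd *cmd)
{
        int i;

        /*
         * params[] are already little-endian; writeq_relaxed() swaps
         * CPU -> LE internally, so undo that up front with le64_to_cpu().
         */
        for (i = 0; i < ARRAY_SIZE(cmd->params); i++)
                writeq_relaxed(le64_to_cpu(cmd->params[i]),
                               &portal->params[i]);

        /* The ordered writeq() of the header submits the command last. */
        writeq(le64_to_cpu(cmd->header), &portal->header);
}
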
b/drivers/staging/goldfish/goldfish_nand.c index 8f92ff4ba4b8..e742b92bb153 100644 --- a/drivers/staging/goldfish/goldfish_nand.c +++ b/drivers/staging/goldfish/goldfish_nand.c @@ -157,8 +157,8 @@ static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, return 0; invalid_arg: - pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", - ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); + pr_err("%s: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", + __func__, ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); return -EINVAL; } @@ -189,8 +189,8 @@ static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, return 0; invalid_arg: - pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", - ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); + pr_err("%s: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", + __func__, ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); return -EINVAL; } @@ -211,8 +211,8 @@ static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len, return 0; invalid_arg: - pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n", - from, len, mtd->size, mtd->writesize); + pr_err("%s: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n", + __func__, from, len, mtd->size, mtd->writesize); return -EINVAL; } @@ -233,8 +233,8 @@ static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len, return 0; invalid_arg: - pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n", - to, len, mtd->size, mtd->writesize); + pr_err("%s: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n", + __func__, to, len, mtd->size, mtd->writesize); return -EINVAL; } @@ -340,8 +340,8 @@ static int goldfish_nand_init_device(struct platform_device *pdev, name); if (result != name_len) { dev_err(&pdev->dev, - "goldfish_nand_init_device failed to get dev name %d != %d\n", - result, name_len); + "%s: failed to get dev name %d != %d\n", + __func__, result, name_len); return -ENODEV; } ((char *)mtd->name)[name_len] = '\0'; diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c index eced2d26467b..4837aca41389 100644 --- a/drivers/staging/greybus/arche-platform.c +++ b/drivers/staging/greybus/arche-platform.c @@ -176,7 +176,10 @@ static irqreturn_t arche_platform_wd_irq(int irq, void *devid) arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE); } else { - /* Check we are not in middle of irq thread already */ + /* + * Check we are not in middle of irq thread + * already + */ if (arche_pdata->wake_detect_state != WD_STATE_COLDBOOT_START) { arche_platform_set_wake_detect_state(arche_pdata, @@ -607,7 +610,6 @@ static int arche_platform_remove(struct platform_device *pdev) device_remove_file(&pdev->dev, &dev_attr_state); device_for_each_child(&pdev->dev, NULL, arche_remove_child); arche_platform_poweroff_seq(arche_pdata); - platform_set_drvdata(pdev, NULL); if (usb3613_hub_mode_ctrl(false)) dev_warn(arche_pdata->dev, "failed to control hub device\n"); @@ -657,12 +659,14 @@ static SIMPLE_DEV_PM_OPS(arche_platform_pm_ops, arche_platform_resume); static const struct of_device_id arche_platform_of_match[] = { - { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */ + /* Use PID/VID of SVC device */ + { 
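Editor's note: the goldfish_nand log changes above substitute "%s: ...", __func__ for function names spelled out in the format string, so the messages stay accurate if a function is renamed and duplicated string data is avoided. A trivial sketch (demo_validate() is hypothetical):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

static int demo_validate(size_t len, size_t max)
{
        if (len > max) {
                pr_err("%s: invalid length %zu (max %zu)\n",
                       __func__, len, max);
                return -EINVAL;
        }
        return 0;
}
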
.compatible = "google,arche-platform", }, { }, }; static const struct of_device_id arche_combined_id[] = { - { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */ + /* Use PID/VID of SVC device */ + { .compatible = "google,arche-platform", }, { .compatible = "usbffff,2", }, { }, }; diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c index a4fd51632232..71e5cc234e78 100644 --- a/drivers/staging/greybus/interface.c +++ b/drivers/staging/greybus/interface.c @@ -47,7 +47,7 @@ static int gb_interface_hibernate_link(struct gb_interface *intf); static int gb_interface_refclk_set(struct gb_interface *intf, bool enable); static int gb_interface_dme_attr_get(struct gb_interface *intf, - u16 attr, u32 *val) + u16 attr, u32 *val) { return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id, attr, DME_SELECTOR_INDEX_NULL, val); @@ -64,7 +64,7 @@ static int gb_interface_read_ara_dme(struct gb_interface *intf) */ if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) { dev_err(&intf->dev, "unknown manufacturer %08x\n", - intf->ddbl1_manufacturer_id); + intf->ddbl1_manufacturer_id); return -ENODEV; } @@ -110,7 +110,7 @@ static int gb_interface_read_dme(struct gb_interface *intf) return ret; if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID && - intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) { + intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) { intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS; intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS; } @@ -144,7 +144,7 @@ static int gb_interface_route_create(struct gb_interface *intf) ret = gb_svc_intf_device_id(svc, intf_id, device_id); if (ret) { dev_err(&intf->dev, "failed to set device id %u: %d\n", - device_id, ret); + device_id, ret); goto err_ida_remove; } @@ -205,21 +205,21 @@ static int gb_interface_legacy_mode_switch(struct gb_interface *intf) } void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, - u32 mailbox) + u32 mailbox) { mutex_lock(&intf->mutex); if (result) { dev_warn(&intf->dev, - "mailbox event with UniPro error: 0x%04x\n", - result); + "mailbox event with UniPro error: 0x%04x\n", + result); goto err_disable; } if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) { dev_warn(&intf->dev, - "mailbox event with unexpected value: 0x%08x\n", - mailbox); + "mailbox event with unexpected value: 0x%08x\n", + mailbox); goto err_disable; } @@ -230,7 +230,7 @@ void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, if (!intf->mode_switch) { dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n", - mailbox); + mailbox); goto err_disable; } @@ -299,7 +299,7 @@ static void gb_interface_mode_switch_work(struct work_struct *work) ret = gb_interface_enable(intf); if (ret) { dev_err(&intf->dev, "failed to re-enable interface: %d\n", - ret); + ret); gb_interface_deactivate(intf); } } @@ -619,7 +619,7 @@ static struct attribute *interface_common_attrs[] = { }; static umode_t interface_unipro_is_visible(struct kobject *kobj, - struct attribute *attr, int n) + struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct gb_interface *intf = to_gb_interface(dev); @@ -634,7 +634,7 @@ static umode_t interface_unipro_is_visible(struct kobject *kobj, } static umode_t interface_greybus_is_visible(struct kobject *kobj, - struct attribute *attr, int n) + struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct gb_interface *intf = to_gb_interface(dev); @@ -648,7 +648,7 @@ static umode_t 
interface_greybus_is_visible(struct kobject *kobj, } static umode_t interface_power_is_visible(struct kobject *kobj, - struct attribute *attr, int n) + struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct gb_interface *intf = to_gb_interface(dev); @@ -813,7 +813,7 @@ struct gb_interface *gb_interface_create(struct gb_module *module, intf->dev.dma_mask = module->dev.dma_mask; device_initialize(&intf->dev); dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev), - interface_id); + interface_id); pm_runtime_set_autosuspend_delay(&intf->dev, GB_INTERFACE_AUTOSUSPEND_MS); @@ -1083,7 +1083,7 @@ int gb_interface_enable(struct gb_interface *intf) control = gb_control_create(intf); if (IS_ERR(control)) { dev_err(&intf->dev, "failed to create control device: %ld\n", - PTR_ERR(control)); + PTR_ERR(control)); return PTR_ERR(control); } intf->control = control; @@ -1228,17 +1228,17 @@ int gb_interface_add(struct gb_interface *intf) trace_gb_interface_add(intf); dev_info(&intf->dev, "Interface added (%s)\n", - gb_interface_type_string(intf)); + gb_interface_type_string(intf)); switch (intf->type) { case GB_INTERFACE_TYPE_GREYBUS: dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n", - intf->vendor_id, intf->product_id); + intf->vendor_id, intf->product_id); /* fall-through */ case GB_INTERFACE_TYPE_UNIPRO: dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n", - intf->ddbl1_manufacturer_id, - intf->ddbl1_product_id); + intf->ddbl1_manufacturer_id, + intf->ddbl1_product_id); break; default: break; diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h index 566d0dde7f79..cb6092578a92 100644 --- a/drivers/staging/greybus/spilib.h +++ b/drivers/staging/greybus/spilib.h @@ -18,7 +18,8 @@ struct spilib_ops { void (*unprepare_transfer_hardware)(struct device *dev); }; -int gb_spilib_master_init(struct gb_connection *connection, struct device *dev, struct spilib_ops *ops); +int gb_spilib_master_init(struct gb_connection *connection, + struct device *dev, struct spilib_ops *ops); void gb_spilib_master_exit(struct gb_connection *connection); #endif /* __SPILIB_H */ diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c index 32a43693181c..d86bcce53e6b 100644 --- a/drivers/staging/greybus/tools/loopback_test.c +++ b/drivers/staging/greybus/tools/loopback_test.c @@ -528,11 +528,11 @@ static int log_results(struct loopback_test *t) tm = *localtime(&local_time); /* - * file name will test_name_size_iteration_max.csv - * every time the same test with the same parameters is run we will then - * append to the same CSV with datestamp - representing each test - * dataset. - */ + * file name will test_name_size_iteration_max.csv + * every time the same test with the same parameters is run we will then + * append to the same CSV with datestamp - representing each test + * dataset. + */ if (t->file_output && !t->porcelain) { snprintf(file_name, sizeof(file_name), "%s_%d_%d.csv", t->test_name, t->size, t->iteration_max); @@ -779,7 +779,8 @@ static void prepare_devices(struct loopback_test *t) { int i; - /* Cancel any running tests on enabled devices. If + /* + * Cancel any running tests on enabled devices. If * stop_all option is given, stop test on all devices. 
*/ for (i = 0; i < t->device_count; i++) diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c index ccadda084b76..f93a76d02de6 100644 --- a/drivers/staging/greybus/usb.c +++ b/drivers/staging/greybus/usb.c @@ -139,7 +139,7 @@ out: return ret; } -static struct hc_driver usb_gb_hc_driver = { +static const struct hc_driver usb_gb_hc_driver = { .description = "greybus-hcd", .product_desc = "Greybus USB Host Controller", .hcd_priv_size = sizeof(struct gb_usb_device), diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c index 19b550fff04b..bcbdc7340b55 100644 --- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c +++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c @@ -23,6 +23,7 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/firmware.h> +#include <asm/unaligned.h> #include "gs_fpgaboot.h" #include "io.h" @@ -41,16 +42,16 @@ static char *file = "xlinx_fpga_firmware.bit"; module_param(file, charp, 0444); MODULE_PARM_DESC(file, "Xilinx FPGA firmware file."); -static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize) +static void read_bitstream(u8 *bitdata, u8 *buf, int *offset, int rdsize) { memcpy(buf, bitdata + *offset, rdsize); *offset += rdsize; } -static void readinfo_bitstream(char *bitdata, char *buf, int *offset) +static int readinfo_bitstream(u8 *bitdata, u8 *buf, int size, int *offset) { - char tbuf[64]; - s32 len; + u8 tbuf[2]; + u16 len; /* read section char */ read_bitstream(bitdata, tbuf, offset, 1); @@ -58,18 +59,24 @@ static void readinfo_bitstream(char *bitdata, char *buf, int *offset) /* read length */ read_bitstream(bitdata, tbuf, offset, 2); - len = tbuf[0] << 8 | tbuf[1]; + len = get_unaligned_be16(tbuf); + if (len >= size) { + pr_err("error: readinfo buffer too small\n"); + return -EINVAL; + } read_bitstream(bitdata, buf, offset, len); buf[len] = '\0'; + + return 0; } /* * read bitdata length */ -static int readlength_bitstream(char *bitdata, int *lendata, int *offset) +static int readlength_bitstream(u8 *bitdata, int *lendata, int *offset) { - char tbuf[64]; + u8 tbuf[4]; /* read section char */ read_bitstream(bitdata, tbuf, offset, 1); @@ -77,14 +84,13 @@ static int readlength_bitstream(char *bitdata, int *lendata, int *offset) /* make sure it is section 'e' */ if (tbuf[0] != 'e') { pr_err("error: length section is not 'e', but %c\n", tbuf[0]); - return -1; + return -EINVAL; } /* read 4bytes length */ read_bitstream(bitdata, tbuf, offset, 4); - *lendata = tbuf[0] << 24 | tbuf[1] << 16 | - tbuf[2] << 8 | tbuf[3]; + *lendata = get_unaligned_be32(tbuf); return 0; } @@ -92,16 +98,16 @@ static int readlength_bitstream(char *bitdata, int *lendata, int *offset) /* * read first 13 bytes to check bitstream magic number */ -static int readmagic_bitstream(char *bitdata, int *offset) +static int readmagic_bitstream(u8 *bitdata, int *offset) { - char buf[13]; + u8 buf[13]; int r; read_bitstream(bitdata, buf, offset, 13); r = memcmp(buf, bits_magic, 13); if (r) { pr_err("error: corrupted header"); - return -1; + return -EINVAL; } pr_info("bitstream file magic number Ok\n"); @@ -113,7 +119,7 @@ static int readmagic_bitstream(char *bitdata, int *offset) /* * NOTE: supports only bitstream format */ -static enum fmt_image get_imageformat(struct fpgaimage *fimage) +static enum fmt_image get_imageformat(void) { return f_bit; } @@ -127,38 +133,58 @@ static void gs_print_header(struct fpgaimage *fimage) pr_info("lendata: %d\n", fimage->lendata); } -static void gs_read_bitstream(struct fpgaimage 
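Editor's note: readinfo_bitstream() and readlength_bitstream() above stop assembling big-endian values by hand from possibly signed chars and use get_unaligned_be16()/get_unaligned_be32() on u8 buffers instead, which is unaligned-safe and documents the byte order. A short sketch (demo_parse_lengths() is hypothetical):

#include <asm/unaligned.h>
#include <linux/types.h>

static void demo_parse_lengths(const u8 *buf, u16 *section_len, u32 *data_len)
{
        /* Equivalent to buf[0] << 8 | buf[1], but explicit about endianness. */
        *section_len = get_unaligned_be16(buf);
        *data_len = get_unaligned_be32(buf + 2);
}
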
*fimage) +static int gs_read_bitstream(struct fpgaimage *fimage) { - char *bitdata; + u8 *bitdata; int offset; + int err; offset = 0; - bitdata = (char *)fimage->fw_entry->data; + bitdata = (u8 *)fimage->fw_entry->data; + + err = readmagic_bitstream(bitdata, &offset); + if (err) + return err; + + err = readinfo_bitstream(bitdata, fimage->filename, MAX_STR, &offset); + if (err) + return err; + err = readinfo_bitstream(bitdata, fimage->part, MAX_STR, &offset); + if (err) + return err; + err = readinfo_bitstream(bitdata, fimage->date, MAX_STR, &offset); + if (err) + return err; + err = readinfo_bitstream(bitdata, fimage->time, MAX_STR, &offset); + if (err) + return err; - readmagic_bitstream(bitdata, &offset); - readinfo_bitstream(bitdata, fimage->filename, &offset); - readinfo_bitstream(bitdata, fimage->part, &offset); - readinfo_bitstream(bitdata, fimage->date, &offset); - readinfo_bitstream(bitdata, fimage->time, &offset); - readlength_bitstream(bitdata, &fimage->lendata, &offset); + err = readlength_bitstream(bitdata, &fimage->lendata, &offset); + if (err) + return err; fimage->fpgadata = bitdata + offset; + + return 0; } static int gs_read_image(struct fpgaimage *fimage) { int img_fmt; + int err; - img_fmt = get_imageformat(fimage); + img_fmt = get_imageformat(); switch (img_fmt) { case f_bit: pr_info("image is bitstream format\n"); - gs_read_bitstream(fimage); + err = gs_read_bitstream(fimage); + if (err) + return err; break; default: pr_err("unsupported fpga image format\n"); - return -1; + return -EINVAL; } gs_print_header(fimage); @@ -183,11 +209,11 @@ static int gs_load_image(struct fpgaimage *fimage, char *fw_file) static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes) { - char *bitdata; + u8 *bitdata; int size, i, cnt; cnt = 0; - bitdata = (char *)fimage->fpgadata; + bitdata = (u8 *)fimage->fpgadata; size = fimage->lendata; #ifdef DEBUG_FPGA @@ -197,7 +223,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes) if (!xl_supported_prog_bus_width(bus_bytes)) { pr_err("unsupported program bus width %d\n", bus_bytes); - return -1; + return -EINVAL; } /* Bring csi_b, rdwr_b Low and program_b High */ @@ -224,7 +250,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes) /* Check INIT_B */ if (xl_get_init_b() == 0) { pr_err("init_b 0\n"); - return -1; + return -EIO; } while (xl_get_done_b() == 0) { @@ -236,7 +262,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes) if (cnt > MAX_WAIT_DONE) { pr_err("fpga download fail\n"); - return -1; + return -EIO; } pr_info("download fpgaimage\n"); @@ -325,7 +351,7 @@ err_out2: err_out1: kfree(fimage); - return -1; + return err; } static int __init gs_fpgaboot_init(void) diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h index cd1eb2c4c940..986e841f6b5e 100644 --- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h +++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h @@ -47,5 +47,5 @@ struct fpgaimage { char date[MAX_STR]; char time[MAX_STR]; int lendata; - char *fpgadata; + u8 *fpgadata; }; diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c index c9391198fbfb..83a13ca7259a 100644 --- a/drivers/staging/gs_fpgaboot/io.c +++ b/drivers/staging/gs_fpgaboot/io.c @@ -9,10 +9,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. 
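Editor's note: gs_read_bitstream() above now returns real errno values (-EINVAL, -EIO) and checks each helper instead of returning a bare -1, so callers can tell a malformed image from a hardware failure. A minimal sketch of the propagation pattern (both demo_* helpers are invented):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_check_magic(const u8 *buf, size_t size)
{
        static const u8 magic[] = { 0x0f, 0xf0 };

        if (size < sizeof(magic) || memcmp(buf, magic, sizeof(magic)))
                return -EINVAL;         /* malformed image */
        return 0;
}

static int demo_parse_image(const u8 *buf, size_t size)
{
        int err = demo_check_magic(buf, size);

        if (err)
                return err;             /* propagate the errno, not -1 */
        return 0;
}
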
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index d5ab83f0236d..f85dde9805e0 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -99,9 +99,14 @@ #define AD7280A_DEVADDR_MASTER 0 #define AD7280A_DEVADDR_ALL 0x1F /* 5-bit device address is sent LSB first */ -#define AD7280A_DEVADDR(addr) (((addr & 0x1) << 4) | ((addr & 0x2) << 3) | \ - (addr & 0x4) | ((addr & 0x8) >> 3) | \ - ((addr & 0x10) >> 4)) +static unsigned int ad7280a_devaddr(unsigned int addr) +{ + return ((addr & 0x1) << 4) | + ((addr & 0x2) << 3) | + (addr & 0x4) | + ((addr & 0x8) >> 3) | + ((addr & 0x10) >> 4); +} /* During a read a valid write is mandatory. * So writing to the highest available address (Address 0x1F) @@ -372,7 +377,7 @@ static int ad7280_chain_setup(struct ad7280_state *st) if (ad7280_check_crc(st, val)) return -EIO; - if (n != AD7280A_DEVADDR(val >> 27)) + if (n != ad7280a_devaddr(val >> 27)) return -EIO; } @@ -511,7 +516,7 @@ static int ad7280_channel_init(struct ad7280_state *st) st->channels[cnt].info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE); st->channels[cnt].address = - AD7280A_DEVADDR(dev) << 8 | ch; + ad7280a_devaddr(dev) << 8 | ch; st->channels[cnt].scan_index = cnt; st->channels[cnt].scan_type.sign = 'u'; st->channels[cnt].scan_type.realbits = 12; @@ -558,7 +563,7 @@ static int ad7280_attr_init(struct ad7280_state *st) for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_CELL_VOLTAGE_6; ch++, cnt++) { st->iio_attr[cnt].address = - AD7280A_DEVADDR(dev) << 8 | ch; + ad7280a_devaddr(dev) << 8 | ch; st->iio_attr[cnt].dev_attr.attr.mode = 0644; st->iio_attr[cnt].dev_attr.show = @@ -574,7 +579,7 @@ static int ad7280_attr_init(struct ad7280_state *st) &st->iio_attr[cnt].dev_attr.attr; cnt++; st->iio_attr[cnt].address = - AD7280A_DEVADDR(dev) << 8 | + ad7280a_devaddr(dev) << 8 | (AD7280A_CB1_TIMER + ch); st->iio_attr[cnt].dev_attr.attr.mode = 0644; @@ -918,7 +923,7 @@ static int ad7280_probe(struct spi_device *spi) if (ret) goto error_unregister; - ret = ad7280_write(st, AD7280A_DEVADDR(st->slave_num), + ret = ad7280_write(st, ad7280a_devaddr(st->slave_num), AD7280A_ALERT, 0, AD7280A_ALERT_GEN_STATIC_HIGH | (pdata->chain_last_alert_ignore & 0xF)); diff --git a/drivers/staging/iio/light/tsl2x7x.c b/drivers/staging/iio/light/tsl2x7x.c index 146719928fb3..786e93f16ce9 100644 --- a/drivers/staging/iio/light/tsl2x7x.c +++ b/drivers/staging/iio/light/tsl2x7x.c @@ -285,35 +285,6 @@ static const u8 device_channel_config[] = { }; /** - * tsl2x7x_i2c_read() - Read a byte from a register. - * @client: i2c client - * @reg: device register to read from - * @*val: pointer to location to store register contents. - * - */ -static int -tsl2x7x_i2c_read(struct i2c_client *client, u8 reg, u8 *val) -{ - int ret; - - /* select register to write */ - ret = i2c_smbus_write_byte(client, (TSL2X7X_CMD_REG | reg)); - if (ret < 0) { - dev_err(&client->dev, "failed to write register %x\n", reg); - return ret; - } - - /* read the data */ - ret = i2c_smbus_read_byte(client); - if (ret >= 0) - *val = (u8)ret; - else - dev_err(&client->dev, "failed to read register %x\n", reg); - - return ret; -} - -/** * tsl2x7x_get_lux() - Reads and calculates current lux value. 
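Editor's note: the ad7280a change above turns the function-like AD7280A_DEVADDR() macro into a real function, which gains argument type checking and single evaluation at no runtime cost. The same bit reversal written as a static inline, mirroring the patch (the name demo_devaddr_reverse5 is illustrative):

static inline unsigned int demo_devaddr_reverse5(unsigned int addr)
{
        /* Reverse the 5 address bits: the device expects the LSB first. */
        return ((addr & 0x01) << 4) |
               ((addr & 0x02) << 3) |
                (addr & 0x04)       |
               ((addr & 0x08) >> 3) |
               ((addr & 0x10) >> 4);
}
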
* @indio_dev: pointer to IIO device * @@ -352,15 +323,15 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev) goto out_unlock; } - ret = tsl2x7x_i2c_read(chip->client, - (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]); + ret = i2c_smbus_read_byte_data(chip->client, + TSL2X7X_CMD_REG | TSL2X7X_STATUS); if (ret < 0) { dev_err(&chip->client->dev, "%s: Failed to read STATUS Reg\n", __func__); goto out_unlock; } /* is data new & valid */ - if (!(buf[0] & TSL2X7X_STA_ADC_VALID)) { + if (!(ret & TSL2X7X_STA_ADC_VALID)) { dev_err(&chip->client->dev, "%s: data not valid yet\n", __func__); ret = chip->als_cur_info.lux; /* return LAST VALUE */ @@ -368,14 +339,16 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev) } for (i = 0; i < 4; i++) { - ret = tsl2x7x_i2c_read(chip->client, - (TSL2X7X_CMD_REG | - (TSL2X7X_ALS_CHAN0LO + i)), &buf[i]); + int reg = TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i); + + ret = i2c_smbus_read_byte_data(chip->client, reg); if (ret < 0) { dev_err(&chip->client->dev, "failed to read. err=%x\n", ret); goto out_unlock; } + + buf[i] = ret; } /* clear any existing interrupt status */ @@ -475,7 +448,6 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev) { int i; int ret; - u8 status; u8 chdata[2]; struct tsl2X7X_chip *chip = iio_priv(indio_dev); @@ -485,8 +457,8 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev) return -EBUSY; } - ret = tsl2x7x_i2c_read(chip->client, - (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status); + ret = i2c_smbus_read_byte_data(chip->client, + TSL2X7X_CMD_REG | TSL2X7X_STATUS); if (ret < 0) { dev_err(&chip->client->dev, "i2c err=%d\n", ret); goto prox_poll_err; @@ -498,7 +470,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev) case tmd2671: case tsl2771: case tmd2771: - if (!(status & TSL2X7X_STA_ADC_VALID)) + if (!(ret & TSL2X7X_STA_ADC_VALID)) goto prox_poll_err; break; case tsl2572: @@ -506,17 +478,19 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev) case tmd2672: case tsl2772: case tmd2772: - if (!(status & TSL2X7X_STA_PRX_VALID)) + if (!(ret & TSL2X7X_STA_PRX_VALID)) goto prox_poll_err; break; } for (i = 0; i < 2; i++) { - ret = tsl2x7x_i2c_read(chip->client, - (TSL2X7X_CMD_REG | - (TSL2X7X_PRX_LO + i)), &chdata[i]); + int reg = TSL2X7X_CMD_REG | (TSL2X7X_PRX_LO + i); + + ret = i2c_smbus_read_byte_data(chip->client, reg); if (ret < 0) goto prox_poll_err; + + chdata[i] = ret; } chip->prox_data = @@ -568,39 +542,29 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip) static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev) { struct tsl2X7X_chip *chip = iio_priv(indio_dev); - u8 reg_val; int gain_trim_val; int ret; int lux_val; - ret = i2c_smbus_write_byte(chip->client, - (TSL2X7X_CMD_REG | TSL2X7X_CNTRL)); + ret = i2c_smbus_read_byte_data(chip->client, + TSL2X7X_CMD_REG | TSL2X7X_CNTRL); if (ret < 0) { dev_err(&chip->client->dev, - "failed to write CNTRL register, ret=%d\n", ret); + "%s: failed to read from the CNTRL register\n", + __func__); return ret; } - reg_val = i2c_smbus_read_byte(chip->client); - if ((reg_val & (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON)) - != (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON)) { - dev_err(&chip->client->dev, - "%s: failed: ADC not enabled\n", __func__); - return -1; - } - - ret = i2c_smbus_write_byte(chip->client, - (TSL2X7X_CMD_REG | TSL2X7X_CNTRL)); - if (ret < 0) { + if ((ret & (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON)) + != (TSL2X7X_CNTL_ADC_ENBL | TSL2X7X_CNTL_PWR_ON)) { dev_err(&chip->client->dev, - "failed to write ctrl reg: ret=%d\n", ret); - return ret; - } - - 
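Editor's note: the tsl2x7x conversion above deletes the driver's private tsl2x7x_i2c_read() (an i2c_smbus_write_byte() to select the register followed by i2c_smbus_read_byte()) in favour of the single-transaction i2c_smbus_read_byte_data() helper, whose non-negative return value is the register contents. A sketch (demo_read_status() and the register values are hypothetical):

#include <linux/i2c.h>

#define DEMO_CMD_REG    0x80    /* hypothetical command/register-select bit */
#define DEMO_STATUS     0x13    /* hypothetical status register */

static int demo_read_status(struct i2c_client *client)
{
        int ret = i2c_smbus_read_byte_data(client, DEMO_CMD_REG | DEMO_STATUS);

        if (ret < 0)
                return ret;     /* negative errno from the bus */
        return ret;             /* register value, 0..255 */
}
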
reg_val = i2c_smbus_read_byte(chip->client); - if ((reg_val & TSL2X7X_STA_ADC_VALID) != TSL2X7X_STA_ADC_VALID) { + "%s: Device is not powered on and/or ADC is not enabled\n", + __func__); + return -EINVAL; + } else if ((ret & TSL2X7X_STA_ADC_VALID) != TSL2X7X_STA_ADC_VALID) { dev_err(&chip->client->dev, - "%s: failed: STATUS - ADC not valid.\n", __func__); + "%s: The two ADC channels have not completed an integration cycle\n", + __func__); return -ENODATA; } @@ -722,7 +686,8 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev) } } - mdelay(3); /* Power-on settling time */ + /* Power-on settling time */ + usleep_range(3000, 3500); /* * NOW enable the ADC @@ -806,22 +771,24 @@ int tsl2x7x_invoke_change(struct iio_dev *indio_dev) { struct tsl2X7X_chip *chip = iio_priv(indio_dev); int device_status = chip->tsl2x7x_chip_status; + int ret; mutex_lock(&chip->als_mutex); mutex_lock(&chip->prox_mutex); - if (device_status == TSL2X7X_CHIP_WORKING) - tsl2x7x_chip_off(indio_dev); - - tsl2x7x_chip_on(indio_dev); + if (device_status == TSL2X7X_CHIP_WORKING) { + ret = tsl2x7x_chip_off(indio_dev); + if (ret < 0) + goto unlock; + } - if (device_status != TSL2X7X_CHIP_WORKING) - tsl2x7x_chip_off(indio_dev); + ret = tsl2x7x_chip_on(indio_dev); +unlock: mutex_unlock(&chip->prox_mutex); mutex_unlock(&chip->als_mutex); - return 0; + return ret; } static @@ -889,7 +856,7 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev) /*gather the samples*/ for (i = 0; i < chip->tsl2x7x_settings.prox_max_samples_cal; i++) { - mdelay(15); + usleep_range(15000, 17500); tsl2x7x_get_prox(indio_dev); prox_history[i] = chip->prox_data; dev_info(&chip->client->dev, "2 i=%d prox data= %d\n", @@ -915,33 +882,6 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev) tsl2x7x_chip_on(indio_dev); } -static ssize_t power_state_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev)); - - return snprintf(buf, PAGE_SIZE, "%d\n", chip->tsl2x7x_chip_status); -} - -static ssize_t power_state_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - bool value; - - if (strtobool(buf, &value)) - return -EINVAL; - - if (value) - tsl2x7x_chip_on(indio_dev); - else - tsl2x7x_chip_off(indio_dev); - - return len; -} - static ssize_t in_illuminance0_calibscale_available_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1027,6 +967,7 @@ static ssize_t in_illuminance0_target_input_store(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2X7X_chip *chip = iio_priv(indio_dev); unsigned long value; + int ret; if (kstrtoul(buf, 0, &value)) return -EINVAL; @@ -1034,7 +975,9 @@ static ssize_t in_illuminance0_target_input_store(struct device *dev, if (value) chip->tsl2x7x_settings.als_cal_target = value; - tsl2x7x_invoke_change(indio_dev); + ret = tsl2x7x_invoke_change(indio_dev); + if (ret < 0) + return ret; return len; } @@ -1083,7 +1026,9 @@ static ssize_t in_intensity0_thresh_period_store(struct device *dev, dev_info(&chip->client->dev, "%s: als persistence = %d", __func__, filter_delay); - tsl2x7x_invoke_change(indio_dev); + ret = tsl2x7x_invoke_change(indio_dev); + if (ret < 0) + return ret; return IIO_VAL_INT_PLUS_MICRO; } @@ -1131,7 +1076,10 @@ static ssize_t in_proximity0_thresh_period_store(struct device *dev, dev_info(&chip->client->dev, "%s: prox persistence = %d", __func__, filter_delay); - tsl2x7x_invoke_change(indio_dev); 
+ ret = tsl2x7x_invoke_change(indio_dev); + if (ret < 0) + return ret; + return IIO_VAL_INT_PLUS_MICRO; } @@ -1142,6 +1090,7 @@ static ssize_t in_illuminance0_calibrate_store(struct device *dev, { struct iio_dev *indio_dev = dev_to_iio_dev(dev); bool value; + int ret; if (strtobool(buf, &value)) return -EINVAL; @@ -1149,7 +1098,9 @@ static ssize_t in_illuminance0_calibrate_store(struct device *dev, if (value) tsl2x7x_als_calibrate(indio_dev); - tsl2x7x_invoke_change(indio_dev); + ret = tsl2x7x_invoke_change(indio_dev); + if (ret < 0) + return ret; return len; } @@ -1189,7 +1140,7 @@ static ssize_t in_illuminance0_lux_table_store(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2X7X_chip *chip = iio_priv(indio_dev); int value[ARRAY_SIZE(chip->tsl2x7x_device_lux) * 3 + 1]; - int n; + int n, ret; get_options(buf, ARRAY_SIZE(value), value); @@ -1217,7 +1168,9 @@ static ssize_t in_illuminance0_lux_table_store(struct device *dev, memset(chip->tsl2x7x_device_lux, 0, sizeof(chip->tsl2x7x_device_lux)); memcpy(chip->tsl2x7x_device_lux, &value[1], (value[0] * 4)); - tsl2x7x_invoke_change(indio_dev); + ret = tsl2x7x_invoke_change(indio_dev); + if (ret < 0) + return ret; return len; } @@ -1228,6 +1181,7 @@ static ssize_t in_proximity0_calibrate_store(struct device *dev, { struct iio_dev *indio_dev = dev_to_iio_dev(dev); bool value; + int ret; if (strtobool(buf, &value)) return -EINVAL; @@ -1235,7 +1189,9 @@ static ssize_t in_proximity0_calibrate_store(struct device *dev, if (value) tsl2x7x_prox_cal(indio_dev); - tsl2x7x_invoke_change(indio_dev); + ret = tsl2x7x_invoke_change(indio_dev); + if (ret < 0) + return ret; return len; } @@ -1263,6 +1219,7 @@ static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev, int val) { struct tsl2X7X_chip *chip = iio_priv(indio_dev); + int ret; if (chan->type == IIO_INTENSITY) { if (val) @@ -1276,83 +1233,108 @@ static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev, chip->tsl2x7x_settings.interrupts_en &= 0x10; } - tsl2x7x_invoke_change(indio_dev); + ret = tsl2x7x_invoke_change(indio_dev); + if (ret < 0) + return ret; return 0; } -static int tsl2x7x_write_thresh(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - enum iio_event_type type, - enum iio_event_direction dir, - enum iio_event_info info, - int val, int val2) +static int tsl2x7x_write_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int val, int val2) { struct tsl2X7X_chip *chip = iio_priv(indio_dev); + int ret = -EINVAL; - if (chan->type == IIO_INTENSITY) { - switch (dir) { - case IIO_EV_DIR_RISING: - chip->tsl2x7x_settings.als_thresh_high = val; - break; - case IIO_EV_DIR_FALLING: - chip->tsl2x7x_settings.als_thresh_low = val; - break; - default: - return -EINVAL; - } - } else { - switch (dir) { - case IIO_EV_DIR_RISING: - chip->tsl2x7x_settings.prox_thres_high = val; - break; - case IIO_EV_DIR_FALLING: - chip->tsl2x7x_settings.prox_thres_low = val; - break; - default: - return -EINVAL; + switch (info) { + case IIO_EV_INFO_VALUE: + if (chan->type == IIO_INTENSITY) { + switch (dir) { + case IIO_EV_DIR_RISING: + chip->tsl2x7x_settings.als_thresh_high = val; + ret = 0; + break; + case IIO_EV_DIR_FALLING: + chip->tsl2x7x_settings.als_thresh_low = val; + ret = 0; + break; + default: + break; + } + } else { + switch (dir) { + case IIO_EV_DIR_RISING: + chip->tsl2x7x_settings.prox_thres_high = val; + ret = 0; + break; + case 
IIO_EV_DIR_FALLING: + chip->tsl2x7x_settings.prox_thres_low = val; + ret = 0; + break; + default: + break; + } } + break; + default: + break; } - tsl2x7x_invoke_change(indio_dev); + if (ret < 0) + return ret; - return 0; + return tsl2x7x_invoke_change(indio_dev); } -static int tsl2x7x_read_thresh(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - enum iio_event_type type, - enum iio_event_direction dir, - enum iio_event_info info, - int *val, int *val2) +static int tsl2x7x_read_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int *val, int *val2) { struct tsl2X7X_chip *chip = iio_priv(indio_dev); + int ret = -EINVAL; - if (chan->type == IIO_INTENSITY) { - switch (dir) { - case IIO_EV_DIR_RISING: - *val = chip->tsl2x7x_settings.als_thresh_high; - break; - case IIO_EV_DIR_FALLING: - *val = chip->tsl2x7x_settings.als_thresh_low; - break; - default: - return -EINVAL; - } - } else { - switch (dir) { - case IIO_EV_DIR_RISING: - *val = chip->tsl2x7x_settings.prox_thres_high; - break; - case IIO_EV_DIR_FALLING: - *val = chip->tsl2x7x_settings.prox_thres_low; - break; - default: - return -EINVAL; + switch (info) { + case IIO_EV_INFO_VALUE: + if (chan->type == IIO_INTENSITY) { + switch (dir) { + case IIO_EV_DIR_RISING: + *val = chip->tsl2x7x_settings.als_thresh_high; + ret = IIO_VAL_INT; + break; + case IIO_EV_DIR_FALLING: + *val = chip->tsl2x7x_settings.als_thresh_low; + ret = IIO_VAL_INT; + break; + default: + break; + } + } else { + switch (dir) { + case IIO_EV_DIR_RISING: + *val = chip->tsl2x7x_settings.prox_thres_high; + ret = IIO_VAL_INT; + break; + case IIO_EV_DIR_FALLING: + *val = chip->tsl2x7x_settings.prox_thres_low; + ret = IIO_VAL_INT; + break; + default: + break; + } } + break; + default: + break; } - return IIO_VAL_INT; + return ret; } static int tsl2x7x_read_raw(struct iio_dev *indio_dev, @@ -1489,13 +1471,9 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev, return -EINVAL; } - tsl2x7x_invoke_change(indio_dev); - - return 0; + return tsl2x7x_invoke_change(indio_dev); } -static DEVICE_ATTR_RW(power_state); - static DEVICE_ATTR_RO(in_proximity0_calibscale_available); static DEVICE_ATTR_RO(in_illuminance0_calibscale_available); @@ -1515,7 +1493,7 @@ static DEVICE_ATTR_RW(in_intensity0_thresh_period); static DEVICE_ATTR_RW(in_proximity0_thresh_period); /* Use the default register values to identify the Taos device */ -static int tsl2x7x_device_id(unsigned char *id, int target) +static int tsl2x7x_device_id(int *id, int target) { switch (target) { case tsl2571: @@ -1580,7 +1558,6 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private) } static struct attribute *tsl2x7x_ALS_device_attrs[] = { - &dev_attr_power_state.attr, &dev_attr_in_illuminance0_calibscale_available.attr, &dev_attr_in_illuminance0_integration_time.attr, &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr, @@ -1591,13 +1568,11 @@ static struct attribute *tsl2x7x_ALS_device_attrs[] = { }; static struct attribute *tsl2x7x_PRX_device_attrs[] = { - &dev_attr_power_state.attr, &dev_attr_in_proximity0_calibrate.attr, NULL }; static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = { - &dev_attr_power_state.attr, &dev_attr_in_illuminance0_calibscale_available.attr, &dev_attr_in_illuminance0_integration_time.attr, &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr, @@ -1609,14 +1584,12 @@ static struct attribute 
*tsl2x7x_ALSPRX_device_attrs[] = { }; static struct attribute *tsl2x7x_PRX2_device_attrs[] = { - &dev_attr_power_state.attr, &dev_attr_in_proximity0_calibrate.attr, &dev_attr_in_proximity0_calibscale_available.attr, NULL }; static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = { - &dev_attr_power_state.attr, &dev_attr_in_illuminance0_calibscale_available.attr, &dev_attr_in_illuminance0_integration_time.attr, &iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr, @@ -1684,8 +1657,8 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value = &tsl2x7x_read_thresh, - .write_event_value = &tsl2x7x_write_thresh, + .read_event_value = &tsl2x7x_read_event_value, + .write_event_value = &tsl2x7x_write_event_value, .read_event_config = &tsl2x7x_read_interrupt_config, .write_event_config = &tsl2x7x_write_interrupt_config, }, @@ -1695,8 +1668,8 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value = &tsl2x7x_read_thresh, - .write_event_value = &tsl2x7x_write_thresh, + .read_event_value = &tsl2x7x_read_event_value, + .write_event_value = &tsl2x7x_write_event_value, .read_event_config = &tsl2x7x_read_interrupt_config, .write_event_config = &tsl2x7x_write_interrupt_config, }, @@ -1706,8 +1679,8 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value = &tsl2x7x_read_thresh, - .write_event_value = &tsl2x7x_write_thresh, + .read_event_value = &tsl2x7x_read_event_value, + .write_event_value = &tsl2x7x_write_event_value, .read_event_config = &tsl2x7x_read_interrupt_config, .write_event_config = &tsl2x7x_write_interrupt_config, }, @@ -1717,8 +1690,8 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value = &tsl2x7x_read_thresh, - .write_event_value = &tsl2x7x_write_thresh, + .read_event_value = &tsl2x7x_read_event_value, + .write_event_value = &tsl2x7x_write_event_value, .read_event_config = &tsl2x7x_read_interrupt_config, .write_event_config = &tsl2x7x_write_interrupt_config, }, @@ -1728,8 +1701,8 @@ static const struct iio_info tsl2X7X_device_info[] = { .driver_module = THIS_MODULE, .read_raw = &tsl2x7x_read_raw, .write_raw = &tsl2x7x_write_raw, - .read_event_value = &tsl2x7x_read_thresh, - .write_event_value = &tsl2x7x_write_thresh, + .read_event_value = &tsl2x7x_read_event_value, + .write_event_value = &tsl2x7x_write_event_value, .read_event_config = &tsl2x7x_read_interrupt_config, .write_event_config = &tsl2x7x_write_interrupt_config, }, @@ -1877,7 +1850,6 @@ static int tsl2x7x_probe(struct i2c_client *clientp, const struct i2c_device_id *id) { int ret; - unsigned char device_id; struct iio_dev *indio_dev; struct tsl2X7X_chip *chip; @@ -1889,13 +1861,13 @@ static int tsl2x7x_probe(struct i2c_client *clientp, chip->client = clientp; i2c_set_clientdata(clientp, indio_dev); - ret = tsl2x7x_i2c_read(chip->client, - TSL2X7X_CHIPID, &device_id); + ret = i2c_smbus_read_byte_data(chip->client, + TSL2X7X_CMD_REG | TSL2X7X_CHIPID); if (ret < 0) return ret; - if ((!tsl2x7x_device_id(&device_id, id->driver_data)) || - (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) { + if ((!tsl2x7x_device_id(&ret, id->driver_data)) || + 
(tsl2x7x_device_id(&ret, id->driver_data) == -EINVAL)) { dev_info(&chip->client->dev, "%s: i2c device found does not match expected id\n", __func__); @@ -2026,6 +1998,21 @@ static struct i2c_device_id tsl2x7x_idtable[] = { MODULE_DEVICE_TABLE(i2c, tsl2x7x_idtable); +static const struct of_device_id tsl2x7x_of_match[] = { + { .compatible = "amstaos,tsl2571" }, + { .compatible = "amstaos,tsl2671" }, + { .compatible = "amstaos,tmd2671" }, + { .compatible = "amstaos,tsl2771" }, + { .compatible = "amstaos,tmd2771" }, + { .compatible = "amstaos,tsl2572" }, + { .compatible = "amstaos,tsl2672" }, + { .compatible = "amstaos,tmd2672" }, + { .compatible = "amstaos,tsl2772" }, + { .compatible = "amstaos,tmd2772" }, + {} +}; +MODULE_DEVICE_TABLE(of, tsl2x7x_of_match); + static const struct dev_pm_ops tsl2x7x_pm_ops = { .suspend = tsl2x7x_suspend, .resume = tsl2x7x_resume, @@ -2035,6 +2022,7 @@ static const struct dev_pm_ops tsl2x7x_pm_ops = { static struct i2c_driver tsl2x7x_driver = { .driver = { .name = "tsl2x7x", + .of_match_table = tsl2x7x_of_match, .pm = &tsl2x7x_pm_ops, }, .id_table = tsl2x7x_idtable, diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index 8aa12e813bd7..0f9348ba5d84 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -1356,7 +1356,7 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev, /* Add mode */ iwe.cmd = SIOCGIWMODE; - capabilities = le16_to_cpu(ap->capability); + capabilities = ap->capability; if (capabilities & (BSS_CAP_ESS | BSS_CAP_IBSS)) { if (capabilities & BSS_CAP_ESS) iwe.u.mode = IW_MODE_INFRA; diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h index 8ae7423b4543..f534115d402a 100644 --- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h +++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h @@ -453,7 +453,8 @@ extern int portal_rotor; int lnet_lib_init(void); void lnet_lib_exit(void); -int lnet_notify(struct lnet_ni *ni, lnet_nid_t peer, int alive, unsigned long when); +int lnet_notify(struct lnet_ni *ni, lnet_nid_t peer, int alive, + unsigned long when); void lnet_notify_locked(struct lnet_peer *lp, int notifylnd, int alive, unsigned long when); int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid, diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h index 321752dfe58b..ddb808ed5d0b 100644 --- a/drivers/staging/lustre/include/linux/lnet/lib-types.h +++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h @@ -308,9 +308,11 @@ struct lnet_rc_data { struct lnet_peer { struct list_head lp_hashlist; /* chain on peer hash */ struct list_head lp_txq; /* messages blocking for - tx credits */ + * tx credits + */ struct list_head lp_rtrq; /* messages blocking for - router credits */ + * router credits + */ struct list_head lp_rtr_list; /* chain on router list */ int lp_txcredits; /* # tx credits available */ int lp_mintxcredits; /* low water mark */ @@ -319,23 +321,31 @@ struct lnet_peer { unsigned int lp_alive:1; /* alive/dead? */ unsigned int lp_notify:1; /* notification outstanding? */ unsigned int lp_notifylnd:1;/* outstanding notification - for LND? */ + * for LND? 
+ */ unsigned int lp_notifying:1; /* some thread is handling - notification */ + * notification + */ unsigned int lp_ping_notsent;/* SEND event outstanding - from ping */ + * from ping + */ int lp_alive_count; /* # times router went - dead<->alive */ - long lp_txqnob; /* bytes queued for sending */ + * dead<->alive + */ + long lp_txqnob; /* ytes queued for sending */ unsigned long lp_timestamp; /* time of last aliveness - news */ + * news + */ unsigned long lp_ping_timestamp;/* time of last ping - attempt */ + * attempt + */ unsigned long lp_ping_deadline; /* != 0 if ping reply - expected */ + * expected + */ unsigned long lp_last_alive; /* when I was last alive */ unsigned long lp_last_query; /* when lp_ni was queried - last time */ + * last time + */ struct lnet_ni *lp_ni; /* interface peer is on */ lnet_nid_t lp_nid; /* peer's NID */ int lp_refcount; /* # refs */ @@ -386,7 +396,8 @@ struct lnet_route { struct lnet_remotenet { struct list_head lrn_list; /* chain on - ln_remote_nets_hash */ + * ln_remote_nets_hash + */ struct list_head lrn_routes; /* routes to me */ __u32 lrn_net; /* my net number */ }; @@ -399,14 +410,16 @@ struct lnet_remotenet { struct lnet_rtrbufpool { struct list_head rbp_bufs; /* my free buffer pool */ struct list_head rbp_msgs; /* messages blocking - for a buffer */ + * for a buffer + */ int rbp_npages; /* # pages in each buffer */ /* requested number of buffers */ int rbp_req_nbuffers; /* # buffers actually allocated */ int rbp_nbuffers; - int rbp_credits; /* # free buffers / - blocked messages */ + int rbp_credits; /* # free buffers + * blocked messages + */ int rbp_mincredits; /* low water mark */ }; @@ -442,7 +455,8 @@ enum lnet_match_flags { #define LNET_PTL_LAZY (1 << 0) #define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */ #define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match, - request portal */ + * request portal + */ /* parameter for matching operations (GET, PUT) */ struct lnet_match_info { diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h index ea736f8d5231..a4f9ff01d458 100644 --- a/drivers/staging/lustre/include/linux/lnet/lnetst.h +++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h @@ -54,7 +54,8 @@ #define LSTIO_GROUP_ADD 0xC10 /* add group */ #define LSTIO_GROUP_LIST 0xC11 /* list all groups in session */ #define LSTIO_GROUP_INFO 0xC12 /* query default information of - * specified group */ + * specified group + */ #define LSTIO_GROUP_DEL 0xC13 /* delete group */ #define LSTIO_NODES_ADD 0xC14 /* add nodes to specified group */ #define LSTIO_GROUP_UPDATE 0xC15 /* update group */ @@ -102,27 +103,32 @@ struct lstcon_test_ent { int tse_type; /* test type */ int tse_loop; /* loop count */ int tse_concur; /* concurrency of test */ -}; /*** test summary entry, for - *** list_batch command */ +}; /* test summary entry, for + * list_batch command + */ struct lstcon_batch_ent { int bae_state; /* batch status */ int bae_timeout; /* batch timeout */ int bae_ntest; /* # of tests in the batch */ -}; /*** batch summary entry, for - *** list_batch command */ +}; /* batch summary entry, for + * list_batch command + */ struct lstcon_test_batch_ent { struct lstcon_ndlist_ent tbe_cli_nle; /* client (group) node_list - * entry */ + * entry + */ struct lstcon_ndlist_ent tbe_srv_nle; /* server (group) node_list - * entry */ + * entry + */ union { struct lstcon_test_ent tbe_test; /* test entry */ struct lstcon_batch_ent tbe_batch;/* batch entry */ } u; -}; /*** test/batch verbose 
information entry, - *** for list_batch command */ +}; /* test/batch verbose information entry, + * for list_batch command + */ struct lstcon_rpc_ent { struct list_head rpe_link; /* link chain */ @@ -138,10 +144,10 @@ struct lstcon_rpc_ent { }; struct lstcon_trans_stat { - int trs_rpc_stat[4]; /* RPCs stat (0: total - 1: failed - 2: finished - 4: reserved */ + int trs_rpc_stat[4]; /* RPCs stat (0: total 1: failed + * 2: finished + * 4: reserved + */ int trs_rpc_errno; /* RPC errno */ int trs_fwk_stat[8]; /* framework stat */ int trs_fwk_errno; /* errno of the first remote error */ @@ -275,22 +281,28 @@ struct lstio_session_end_args { struct lstio_debug_args { int lstio_dbg_key; /* IN: session key */ int lstio_dbg_type; /* IN: debug - session|batch| - group|nodes - list */ + * session|batch| + * group|nodes list + */ int lstio_dbg_flags; /* IN: reserved debug - flags */ + * flags + */ int lstio_dbg_timeout; /* IN: timeout of - debug */ + * debug + */ int lstio_dbg_nmlen; /* IN: len of name */ char __user *lstio_dbg_namep; /* IN: name of - group|batch */ + * group|batch + */ int lstio_dbg_count; /* IN: # of test nodes - to debug */ + * to debug + */ struct lnet_process_id __user *lstio_dbg_idsp; /* IN: id of test - nodes */ + * nodes + */ struct list_head __user *lstio_dbg_resultp; /* OUT: list head of - result buffer */ + * result buffer + */ }; struct lstio_group_add_args { @@ -307,7 +319,8 @@ struct lstio_group_del_args { #define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */ #define LST_GROUP_REFRESH 2 /* refresh inactive nodes - * in the group */ + * in the group + */ #define LST_GROUP_RMND 3 /* delete nodes from the group */ struct lstio_group_update_args { @@ -319,7 +332,8 @@ struct lstio_group_update_args { int lstio_grp_count; /* IN: # of nodes id */ struct lnet_process_id __user *lstio_grp_idsp; /* IN: array of nodes */ struct list_head __user *lstio_grp_resultp; /* OUT: list head of - result buffer */ + * result buffer + */ }; struct lstio_group_nodes_args { @@ -331,7 +345,8 @@ struct lstio_group_nodes_args { unsigned int __user *lstio_grp_featp; struct lnet_process_id __user *lstio_grp_idsp; /* IN: nodes */ struct list_head __user *lstio_grp_resultp; /* OUT: list head of - result buffer */ + * result buffer + */ }; struct lstio_group_list_args { @@ -345,8 +360,9 @@ struct lstio_group_info_args { int lstio_grp_key; /* IN: session key */ int lstio_grp_nmlen; /* IN: name len */ char __user *lstio_grp_namep; /* IN: name */ - struct lstcon_ndlist_ent __user *lstio_grp_entp;/* OUT: description of - group */ + struct lstcon_ndlist_ent __user *lstio_grp_entp;/* OUT: description + * of group + */ int __user *lstio_grp_idxp; /* IN/OUT: node index */ int __user *lstio_grp_ndentp; /* IN/OUT: # of nodent */ struct lstcon_node_ent __user *lstio_grp_dentsp;/* OUT: nodent array */ @@ -369,34 +385,41 @@ struct lstio_batch_del_args { struct lstio_batch_run_args { int lstio_bat_key; /* IN: session key */ int lstio_bat_timeout; /* IN: timeout for - the batch */ + * the batch + */ int lstio_bat_nmlen; /* IN: name length */ char __user *lstio_bat_namep; /* IN: batch name */ struct list_head __user *lstio_bat_resultp; /* OUT: list head of - result buffer */ + * result buffer + */ }; struct lstio_batch_stop_args { int lstio_bat_key; /* IN: session key */ int lstio_bat_force; /* IN: abort unfinished - test RPC */ + * test RPC + */ int lstio_bat_nmlen; /* IN: name length */ char __user *lstio_bat_namep; /* IN: batch name */ struct list_head __user *lstio_bat_resultp; /* OUT: list head of - 
result buffer */ + * result buffer + */ }; struct lstio_batch_query_args { int lstio_bat_key; /* IN: session key */ int lstio_bat_testidx; /* IN: test index */ int lstio_bat_client; /* IN: we testing - client? */ + * client? + */ int lstio_bat_timeout; /* IN: timeout for - waiting */ + * waiting + */ int lstio_bat_nmlen; /* IN: name length */ char __user *lstio_bat_namep; /* IN: batch name */ struct list_head __user *lstio_bat_resultp; /* OUT: list head of - result buffer */ + * result buffer + */ }; struct lstio_batch_list_args { @@ -411,7 +434,8 @@ struct lstio_batch_info_args { int lstio_bat_nmlen; /* IN: name length */ char __user *lstio_bat_namep; /* IN: name */ int lstio_bat_server; /* IN: query server - or not */ + * or not + */ int lstio_bat_testidx; /* IN: test index */ struct lstcon_test_batch_ent __user *lstio_bat_entp;/* OUT: batch ent */ @@ -424,14 +448,17 @@ struct lstio_batch_info_args { struct lstio_stat_args { int lstio_sta_key; /* IN: session key */ int lstio_sta_timeout; /* IN: timeout for - stat request */ + * stat request + */ int lstio_sta_nmlen; /* IN: group name - length */ + * length + */ char __user *lstio_sta_namep; /* IN: group name */ int lstio_sta_count; /* IN: # of pid */ struct lnet_process_id __user *lstio_sta_idsp; /* IN: pid */ struct list_head __user *lstio_sta_resultp; /* OUT: list head of - result buffer */ + * result buffer + */ }; enum lst_test_type { @@ -452,26 +479,32 @@ struct lstio_test_args { int lstio_tes_concur; /* IN: concurrency */ int lstio_tes_dist; /* IN: node distribution in - destination groups */ + * destination groups + */ int lstio_tes_span; /* IN: node span in - destination groups */ + * destination groups + */ int lstio_tes_sgrp_nmlen; /* IN: source group - name length */ + * name length + */ char __user *lstio_tes_sgrp_name; /* IN: group name */ int lstio_tes_dgrp_nmlen; /* IN: destination group - name length */ + * name length + */ char __user *lstio_tes_dgrp_name; /* IN: group name */ int lstio_tes_param_len; /* IN: param buffer len */ void __user *lstio_tes_param; /* IN: parameter for specified - test: - lstio_bulk_param_t, - lstio_ping_param_t, - ... more */ + * test: lstio_bulk_param_t, + * lstio_ping_param_t, + * ... 
more + */ int __user *lstio_tes_retp; /* OUT: private returned - value */ + * value + */ struct list_head __user *lstio_tes_resultp;/* OUT: list head of - result buffer */ + * result buffer + */ }; enum lst_brw_type { diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h index dd5bc0e46560..a1ae66ede7a8 100644 --- a/drivers/staging/lustre/include/linux/lnet/socklnd.h +++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h @@ -76,7 +76,8 @@ struct ksock_msg { __u64 ksm_zc_cookies[2]; /* Zero-Copy request/ACK cookie */ union { struct ksock_lnet_msg lnetmsg; /* lnet message, it's empty if - * it's NOOP */ + * it's NOOP + */ } WIRE_ATTR ksm_u; } WIRE_ATTR; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index 5540de65f9a2..9eb169da2c2f 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -519,17 +519,6 @@ extern struct ksock_proto ksocknal_protocol_v3x; #define CPU_MASK_NONE 0UL #endif -static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len) -{ -#if 1 - return crc32_le(crc, p, len); -#else - while (len-- > 0) - crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ; - return crc; -#endif -} - static inline int ksocknal_route_mask(void) { diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c index 8a036f4eb8d8..9c328dc6537b 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -201,9 +201,9 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn) if (fragnob > sum) fragnob = sum; - conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, - iov[i].iov_base, - fragnob); + conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum, + iov[i].iov_base, + fragnob); } conn->ksnc_msg.ksm_csum = saved_csum; } @@ -243,8 +243,8 @@ ksocknal_lib_recv_kiov(struct ksock_conn *conn) if (fragnob > sum) fragnob = sum; - conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, - base, fragnob); + conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum, + base, fragnob); kunmap(kiov[i].bv_page); } @@ -265,22 +265,22 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx) tx->tx_msg.ksm_csum = 0; - csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base, - tx->tx_iov[0].iov_len); + csum = crc32_le(~0, tx->tx_iov[0].iov_base, + tx->tx_iov[0].iov_len); if (tx->tx_kiov) { for (i = 0; i < tx->tx_nkiov; i++) { base = kmap(tx->tx_kiov[i].bv_page) + tx->tx_kiov[i].bv_offset; - csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len); + csum = crc32_le(csum, base, tx->tx_kiov[i].bv_len); kunmap(tx->tx_kiov[i].bv_page); } } else { for (i = 1; i < tx->tx_niov; i++) - csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base, - tx->tx_iov[i].iov_len); + csum = crc32_le(csum, tx->tx_iov[i].iov_base, + tx->tx_iov[i].iov_len); } if (*ksocknal_tunables.ksnd_inject_csum_error) { diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c index 5c2ce2ee6fd9..ff54eaf6651b 100644 --- a/drivers/staging/lustre/lnet/libcfs/hash.c +++ b/drivers/staging/lustre/lnet/libcfs/hash.c @@ -1008,7 +1008,7 @@ cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits, LASSERT(ops->hs_object); LASSERT(ops->hs_keycmp); LASSERT(ops->hs_get); - LASSERT(ops->hs_put_locked); + LASSERT(ops->hs_put || ops->hs_put_locked); if (flags & CFS_HASH_REHASH) flags |= CFS_HASH_COUNTER; 
/* must have counter */ @@ -1553,19 +1553,20 @@ static int cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data, int start) { + struct hlist_node *next = NULL; struct hlist_node *hnode; - struct hlist_node *tmp; struct cfs_hash_bd bd; u32 version; int count = 0; int stop_on_change; + int has_put_locked; int end = -1; int rc = 0; int i; stop_on_change = cfs_hash_with_rehash_key(hs) || - !cfs_hash_with_no_itemref(hs) || - !hs->hs_ops->hs_put_locked; + !cfs_hash_with_no_itemref(hs); + has_put_locked = hs->hs_ops->hs_put_locked != NULL; cfs_hash_lock(hs, 0); again: LASSERT(!cfs_hash_is_rehashing(hs)); @@ -1582,38 +1583,52 @@ again: version = cfs_hash_bd_version_get(&bd); cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - for (hnode = hhead->first; hnode;) { + hnode = hhead->first; + if (!hnode) + continue; + cfs_hash_get(hs, hnode); + + for (; hnode; hnode = next) { cfs_hash_bucket_validate(hs, &bd, hnode); - cfs_hash_get(hs, hnode); + next = hnode->next; + if (next) + cfs_hash_get(hs, next); cfs_hash_bd_unlock(hs, &bd, 0); cfs_hash_unlock(hs, 0); rc = func(hs, &bd, hnode, data); - if (stop_on_change) + if (stop_on_change || !has_put_locked) cfs_hash_put(hs, hnode); cond_resched(); count++; cfs_hash_lock(hs, 0); cfs_hash_bd_lock(hs, &bd, 0); - if (!stop_on_change) { - tmp = hnode->next; - cfs_hash_put_locked(hs, hnode); - hnode = tmp; - } else { /* bucket changed? */ + if (stop_on_change) { if (version != cfs_hash_bd_version_get(&bd)) - break; - /* safe to continue because no change */ - hnode = hnode->next; + rc = -EINTR; + } else if (has_put_locked) { + cfs_hash_put_locked(hs, hnode); } if (rc) /* callback wants to break iteration */ break; } - if (rc) /* callback wants to break iteration */ + if (next) { + if (has_put_locked) { + cfs_hash_put_locked(hs, next); + next = NULL; + } break; + } else if (rc) { + break; + } } cfs_hash_bd_unlock(hs, &bd, 0); + if (next && !has_put_locked) { + cfs_hash_put(hs, next); + next = NULL; + } if (rc) /* callback wants to break iteration */ break; } diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c index 075826bd3a2a..c6837bc58ed4 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c @@ -134,7 +134,7 @@ int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp, return -EINVAL; } - if (hdr.ioc_len < sizeof(struct libcfs_ioctl_data)) { + if (hdr.ioc_len < sizeof(hdr)) { CERROR("libcfs ioctl: user buffer too small for ioctl\n"); return -EINVAL; } diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c index 6ca7192b03b7..043eafb5ea40 100644 --- a/drivers/staging/lustre/lnet/selftest/conctl.c +++ b/drivers/staging/lustre/lnet/selftest/conctl.c @@ -69,8 +69,8 @@ lst_session_new_ioctl(struct lstio_session_new_args *args) rc = lstcon_session_new(name, args->lstio_ses_key, args->lstio_ses_feats, - args->lstio_ses_force, args->lstio_ses_timeout, + args->lstio_ses_force, args->lstio_ses_idp); LIBCFS_FREE(name, args->lstio_ses_nmlen + 1); diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c index da36c55b86d3..ae7c2772825e 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.c +++ b/drivers/staging/lustre/lnet/selftest/conrpc.c @@ -487,10 +487,9 @@ lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans, sizeof(struct list_head))) return -EFAULT; - if (tmp.next == head_up) - 
return 0; - next = tmp.next; + if (next == head_up) + return 0; ent = list_entry(next, struct lstcon_rpc_ent, rpe_link); diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c index b852fed0b10f..adaa0942130f 100644 --- a/drivers/staging/lustre/lustre/fld/fld_cache.c +++ b/drivers/staging/lustre/lustre/fld/fld_cache.c @@ -348,9 +348,10 @@ static void fld_cache_overlap_handle(struct fld_cache *cache, f_curr->fce_range.lsr_end = new_start; fld_cache_entry_add(cache, f_new, &f_curr->fce_list); - } else + } else { CERROR("NEW range =" DRANGE " curr = " DRANGE "\n", PRANGE(range), PRANGE(&f_curr->fce_range)); + } } struct fld_cache_entry diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h index 90a0c501e1ea..6887b81fa901 100644 --- a/drivers/staging/lustre/lustre/include/cl_object.h +++ b/drivers/staging/lustre/lustre/include/cl_object.h @@ -1358,7 +1358,7 @@ struct cl_2queue { /** IO types */ enum cl_io_type { /** read system call */ - CIT_READ, + CIT_READ = 1, /** write system call */ CIT_WRITE, /** truncate, utime system calls */ diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h index 0d4f92ec8334..a4d7280e1fa4 100644 --- a/drivers/staging/lustre/lustre/include/interval_tree.h +++ b/drivers/staging/lustre/lustre/include/interval_tree.h @@ -111,4 +111,8 @@ enum interval_iter interval_search(struct interval_node *root, struct interval_node_extent *ex, interval_callback_t func, void *data); +enum interval_iter interval_iterate_reverse(struct interval_node *root, + interval_callback_t func, + void *data); + #endif diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h index 915283c04094..9054d3745785 100644 --- a/drivers/staging/lustre/lustre/include/lprocfs_status.h +++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h @@ -59,7 +59,7 @@ struct lprocfs_vars { struct lprocfs_static_vars { struct lprocfs_vars *obd_vars; - struct attribute_group *sysfs_vars; + const struct attribute_group *sysfs_vars; }; /* if we find more consumers this could be generalized */ @@ -468,7 +468,7 @@ struct dentry *ldebugfs_register(const char *name, void ldebugfs_remove(struct dentry **entryp); int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list, - struct attribute_group *attrs); + const struct attribute_group *attrs); int lprocfs_obd_cleanup(struct obd_device *obd); int ldebugfs_seq_create(struct dentry *parent, diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index 77995fa47691..1db1ab8b93a2 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -1122,7 +1122,7 @@ struct ptlrpc_body_v2 { #define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. 
*/ #define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ -#define OBD_CONNECT_REAL 0x8000000ULL /*real connection */ +#define OBD_CONNECT_REAL 0x8000000ULL /* obsolete since 2.8 */ #define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */ #define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ #define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ @@ -3217,9 +3217,8 @@ struct link_ea_header { __u32 leh_magic; __u32 leh_reccount; __u64 leh_len; /* total size */ - /* future use */ - __u32 padding1; - __u32 padding2; + __u32 leh_overflow_time; + __u32 leh_padding; }; /** Hardlink data is name and parent fid. diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h index 249e8bf4fa22..3ff008fee13d 100644 --- a/drivers/staging/lustre/lustre/include/lustre_linkea.h +++ b/drivers/staging/lustre/lustre/include/lustre_linkea.h @@ -26,7 +26,19 @@ * Author: di wang <di.wang@intel.com> */ -#define DEFAULT_LINKEA_SIZE 4096 +/* There are several reasons to restrict the linkEA size: + * + * 1. Under DNE mode, if we do not restrict the linkEA size, and if there + * are too many cross-MDTs hard links to the same object, then it will + * casue the llog overflow. + * + * 2. Some backend has limited size for EA. For example, if without large + * EA enabled, the ldiskfs will make all EAs to share one (4K) EA block. + * + * 3. Too many entries in linkEA will seriously affect linkEA performance + * because we only support to locate linkEA entry consecutively. + */ +#define MAX_LINKEA_SIZE 4096 struct linkea_data { /** @@ -43,6 +55,7 @@ struct linkea_data { int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf); int linkea_init(struct linkea_data *ldata); +int linkea_init_with_rec(struct linkea_data *ldata); void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen, struct lu_name *lname, struct lu_fid *pfid); int linkea_entry_pack(struct link_ea_entry *lee, const struct lu_name *lname, diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index d61b000dac2c..05f02b17622b 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -558,13 +558,13 @@ struct ptlrpc_cli_req { /** request sent timeval */ struct timespec64 cr_sent_tv; /** time for request really sent out */ - time_t cr_sent_out; + time64_t cr_sent_out; /** when req reply unlink must finish. */ - time_t cr_reply_deadline; + time64_t cr_reply_deadline; /** when req bulk unlink must finish. */ - time_t cr_bulk_deadline; + time64_t cr_bulk_deadline; /** when req unlink must finish. 
*/ - time_t cr_req_deadline; + time64_t cr_req_deadline; /** Portal to which this request would be sent */ short cr_req_ptl; /** Portal where to wait for reply and where reply would be sent */ @@ -663,7 +663,7 @@ struct ptlrpc_srv_req { /** history sequence # */ __u64 sr_hist_seq; /** the index of service's srv_at_array into which request is linked */ - time_t sr_at_index; + time64_t sr_at_index; /** authed uid */ uid_t sr_auth_uid; /** authed uid mapped to */ diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index 4ce85064f9d0..4dfc31e2960b 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -404,12 +404,10 @@ struct lmv_tgt_desc { }; struct lmv_obd { - int refcount; struct lu_client_fld lmv_fld; spinlock_t lmv_lock; struct lmv_desc desc; struct obd_uuid cluuid; - struct obd_export *exp; struct mutex lmv_init_mutex; int connected; diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h index 083a6ff56a05..a6a4b2fbc18f 100644 --- a/drivers/staging/lustre/lustre/include/obd_class.h +++ b/drivers/staging/lustre/lustre/include/obd_class.h @@ -46,14 +46,7 @@ #define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update * obd_osfs_age */ -#define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd - * instead of a specific set. This - * means that we cannot rely on the set - * interpret routine to be called. - * lov_statfs_fini() must thus be called - * by the request interpret routine - */ -#define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving +#define OBD_STATFS_FOR_MDT0 0x0004 /* The statfs is only for retrieving * information from MDT0. */ diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c index e1069021420d..5bebd9a361ee 100644 --- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c +++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c @@ -110,6 +110,15 @@ static struct interval_node *interval_first(struct interval_node *node) return node; } +static struct interval_node *interval_last(struct interval_node *node) +{ + if (!node) + return NULL; + while (node->in_right) + node = node->in_right; + return node; +} + static struct interval_node *interval_next(struct interval_node *node) { if (!node) @@ -121,6 +130,37 @@ static struct interval_node *interval_next(struct interval_node *node) return node->in_parent; } +static struct interval_node *interval_prev(struct interval_node *node) +{ + if (!node) + return NULL; + + if (node->in_left) + return interval_last(node->in_left); + + while (node->in_parent && node_is_left_child(node)) + node = node->in_parent; + + return node->in_parent; +} + +enum interval_iter interval_iterate_reverse(struct interval_node *root, + interval_callback_t func, + void *data) +{ + enum interval_iter rc = INTERVAL_ITER_CONT; + struct interval_node *node; + + for (node = interval_last(root); node; node = interval_prev(node)) { + rc = func(node, data); + if (rc == INTERVAL_ITER_STOP) + break; + } + + return rc; +} +EXPORT_SYMBOL(interval_iterate_reverse); + static void __rotate_change_maxhigh(struct interval_node *node, struct interval_node *rotate) { diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h index ec3b23cd09ec..2bf5c8491be9 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h +++ 
b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h @@ -106,9 +106,6 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, extern unsigned int ldlm_enqueue_min; extern unsigned int ldlm_cancel_unused_locks_before_replay; -/* ldlm_resource.c */ -int ldlm_resource_putref_locked(struct ldlm_resource *res); - /* ldlm_lock.c */ struct ldlm_cb_set_arg { diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index ddb46428093f..181025dc36c3 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -1029,11 +1029,11 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) if (work_list && lock->l_completion_ast) ldlm_add_ast_work_item(lock, NULL, work_list); - if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) + if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) { ldlm_grant_lock_with_skiplist(lock); - else if (res->lr_type == LDLM_EXTENT) + } else if (res->lr_type == LDLM_EXTENT) { ldlm_extent_add_lock(res, lock); - else if (res->lr_type == LDLM_FLOCK) { + } else if (res->lr_type == LDLM_FLOCK) { /* * We should not add locks to granted list in the following cases: * - this is an UNLOCK but not a real lock; @@ -1045,8 +1045,9 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock)) return; ldlm_resource_add_lock(res, &res->lr_granted, lock); - } else + } else { LBUG(); + } ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); } diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c index fff930fc3cff..e0c3e5d6cf87 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c @@ -926,7 +926,7 @@ static struct attribute *ldlm_attrs[] = { NULL, }; -static struct attribute_group ldlm_attr_group = { +static const struct attribute_group ldlm_attr_group = { .attrs = ldlm_attrs, }; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c index c9ef247d9be4..4e805e775134 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c @@ -223,7 +223,7 @@ static ssize_t lru_size_show(struct kobject *kobj, struct attribute *attr, if (ns_connect_lru_resize(ns)) nr = &ns->ns_nr_unused; - return sprintf(buf, "%u", *nr); + return sprintf(buf, "%u\n", *nr); } static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, @@ -318,7 +318,7 @@ static ssize_t lru_max_age_show(struct kobject *kobj, struct attribute *attr, struct ldlm_namespace *ns = container_of(kobj, struct ldlm_namespace, ns_kobj); - return sprintf(buf, "%u", ns->ns_max_age); + return sprintf(buf, "%u\n", ns->ns_max_age); } static ssize_t lru_max_age_store(struct kobject *kobj, struct attribute *attr, @@ -536,16 +536,6 @@ static void ldlm_res_hop_get_locked(struct cfs_hash *hs, ldlm_resource_getref(res); } -static void ldlm_res_hop_put_locked(struct cfs_hash *hs, - struct hlist_node *hnode) -{ - struct ldlm_resource *res; - - res = hlist_entry(hnode, struct ldlm_resource, lr_hash); - /* cfs_hash_for_each_nolock is the only chance we call it */ - ldlm_resource_putref_locked(res); -} - static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode) { struct ldlm_resource *res; @@ -561,7 +551,6 @@ static struct cfs_hash_ops ldlm_ns_hash_ops = { .hs_keycpy = NULL, .hs_object = 
ldlm_res_hop_object, .hs_get = ldlm_res_hop_get_locked, - .hs_put_locked = ldlm_res_hop_put_locked, .hs_put = ldlm_res_hop_put }; @@ -572,7 +561,6 @@ static struct cfs_hash_ops ldlm_ns_fid_hash_ops = { .hs_keycpy = NULL, .hs_object = ldlm_res_hop_object, .hs_get = ldlm_res_hop_get_locked, - .hs_put_locked = ldlm_res_hop_put_locked, .hs_put = ldlm_res_hop_put }; @@ -1249,37 +1237,6 @@ int ldlm_resource_putref(struct ldlm_resource *res) } EXPORT_SYMBOL(ldlm_resource_putref); -/* Returns 1 if the resource was freed, 0 if it remains. */ -int ldlm_resource_putref_locked(struct ldlm_resource *res) -{ - struct ldlm_namespace *ns = ldlm_res_to_ns(res); - - LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON); - CDEBUG(D_INFO, "putref res: %p count: %d\n", - res, atomic_read(&res->lr_refcount) - 1); - - if (atomic_dec_and_test(&res->lr_refcount)) { - struct cfs_hash_bd bd; - - cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash, - &res->lr_name, &bd); - __ldlm_resource_putref_final(&bd, res); - cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1); - /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF, - * so we should never be here while calling cfs_hash_del, - * cfs_hash_for_each_nolock is the only case we can get - * here, which is safe to release cfs_hash_bd_lock. - */ - if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free) - ns->ns_lvbo->lvbo_free(res); - kmem_cache_free(ldlm_resource_slab, res); - - cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1); - return 1; - } - return 0; -} - /** * Add a lock into a given resource into specified lock list. */ diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index ab1c85c1ed38..215479a21865 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -3035,9 +3035,6 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type) spin_lock(&lli->lli_lock); /* VFS' acl_permission_check->check_acl will release the refcount */ acl = posix_acl_dup(lli->lli_posix_acl); -#ifdef CONFIG_FS_POSIX_ACL - forget_cached_acl(inode, type); -#endif spin_unlock(&lli->lli_lock); return acl; diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index 974a05d6c969..5c8405c71e4b 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -222,9 +222,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, else sbi->ll_fop = &ll_file_operations_noflock; - /* real client */ - data->ocd_connect_flags |= OBD_CONNECT_REAL; - /* always ping even if server suppress_pings */ if (sbi->ll_flags & LL_SBI_ALWAYS_PING) data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS; @@ -1319,6 +1316,7 @@ void ll_clear_inode(struct inode *inode) ll_xattr_cache_destroy(inode); #ifdef CONFIG_FS_POSIX_ACL + forget_all_cached_acls(inode); if (lli->lli_posix_acl) { posix_acl_release(lli->lli_posix_acl); lli->lli_posix_acl = NULL; @@ -2543,7 +2541,7 @@ static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno, unsigned int idx; int rc; - rc = linkea_init(ldata); + rc = linkea_init_with_rec(ldata); if (rc < 0) return rc; diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c index 1bac51f882a7..166455e05cc2 100644 --- a/drivers/staging/lustre/lustre/llite/rw.c +++ b/drivers/staging/lustre/lustre/llite/rw.c @@ -115,7 +115,7 @@ void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len) static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum 
ra_stat which) { - LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which); + LASSERTF(which < _NR_RA_STAT, "which: %u\n", which); lprocfs_counter_incr(sbi->ll_ra_stats, which); } diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c index 8e45672b4617..2b6069983ac2 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_dev.c +++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c @@ -591,9 +591,10 @@ static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos) env = cl_env_get(&refcheck); if (!IS_ERR(env)) { sbi = f->private; - if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT) + if (sbi->ll_site->ls_obj_hash->hs_cur_bits > + 64 - PGC_OBJ_SHIFT) { pos = ERR_PTR(-EFBIG); - else { + } else { *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos); if (*pos == ~0ULL) diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index bd30abdecfb9..d0cad7eb2b51 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c @@ -33,6 +33,7 @@ #include <linux/fs.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/xattr.h> #include <linux/selinux.h> #define DEBUG_SUBSYSTEM S_LLITE diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c index d61d8018001a..391fb25ac31d 100644 --- a/drivers/staging/lustre/lustre/llite/xattr_security.c +++ b/drivers/staging/lustre/lustre/llite/xattr_security.c @@ -28,7 +28,10 @@ * lustre/llite/xattr_security.c * Handler for storing security labels as extended attributes. */ + +#include <linux/types.h> #include <linux/security.h> +#include <linux/selinux.h> #include <linux/xattr.h> #include "llite_internal.h" @@ -48,19 +51,23 @@ static int ll_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { - const struct xattr_handler *handler; struct dentry *dentry = fs_info; const struct xattr *xattr; int err = 0; - handler = get_xattr_type(XATTR_SECURITY_PREFIX); - if (!handler) - return -ENXIO; - for (xattr = xattr_array; xattr->name; xattr++) { - err = handler->set(handler, dentry, inode, xattr->name, - xattr->value, xattr->value_len, - XATTR_CREATE); + char *full_name; + + full_name = kasprintf(GFP_KERNEL, "%s%s", + XATTR_SECURITY_PREFIX, xattr->name); + if (!full_name) { + err = -ENOMEM; + break; + } + + err = __vfs_setxattr(dentry, inode, full_name, xattr->value, + xattr->value_len, XATTR_CREATE); + kfree(full_name); if (err < 0) break; } diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c index f49db6c23217..5f01a0cfc809 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c @@ -474,7 +474,6 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data, ldlm_blocking_callback cb_blocking, __u64 extra_lock_flags) { - struct obd_device *obd = exp->exp_obd; int rc; LASSERT(fid_is_sane(&op_data->op_fid1)); @@ -484,10 +483,6 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data, (int)op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1)); - rc = lmv_check_connect(obd); - if (rc) - return rc; - if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT)) rc = lmv_intent_lookup(exp, op_data, it, reqp, cb_blocking, extra_lock_flags); diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h index 
12731a17e263..e839b831a7c5 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h +++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h @@ -42,8 +42,6 @@ #define LL_IT2STR(it) \ ((it) ? ldlm_it2str((it)->it_op) : "0") -int lmv_check_connect(struct obd_device *obd); - int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data, struct lookup_intent *it, struct ptlrpc_request **reqp, ldlm_blocking_callback cb_blocking, diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c index 64fcaef0bacd..1c9ff2493423 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c @@ -53,6 +53,8 @@ #include "../include/lustre_kernelcomm.h" #include "lmv_internal.h" +static int lmv_check_connect(struct obd_device *obd); + static void lmv_activate_target(struct lmv_obd *lmv, struct lmv_tgt_desc *tgt, int activate) @@ -183,59 +185,44 @@ static int lmv_notify(struct obd_device *obd, struct obd_device *watched, return rc; } -/** - * This is fake connect function. Its purpose is to initialize lmv and say - * caller that everything is okay. Real connection will be performed later. - */ static int lmv_connect(const struct lu_env *env, - struct obd_export **exp, struct obd_device *obd, + struct obd_export **pexp, struct obd_device *obd, struct obd_uuid *cluuid, struct obd_connect_data *data, void *localdata) { struct lmv_obd *lmv = &obd->u.lmv; struct lustre_handle conn = { 0 }; + struct obd_export *exp; int rc = 0; - /* - * We don't want to actually do the underlying connections more than - * once, so keep track. - */ - lmv->refcount++; - if (lmv->refcount > 1) { - *exp = NULL; - return 0; - } - rc = class_connect(&conn, obd, cluuid); if (rc) { CERROR("class_connection() returned %d\n", rc); return rc; } - *exp = class_conn2export(&conn); - class_export_get(*exp); + exp = class_conn2export(&conn); - lmv->exp = *exp; lmv->connected = 0; lmv->cluuid = *cluuid; - - if (data) - lmv->conn_data = *data; + lmv->conn_data = *data; lmv->lmv_tgts_kobj = kobject_create_and_add("target_obds", &obd->obd_kobj); - /* - * All real clients should perform actual connection right away, because - * it is possible, that LMV will not have opportunity to connect targets - * and MDC stuff will be called directly, for instance while reading - * ../mdc/../kbytesfree procfs file, etc. 
- */ - if (data && data->ocd_connect_flags & OBD_CONNECT_REAL) - rc = lmv_check_connect(obd); + rc = lmv_check_connect(obd); + if (rc) + goto out_sysfs; + + *pexp = exp; - if (rc && lmv->lmv_tgts_kobj) + return rc; + +out_sysfs: + if (lmv->lmv_tgts_kobj) kobject_put(lmv->lmv_tgts_kobj); + class_disconnect(exp); + return rc; } @@ -475,7 +462,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, return rc; } -int lmv_check_connect(struct obd_device *obd) +static int lmv_check_connect(struct obd_device *obd) { struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; @@ -519,7 +506,6 @@ int lmv_check_connect(struct obd_device *obd) goto out_disc; } - class_export_put(lmv->exp); lmv->connected = 1; easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC); lmv_init_ea_size(obd->obd_self_export, easize, 0); @@ -543,7 +529,7 @@ int lmv_check_connect(struct obd_device *obd) } } } - class_disconnect(lmv->exp); + mutex_unlock(&lmv->lmv_init_mutex); return rc; } @@ -598,13 +584,6 @@ static int lmv_disconnect(struct obd_export *exp) if (!lmv->tgts) goto out_local; - /* - * Only disconnect the underlying layers on the final disconnect. - */ - lmv->refcount--; - if (lmv->refcount != 0) - goto out_local; - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; @@ -623,8 +602,7 @@ out_local: if (!lmv->connected) class_export_put(exp); rc = class_disconnect(exp); - if (lmv->refcount == 0) - lmv->connected = 0; + lmv->connected = 0; return rc; } @@ -1122,7 +1100,8 @@ hsm_req_err: err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); if (err) { if (tgt->ltd_active) { - CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n", + CERROR("%s: error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n", + lmv2obd_dev(lmv)->obd_name, tgt->ltd_uuid.uuid, i, cmd, err); if (!rc) rc = err; @@ -1368,10 +1347,6 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, int rc = 0; u32 i; - rc = lmv_check_connect(obd); - if (rc) - return rc; - temp = kzalloc(sizeof(*temp), GFP_NOFS); if (!temp) return -ENOMEM; @@ -1418,11 +1393,6 @@ static int lmv_getstatus(struct obd_export *exp, { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; return md_getstatus(lmv->tgts[0]->ltd_exp, fid); } @@ -1435,11 +1405,6 @@ static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; tgt = lmv_find_target(lmv, fid); if (IS_ERR(tgt)) @@ -1458,11 +1423,6 @@ static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; tgt = lmv_find_target(lmv, fid); if (IS_ERR(tgt)) @@ -1479,11 +1439,6 @@ static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; tgt = lmv_find_target(lmv, &op_data->op_fid1); if (IS_ERR(tgt)) @@ -1502,11 +1457,6 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid) struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; u32 i; - int rc; - - rc = 
lmv_check_connect(obd); - if (rc) - return rc; CDEBUG(D_INODE, "CBDATA for " DFID "\n", PFID(fid)); @@ -1530,11 +1480,6 @@ static int lmv_close(struct obd_export *exp, struct md_op_data *op_data, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; tgt = lmv_find_target(lmv, &op_data->op_fid1); if (IS_ERR(tgt)) @@ -1661,10 +1606,6 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data, struct lmv_tgt_desc *tgt; int rc; - rc = lmv_check_connect(obd); - if (rc) - return rc; - if (!lmv->desc.ld_active_tgt_count) return -EIO; @@ -1718,11 +1659,6 @@ lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; CDEBUG(D_INODE, "ENQUEUE '%s' on " DFID "\n", LL_IT2STR(it), PFID(&op_data->op_fid1)); @@ -1749,10 +1685,6 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data, struct mdt_body *body; int rc; - rc = lmv_check_connect(obd); - if (rc) - return rc; - tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); if (IS_ERR(tgt)) return PTR_ERR(tgt); @@ -1845,10 +1777,6 @@ static int lmv_link(struct obd_export *exp, struct md_op_data *op_data, struct lmv_tgt_desc *tgt; int rc; - rc = lmv_check_connect(obd); - if (rc) - return rc; - LASSERT(op_data->op_namelen != 0); CDEBUG(D_INODE, "LINK " DFID ":%*s to " DFID "\n", @@ -1907,10 +1835,6 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data, (int)newlen, new, PFID(&op_data->op_fid2), op_data->op_mea2 ? op_data->op_mea2->lsm_md_stripe_count : 0); - rc = lmv_check_connect(obd); - if (rc) - return rc; - op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); op_data->op_cap = cfs_curproc_cap_pack(); @@ -2063,11 +1987,6 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; CDEBUG(D_INODE, "SETATTR for " DFID ", valid 0x%x\n", PFID(&op_data->op_fid1), op_data->op_attr.ia_valid); @@ -2086,11 +2005,6 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; tgt = lmv_find_target(lmv, fid); if (IS_ERR(tgt)) @@ -2272,7 +2186,6 @@ static int lmv_read_striped_page(struct obd_export *exp, { struct inode *master_inode = op_data->op_data; struct lu_fid master_fid = op_data->op_fid1; - struct obd_device *obd = exp->exp_obd; __u64 hash_offset = offset; __u32 ldp_flags; struct page *min_ent_page = NULL; @@ -2286,10 +2199,6 @@ static int lmv_read_striped_page(struct obd_export *exp, void *area; int rc; - rc = lmv_check_connect(obd); - if (rc) - return rc; - /* * Allocate a page and read entries from all of stripes and fill * the page by hash order @@ -2408,11 +2317,6 @@ static int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; if (unlikely(lsm)) return lmv_read_striped_page(exp, op_data, cb_op, offset, ppage); @@ 
-2460,9 +2364,6 @@ static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data, int stripe_index = 0; int rc; - rc = lmv_check_connect(obd); - if (rc) - return rc; retry_unlink: /* For striped dir, we need to locate the parent as well */ if (lsm) { @@ -2647,10 +2548,6 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) { int i; - rc = lmv_check_connect(obd); - if (rc) - return rc; - LASSERT(*vallen == sizeof(__u32)); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { struct lmv_tgt_desc *tgt = lmv->tgts[i]; @@ -2669,10 +2566,6 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, } else if (KEY_IS(KEY_MAX_EASIZE) || KEY_IS(KEY_DEFAULT_EASIZE) || KEY_IS(KEY_CONN_DATA)) { - rc = lmv_check_connect(obd); - if (rc) - return rc; - /* * Forwarding this request to first MDS, it should know LOV * desc. @@ -3021,15 +2914,10 @@ static int lmv_intent_getattr_async(struct obd_export *exp, struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *ptgt = NULL; struct lmv_tgt_desc *ctgt = NULL; - int rc; if (!fid_is_sane(&op_data->op_fid2)) return -EINVAL; - rc = lmv_check_connect(obd); - if (rc) - return rc; - ptgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); if (IS_ERR(ptgt)) return PTR_ERR(ptgt); @@ -3056,11 +2944,6 @@ static int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - int rc; - - rc = lmv_check_connect(obd); - if (rc) - return rc; tgt = lmv_find_target(lmv, fid); if (IS_ERR(tgt)) diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c index bf25f887062d..4c13e3926898 100644 --- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c +++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c @@ -161,7 +161,7 @@ static struct attribute *lmv_attrs[] = { NULL, }; -static struct attribute_group lmv_attr_group = { +static const struct attribute_group lmv_attr_group = { .attrs = lmv_attrs, }; diff --git a/drivers/staging/lustre/lustre/lov/Makefile b/drivers/staging/lustre/lustre/lov/Makefile index e4cc0db21014..ea93addcaff4 100644 --- a/drivers/staging/lustre/lustre/lov/Makefile +++ b/drivers/staging/lustre/lustre/lov/Makefile @@ -2,4 +2,4 @@ obj-$(CONFIG_LUSTRE_FS) += lov.o lov-y := lov_obd.o lov_pack.o lov_offset.o lov_merge.o \ lov_request.o lov_ea.o lov_dev.o lov_object.o lov_page.o \ lov_lock.o lov_io.o lovsub_dev.o lovsub_object.o lovsub_page.o \ - lovsub_lock.o lovsub_io.o lov_pool.o lproc_lov.o + lovsub_lock.o lov_pool.o lproc_lov.o diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h index e889d3a7de9c..38281b2a75df 100644 --- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h @@ -92,35 +92,6 @@ enum lov_device_flags { * Upper half. */ -/** - * Resources that are used in memory-cleaning path, and whose allocation - * cannot fail even when memory is tight. They are preallocated in sufficient - * quantities in lov_device::ld_emerg[], and access to them is serialized - * lov_device::ld_mutex. - */ -struct lov_device_emerg { - /** - * Page list used to submit IO when memory is in pressure. - */ - struct cl_page_list emrg_page_list; - /** - * sub-io's shared by all threads accessing this device when memory is - * too low to allocate sub-io's dynamically. 
- */ - struct cl_io emrg_subio; - /** - * Environments used by sub-io's in - * lov_device_emerg::emrg_subio. - */ - struct lu_env *emrg_env; - /** - * Refchecks for lov_device_emerg::emrg_env. - * - * \see cl_env_get() - */ - u16 emrg_refcheck; -}; - struct lov_device { /* * XXX Locking of lov-private data is missing. @@ -131,14 +102,6 @@ struct lov_device { __u32 ld_target_nr; struct lovsub_device **ld_target; __u32 ld_flags; - - /** Emergency resources used in memory-cleansing paths. */ - struct lov_device_emerg **ld_emrg; - /** - * Serializes access to lov_device::ld_emrg in low-memory - * conditions. - */ - struct mutex ld_mutex; }; /** @@ -299,8 +262,6 @@ struct lov_page { struct lovsub_device { struct cl_device acid_cl; - struct lov_device *acid_super; - int acid_idx; struct cl_device *acid_next; }; @@ -312,42 +273,10 @@ struct lovsub_object { }; /** - * A link between a top-lock and a sub-lock. Separate data-structure is - * necessary, because top-locks and sub-locks are in M:N relationship. - * - * \todo This can be optimized for a (by far) most frequent case of a single - * top-lock per sub-lock. - */ -struct lov_lock_link { - struct lov_lock *lll_super; - /** An index within parent lock. */ - int lll_idx; - /** - * A linkage into per sub-lock list of all corresponding top-locks, - * hanging off lovsub_lock::lss_parents. - */ - struct list_head lll_list; -}; - -/** * Lock state at lovsub layer. */ struct lovsub_lock { struct cl_lock_slice lss_cl; - /** - * List of top-locks that have given sub-lock as their part. Protected - * by cl_lock::cll_guard mutex. - */ - struct list_head lss_parents; - /** - * Top-lock that initiated current operation on this sub-lock. This is - * only set during top-to-bottom lock operations like enqueue, and is - * used to optimize state change notification. Protected by - * cl_lock::cll_guard mutex. - * - * \see lovsub_lock_state_one(). - */ - struct cl_lock *lss_active; }; /** @@ -356,7 +285,6 @@ struct lovsub_lock { struct lov_sublock_env { const struct lu_env *lse_env; struct cl_io *lse_io; - struct lov_io_sub *lse_sub; }; struct lovsub_page { @@ -366,12 +294,10 @@ struct lovsub_page { struct lov_thread_info { struct cl_object_conf lti_stripe_conf; struct lu_fid lti_fid; - struct cl_lock_descr lti_ldescr; struct ost_lvb lti_lvb; struct cl_2queue lti_cl2q; struct cl_page_list lti_plist; wait_queue_entry_t lti_waiter; - struct cl_attr lti_attr; }; /** @@ -385,7 +311,6 @@ struct lov_io_sub { * \see cl_env_get() */ u16 sub_refcheck; - u16 sub_reenter; /** * true, iff cl_io_init() was successfully executed against * lov_io_sub::sub_io. 
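For context on the structure deletions above (and the matching code removed from lov_dev.c further down): lov_device_emerg held a preallocated "emergency" reserve used when the memory-cleaning path could not afford a failing allocation. The sketch below is only a generic, hedged illustration of that pattern with made-up names (emerg_pool, emerg_alloc); it is not Lustre code and not part of this patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* One spare object, preallocated at init time, guarded by a mutex. */
struct emerg_pool {
	pthread_mutex_t	lock;
	void		*spare;
	bool		spare_in_use;
};

static int emerg_pool_init(struct emerg_pool *p, size_t size)
{
	pthread_mutex_init(&p->lock, NULL);
	p->spare_in_use = false;
	p->spare = malloc(size);
	return p->spare ? 0 : -1;
}

/* Normal allocation first; under memory pressure fall back to the spare. */
static void *emerg_alloc(struct emerg_pool *p, size_t size, bool mem_pressure)
{
	void *obj = mem_pressure ? NULL : malloc(size);

	if (obj)
		return obj;
	pthread_mutex_lock(&p->lock);
	if (!p->spare_in_use) {
		p->spare_in_use = true;
		obj = p->spare;
	}
	pthread_mutex_unlock(&p->lock);
	return obj;	/* NULL only if the spare is already handed out */
}

int main(void)
{
	struct emerg_pool pool;

	if (emerg_pool_init(&pool, 128))
		return 1;
	printf("under pressure: %p\n", emerg_alloc(&pool, 128, true));
	return 0;
}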
@@ -445,7 +370,6 @@ struct lov_io { */ u64 lis_endpos; - int lis_mem_frozen; int lis_stripe_count; int lis_active_subios; @@ -485,8 +409,6 @@ extern struct kmem_cache *lov_session_kmem; extern struct kmem_cache *lovsub_lock_kmem; extern struct kmem_cache *lovsub_object_kmem; -extern struct kmem_cache *lov_lock_link_kmem; - int lov_object_init(const struct lu_env *env, struct lu_object *obj, const struct lu_object_conf *conf); int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, @@ -508,15 +430,9 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, struct cl_io *io); int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, struct cl_io *io); -void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link, - struct lovsub_lock *sub); struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio, int stripe); -void lov_sub_put(struct lov_io_sub *sub); -int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov, - struct lovsub_lock *sublock, - const struct cl_lock_descr *d, int idx); int lov_page_init(const struct lu_env *env, struct cl_object *ob, struct cl_page *page, pgoff_t index); @@ -533,12 +449,6 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); -struct lov_lock_link *lov_lock_link_find(const struct lu_env *env, - struct lov_lock *lck, - struct lovsub_lock *sub); -struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, - const struct cl_page_slice *slice); - struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov); int lov_page_stripe(const struct cl_page *page); diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c index 7301f6e579a1..531b4fe61f7b 100644 --- a/drivers/staging/lustre/lustre/lov/lov_dev.c +++ b/drivers/staging/lustre/lustre/lov/lov_dev.c @@ -50,11 +50,6 @@ struct kmem_cache *lov_session_kmem; struct kmem_cache *lovsub_lock_kmem; struct kmem_cache *lovsub_object_kmem; -struct kmem_cache *lov_lock_link_kmem; - -/** Lock class of lov_device::ld_mutex. 
*/ -static struct lock_class_key cl_lov_device_mutex_class; - struct lu_kmem_descr lov_caches[] = { { .ckd_cache = &lov_lock_kmem, @@ -87,11 +82,6 @@ struct lu_kmem_descr lov_caches[] = { .ckd_size = sizeof(struct lovsub_object) }, { - .ckd_cache = &lov_lock_link_kmem, - .ckd_name = "lov_lock_link_kmem", - .ckd_size = sizeof(struct lov_lock_link) - }, - { .ckd_cache = NULL } }; @@ -204,8 +194,6 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, break; } lsd = cl2lovsub_dev(cl); - lsd->acid_idx = i; - lsd->acid_super = ld; ld->ld_target[i] = lsd; } @@ -217,34 +205,13 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, return rc; } -static void lov_emerg_free(struct lov_device_emerg **emrg, int nr) -{ - int i; - - for (i = 0; i < nr; ++i) { - struct lov_device_emerg *em; - - em = emrg[i]; - if (em) { - LASSERT(em->emrg_page_list.pl_nr == 0); - if (em->emrg_env) - cl_env_put(em->emrg_env, &em->emrg_refcheck); - kfree(em); - } - } - kfree(emrg); -} - static struct lu_device *lov_device_free(const struct lu_env *env, struct lu_device *d) { struct lov_device *ld = lu2lov_dev(d); - const int nr = ld->ld_target_nr; cl_device_fini(lu2cl_dev(d)); kfree(ld->ld_target); - if (ld->ld_emrg) - lov_emerg_free(ld->ld_emrg, nr); kfree(ld); return NULL; } @@ -260,41 +227,6 @@ static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev, } } -static struct lov_device_emerg **lov_emerg_alloc(int nr) -{ - struct lov_device_emerg **emerg; - int i; - int result; - - emerg = kcalloc(nr, sizeof(emerg[0]), GFP_NOFS); - if (!emerg) - return ERR_PTR(-ENOMEM); - for (result = i = 0; i < nr && result == 0; i++) { - struct lov_device_emerg *em; - - em = kzalloc(sizeof(*em), GFP_NOFS); - if (em) { - emerg[i] = em; - cl_page_list_init(&em->emrg_page_list); - em->emrg_env = cl_env_alloc(&em->emrg_refcheck, - LCT_REMEMBER | LCT_NOREF); - if (!IS_ERR(em->emrg_env)) { - em->emrg_env->le_ctx.lc_cookie = 0x2; - } else { - result = PTR_ERR(em->emrg_env); - em->emrg_env = NULL; - } - } else { - result = -ENOMEM; - } - } - if (result != 0) { - lov_emerg_free(emerg, nr); - emerg = ERR_PTR(result); - } - return emerg; -} - static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) { int result; @@ -306,29 +238,17 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) sub_size = dev->ld_target_nr; if (sub_size < tgt_size) { struct lovsub_device **newd; - struct lov_device_emerg **emerg; const size_t sz = sizeof(newd[0]); - emerg = lov_emerg_alloc(tgt_size); - if (IS_ERR(emerg)) - return PTR_ERR(emerg); - newd = kcalloc(tgt_size, sz, GFP_NOFS); if (newd) { - mutex_lock(&dev->ld_mutex); if (sub_size > 0) { memcpy(newd, dev->ld_target, sub_size * sz); kfree(dev->ld_target); } dev->ld_target = newd; dev->ld_target_nr = tgt_size; - - if (dev->ld_emrg) - lov_emerg_free(dev->ld_emrg, sub_size); - dev->ld_emrg = emerg; - mutex_unlock(&dev->ld_mutex); } else { - lov_emerg_free(emerg, tgt_size); result = -ENOMEM; } } @@ -362,8 +282,6 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev, tgt->ltd_obd->obd_lu_dev); if (!IS_ERR(cl)) { lsd = cl2lovsub_dev(cl); - lsd->acid_idx = index; - lsd->acid_super = ld; ld->ld_target[index] = lsd; } else { CERROR("add failed (%d), deleting %s\n", rc, @@ -428,9 +346,6 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env, d = lov2lu_dev(ld); d->ld_ops = &lov_lu_ops; - mutex_init(&ld->ld_mutex); - lockdep_set_class(&ld->ld_mutex, 
&cl_lov_device_mutex_class); - /* setup the LOV OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); LASSERT(obd); diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c index ac0bf64c08c1..2a8fee800365 100644 --- a/drivers/staging/lustre/lustre/lov/lov_ea.c +++ b/drivers/staging/lustre/lustre/lov/lov_ea.c @@ -150,9 +150,10 @@ static int lsm_unpackmd_common(struct lov_obd *lov, struct lov_mds_md *lmm, struct lov_ost_data_v1 *objects) { - loff_t stripe_maxbytes = LLONG_MAX; + loff_t min_stripe_maxbytes = 0; unsigned int stripe_count; struct lov_oinfo *loi; + loff_t lov_bytes; unsigned int i; /* @@ -168,8 +169,6 @@ static int lsm_unpackmd_common(struct lov_obd *lov, stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count; for (i = 0; i < stripe_count; i++) { - loff_t tgt_bytes; - loi = lsm->lsm_oinfo[i]; ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi); loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx); @@ -194,17 +193,21 @@ static int lsm_unpackmd_common(struct lov_obd *lov, continue; } - tgt_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]); - stripe_maxbytes = min_t(loff_t, stripe_maxbytes, tgt_bytes); + lov_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]); + if (min_stripe_maxbytes == 0 || lov_bytes < min_stripe_maxbytes) + min_stripe_maxbytes = lov_bytes; } - if (stripe_maxbytes == LLONG_MAX) - stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; + if (min_stripe_maxbytes == 0) + min_stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES; + + stripe_count = lsm->lsm_stripe_count ?: lov->desc.ld_tgt_count; + lov_bytes = min_stripe_maxbytes * stripe_count; - if (!lsm->lsm_stripe_count) - lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count; + if (lov_bytes < min_stripe_maxbytes) /* handle overflow */ + lsm->lsm_maxbytes = MAX_LFS_FILESIZE; else - lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count; + lsm->lsm_maxbytes = lov_bytes; return 0; } diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h index 774499c74daa..9905df2343a6 100644 --- a/drivers/staging/lustre/lustre/lov/lov_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_internal.h @@ -161,42 +161,21 @@ struct lov_request { struct list_head rq_link; int rq_idx; /* index in lov->tgts array */ - int rq_stripe; /* stripe number */ - int rq_complete; - int rq_rc; - - u32 rq_oabufs; - u32 rq_pgaidx; }; struct lov_request_set { struct obd_info *set_oi; - atomic_t set_refcount; - struct obd_export *set_exp; - /* XXX: There is @set_exp already, however obd_statfs gets obd_device - * only. 
- */ struct obd_device *set_obd; int set_count; atomic_t set_completes; atomic_t set_success; - atomic_t set_finish_checked; struct list_head set_list; - wait_queue_head_t set_waitq; }; extern struct kmem_cache *lov_oinfo_slab; extern struct lu_kmem_descr lov_caches[]; -void lov_finish_set(struct lov_request_set *set); - -static inline void lov_put_reqset(struct lov_request_set *set) -{ - if (atomic_dec_and_test(&set->set_refcount)) - lov_finish_set(set); -} - #define lov_uuid2str(lv, index) \ (char *)((lv)->lov_tgts[index]->ltd_uuid.uuid) @@ -217,15 +196,9 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index, int stripe); /* lov_request.c */ -int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo, - struct lov_request_set **reqset); -int lov_fini_getattr_set(struct lov_request_set *set); int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, struct lov_request_set **reqset); -int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, - int success); int lov_fini_statfs_set(struct lov_request_set *set); -int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc); /* lov_obd.c */ void lov_stripe_lock(struct lov_stripe_md *md); diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c index babf39adef85..9e3b150967b4 100644 --- a/drivers/staging/lustre/lustre/lov/lov_io.c +++ b/drivers/staging/lustre/lustre/lov/lov_io.c @@ -43,24 +43,12 @@ * @{ */ -static inline void lov_sub_enter(struct lov_io_sub *sub) -{ - sub->sub_reenter++; -} - -static inline void lov_sub_exit(struct lov_io_sub *sub) -{ - sub->sub_reenter--; -} - static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio, struct lov_io_sub *sub) { if (sub->sub_io) { if (sub->sub_io_initialized) { - lov_sub_enter(sub); cl_io_fini(sub->sub_env, sub->sub_io); - lov_sub_exit(sub); sub->sub_io_initialized = 0; lio->lis_active_subios--; } @@ -142,13 +130,11 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio, struct lov_io_sub *sub) { struct lov_object *lov = lio->lis_object; - struct lov_device *ld = lu2lov_dev(lov2cl(lov)->co_lu.lo_dev); struct cl_io *sub_io; struct cl_object *sub_obj; struct cl_io *io = lio->lis_cl.cis_io; - int stripe = sub->sub_stripe; - int result; + int rc; LASSERT(!sub->sub_io); LASSERT(!sub->sub_env); @@ -157,63 +143,53 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio, if (unlikely(!lov_r0(lov)->lo_sub[stripe])) return -EIO; - result = 0; sub->sub_io_initialized = 0; sub->sub_borrowed = 0; - if (lio->lis_mem_frozen) { - LASSERT(mutex_is_locked(&ld->ld_mutex)); - sub->sub_io = &ld->ld_emrg[stripe]->emrg_subio; - sub->sub_env = ld->ld_emrg[stripe]->emrg_env; - sub->sub_borrowed = 1; - } else { - sub->sub_env = cl_env_get(&sub->sub_refcheck); - if (IS_ERR(sub->sub_env)) - result = PTR_ERR(sub->sub_env); + /* obtain new environment */ + sub->sub_env = cl_env_get(&sub->sub_refcheck); + if (IS_ERR(sub->sub_env)) { + rc = PTR_ERR(sub->sub_env); + goto fini_lov_io; + } - if (result == 0) { - /* - * First sub-io. Use ->lis_single_subio to - * avoid dynamic allocation. - */ - if (lio->lis_active_subios == 0) { - sub->sub_io = &lio->lis_single_subio; - lio->lis_single_subio_index = stripe; - } else { - sub->sub_io = kzalloc(sizeof(*sub->sub_io), - GFP_NOFS); - if (!sub->sub_io) - result = -ENOMEM; - } + /* + * First sub-io. Use ->lis_single_subio to + * avoid dynamic allocation. 
+ */ + if (lio->lis_active_subios == 0) { + sub->sub_io = &lio->lis_single_subio; + lio->lis_single_subio_index = stripe; + } else { + sub->sub_io = kzalloc(sizeof(*sub->sub_io), + GFP_NOFS); + if (!sub->sub_io) { + rc = -ENOMEM; + goto fini_lov_io; } } - if (result == 0) { - sub_obj = lovsub2cl(lov_r0(lov)->lo_sub[stripe]); - sub_io = sub->sub_io; - - sub_io->ci_obj = sub_obj; - sub_io->ci_result = 0; - - sub_io->ci_parent = io; - sub_io->ci_lockreq = io->ci_lockreq; - sub_io->ci_type = io->ci_type; - sub_io->ci_no_srvlock = io->ci_no_srvlock; - sub_io->ci_noatime = io->ci_noatime; - - lov_sub_enter(sub); - result = cl_io_sub_init(sub->sub_env, sub_io, - io->ci_type, sub_obj); - lov_sub_exit(sub); - if (result >= 0) { - lio->lis_active_subios++; - sub->sub_io_initialized = 1; - result = 0; - } + sub_obj = lovsub2cl(lov_r0(lov)->lo_sub[stripe]); + sub_io = sub->sub_io; + + sub_io->ci_obj = sub_obj; + sub_io->ci_result = 0; + sub_io->ci_parent = io; + sub_io->ci_lockreq = io->ci_lockreq; + sub_io->ci_type = io->ci_type; + sub_io->ci_no_srvlock = io->ci_no_srvlock; + sub_io->ci_noatime = io->ci_noatime; + + rc = cl_io_sub_init(sub->sub_env, sub_io, io->ci_type, sub_obj); + if (rc >= 0) { + lio->lis_active_subios++; + sub->sub_io_initialized = 1; + rc = 0; } - if (result != 0) +fini_lov_io: + if (rc) lov_io_sub_fini(env, lio, sub); - return result; + return rc; } struct lov_io_sub *lov_sub_get(const struct lu_env *env, @@ -230,16 +206,10 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env, } else { rc = 0; } - if (rc == 0) - lov_sub_enter(sub); - else + if (rc < 0) sub = ERR_PTR(rc); - return sub; -} -void lov_sub_put(struct lov_io_sub *sub) -{ - lov_sub_exit(sub); + return sub; } /***************************************************************************** @@ -258,22 +228,6 @@ int lov_page_stripe(const struct cl_page *page) return cl2lov_page(slice)->lps_stripe; } -struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, - const struct cl_page_slice *slice) -{ - struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; - struct cl_page *page = slice->cpl_page; - int stripe; - - LASSERT(lio->lis_cl.cis_io); - LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object); - LASSERT(lsm); - LASSERT(lio->lis_nr_subios > 0); - - stripe = lov_page_stripe(page); - return lov_sub_get(env, lio, stripe); -} - static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, struct cl_io *io) { @@ -431,12 +385,10 @@ static int lov_io_iter_init(const struct lu_env *env, lov_io_sub_inherit(sub->sub_io, lio, stripe, start, end); rc = cl_io_iter_init(sub->sub_env, sub->sub_io); - if (rc) + if (rc) { cl_io_iter_fini(sub->sub_env, sub->sub_io); - lov_sub_put(sub); - if (rc) break; - + } CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n", stripe, start, end); @@ -488,9 +440,7 @@ static int lov_io_call(const struct lu_env *env, struct lov_io *lio, int rc = 0; list_for_each_entry(sub, &lio->lis_active, sub_linkage) { - lov_sub_enter(sub); rc = iofunc(sub->sub_env, sub->sub_io); - lov_sub_exit(sub); if (rc) break; @@ -610,7 +560,6 @@ static int lov_io_read_ahead(const struct lu_env *env, rc = cl_io_read_ahead(sub->sub_env, sub->sub_io, cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff), ra); - lov_sub_put(sub); CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n", PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc); @@ -679,7 +628,6 @@ static int lov_io_submit(const struct lu_env *env, LASSERT(sub->sub_io == &lio->lis_single_subio); rc = cl_io_submit_rw(sub->sub_env, 
sub->sub_io, crt, queue); - lov_sub_put(sub); return rc; } @@ -707,7 +655,6 @@ static int lov_io_submit(const struct lu_env *env, if (!IS_ERR(sub)) { rc = cl_io_submit_rw(sub->sub_env, sub->sub_io, crt, cl2q); - lov_sub_put(sub); } else { rc = PTR_ERR(sub); } @@ -746,7 +693,6 @@ static int lov_io_commit_async(const struct lu_env *env, LASSERT(sub->sub_io == &lio->lis_single_subio); rc = cl_io_commit_async(sub->sub_env, sub->sub_io, queue, from, to, cb); - lov_sub_put(sub); return rc; } @@ -777,7 +723,6 @@ static int lov_io_commit_async(const struct lu_env *env, if (!IS_ERR(sub)) { rc = cl_io_commit_async(sub->sub_env, sub->sub_io, plist, from, stripe_to, cb); - lov_sub_put(sub); } else { rc = PTR_ERR(sub); break; @@ -813,7 +758,6 @@ static int lov_io_fault_start(const struct lu_env *env, if (IS_ERR(sub)) return PTR_ERR(sub); sub->sub_io->u.ci_fault.ft_nob = fio->ft_nob; - lov_sub_put(sub); return lov_io_start(env, ios); } @@ -828,9 +772,7 @@ static void lov_io_fsync_end(const struct lu_env *env, list_for_each_entry(sub, &lio->lis_active, sub_linkage) { struct cl_io *subio = sub->sub_io; - lov_sub_enter(sub); lov_io_end_wrapper(sub->sub_env, subio); - lov_sub_exit(sub); if (subio->ci_result == 0) *written += subio->u.ci_fsync.fi_nr_written; @@ -939,12 +881,6 @@ static const struct cl_io_operations lov_empty_io_ops = { .op = { [CIT_READ] = { .cio_fini = lov_empty_io_fini, -#if 0 - .cio_iter_init = LOV_EMPTY_IMPOSSIBLE, - .cio_lock = LOV_EMPTY_IMPOSSIBLE, - .cio_start = LOV_EMPTY_IMPOSSIBLE, - .cio_end = LOV_EMPTY_IMPOSSIBLE -#endif }, [CIT_WRITE] = { .cio_fini = lov_empty_io_fini, @@ -1047,6 +983,8 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, switch (io->ci_type) { default: LASSERTF(0, "invalid type %d\n", io->ci_type); + result = -EOPNOTSUPP; + break; case CIT_MISC: case CIT_FSYNC: case CIT_DATA_VERSION: diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c index 8502128e8248..7d0d3ea5a92a 100644 --- a/drivers/staging/lustre/lustre/lov/lov_lock.c +++ b/drivers/staging/lustre/lustre/lov/lov_lock.c @@ -71,13 +71,11 @@ static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env, if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) { subenv->lse_env = env; subenv->lse_io = io; - subenv->lse_sub = NULL; } else { sub = lov_sub_get(env, lio, lls->sub_stripe); if (!IS_ERR(sub)) { subenv->lse_env = sub->sub_env; subenv->lse_io = sub->sub_io; - subenv->lse_sub = sub; } else { subenv = (void *)sub; } @@ -85,12 +83,6 @@ static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env, return subenv; } -static void lov_sublock_env_put(struct lov_sublock_env *subenv) -{ - if (subenv && subenv->lse_sub) - lov_sub_put(subenv->lse_sub); -} - static int lov_sublock_init(const struct lu_env *env, const struct cl_lock *parent, struct lov_lock_sub *lls) @@ -102,7 +94,6 @@ static int lov_sublock_init(const struct lu_env *env, if (!IS_ERR(subenv)) { result = cl_lock_init(subenv->lse_env, &lls->sub_lock, subenv->lse_io); - lov_sublock_env_put(subenv); } else { /* error occurs. 
*/ result = PTR_ERR(subenv); @@ -244,7 +235,6 @@ static int lov_lock_enqueue(const struct lu_env *env, } rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io, &lls->sub_lock, anchor); - lov_sublock_env_put(subenv); if (rc != 0) break; @@ -272,7 +262,6 @@ static void lov_lock_cancel(const struct lu_env *env, subenv = lov_sublock_env_get(env, lock, lls); if (!IS_ERR(subenv)) { cl_lock_cancel(subenv->lse_env, sublock); - lov_sublock_env_put(subenv); } else { CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock, "lov_lock_cancel fails with %ld.\n", diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c index 25f15da6e189..f27b11fdee3f 100644 --- a/drivers/staging/lustre/lustre/lov/lov_obd.c +++ b/drivers/staging/lustre/lustre/lov/lov_obd.c @@ -947,7 +947,8 @@ out: return rc; } -int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc) +static int +lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc) { struct lov_request_set *lovset = (struct lov_request_set *)data; int err; diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c index 14f38268d414..8fc0bcc11aa2 100644 --- a/drivers/staging/lustre/lustre/lov/lov_object.c +++ b/drivers/staging/lustre/lustre/lov/lov_object.c @@ -638,7 +638,7 @@ static const struct lov_layout_operations lov_dispatch[] = { enum lov_layout_type __llt; \ \ __llt = __obj->lo_type; \ - LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \ + LASSERT(__llt < ARRAY_SIZE(lov_dispatch)); \ lov_dispatch[__llt].op(__VA_ARGS__); \ }) @@ -697,7 +697,7 @@ do { \ \ lov_conf_freeze(__obj); \ __llt = __obj->lo_type; \ - LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \ + LASSERT(__llt < ARRAY_SIZE(lov_dispatch)); \ lov_dispatch[__llt].op(__VA_ARGS__); \ lov_conf_thaw(__obj); \ } while (0) @@ -748,13 +748,13 @@ static int lov_layout_change(const struct lu_env *unused, u16 refcheck; int rc; - LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch)); + LASSERT(lov->lo_type < ARRAY_SIZE(lov_dispatch)); env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); - LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch)); + LASSERT(llt < ARRAY_SIZE(lov_dispatch)); CDEBUG(D_INODE, DFID " from %s to %s\n", PFID(lu_object_fid(lov2lu(lov))), @@ -1003,12 +1003,12 @@ int lov_lock_init(const struct lu_env *env, struct cl_object *obj, * \retval last_stripe return the last stripe of the mapping */ static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, - loff_t fm_start, loff_t fm_end, + u64 fm_start, u64 fm_end, int start_stripe, int *stripe_count) { int last_stripe; - loff_t obd_start; - loff_t obd_end; + u64 obd_start; + u64 obd_end; int i, j; if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) { @@ -1076,14 +1076,14 @@ static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap, * \param fm_end [in] logical end of mapping * \param start_stripe [out] starting stripe will be returned in this */ -static loff_t fiemap_calc_fm_end_offset(struct fiemap *fiemap, - struct lov_stripe_md *lsm, - loff_t fm_start, loff_t fm_end, - int *start_stripe) +static u64 fiemap_calc_fm_end_offset(struct fiemap *fiemap, + struct lov_stripe_md *lsm, + u64 fm_start, u64 fm_end, + int *start_stripe) { - loff_t local_end = fiemap->fm_extents[0].fe_logical; - loff_t lun_start, lun_end; - loff_t fm_end_offset; + u64 local_end = fiemap->fm_extents[0].fe_logical; + u64 lun_start, lun_end; + u64 fm_end_offset; int stripe_no = -1; int i; 
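The next hunk introduces struct fiemap_state and fiemap_for_stripe(), which together replace the large inline per-stripe loop in lov_object_fiemap(). As a reading aid, here is a minimal, self-contained sketch of that control flow: a state structure drives a round-robin walk over the stripes starting at the stripe containing fm_start, and the walk stops once the helper reports that enough extents were collected or the last stripe was mapped. It is not part of the patch, and all toy_* names are illustrative stand-ins, not Lustre symbols.

#include <stdbool.h>
#include <stdio.h>

struct toy_state {
	int cur_extent;		/* extents collected so far */
	int max_extent;		/* caller-supplied budget */
	bool finish;		/* stop walking stripes */
	bool enough;		/* extent budget exhausted */
};

/* Pretend each stripe contributes one extent; flag completion conditions. */
static int toy_for_stripe(int stripe, int last_stripe, struct toy_state *st)
{
	st->cur_extent++;
	if (st->cur_extent >= st->max_extent)
		st->enough = true;
	if (st->enough || stripe == last_stripe)
		st->finish = true;
	return 0;
}

int main(void)
{
	const int stripe_count = 4, start_stripe = 2, last_stripe = 1;
	struct toy_state st = { .max_extent = 3 };
	int cur, left;

	/* Round-robin from start_stripe, mirroring the new loop in
	 * lov_object_fiemap() that advances cur_stripe modulo the
	 * stripe count and breaks when fs_finish is set. */
	for (cur = start_stripe, left = stripe_count; left > 0;
	     --left, cur = (cur + 1) % stripe_count) {
		if (toy_for_stripe(cur, last_stripe, &st) < 0)
			break;
		if (st.finish)
			break;
	}
	printf("stopped on stripe %d after %d extents\n", cur, st.cur_extent);
	return 0;
}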
@@ -1126,6 +1126,190 @@ static loff_t fiemap_calc_fm_end_offset(struct fiemap *fiemap, return fm_end_offset; } +struct fiemap_state { + struct fiemap *fs_fm; + u64 fs_start; + u64 fs_length; + u64 fs_end; + u64 fs_end_offset; + int fs_cur_extent; + int fs_cnt_need; + int fs_start_stripe; + int fs_last_stripe; + bool fs_device_done; + bool fs_finish; + bool fs_enough; +}; + +int fiemap_for_stripe(const struct lu_env *env, struct cl_object *obj, + struct lov_stripe_md *lsm, + struct fiemap *fiemap, size_t *buflen, + struct ll_fiemap_info_key *fmkey, int stripeno, + struct fiemap_state *fs) +{ + struct cl_object *subobj; + struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov; + struct fiemap_extent *fm_ext = &fs->fs_fm->fm_extents[0]; + u64 req_fm_len; /* Stores length of required mapping */ + u64 len_mapped_single_call; + u64 lun_start; + u64 lun_end; + u64 obd_object_end; + unsigned int ext_count; + /* EOF for object */ + bool ost_eof = false; + /* done with required mapping for this OST? */ + bool ost_done = false; + int ost_index; + int rc = 0; + + fs->fs_device_done = false; + /* Find out range of mapping on this stripe */ + if ((lov_stripe_intersects(lsm, stripeno, fs->fs_start, fs->fs_end, + &lun_start, &obd_object_end)) == 0) + return 0; + + if (lov_oinfo_is_dummy(lsm->lsm_oinfo[stripeno])) + return -EIO; + + /* If this is a continuation FIEMAP call and we are on + * starting stripe then lun_start needs to be set to + * end_offset */ + if (fs->fs_end_offset != 0 && stripeno == fs->fs_start_stripe) + lun_start = fs->fs_end_offset; + + lun_end = fs->fs_length; + if (lun_end != ~0ULL) { + /* Handle fs->fs_start + fs->fs_length overflow */ + if (fs->fs_start + fs->fs_length < fs->fs_start) + fs->fs_length = ~0ULL - fs->fs_start; + lun_end = lov_size_to_stripe(lsm, fs->fs_start + fs->fs_length, + stripeno); + } + + if (lun_start == lun_end) + return 0; + + req_fm_len = obd_object_end - lun_start; + fs->fs_fm->fm_length = 0; + len_mapped_single_call = 0; + + /* find lobsub object */ + subobj = lov_find_subobj(env, cl2lov(obj), lsm, stripeno); + if (IS_ERR(subobj)) + return PTR_ERR(subobj); + /* If the output buffer is very large and the objects have many + * extents we may need to loop on a single OST repeatedly */ + do { + if (fiemap->fm_extent_count > 0) { + /* Don't get too many extents. */ + if (fs->fs_cur_extent + fs->fs_cnt_need > + fiemap->fm_extent_count) + fs->fs_cnt_need = fiemap->fm_extent_count - + fs->fs_cur_extent; + } + + lun_start += len_mapped_single_call; + fs->fs_fm->fm_length = req_fm_len - len_mapped_single_call; + req_fm_len = fs->fs_fm->fm_length; + fs->fs_fm->fm_extent_count = fs->fs_enough ? + 1 : fs->fs_cnt_need; + fs->fs_fm->fm_mapped_extents = 0; + fs->fs_fm->fm_flags = fiemap->fm_flags; + + ost_index = lsm->lsm_oinfo[stripeno]->loi_ost_idx; + + if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count) { + rc = -EINVAL; + goto obj_put; + } + /* If OST is inactive, return extent with UNKNOWN flag. 
*/ + if (!lov->lov_tgts[ost_index]->ltd_active) { + fs->fs_fm->fm_flags |= FIEMAP_EXTENT_LAST; + fs->fs_fm->fm_mapped_extents = 1; + + fm_ext[0].fe_logical = lun_start; + fm_ext[0].fe_length = obd_object_end - lun_start; + fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN; + + goto inactive_tgt; + } + + fs->fs_fm->fm_start = lun_start; + fs->fs_fm->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER; + memcpy(&fmkey->lfik_fiemap, fs->fs_fm, sizeof(*fs->fs_fm)); + *buflen = fiemap_count_to_size(fs->fs_fm->fm_extent_count); + + rc = cl_object_fiemap(env, subobj, fmkey, fs->fs_fm, buflen); + if (rc) + goto obj_put; +inactive_tgt: + ext_count = fs->fs_fm->fm_mapped_extents; + if (ext_count == 0) { + ost_done = true; + fs->fs_device_done = true; + /* If last stripe has hold at the end, + * we need to return */ + if (stripeno == fs->fs_last_stripe) { + fiemap->fm_mapped_extents = 0; + fs->fs_finish = true; + goto obj_put; + } + break; + } else if (fs->fs_enough) { + /* + * We've collected enough extents and there are + * more extents after it. + */ + fs->fs_finish = true; + goto obj_put; + } + + /* If we just need num of extents, got to next device */ + if (fiemap->fm_extent_count == 0) { + fs->fs_cur_extent += ext_count; + break; + } + + /* prepare to copy retrived map extents */ + len_mapped_single_call = fm_ext[ext_count - 1].fe_logical + + fm_ext[ext_count - 1].fe_length - + lun_start; + + /* Have we finished mapping on this device? */ + if (req_fm_len <= len_mapped_single_call) { + ost_done = true; + fs->fs_device_done = true; + } + + /* Clear the EXTENT_LAST flag which can be present on + * the last extent */ + if (fm_ext[ext_count - 1].fe_flags & FIEMAP_EXTENT_LAST) + fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST; + if (lov_stripe_size(lsm, fm_ext[ext_count - 1].fe_logical + + fm_ext[ext_count - 1].fe_length, + stripeno) >= fmkey->lfik_oa.o_size) { + ost_eof = true; + fs->fs_device_done = true; + } + + fiemap_prepare_and_copy_exts(fiemap, fm_ext, ost_index, + ext_count, fs->fs_cur_extent); + fs->fs_cur_extent += ext_count; + + /* Ran out of available extents? */ + if (fs->fs_cur_extent >= fiemap->fm_extent_count) + fs->fs_enough = true; + } while (!ost_done && !ost_eof); + + if (stripeno == fs->fs_last_stripe) + fs->fs_finish = true; +obj_put: + cl_object_put(env, subobj); + + return rc; +} + /** * Break down the FIEMAP request and send appropriate calls to individual OSTs. * This also handles the restarting of FIEMAP calls in case mapping overflows @@ -1144,31 +1328,13 @@ static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj, struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap, size_t *buflen) { - struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov; unsigned int buffer_size = FIEMAP_BUFFER_SIZE; - struct fiemap_extent *lcl_fm_ext; - struct cl_object *subobj = NULL; struct fiemap *fm_local = NULL; struct lov_stripe_md *lsm; - loff_t fm_start; - loff_t fm_end; - loff_t fm_length; - loff_t fm_end_offset; - int count_local; - int ost_index = 0; - int start_stripe; - int current_extent = 0; int rc = 0; - int last_stripe; - int cur_stripe = 0; - int cur_stripe_wrap = 0; + int cur_stripe; int stripe_count; - /* Whether have we collected enough extents */ - bool enough = false; - /* EOF for object */ - bool ost_eof = false; - /* done with required mapping for this OST? 
*/ - bool ost_done = false; + struct fiemap_state fs = { 0 }; lsm = lov_lsm_addref(cl2lov(obj)); if (!lsm) @@ -1215,28 +1381,37 @@ static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj, rc = -ENOMEM; goto out; } - lcl_fm_ext = &fm_local->fm_extents[0]; - count_local = fiemap_size_to_count(buffer_size); + fs.fs_fm = fm_local; + fs.fs_cnt_need = fiemap_size_to_count(buffer_size); - fm_start = fiemap->fm_start; - fm_length = fiemap->fm_length; + fs.fs_start = fiemap->fm_start; + /* fs_start is beyond the end of the file */ + if (fs.fs_start > fmkey->lfik_oa.o_size) { + rc = -EINVAL; + goto out; + } /* Calculate start stripe, last stripe and length of mapping */ - start_stripe = lov_stripe_number(lsm, fm_start); - fm_end = (fm_length == ~0ULL) ? fmkey->lfik_oa.o_size : - fm_start + fm_length - 1; - /* If fm_length != ~0ULL but fm_start_fm_length-1 exceeds file size */ - if (fm_end > fmkey->lfik_oa.o_size) - fm_end = fmkey->lfik_oa.o_size; - - last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end, - start_stripe, &stripe_count); - fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end, - &start_stripe); - if (fm_end_offset == -EINVAL) { + fs.fs_start_stripe = lov_stripe_number(lsm, fs.fs_start); + fs.fs_end = (fs.fs_length == ~0ULL) ? fmkey->lfik_oa.o_size : + fs.fs_start + fs.fs_length - 1; + /* If fs_length != ~0ULL but fs_start+fs_length-1 exceeds file size */ + if (fs.fs_end > fmkey->lfik_oa.o_size) { + fs.fs_end = fmkey->lfik_oa.o_size; + fs.fs_length = fs.fs_end - fs.fs_start; + } + + fs.fs_last_stripe = fiemap_calc_last_stripe(lsm, fs.fs_start, fs.fs_end, + fs.fs_start_stripe, + &stripe_count); + fs.fs_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fs.fs_start, + fs.fs_end, + &fs.fs_start_stripe); + if (fs.fs_end_offset == -EINVAL) { rc = -EINVAL; goto out; } + /** * Requested extent count exceeds the fiemap buffer size, shrink our * ambition. 
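On the "shrink our ambition" step carried out in the following hunk: the requested fm_extent_count is clamped so the reply fits in the *buflen-byte buffer. Assuming fiemap_count_to_size() and fiemap_size_to_count() follow the usual FIEMAP layout (a struct fiemap header followed by an array of struct fiemap_extent), the arithmetic amounts to the self-contained sketch below; the count_to_size/size_to_count helpers and the 4096-byte buffer are illustrative assumptions, not the patch's code.

#include <linux/fiemap.h>
#include <stdio.h>

/* Assumed layout: header plus count extent records. */
static unsigned long long count_to_size(unsigned long long extent_count)
{
	return sizeof(struct fiemap) +
	       extent_count * sizeof(struct fiemap_extent);
}

static unsigned long long size_to_count(unsigned long long size)
{
	return (size - sizeof(struct fiemap)) / sizeof(struct fiemap_extent);
}

int main(void)
{
	unsigned long long buflen = 4096;	/* e.g. a one-page local buffer */
	unsigned long long want = 1000;		/* requested fm_extent_count */

	/* Same shape as: if (fiemap_count_to_size(count) > *buflen)
	 *                    count = fiemap_size_to_count(*buflen); */
	if (count_to_size(want) > buflen)
		want = size_to_count(buflen);

	printf("extents that fit in %llu bytes: %llu\n", buflen, want);
	return 0;
}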
@@ -1244,186 +1419,23 @@ static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj, if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen) fiemap->fm_extent_count = fiemap_size_to_count(*buflen); if (!fiemap->fm_extent_count) - count_local = 0; + fs.fs_cnt_need = 0; + + fs.fs_finish = false; + fs.fs_enough = false; + fs.fs_cur_extent = 0; /* Check each stripe */ - for (cur_stripe = start_stripe; stripe_count > 0; + for (cur_stripe = fs.fs_start_stripe; stripe_count > 0; --stripe_count, cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) { - loff_t req_fm_len; /* Stores length of required mapping */ - loff_t len_mapped_single_call; - loff_t lun_start; - loff_t lun_end; - loff_t obd_object_end; - unsigned int ext_count; - - cur_stripe_wrap = cur_stripe; - - /* Find out range of mapping on this stripe */ - if (!(lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end, - &lun_start, &obd_object_end))) - continue; - - if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) { - rc = -EIO; - goto out; - } - - /* - * If this is a continuation FIEMAP call and we are on - * starting stripe then lun_start needs to be set to - * fm_end_offset - */ - if (fm_end_offset && cur_stripe == start_stripe) - lun_start = fm_end_offset; - - if (fm_length != ~0ULL) { - /* Handle fm_start + fm_length overflow */ - if (fm_start + fm_length < fm_start) - fm_length = ~0ULL - fm_start; - lun_end = lov_size_to_stripe(lsm, fm_start + fm_length, - cur_stripe); - } else { - lun_end = ~0ULL; - } - - if (lun_start == lun_end) - continue; - - req_fm_len = obd_object_end - lun_start; - fm_local->fm_length = 0; - len_mapped_single_call = 0; - - /* find lobsub object */ - subobj = lov_find_subobj(env, cl2lov(obj), lsm, - cur_stripe); - if (IS_ERR(subobj)) { - rc = PTR_ERR(subobj); + rc = fiemap_for_stripe(env, obj, lsm, fiemap, buflen, fmkey, + cur_stripe, &fs); + if (rc < 0) goto out; - } - /* - * If the output buffer is very large and the objects have many - * extents we may need to loop on a single OST repeatedly - */ - ost_eof = false; - ost_done = false; - do { - if (fiemap->fm_extent_count > 0) { - /* Don't get too many extents. */ - if (current_extent + count_local > - fiemap->fm_extent_count) - count_local = fiemap->fm_extent_count - - current_extent; - } - - lun_start += len_mapped_single_call; - fm_local->fm_length = req_fm_len - - len_mapped_single_call; - req_fm_len = fm_local->fm_length; - fm_local->fm_extent_count = enough ? 1 : count_local; - fm_local->fm_mapped_extents = 0; - fm_local->fm_flags = fiemap->fm_flags; - - ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx; - - if (ost_index < 0 || - ost_index >= lov->desc.ld_tgt_count) { - rc = -EINVAL; - goto obj_put; - } - /* - * If OST is inactive, return extent with UNKNOWN - * flag. 
- */ - if (!lov->lov_tgts[ost_index]->ltd_active) { - fm_local->fm_flags |= FIEMAP_EXTENT_LAST; - fm_local->fm_mapped_extents = 1; - - lcl_fm_ext[0].fe_logical = lun_start; - lcl_fm_ext[0].fe_length = obd_object_end - - lun_start; - lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN; - - goto inactive_tgt; - } - - fm_local->fm_start = lun_start; - fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER; - memcpy(&fmkey->lfik_fiemap, fm_local, sizeof(*fm_local)); - *buflen = fiemap_count_to_size(fm_local->fm_extent_count); - - rc = cl_object_fiemap(env, subobj, fmkey, fm_local, - buflen); - if (rc) - goto obj_put; -inactive_tgt: - ext_count = fm_local->fm_mapped_extents; - if (!ext_count) { - ost_done = true; - /* - * If last stripe has hold at the end, - * we need to return - */ - if (cur_stripe_wrap == last_stripe) { - fiemap->fm_mapped_extents = 0; - goto finish; - } - break; - } else if (enough) { - /* - * We've collected enough extents and there are - * more extents after it. - */ - goto finish; - } - - /* If we just need num of extents, got to next device */ - if (!fiemap->fm_extent_count) { - current_extent += ext_count; - break; - } - - /* prepare to copy retrived map extents */ - len_mapped_single_call = - lcl_fm_ext[ext_count - 1].fe_logical - - lun_start + lcl_fm_ext[ext_count - 1].fe_length; - - /* Have we finished mapping on this device? */ - if (req_fm_len <= len_mapped_single_call) - ost_done = true; - - /* - * Clear the EXTENT_LAST flag which can be present on - * the last extent - */ - if (lcl_fm_ext[ext_count - 1].fe_flags & - FIEMAP_EXTENT_LAST) - lcl_fm_ext[ext_count - 1].fe_flags &= - ~FIEMAP_EXTENT_LAST; - - if (lov_stripe_size(lsm, - lcl_fm_ext[ext_count - 1].fe_logical + - lcl_fm_ext[ext_count - 1].fe_length, - cur_stripe) >= fmkey->lfik_oa.o_size) - ost_eof = true; - - fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext, - ost_index, ext_count, - current_extent); - current_extent += ext_count; - - /* Ran out of available extents? */ - if (current_extent >= fiemap->fm_extent_count) - enough = true; - } while (!ost_done && !ost_eof); - - cl_object_put(env, subobj); - subobj = NULL; - - if (cur_stripe_wrap == last_stripe) - goto finish; + if (fs.fs_finish) + break; } /* for each stripe */ -finish: /* * Indicate that we are returning device offsets unless file just has * single stripe @@ -1438,14 +1450,11 @@ finish: * Check if we have reached the last stripe and whether mapping for that * stripe is done. 
*/ - if ((cur_stripe_wrap == last_stripe) && (ost_done || ost_eof)) - fiemap->fm_extents[current_extent - 1].fe_flags |= + if ((cur_stripe == fs.fs_last_stripe) && fs.fs_device_done) + fiemap->fm_extents[fs.fs_cur_extent - 1].fe_flags |= FIEMAP_EXTENT_LAST; skip_last_device_calc: - fiemap->fm_mapped_extents = current_extent; -obj_put: - if (subobj) - cl_object_put(env, subobj); + fiemap->fm_mapped_extents = fs.fs_cur_extent; out: kvfree(fm_local); lov_lsm_put(lsm); diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c index 62ceb6dfdfdf..de43c609cf3d 100644 --- a/drivers/staging/lustre/lustre/lov/lov_page.c +++ b/drivers/staging/lustre/lustre/lov/lov_page.c @@ -100,7 +100,6 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, break; } } - lov_sub_put(sub); return rc; } diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c index 3a747913fb4f..70636e5bb3ed 100644 --- a/drivers/staging/lustre/lustre/lov/lov_request.c +++ b/drivers/staging/lustre/lustre/lov/lov_request.c @@ -43,13 +43,10 @@ static void lov_init_set(struct lov_request_set *set) set->set_count = 0; atomic_set(&set->set_completes, 0); atomic_set(&set->set_success, 0); - atomic_set(&set->set_finish_checked, 0); INIT_LIST_HEAD(&set->set_list); - atomic_set(&set->set_refcount, 1); - init_waitqueue_head(&set->set_waitq); } -void lov_finish_set(struct lov_request_set *set) +static void lov_finish_set(struct lov_request_set *set) { struct list_head *pos, *n; @@ -66,32 +63,12 @@ void lov_finish_set(struct lov_request_set *set) kfree(set); } -static int lov_set_finished(struct lov_request_set *set, int idempotent) -{ - int completes = atomic_read(&set->set_completes); - - CDEBUG(D_INFO, "check set %d/%d\n", completes, set->set_count); - - if (completes == set->set_count) { - if (idempotent) - return 1; - if (atomic_inc_return(&set->set_finish_checked) == 1) - return 1; - } - return 0; -} - static void lov_update_set(struct lov_request_set *set, struct lov_request *req, int rc) { - req->rq_complete = 1; - req->rq_rc = rc; - atomic_inc(&set->set_completes); if (rc == 0) atomic_inc(&set->set_success); - - wake_up(&set->set_waitq); } static void lov_set_add_req(struct lov_request *req, @@ -173,8 +150,8 @@ out: (tot) += (add); \ } while (0) -int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, - int success) +static int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, + int success) { if (success) { __u32 expected_stripes = lov_get_stripecnt(&obd->u.lov, @@ -205,7 +182,9 @@ int lov_fini_statfs_set(struct lov_request_set *set) rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs, atomic_read(&set->set_success)); } - lov_put_reqset(set); + + lov_finish_set(set); + return rc; } @@ -307,14 +286,7 @@ static int cb_statfs_update(void *cookie, int rc) out_update: lov_update_statfs(osfs, lov_sfs, success); obd_putref(lovobd); - out: - if (set->set_oi->oi_flags & OBD_STATFS_PTLRPCD && - lov_set_finished(set, 0)) { - lov_statfs_interpret(NULL, set, set->set_count != - atomic_read(&set->set_success)); - } - return 0; } diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c index 5d6536f8a4f7..d4646a0949d2 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c @@ -77,7 +77,6 @@ static struct lu_device *lovsub_device_fini(const struct lu_env *env, lsd = lu2lovsub_dev(d); next = 
cl2lu_dev(lsd->acid_next); - lsd->acid_super = NULL; lsd->acid_next = NULL; return next; } diff --git a/drivers/staging/lustre/lustre/lov/lovsub_io.c b/drivers/staging/lustre/lustre/lov/lovsub_io.c deleted file mode 100644 index 6a9820218a3e..000000000000 --- a/drivers/staging/lustre/lustre/lov/lovsub_io.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.gnu.org/licenses/gpl-2.0.html - * - * GPL HEADER END - */ -/* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Implementation of cl_io for LOVSUB layer. - * - * Author: Nikita Danilov <nikita.danilov@sun.com> - */ - -#define DEBUG_SUBSYSTEM S_LOV - -#include "lov_cl_internal.h" - -/** \addtogroup lov - * @{ - */ - -/***************************************************************************** - * - * Lovsub io operations. - * - */ - -/* All trivial */ - -/** @} lov */ diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c index 38f9b735c241..d29f0bb33980 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c @@ -54,7 +54,6 @@ static void lovsub_lock_fini(const struct lu_env *env, struct lovsub_lock *lsl; lsl = cl2lovsub_lock(slice); - LASSERT(list_empty(&lsl->lss_parents)); kmem_cache_free(lovsub_lock_kmem, lsl); } @@ -70,7 +69,6 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, lsk = kmem_cache_zalloc(lovsub_lock_kmem, GFP_NOFS); if (lsk) { - INIT_LIST_HEAD(&lsk->lss_parents); cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); result = 0; } else { diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c index eb6d30d34e3a..ce4682120ffb 100644 --- a/drivers/staging/lustre/lustre/lov/lproc_lov.c +++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c @@ -279,7 +279,7 @@ static struct attribute *lov_attrs[] = { NULL, }; -static struct attribute_group lov_attr_group = { +static const struct attribute_group lov_attr_group = { .attrs = lov_attrs, }; diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c index 9021c465c044..51a7047ff209 100644 --- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c +++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c @@ -57,7 +57,7 @@ static ssize_t active_store(struct kobject *kobj, struct attribute *attr, if (rc) return rc; - if (val < 0 || val > 1) + if (val > 1) return -ERANGE; /* opposite senses */ @@ -219,7 +219,7 @@ static struct attribute *mdc_attrs[] = { NULL, }; -static struct attribute_group mdc_attr_group = { +static const struct 
attribute_group mdc_attr_group = { .attrs = mdc_attrs, }; diff --git a/drivers/staging/lustre/lustre/obdclass/linkea.c b/drivers/staging/lustre/lustre/obdclass/linkea.c index 0b1d2f0a422c..cf3ad04ca0ab 100644 --- a/drivers/staging/lustre/lustre/obdclass/linkea.c +++ b/drivers/staging/lustre/lustre/obdclass/linkea.c @@ -39,6 +39,8 @@ int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf) ldata->ld_leh->leh_magic = LINK_EA_MAGIC; ldata->ld_leh->leh_len = sizeof(struct link_ea_header); ldata->ld_leh->leh_reccount = 0; + ldata->ld_leh->leh_overflow_time = 0; + ldata->ld_leh->leh_padding = 0; return 0; } EXPORT_SYMBOL(linkea_data_new); @@ -53,11 +55,15 @@ int linkea_init(struct linkea_data *ldata) leh->leh_magic = LINK_EA_MAGIC; leh->leh_reccount = __swab32(leh->leh_reccount); leh->leh_len = __swab64(leh->leh_len); - /* entries are swabbed by linkea_entry_unpack */ + leh->leh_overflow_time = __swab32(leh->leh_overflow_time); + leh->leh_padding = __swab32(leh->leh_padding); + /* individual entries are swabbed by linkea_entry_unpack() */ } + if (leh->leh_magic != LINK_EA_MAGIC) return -EINVAL; - if (leh->leh_reccount == 0) + + if (leh->leh_reccount == 0 && leh->leh_overflow_time == 0) return -ENODATA; ldata->ld_leh = leh; @@ -65,6 +71,18 @@ int linkea_init(struct linkea_data *ldata) } EXPORT_SYMBOL(linkea_init); +int linkea_init_with_rec(struct linkea_data *ldata) +{ + int rc; + + rc = linkea_init(ldata); + if (!rc && ldata->ld_leh->leh_reccount == 0) + rc = -ENODATA; + + return rc; +} +EXPORT_SYMBOL(linkea_init_with_rec); + /** * Pack a link_ea_entry. * All elements are stored as chars to avoid alignment issues. @@ -94,6 +112,8 @@ EXPORT_SYMBOL(linkea_entry_pack); void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen, struct lu_name *lname, struct lu_fid *pfid) { + LASSERT(lee); + *reclen = (lee->lee_reclen[0] << 8) | lee->lee_reclen[1]; memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid)); fid_be_to_cpu(pfid, pfid); @@ -110,25 +130,44 @@ EXPORT_SYMBOL(linkea_entry_unpack); int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname, const struct lu_fid *pfid) { - LASSERT(ldata->ld_leh); + struct link_ea_header *leh = ldata->ld_leh; + int reclen; + + LASSERT(leh); if (!lname || !pfid) return -EINVAL; - ldata->ld_reclen = lname->ln_namelen + sizeof(struct link_ea_entry); - if (ldata->ld_leh->leh_len + ldata->ld_reclen > - ldata->ld_buf->lb_len) { + reclen = lname->ln_namelen + sizeof(struct link_ea_entry); + if (unlikely(leh->leh_len + reclen > MAX_LINKEA_SIZE)) { + /* + * Use 32-bits to save the overflow time, although it will + * shrink the ktime_get_real_seconds() returned 64-bits value + * to 32-bits value, it is still quite large and can be used + * for about 140 years. That is enough. 
+ */ + leh->leh_overflow_time = ktime_get_real_seconds(); + if (unlikely(leh->leh_overflow_time == 0)) + leh->leh_overflow_time++; + + CDEBUG(D_INODE, "No enough space to hold linkea entry '" DFID ": %.*s' at %u\n", + PFID(pfid), lname->ln_namelen, + lname->ln_name, leh->leh_overflow_time); + return 0; + } + + if (leh->leh_len + reclen > ldata->ld_buf->lb_len) { if (lu_buf_check_and_grow(ldata->ld_buf, - ldata->ld_leh->leh_len + - ldata->ld_reclen) < 0) + leh->leh_len + reclen) < 0) return -ENOMEM; + + leh = ldata->ld_leh = ldata->ld_buf->lb_buf; } - ldata->ld_leh = ldata->ld_buf->lb_buf; - ldata->ld_lee = ldata->ld_buf->lb_buf + ldata->ld_leh->leh_len; + ldata->ld_lee = ldata->ld_buf->lb_buf + leh->leh_len; ldata->ld_reclen = linkea_entry_pack(ldata->ld_lee, lname, pfid); - ldata->ld_leh->leh_len += ldata->ld_reclen; - ldata->ld_leh->leh_reccount++; + leh->leh_len += ldata->ld_reclen; + leh->leh_reccount++; CDEBUG(D_INODE, "New link_ea name '" DFID ":%.*s' is added\n", PFID(pfid), lname->ln_namelen, lname->ln_name); return 0; @@ -139,6 +178,7 @@ EXPORT_SYMBOL(linkea_add_buf); void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname) { LASSERT(ldata->ld_leh && ldata->ld_lee); + LASSERT(ldata->ld_leh->leh_reccount > 0); ldata->ld_leh->leh_reccount--; ldata->ld_leh->leh_len -= ldata->ld_reclen; @@ -174,8 +214,9 @@ int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname, LASSERT(ldata->ld_leh); - /* link #0 */ - ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1); + /* link #0, if leh_reccount == 0 we skip the loop and return -ENOENT */ + if (likely(ldata->ld_leh->leh_reccount > 0)) + ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1); for (count = 0; count < ldata->ld_leh->leh_reccount; count++) { linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen, diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c index 9f5e8299d7e4..eb88bd9c9b5e 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c @@ -405,7 +405,7 @@ static const struct file_operations obd_device_list_fops = { struct kobject *lustre_kobj; EXPORT_SYMBOL_GPL(lustre_kobj); -static struct attribute_group lustre_attr_group = { +static const struct attribute_group lustre_attr_group = { .attrs = lustre_attrs, }; diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c index e6c785afceba..814334b2c297 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c @@ -151,7 +151,7 @@ static struct attribute *lustre_attrs[] = { NULL, }; -static struct attribute_group lustre_attr_group = { +static const struct attribute_group lustre_attr_group = { .attrs = lustre_attrs, }; diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c index bc19f19d38d9..ba41983e85a9 100644 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c +++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c @@ -1031,7 +1031,7 @@ static struct kobj_type obd_ktype = { }; int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list, - struct attribute_group *attrs) + const struct attribute_group *attrs) { int rc = 0; diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c 
b/drivers/staging/lustre/lustre/osc/lproc_osc.c index 86f252d6adbd..6e0fd155e6b8 100644 --- a/drivers/staging/lustre/lustre/osc/lproc_osc.c +++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c @@ -831,7 +831,7 @@ static struct attribute *osc_attrs[] = { NULL, }; -static struct attribute_group osc_attr_group = { +static const struct attribute_group osc_attr_group = { .attrs = osc_attrs, }; diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index d8a95f8fe1ff..e1207c227b79 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -783,6 +783,7 @@ restart: /* pull ext's start back to cover cur */ ext->oe_start = cur->oe_start; ext->oe_grants += chunksize; + LASSERT(*grants >= chunksize); *grants -= chunksize; found = osc_extent_hold(ext); @@ -790,6 +791,7 @@ restart: /* rear merge */ ext->oe_end = cur->oe_end; ext->oe_grants += chunksize; + LASSERT(*grants >= chunksize); *grants -= chunksize; /* try to merge with the next one because we just fill @@ -819,8 +821,8 @@ restart: /* create a new extent */ EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur); cur->oe_grants = chunksize + cli->cl_extent_tax; + LASSERT(*grants >= cur->oe_grants); *grants -= cur->oe_grants; - LASSERT(*grants >= 0); cur->oe_state = OES_CACHE; found = osc_extent_hold(cur); @@ -849,7 +851,6 @@ restart: out: osc_extent_put(env, cur); - LASSERT(*grants >= 0); return found; } @@ -1219,8 +1220,8 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, ext->oe_end = end_index; ext->oe_grants += chunksize; + LASSERT(*grants >= chunksize); *grants -= chunksize; - LASSERT(*grants >= 0); EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext, "overlapped after expanding for %lu.\n", index); @@ -1887,6 +1888,7 @@ struct extent_rpc_data { unsigned int erd_page_count; unsigned int erd_max_pages; unsigned int erd_max_chunks; + unsigned int erd_max_extents; }; static inline unsigned int osc_extent_chunks(const struct osc_extent *ext) @@ -1915,11 +1917,23 @@ static int try_to_add_extent_for_io(struct client_obd *cli, EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE), ext); + if (!data->erd_max_extents) + return 0; + chunk_count = osc_extent_chunks(ext); + EASSERTF(data->erd_page_count != 0 || + chunk_count <= data->erd_max_chunks, ext, + "The first extent to be fit in a RPC contains %u chunks, which is over the limit %u.\n", + chunk_count, data->erd_max_chunks); + if (chunk_count > data->erd_max_chunks) return 0; data->erd_max_pages = max(ext->oe_mppr, data->erd_max_pages); + EASSERTF(data->erd_page_count != 0 || + ext->oe_nr_pages <= data->erd_max_pages, ext, + "The first extent to be fit in a RPC contains %u pages, which is over the limit %u.\n", + ext->oe_nr_pages, data->erd_max_pages); if (data->erd_page_count + ext->oe_nr_pages > data->erd_max_pages) return 0; @@ -1943,6 +1957,7 @@ static int try_to_add_extent_for_io(struct client_obd *cli, break; } + data->erd_max_extents--; data->erd_max_chunks -= chunk_count; data->erd_page_count += ext->oe_nr_pages; list_move_tail(&ext->oe_link, data->erd_rpc_list); @@ -1972,10 +1987,12 @@ static inline unsigned int osc_max_write_chunks(const struct client_obd *cli) * * This limitation doesn't apply to ldiskfs, which allows as many * chunks in one RPC as we want. However, it won't have any benefits - * to have too many discontiguous pages in one RPC. Therefore, it - * can only have 256 chunks at most in one RPC. 
+ * to have too many discontiguous pages in one RPC. + * + * An osc_extent won't cover over a RPC size, so the chunks in an + * osc_extent won't bigger than PTLRPC_MAX_BRW_SIZE >> chunkbits. */ - return min(PTLRPC_MAX_BRW_SIZE >> cli->cl_chunkbits, 256); + return PTLRPC_MAX_BRW_SIZE >> cli->cl_chunkbits; } /** @@ -2002,6 +2019,7 @@ static unsigned int get_write_extents(struct osc_object *obj, .erd_page_count = 0, .erd_max_pages = cli->cl_max_pages_per_rpc, .erd_max_chunks = osc_max_write_chunks(cli), + .erd_max_extents = 256, }; LASSERT(osc_object_is_locked(obj)); @@ -2140,6 +2158,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, .erd_page_count = 0, .erd_max_pages = cli->cl_max_pages_per_rpc, .erd_max_chunks = UINT_MAX, + .erd_max_extents = UINT_MAX, }; int rc = 0; diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c index 1c7779215eed..977d7a6bb99b 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/client.c @@ -367,9 +367,8 @@ void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, */ CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ? D_ADAPTTO : D_WARNING, - "Reported service time %u > total measured time " CFS_DURATION_T "\n", - service_time, - (long)(now - req->rq_sent)); + "Reported service time %u > total measured time %lld\n", + service_time, now - req->rq_sent); return; } @@ -742,7 +741,7 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, /* Let's setup deadline for req/reply/bulk unlink for opcode. */ if (cfs_fail_val == opcode) { - time_t *fail_t = NULL, *fail2_t = NULL; + time64_t *fail_t = NULL, *fail2_t = NULL; if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) { fail_t = &request->rq_bulk_deadline; @@ -3116,13 +3115,20 @@ void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req) LASSERT(bd); - if (!req->rq_resend) { - /* this request has a new xid, just use it as bulk matchbits */ - req->rq_mbits = req->rq_xid; - - } else { /* needs to generate a new matchbits for resend */ + /* + * Generate new matchbits for all resend requests, including + * resend replay. + */ + if (req->rq_resend) { u64 old_mbits = req->rq_mbits; + /* + * First time resend on -EINPROGRESS will generate new xid, + * so we can actually use the rq_xid as rq_mbits in such case, + * however, it's bit hard to distinguish such resend with a + * 'resend for the -EINPROGRESS resend'. To make it simple, + * we opt to generate mbits for all resend cases. + */ if ((bd->bd_import->imp_connect_data.ocd_connect_flags & OBD_CONNECT_BULK_MBITS)) { req->rq_mbits = ptlrpc_next_xid(); @@ -3131,12 +3137,21 @@ void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req) spin_lock(&req->rq_import->imp_lock); list_del_init(&req->rq_unreplied_list); ptlrpc_assign_next_xid_nolock(req); - req->rq_mbits = req->rq_xid; spin_unlock(&req->rq_import->imp_lock); + req->rq_mbits = req->rq_xid; } CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n", old_mbits, req->rq_mbits); + } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) { + /* Request being sent first time, use xid as matchbits. */ + req->rq_mbits = req->rq_xid; + } else { + /* + * Replay request, xid and matchbits have already been + * correctly assigned. 
+ */ + return; } /* diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c index 52cb1f0c9c94..b19dac15e901 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/import.c +++ b/drivers/staging/lustre/lustre/ptlrpc/import.c @@ -1026,7 +1026,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, /* check that server granted subset of flags we asked for. */ if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) != ocd->ocd_connect_flags) { - CERROR("%s: Server didn't granted asked subset of flags: asked=%#llx grranted=%#llx\n", + CERROR("%s: Server didn't grant the asked for subset of flags: asked=%#llx granted=%#llx\n", imp->imp_obd->obd_name, imp->imp_connect_flags_orig, ocd->ocd_connect_flags); rc = -EPROTO; diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c index f87478180013..c4296130c11c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c @@ -905,11 +905,18 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq); if (rc == 0) { + struct timespec64 arrival, sent, arrivaldiff; char nidstr[LNET_NIDSTR_SIZE]; req = srhi->srhi_req; libcfs_nid2str_r(req->rq_self, nidstr, sizeof(nidstr)); + arrival.tv_sec = req->rq_arrival_time.tv_sec; + arrival.tv_nsec = req->rq_arrival_time.tv_nsec; + sent.tv_sec = req->rq_sent; + sent.tv_nsec = 0; + arrivaldiff = timespec64_sub(sent, arrival); + /* Print common req fields. * CAVEAT EMPTOR: we're racing with the service handler * here. The request could contain any old crap, so you @@ -917,13 +924,15 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) * parser. Currently I only print stuff here I know is OK * to look at coz it was set up in request_in_callback()!!! 
*/ - seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ", + seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld.%06lld:%lld.%06llds(%+lld.0s) ", req->rq_history_seq, nidstr, libcfs_id2str(req->rq_peer), req->rq_xid, req->rq_reqlen, ptlrpc_rqphase2str(req), (s64)req->rq_arrival_time.tv_sec, - (long)(req->rq_sent - req->rq_arrival_time.tv_sec), - (long)(req->rq_sent - req->rq_deadline)); + (s64)req->rq_arrival_time.tv_nsec / NSEC_PER_USEC, + (s64)arrivaldiff.tv_sec, + (s64)(arrivaldiff.tv_nsec / NSEC_PER_USEC), + (s64)(req->rq_sent - req->rq_deadline)); if (!svc->srv_ops.so_req_printer) seq_putc(s, '\n'); else diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c index 8ffd000eafac..026bec7539fd 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c @@ -66,7 +66,7 @@ void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec) sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval; spin_lock(&sec_gc_list_lock); - list_add_tail(&sec_gc_list, &sec->ps_gc_list); + list_add_tail(&sec->ps_gc_list, &sec_gc_list); spin_unlock(&sec_gc_list_lock); CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name); diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index 759aa6c16e28..bcf5fafe476d 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -1565,9 +1565,9 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt, /* req_in handling should/must be fast */ if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5) - DEBUG_REQ(D_WARNING, req, "Slow req_in handling " CFS_DURATION_T "s", - (long)(ktime_get_real_seconds() - - req->rq_arrival_time.tv_sec)); + DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds", + (s64)(ktime_get_real_seconds() - + req->rq_arrival_time.tv_sec)); /* Set rpc server deadline and add it to the timed list */ deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) & @@ -1674,12 +1674,11 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, * The deadline is increased if we send an early reply. 
*/ if (ktime_get_real_seconds() > request->rq_deadline) { - DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n", + DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n", libcfs_id2str(request->rq_peer), - (long)(request->rq_deadline - - request->rq_arrival_time.tv_sec), - (long)(ktime_get_real_seconds() - - request->rq_deadline)); + request->rq_deadline - + request->rq_arrival_time.tv_sec, + ktime_get_real_seconds() - request->rq_deadline); goto put_conn; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c index 367f7e24e3da..311c52624430 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c +++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c @@ -3820,14 +3820,14 @@ void lustre_assert_wire_constants(void) (long long)(int)offsetof(struct link_ea_header, leh_len)); LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_len) == 8, "found %lld\n", (long long)(int)sizeof(((struct link_ea_header *)0)->leh_len)); - LASSERTF((int)offsetof(struct link_ea_header, padding1) == 16, "found %lld\n", - (long long)(int)offsetof(struct link_ea_header, padding1)); - LASSERTF((int)sizeof(((struct link_ea_header *)0)->padding1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_header *)0)->padding1)); - LASSERTF((int)offsetof(struct link_ea_header, padding2) == 20, "found %lld\n", - (long long)(int)offsetof(struct link_ea_header, padding2)); - LASSERTF((int)sizeof(((struct link_ea_header *)0)->padding2) == 4, "found %lld\n", - (long long)(int)sizeof(((struct link_ea_header *)0)->padding2)); + LASSERTF((int)offsetof(struct link_ea_header, leh_overflow_time) == 16, "found %lld\n", + (long long)(int)offsetof(struct link_ea_header, leh_overflow_time)); + LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_overflow_time) == 4, "found %lld\n", + (long long)(int)sizeof(((struct link_ea_header *)0)->leh_overflow_time)); + LASSERTF((int)offsetof(struct link_ea_header, leh_padding) == 20, "found %lld\n", + (long long)(int)offsetof(struct link_ea_header, leh_padding)); + LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_padding) == 4, "found %lld\n", + (long long)(int)sizeof(((struct link_ea_header *)0)->leh_padding)); BUILD_BUG_ON(LINK_EA_MAGIC != 0x11EAF1DFUL); /* Checks for struct link_ea_entry */ diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.c b/drivers/staging/media/atomisp/i2c/imx/imx.c index 408a7b945153..fb32cb2f2dd1 100644 --- a/drivers/staging/media/atomisp/i2c/imx/imx.c +++ b/drivers/staging/media/atomisp/i2c/imx/imx.c @@ -1084,7 +1084,7 @@ static int imx_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val) return 0; } -int imx_vcm_power_up(struct v4l2_subdev *sd) +static int imx_vcm_power_up(struct v4l2_subdev *sd) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->power_up) @@ -1092,7 +1092,7 @@ int imx_vcm_power_up(struct v4l2_subdev *sd) return 0; } -int imx_vcm_power_down(struct v4l2_subdev *sd) +static int imx_vcm_power_down(struct v4l2_subdev *sd) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->power_down) @@ -1100,7 +1100,7 @@ int imx_vcm_power_down(struct v4l2_subdev *sd) return 0; } -int imx_vcm_init(struct v4l2_subdev *sd) +static int imx_vcm_init(struct v4l2_subdev *sd) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->init) @@ -1108,7 +1108,7 @@ int imx_vcm_init(struct v4l2_subdev *sd) 
return 0; } -int imx_t_focus_vcm(struct v4l2_subdev *sd, u16 val) +static int imx_t_focus_vcm(struct v4l2_subdev *sd, u16 val) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_focus_vcm) @@ -1116,14 +1116,15 @@ int imx_t_focus_vcm(struct v4l2_subdev *sd, u16 val) return 0; } -int imx_t_focus_abs(struct v4l2_subdev *sd, s32 value) +static int imx_t_focus_abs(struct v4l2_subdev *sd, s32 value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_focus_abs) return dev->vcm_driver->t_focus_abs(sd, value); return 0; } -int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value) + +static int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_focus_rel) @@ -1131,7 +1132,7 @@ int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value) return 0; } -int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value) +static int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->q_focus_status) @@ -1139,7 +1140,7 @@ int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value) return 0; } -int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value) +static int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->q_focus_abs) @@ -1147,7 +1148,7 @@ int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value) return 0; } -int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value) +static int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_vcm_slew) @@ -1155,7 +1156,7 @@ int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value) return 0; } -int imx_t_vcm_timing(struct v4l2_subdev *sd, s32 value) +static int imx_t_vcm_timing(struct v4l2_subdev *sd, s32 value) { struct imx_device *dev = to_imx_sensor(sd); if (dev->vcm_driver && dev->vcm_driver->t_vcm_timing) @@ -2105,8 +2106,7 @@ imx_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param) return 0; } -int -imx_g_frame_interval(struct v4l2_subdev *sd, +static int imx_g_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_frame_interval *interval) { struct imx_device *dev = to_imx_sensor(sd); diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index c1feccf8d94a..4ff8f47385da 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c @@ -831,7 +831,7 @@ static int tegra_nvec_probe(struct platform_device *pdev) return -ENODEV; } - nvec->rst = devm_reset_control_get(&pdev->dev, "i2c"); + nvec->rst = devm_reset_control_get_exclusive(&pdev->dev, "i2c"); if (IS_ERR(nvec->rst)) { dev_err(nvec->dev, "failed to get controller reset\n"); return PTR_ERR(nvec->rst); diff --git a/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts new file mode 100644 index 000000000000..004b5027a934 --- /dev/null +++ b/drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts @@ -0,0 +1,53 @@ +// Definitions for Pi433 +/dts-v1/; +/plugin/; + +/ { + compatible = "bcm,bcm2835", "bcm,bcm2708", "bcm,bcm2709"; + + fragment@0 { + target = <&spi0>; + __overlay__ { + status = "okay"; + + spidev@0{ + status = "disabled"; + }; + + spidev@1{ + status = "disabled"; + }; + }; + }; + + fragment@1 { + target = <&gpio>; + __overlay__ { + pi433_pins: pi433_pins { + brcm,pins = <7 25 24>; + 
brcm,function = <0 0 0>; // in in in
+            };
+        };
+    };
+
+    fragment@2 {
+        target = <&spi0>;
+        __overlay__ {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            status = "okay";
+
+            pi433: pi433@0 {
+                compatible = "Smarthome-Wolf,pi433";
+                reg = <0>;
+                spi-max-frequency = <10000000>;
+                status = "okay";
+
+                pinctrl-0 = <&pi433_pins>;
+                DIO0-gpio = <&gpio 24 0>;
+                DIO1-gpio = <&gpio 25 0>;
+                DIO2-gpio = <&gpio 7 0>;
+            };
+        };
+    };
+};
diff --git a/drivers/staging/pi433/Documentation/devicetree/pi433.txt b/drivers/staging/pi433/Documentation/devicetree/pi433.txt new file mode 100644 index 000000000000..9ff217fbcbbd --- /dev/null +++ b/drivers/staging/pi433/Documentation/devicetree/pi433.txt @@ -0,0 +1,62 @@
+* Smarthome-Wolf Pi433 - a 433MHz radio module/shield for Raspberry Pi (see www.pi433.de)
+
+Required properties:
+- compatible: must be "Smarthome-Wolf,pi433"
+- reg: chip select of the SPI interface
+- DIOx-gpio: must be dedicated to the GPIO connected with DIOx of the RFM69 module
+
+
+Example:
+
+With the following lines in the gpio section, the gpio pins connected with
+pi433 are reserved/declared.
+
+&gpio{
+    [...]
+
+    pi433_pins: pi433_pins {
+        brcm,pins = <7 25 24>;
+        brcm,function = <0 0 0>; // in in in
+    };
+
+    [...]
+}
+
+With the following lines in the spi section, the device pi433 is declared.
+It consists of the three gpio pins and an spi interface (here chip select 0).
+
+&spi0{
+    [...]
+
+    pi433: pi433@0 {
+        compatible = "Smarthome-Wolf,pi433";
+        reg = <0>; /* CE 0 */
+        #address-cells = <1>;
+        #size-cells = <0>;
+        spi-max-frequency = <10000000>;
+
+        pinctrl-0 = <&pi433_pins>;
+        DIO0-gpio = <&gpio 24 0>;
+        DIO1-gpio = <&gpio 25 0>;
+        DIO2-gpio = <&gpio 7 0>;
+    };
+}
+
+
+
+For Raspbian users only
+=======================
+Since Raspbian supports device tree overlays, you may use an overlay instead
+of editing your board's device tree.
+To use the overlay, you need to compile the file pi433-overlay.dts, which can
+be found alongside this documentation.
+The file needs to be compiled either manually or by integrating it into your
+kernel source tree. For a manual compile, you may use a command line like the
+following:
+'linux/scripts/dtc/dtc -@ -I dts -O dtb -o pi433.dtbo pi433-overlay.dts'
+
+For compiling inside the kernel tree, copy pi433-overlay.dts to
+arch/arm/boot/dts/overlays and add the file to the list of files in the
+Makefile there. Then execute 'make dtbs' in the kernel tree root to have the
+kernel build system compile the device tree overlay for you.
+
+
diff --git a/drivers/staging/pi433/Documentation/pi433.txt b/drivers/staging/pi433/Documentation/pi433.txt new file mode 100644 index 000000000000..38b83b86c334 --- /dev/null +++ b/drivers/staging/pi433/Documentation/pi433.txt @@ -0,0 +1,274 @@
+=====
+Pi433
+=====
+
+
+Introduction
+============
+This driver controls pi433, a radio module for the Raspberry Pi
+(www.pi433.de). It supports transmission and reception. It can be opened
+by multiple applications for transmission and reception. While transmit
+jobs are queued and processed automatically in the background, the first
+application asking for reception will block out all other applications
+until something is received, which terminates the read request.
+The driver supports on-the-fly reloading of the hardware fifo of the rf
+chip, thus enabling telegrams much longer than the hardware fifo size.
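[Editor's illustration, not part of the patch: a minimal userspace sketch that sends one telegram through /dev/pi433 using the ioctl and write calls described in the "Driver API" section below. The ioctl names and the struct pi433_tx_cfg field names are taken from this documentation and from pi433_if.c later in the series; the numeric values and the FSK constant are placeholders and assume the definitions provided by pi433_if.h.]

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "pi433_if.h"            /* ioctl numbers and struct pi433_tx_cfg */

int main(void)
{
        struct pi433_tx_cfg cfg;
        const char msg[] = "hello";
        int fd;

        fd = open("/dev/pi433", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* read back the per-instance defaults, then adjust what we need */
        if (ioctl(fd, PI433_IOC_RD_TX_CFG, &cfg) < 0)
                goto out;

        cfg.frequency = 433920000;      /* Hz, inside the allowed band */
        cfg.bit_rate = 4800;            /* placeholder, see "Allowed values" */
        cfg.modulation = FSK;           /* constant name as documented */
        cfg.repetitions = 1;

        if (ioctl(fd, PI433_IOC_WR_TX_CFG, &cfg) < 0)
                goto out;

        /* write() only queues the telegram; the driver's tx thread sends it */
        write(fd, msg, sizeof(msg));
out:
        close(fd);
        return 0;
}

[Reception works analogously: fill a struct pi433_rx_cfg, set it with PI433_IOC_WR_RX_CFG, and call read(); the read blocks until a telegram is received.]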
+
+Description of driver operation
+===============================
+
+a) transmission
+
+Each transmission can take place with a different configuration of the rf
+module. Therefore each application can set its own set of parameters. The
+driver takes care that each transmission takes place with the parameter set
+of the application that requests the transmission. To allow the transmission
+to take place in the background, a tx thread is introduced.
+The transfer of data from the main thread to the tx thread is realised by a
+kfifo. With each write request of an application, the passed-in data and the
+corresponding parameter set are written to the kfifo.
+On the other "side" of the kfifo, the tx thread continuously checks whether
+the kfifo is empty. If not, it gets one set of config and data from the
+kfifo. If there is no receive request or the receiver is still waiting for
+something in the air, the rf module is set to standby, the parameters for
+transmission are set, the hardware fifo of the rf chip is preloaded and the
+transmission is started. Upon the hardware fifo threshold interrupt the fifo
+is reloaded, thus enabling telegrams much longer than the hardware fifo size.
+If the telegram is sent and there is more data available in the kfifo, the
+procedure is repeated. If not, the transmission cycle ends.
+
+b) reception
+
+Since only one application is allowed to receive data at a time, there is
+only one configuration set for reception.
+As soon as an application issues a request for receiving a telegram, the
+reception configuration set is written to the rf module and it is set into
+receiving mode.
+Now the driver waits until a predefined RSSI level (signal strength at the
+receiver) is reached. Until this happens, the reception can be interrupted
+by the transmission thread at any time to insert a transmission cycle.
+As soon as the predefined RSSI level is met, a receiving cycle starts. As
+described for the transmission cycle, the readout of the hardware fifo is
+done dynamically. Upon each hardware fifo threshold interrupt, a portion of
+data is read. So also for reception it is possible to receive more data than
+the hardware fifo can hold.
+
+
+Driver API
+==========
+
+The driver is currently implemented as a character device. Therefore it
+supports the calls open, ioctl, read, write and close.
+
+
+params for ioctl
+----------------
+
+There are four options:
+PI433_IOC_RD_TX_CFG - get the transmission parameters from the driver
+PI433_IOC_WR_TX_CFG - set the transmission parameters
+PI433_IOC_RD_RX_CFG - get the receiving parameters from the driver
+PI433_IOC_WR_RX_CFG - set the receiving parameters
+
+The tx configuration is transferred via struct pi433_tx_cfg, the parameter
+set for transmission. It is divided into two sections: rf parameters and
+packet format.
+
+rf params:
+    frequency
+        frequency used for transmission.
+        Allowed values: 433050000...434790000
+    bit_rate
+        bit rate used for transmission.
+        Allowed values: #####
+    dev_frequency
+        frequency deviation in case of FSK.
+        Allowed values: 600...500000
+    modulation
+        FSK - frequency shift keying
+        OOK - on-off keying
+    modShaping
+        shapingOff - no shaping
+        shaping1_0 - gauss filter with BT 1 (FSK only)
+        shaping0_5 - gauss filter with BT 0.5 (FSK only)
+        shaping0_3 - gauss filter with BT 0.3 (FSK only)
+        shapingBR  - filter cut off at BR (OOK only)
+        shaping2BR - filter cut off at 2*BR (OOK only)
+    paRamp (FSK only)
+        ramp3400 - amp ramps up in 3.4ms
+        ramp2000 - amp ramps up in 2.0ms
+        ramp1000 - amp ramps up in 1ms
+        ramp500  - amp ramps up in 500us
+        ramp250  - amp ramps up in 250us
+        ramp125  - amp ramps up in 125us
+        ramp100  - amp ramps up in 100us
+        ramp62   - amp ramps up in 62us
+        ramp50   - amp ramps up in 50us
+        ramp40   - amp ramps up in 40us
+        ramp31   - amp ramps up in 31us
+        ramp25   - amp ramps up in 25us
+        ramp20   - amp ramps up in 20us
+        ramp15   - amp ramps up in 15us
+        ramp12   - amp ramps up in 12us
+        ramp10   - amp ramps up in 10us
+    tx_start_condition
+        fifoLevel    - transmission starts, if the fifo is filled to
+                       threshold level
+        fifoNotEmpty - transmission starts, as soon as there is one
+                       byte in the internal fifo
+    repetitions
+        This gives the option to send a telegram multiple times. Default: 1
+
+packet format:
+    enable_preamble
+        optionOn  - a preamble will be automatically generated
+        optionOff - no preamble will be generated
+    enable_sync
+        optionOn  - a sync word will be automatically added to
+                    the telegram after the preamble
+        optionOff - no sync word will be added
+        Attention: While it is possible to generate sync without a preamble,
+        the receiver won't be able to detect the sync without a preamble.
+    enable_length_byte
+        optionOn  - the length of the telegram will be automatically
+                    added to the telegram. It's part of the payload
+        optionOff - no length information will be automatically added
+                    to the telegram.
+        Attention: For telegram lengths over 255 bytes, this option can't be used
+        Attention: should only be used in combination with sync
+    enable_address_byte
+        optionOn  - the address byte will be automatically added to the
+                    telegram. It's part of the payload
+        optionOff - the address byte will not be added to the telegram.
+        The address byte can be used for address filtering, so the receiver
+        will only receive telegrams with a given address byte.
+        Attention: should only be used in combination with sync
+    enable_crc
+        optionOn  - a crc will be automatically calculated over the
+                    payload of the telegram and added to the telegram
+                    after the payload.
+        optionOff - no crc will be calculated
+    preamble_length
+        length of the preamble. Allowed values: 0...65536
+    sync_length
+        length of the sync word. Allowed values: 0...8
+    fixed_message_length
+        length of the payload of the telegram. Will override the length
+        given by the buffer, passed in with the write command. Will be
+        ignored if set to zero.
+    sync_pattern[8]
+        contains up to eight values, that are used as the sync pattern
+        on sync option
+    address_byte
+        one byte, used as the address byte on the address byte option.
+
+
+The rx configuration is transferred via struct pi433_rx_cfg, the parameter
+set for receiving. It is divided into two sections: rf parameters and packet
+format.
+
+rf params:
+    frequency
+        frequency used for reception.
+        Allowed values: 433050000...434790000
+    bit_rate
+        bit rate used for reception.
+        Allowed values: #####
+    dev_frequency
+        frequency deviation in case of FSK.
+        Allowed values: 600...500000
+    modulation
+        FSK - frequency shift keying
+        OOK - on-off keying
+    rssi_threshold
+        threshold value for the signal strength on the receiver input.
+        If this value is exceeded, a reception cycle starts.
+        Allowed values: 0...255
+    thresholdDecrement
+        in order to adapt to different levels of signal strength, over
+        time the receiver gets more and more sensitive. This value
+        determines how fast the sensitivity increases.
+        step_0_5db - increase in 0.5dB steps
+        step_1_0db - increase in 1dB steps
+        step_1_5db - increase in 1.5dB steps
+        step_2_0db - increase in 2dB steps
+        step_3_0db - increase in 3dB steps
+        step_4_0db - increase in 4dB steps
+        step_5_0db - increase in 5dB steps
+        step_6_0db - increase in 6dB steps
+    antennaImpedance
+        sets the electrical adaptation of the antenna
+        fiftyOhm      - for antennas with an impedance of 50Ohm
+        twohundretOhm - for antennas with an impedance of 200Ohm
+    lnaGain
+        sets the gain of the low noise amp
+        automatic  - lna gain is determined by an agc
+        max        - lna gain is set to maximum
+        maxMinus6  - lna gain is set to 6db below max
+        maxMinus12 - lna gain is set to 12db below max
+        maxMinus24 - lna gain is set to 24db below max
+        maxMinus36 - lna gain is set to 36db below max
+        maxMinus48 - lna gain is set to 48db below max
+    bw_mantisse
+        sets the bandwidth of the channel filter - part one: mantisse.
+        mantisse16 - mantisse is set to 16
+        mantisse20 - mantisse is set to 20
+        mantisse24 - mantisse is set to 24
+    bw_exponent
+        sets the bandwidth of the channel filter - part two: exponent.
+        Allowed values: 0...7
+    dagc
+        operation mode of the digital automatic gain control
+        normalMode
+        improve
+        improve4LowModulationIndex
+
+packet format:
+    enable_sync
+        optionOn  - sync detection is enabled. If the configured sync pattern
+                    isn't found, the telegram will be internally discarded
+        optionOff - sync detection is disabled.
+    enable_length_byte
+        optionOn  - the first byte of the payload will be used as the length
+                    byte, regardless of the number of bytes that were
+                    requested by the read request.
+        optionOff - the number of bytes to be read will be set according to
+                    the number of bytes that were requested by the read
+                    request.
+        Attention: should only be used in combination with sync
+    enable_address_filtering
+        filteringOff           - no address filtering will take place
+        nodeAddress            - all telegrams not matching the node
+                                 address will be internally discarded
+        nodeOrBroadcastAddress - all telegrams matching neither the node
+                                 nor the broadcast address will be
+                                 internally discarded
+        Attention: Sync option must be enabled in order to use this feature
+    enable_crc
+        optionOn  - a crc will be calculated over the payload of the
+                    received telegram. If the calculated crc doesn't match
+                    the two bytes that follow the payload, the telegram
+                    will be internally discarded.
+        Attention: This option is only operational if sync is on and fixed
+        length or a length byte is used
+    sync_length
+        Gives the length of the sync word.
+        Attention: This setting must meet the setting of the transmitter,
+        if the sync option is used.
+    fixed_message_length
+        Overrides the telegram length either given by the first byte of
+        the payload or by the read request.
+    bytes_to_drop
+        gives the number of bytes that will be dropped before transferring
+        data to the read buffer.
+        This option is only useful if all packet helpers are switched
+        off and the rf chip is used in raw receiving mode. This may be
+        needed if a telegram of a third-party device should be received,
+        using a protocol not compatible with the packet engine of the rf69
+        chip.
+    sync_pattern[8]
+        contains up to eight values, that are used as the sync pattern
+        on sync option.
+        This setting must meet the configuration of the transmitting device,
+        if the sync option is enabled.
+    node_address
+        one byte, used as the node address on the address filtering option.
+    broadcast_address
+        one byte, used as the broadcast address on the address filtering
+        option.
+
+
diff --git a/drivers/staging/pi433/Kconfig b/drivers/staging/pi433/Kconfig new file mode 100644 index 000000000000..87c2ee192cca --- /dev/null +++ b/drivers/staging/pi433/Kconfig @@ -0,0 +1,16 @@
+config PI433
+    tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
+    depends on SPI
+    ---help---
+      This option allows you to enable support for the radio module Pi433.
+
+      Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
+      or compatible. It extends the Raspberry Pi with the option to send
+      and receive data in the 433MHz ISM band - for example, to communicate
+      between two systems without using ethernet or bluetooth, or to control
+      or read sockets, actuators and sensors, which are widely available at
+      low cost.
+
+      For details or the option to buy, please visit https://pi433.de/en.html
+
+      If in doubt, say N here; saying yes most probably won't hurt.
diff --git a/drivers/staging/pi433/Makefile b/drivers/staging/pi433/Makefile new file mode 100644 index 000000000000..417f3e4d12b1 --- /dev/null +++ b/drivers/staging/pi433/Makefile @@ -0,0 +1,3 @@
+obj-$(CONFIG_PI433) += pi433.o
+
+pi433-objs := pi433_if.o rf69.o
diff --git a/drivers/staging/pi433/TODO b/drivers/staging/pi433/TODO new file mode 100644 index 000000000000..63a40bfcc67e --- /dev/null +++ b/drivers/staging/pi433/TODO @@ -0,0 +1,5 @@
+* coding style does not fully comply with the kernel style guide.
+* still TODOs, annotated in the code
+* currently the code introduces new IOCTLs. I'm afraid this is a bad idea.
+  -> Replace this with another interface, hints are welcome!
+* Some missing data (marked with ###) needs to be added in the documentation
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c new file mode 100644 index 000000000000..ed737f4b1e77 --- /dev/null +++ b/drivers/staging/pi433/pi433_if.c @@ -0,0 +1,1322 @@
+/*
+ * userspace interface for pi433 radio module
+ *
+ * Pi433 is a 433MHz radio module for the Raspberry Pi.
+ * It is based on the HopeRf module RFM69CW. Therefore, inside this
+ * driver you'll find an abstraction of the rf69 chip.
+ *
+ * If needed, this driver could be extended to also support other
+ * devices based on HopeRf's rf69.
+ *
+ * The driver can also be extended to support other HopeRf modules
+ * with a similar interface - e.g. RFM69HCW, RFM12, RFM95, ...
+ *
+ * Copyright (C) 2016 Wolf-Entwicklungen
+ *	Marcus Wolf <linux@wolf-entwicklungen.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#undef DEBUG + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/idr.h> +#include <linux/ioctl.h> +#include <linux/uaccess.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/cdev.h> +#include <linux/err.h> +#include <linux/kfifo.h> +#include <linux/errno.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/gpio/consumer.h> +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/spi/spi.h> +#ifdef CONFIG_COMPAT +#include <asm/compat.h> +#endif + +#include "pi433_if.h" +#include "rf69.h" + + +#define N_PI433_MINORS (1U << MINORBITS) /*32*/ /* ... up to 256 */ +#define MAX_MSG_SIZE 900 /* min: FIFO_SIZE! */ +#define MSG_FIFO_SIZE 65536 /* 65536 = 2^16 */ +#define NUM_DIO 2 + +static dev_t pi433_dev; +static DEFINE_IDR(pi433_idr); +static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */ + +static struct class *pi433_class; /* mainly for udev to create /dev/pi433 */ + +/* tx config is instance specific + * so with each open a new tx config struct is needed + */ +/* rx config is device specific + * so we have just one rx config, ebedded in device struct + */ +struct pi433_device { + /* device handling related values */ + dev_t devt; + int minor; + struct device *dev; + struct cdev *cdev; + struct spi_device *spi; + unsigned users; + + /* irq related values */ + struct gpio_desc *gpiod[NUM_DIO]; + int irq_num[NUM_DIO]; + u8 irq_state[NUM_DIO]; + + /* tx related values */ + STRUCT_KFIFO_REC_1(MSG_FIFO_SIZE) tx_fifo; + struct mutex tx_fifo_lock; // TODO: check, whether necessary or obsolete + struct task_struct *tx_task_struct; + wait_queue_head_t tx_wait_queue; + u8 free_in_fifo; + + /* rx related values */ + struct pi433_rx_cfg rx_cfg; + u8 *rx_buffer; + unsigned int rx_buffer_size; + u32 rx_bytes_to_drop; + u32 rx_bytes_dropped; + unsigned int rx_position; + struct mutex rx_lock; + wait_queue_head_t rx_wait_queue; + + /* fifo wait queue */ + struct task_struct *fifo_task_struct; + wait_queue_head_t fifo_wait_queue; + + /* flags */ + bool rx_active; + bool tx_active; + bool interrupt_rx_allowed; +}; + +struct pi433_instance { + struct pi433_device *device; + struct pi433_tx_cfg tx_cfg; +}; + +/*-------------------------------------------------------------------------*/ + +/* macro for checked access of registers of radio module */ +#define SET_CHECKED(retval) \ + if (retval < 0) \ + return retval; + +/*-------------------------------------------------------------------------*/ + +/* GPIO interrupt handlers */ +static irq_handler_t +DIO0_irq_handler(unsigned int irq, void *dev_id, struct pt_regs *regs) +{ + struct pi433_device *device = dev_id; + + if (device->irq_state[DIO0] == DIO_PacketSent) + { + device->free_in_fifo = FIFO_SIZE; + printk("DIO0 irq: Packet sent\n"); // TODO: printk() should include KERN_ facility level + wake_up_interruptible(&device->fifo_wait_queue); + } + else if (device->irq_state[DIO0] == DIO_Rssi_DIO0) + { + printk("DIO0 irq: RSSI level over threshold\n"); + wake_up_interruptible(&device->rx_wait_queue); + } + else if (device->irq_state[DIO0] == DIO_PayloadReady) + { + printk("DIO0 irq: PayloadReady\n"); + device->free_in_fifo = 0; + wake_up_interruptible(&device->fifo_wait_queue); + } + + return (irq_handler_t) IRQ_HANDLED; +} + +static irq_handler_t +DIO1_irq_handler(unsigned int irq, void *dev_id, struct pt_regs *regs) +{ + struct pi433_device *device = dev_id; + + if (device->irq_state[DIO1] == 
DIO_FifoNotEmpty_DIO1) + { + device->free_in_fifo = FIFO_SIZE; + } + else if (device->irq_state[DIO1] == DIO_FifoLevel) + { + if (device->rx_active) device->free_in_fifo = FIFO_THRESHOLD - 1; + else device->free_in_fifo = FIFO_SIZE - FIFO_THRESHOLD - 1; + } + printk("DIO1 irq: %d bytes free in fifo\n", device->free_in_fifo); // TODO: printk() should include KERN_ facility level + wake_up_interruptible(&device->fifo_wait_queue); + + return (irq_handler_t) IRQ_HANDLED; +} + +static void *DIO_irq_handler[NUM_DIO] = { + DIO0_irq_handler, + DIO1_irq_handler +}; + +/*-------------------------------------------------------------------------*/ + +static int +rf69_set_rx_cfg(struct pi433_device *dev, struct pi433_rx_cfg *rx_cfg) +{ + int payload_length; + + /* receiver config */ + SET_CHECKED(rf69_set_frequency (dev->spi, rx_cfg->frequency)); + SET_CHECKED(rf69_set_bit_rate (dev->spi, rx_cfg->bit_rate)); + SET_CHECKED(rf69_set_modulation (dev->spi, rx_cfg->modulation)); + SET_CHECKED(rf69_set_antenna_impedance (dev->spi, rx_cfg->antenna_impedance)); + SET_CHECKED(rf69_set_rssi_threshold (dev->spi, rx_cfg->rssi_threshold)); + SET_CHECKED(rf69_set_ook_threshold_dec (dev->spi, rx_cfg->thresholdDecrement)); + SET_CHECKED(rf69_set_bandwidth (dev->spi, rx_cfg->bw_mantisse, rx_cfg->bw_exponent)); + SET_CHECKED(rf69_set_bandwidth_during_afc(dev->spi, rx_cfg->bw_mantisse, rx_cfg->bw_exponent)); + SET_CHECKED(rf69_set_dagc (dev->spi, rx_cfg->dagc)); + + dev->rx_bytes_to_drop = rx_cfg->bytes_to_drop; + + /* packet config */ + /* enable */ + SET_CHECKED(rf69_set_sync_enable(dev->spi, rx_cfg->enable_sync)); + if (rx_cfg->enable_sync == optionOn) + { + SET_CHECKED(rf69_set_fifo_fill_condition(dev->spi, afterSyncInterrupt)); + } + else + { + SET_CHECKED(rf69_set_fifo_fill_condition(dev->spi, always)); + } + SET_CHECKED(rf69_set_packet_format (dev->spi, rx_cfg->enable_length_byte)); + SET_CHECKED(rf69_set_adressFiltering(dev->spi, rx_cfg->enable_address_filtering)); + SET_CHECKED(rf69_set_crc_enable (dev->spi, rx_cfg->enable_crc)); + + /* lengths */ + SET_CHECKED(rf69_set_sync_size(dev->spi, rx_cfg->sync_length)); + if (rx_cfg->enable_length_byte == optionOn) + { + SET_CHECKED(rf69_set_payload_length(dev->spi, 0xff)); + } + else if (rx_cfg->fixed_message_length != 0) + { + payload_length = rx_cfg->fixed_message_length; + if (rx_cfg->enable_length_byte == optionOn) payload_length++; + if (rx_cfg->enable_address_filtering != filteringOff) payload_length++; + SET_CHECKED(rf69_set_payload_length(dev->spi, payload_length)); + } + else + { + SET_CHECKED(rf69_set_payload_length(dev->spi, 0)); + } + + /* values */ + if (rx_cfg->enable_sync == optionOn) + { + SET_CHECKED(rf69_set_sync_values(dev->spi, rx_cfg->sync_pattern)); + } + if (rx_cfg->enable_address_filtering != filteringOff) + { + SET_CHECKED(rf69_set_node_address (dev->spi, rx_cfg->node_address)); + SET_CHECKED(rf69_set_broadcast_address(dev->spi, rx_cfg->broadcast_address)); + } + + return 0; +} + +static int +rf69_set_tx_cfg(struct pi433_device *dev, struct pi433_tx_cfg *tx_cfg) +{ + SET_CHECKED(rf69_set_frequency (dev->spi, tx_cfg->frequency)); + SET_CHECKED(rf69_set_bit_rate (dev->spi, tx_cfg->bit_rate)); + SET_CHECKED(rf69_set_modulation (dev->spi, tx_cfg->modulation)); + SET_CHECKED(rf69_set_deviation (dev->spi, tx_cfg->dev_frequency)); + SET_CHECKED(rf69_set_pa_ramp (dev->spi, tx_cfg->pa_ramp)); + SET_CHECKED(rf69_set_modulation_shaping(dev->spi, tx_cfg->modShaping)); + SET_CHECKED(rf69_set_tx_start_condition(dev->spi, tx_cfg->tx_start_condition)); + + /* 
packet format enable */ + if (tx_cfg->enable_preamble == optionOn) + { + SET_CHECKED(rf69_set_preamble_length(dev->spi, tx_cfg->preamble_length)); + } + else + { + SET_CHECKED(rf69_set_preamble_length(dev->spi, 0)); + } + SET_CHECKED(rf69_set_sync_enable (dev->spi, tx_cfg->enable_sync)); + SET_CHECKED(rf69_set_packet_format(dev->spi, tx_cfg->enable_length_byte)); + SET_CHECKED(rf69_set_crc_enable (dev->spi, tx_cfg->enable_crc)); + + /* configure sync, if enabled */ + if (tx_cfg->enable_sync == optionOn) + { + SET_CHECKED(rf69_set_sync_size(dev->spi, tx_cfg->sync_length)); + SET_CHECKED(rf69_set_sync_values(dev->spi, tx_cfg->sync_pattern)); + } + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static int +pi433_start_rx(struct pi433_device *dev) +{ + int retval; + + /* return without action, if no pending read request */ + if (!dev->rx_active) + return 0; + + /* setup for receiving */ + retval = rf69_set_rx_cfg(dev, &dev->rx_cfg); + if (retval) return retval; + + /* setup rssi irq */ + SET_CHECKED(rf69_set_dio_mapping(dev->spi, DIO0, DIO_Rssi_DIO0)); + dev->irq_state[DIO0] = DIO_Rssi_DIO0; + irq_set_irq_type(dev->irq_num[DIO0], IRQ_TYPE_EDGE_RISING); + + /* setup fifo level interrupt */ + SET_CHECKED(rf69_set_fifo_threshold(dev->spi, FIFO_SIZE - FIFO_THRESHOLD)); + SET_CHECKED(rf69_set_dio_mapping(dev->spi, DIO1, DIO_FifoLevel)); + dev->irq_state[DIO1] = DIO_FifoLevel; + irq_set_irq_type(dev->irq_num[DIO1], IRQ_TYPE_EDGE_RISING); + + /* set module to receiving mode */ + SET_CHECKED(rf69_set_mode(dev->spi, receive)); + + return 0; +} + + +/*-------------------------------------------------------------------------*/ + +static int +pi433_receive(void *data) +{ + struct pi433_device *dev = data; + struct spi_device *spi = dev->spi; /* needed for SET_CHECKED */ + int bytes_to_read, bytes_total; + int retval; + + dev->interrupt_rx_allowed = false; + + /* wait for any tx to finish */ + dev_dbg(dev->dev,"rx: going to wait for any tx to finish"); + retval = wait_event_interruptible(dev->rx_wait_queue, !dev->tx_active); + if(retval) /* wait was interrupted */ + { + dev->interrupt_rx_allowed = true; + wake_up_interruptible(&dev->tx_wait_queue); + return retval; + } + + /* prepare status vars */ + dev->free_in_fifo = FIFO_SIZE; + dev->rx_position = 0; + dev->rx_bytes_dropped = 0; + + /* setup radio module to listen for something "in the air" */ + retval = pi433_start_rx(dev); + if (retval) + return retval; + + /* now check RSSI, if low wait for getting high (RSSI interrupt) */ + while ( !rf69_get_flag(dev->spi, rssiExceededThreshold) ) + { + /* allow tx to interrupt us while waiting for high RSSI */ + dev->interrupt_rx_allowed = true; + wake_up_interruptible(&dev->tx_wait_queue); + + /* wait for RSSI level to become high */ + dev_dbg(dev->dev, "rx: going to wait for high RSSI level"); + retval = wait_event_interruptible(dev->rx_wait_queue, + rf69_get_flag(dev->spi, + rssiExceededThreshold)); + if (retval) goto abort; /* wait was interrupted */ + dev->interrupt_rx_allowed = false; + + /* cross check for ongoing tx */ + if (!dev->tx_active) break; + } + + /* configure payload ready irq */ + SET_CHECKED(rf69_set_dio_mapping(spi, DIO0, DIO_PayloadReady)); + dev->irq_state[DIO0] = DIO_PayloadReady; + irq_set_irq_type(dev->irq_num[DIO0], IRQ_TYPE_EDGE_RISING); + + /* fixed or unlimited length? 
*/ + if (dev->rx_cfg.fixed_message_length != 0) + { + if (dev->rx_cfg.fixed_message_length > dev->rx_buffer_size) + { + retval = -1; + goto abort; + } + bytes_total = dev->rx_cfg.fixed_message_length; + dev_dbg(dev->dev,"rx: msg len set to %d by fixed length", bytes_total); + } + else + { + bytes_total = dev->rx_buffer_size; + dev_dbg(dev->dev, "rx: msg len set to %d as requested by read", bytes_total); + } + + /* length byte enabled? */ + if (dev->rx_cfg.enable_length_byte == optionOn) + { + retval = wait_event_interruptible(dev->fifo_wait_queue, + dev->free_in_fifo < FIFO_SIZE); + if (retval) goto abort; /* wait was interrupted */ + + rf69_read_fifo(spi, (u8 *)&bytes_total, 1); + if (bytes_total > dev->rx_buffer_size) + { + retval = -1; + goto abort; + } + dev->free_in_fifo++; + dev_dbg(dev->dev, "rx: msg len reset to %d due to length byte", bytes_total); + } + + /* address byte enabled? */ + if (dev->rx_cfg.enable_address_filtering != filteringOff) + { + u8 dummy; + + bytes_total--; + + retval = wait_event_interruptible(dev->fifo_wait_queue, + dev->free_in_fifo < FIFO_SIZE); + if (retval) goto abort; /* wait was interrupted */ + + rf69_read_fifo(spi, &dummy, 1); + dev->free_in_fifo++; + dev_dbg(dev->dev, "rx: address byte stripped off"); + } + + /* get payload */ + while (dev->rx_position < bytes_total) + { + if ( !rf69_get_flag(dev->spi, payloadReady) ) + { + retval = wait_event_interruptible(dev->fifo_wait_queue, + dev->free_in_fifo < FIFO_SIZE); + if (retval) goto abort; /* wait was interrupted */ + } + + /* need to drop bytes or acquire? */ + if (dev->rx_bytes_to_drop > dev->rx_bytes_dropped) + bytes_to_read = dev->rx_bytes_to_drop - dev->rx_bytes_dropped; + else + bytes_to_read = bytes_total - dev->rx_position; + + + /* access the fifo */ + if (bytes_to_read > FIFO_SIZE - dev->free_in_fifo) + bytes_to_read = FIFO_SIZE - dev->free_in_fifo; + retval = rf69_read_fifo(spi, + &dev->rx_buffer[dev->rx_position], + bytes_to_read); + if (retval) goto abort; /* read failed */ + dev->free_in_fifo += bytes_to_read; + + /* adjust status vars */ + if (dev->rx_bytes_to_drop > dev->rx_bytes_dropped) + dev->rx_bytes_dropped += bytes_to_read; + else + dev->rx_position += bytes_to_read; + } + + + /* rx done, wait was interrupted or error occured */ +abort: + dev->interrupt_rx_allowed = true; + SET_CHECKED(rf69_set_mode(dev->spi, standby)); + wake_up_interruptible(&dev->tx_wait_queue); + + if (retval) + return retval; + else + return bytes_total; +} + +static int +pi433_tx_thread(void *data) +{ + struct pi433_device *device = data; + struct spi_device *spi = device->spi; /* needed for SET_CHECKED */ + struct pi433_tx_cfg tx_cfg; + u8 buffer[MAX_MSG_SIZE]; + size_t size; + bool rx_interrupted = false; + int position, repetitions; + int retval; + + while (1) + { + /* wait for fifo to be populated or for request to terminate*/ + dev_dbg(device->dev, "thread: going to wait for new messages"); + wait_event_interruptible(device->tx_wait_queue, + ( !kfifo_is_empty(&device->tx_fifo) || + kthread_should_stop() )); + if ( kthread_should_stop() ) + return 0; + + /* get data from fifo in the following order: + * - tx_cfg + * - size of message + * - message + */ + mutex_lock(&device->tx_fifo_lock); + + retval = kfifo_out(&device->tx_fifo, &tx_cfg, sizeof(tx_cfg)); + if (retval != sizeof(tx_cfg)) + { + dev_dbg(device->dev, "reading tx_cfg from fifo failed: got %d byte(s), expected %d", retval, (unsigned int)sizeof(tx_cfg) ); + mutex_unlock(&device->tx_fifo_lock); + continue; + } + + retval = 
kfifo_out(&device->tx_fifo, &size, sizeof(size_t)); + if (retval != sizeof(size_t)) + { + dev_dbg(device->dev, "reading msg size from fifo failed: got %d, expected %d", retval, (unsigned int)sizeof(size_t) ); + mutex_unlock(&device->tx_fifo_lock); + continue; + } + + /* use fixed message length, if requested */ + if (tx_cfg.fixed_message_length != 0) + size = tx_cfg.fixed_message_length; + + /* increase size, if len byte is requested */ + if (tx_cfg.enable_length_byte == optionOn) + size++; + + /* increase size, if adr byte is requested */ + if (tx_cfg.enable_address_byte == optionOn) + size++; + + /* prime buffer */ + memset(buffer, 0, size); + position = 0; + + /* add length byte, if requested */ + if (tx_cfg.enable_length_byte == optionOn) + buffer[position++] = size-1; /* according to spec length byte itself must be excluded from the length calculation */ + + /* add adr byte, if requested */ + if (tx_cfg.enable_address_byte == optionOn) + buffer[position++] = tx_cfg.address_byte; + + /* finally get message data from fifo */ + retval = kfifo_out(&device->tx_fifo, &buffer[position], sizeof(buffer)-position ); + dev_dbg(device->dev, "read %d message byte(s) from fifo queue.", retval); + mutex_unlock(&device->tx_fifo_lock); + + /* if rx is active, we need to interrupt the waiting for + * incoming telegrams, to be able to send something. + * We are only allowed, if currently no reception takes + * place otherwise we need to wait for the incoming telegram + * to finish + */ + wait_event_interruptible(device->tx_wait_queue, + !device->rx_active || + device->interrupt_rx_allowed == true); + + /* prevent race conditions + * irq will be reenabled after tx config is set + */ + disable_irq(device->irq_num[DIO0]); + device->tx_active = true; + + if (device->rx_active && rx_interrupted == false) + { + /* rx is currently waiting for a telegram; + * we need to set the radio module to standby + */ + SET_CHECKED(rf69_set_mode(device->spi, standby)); + rx_interrupted = true; + } + + /* clear fifo, set fifo threshold, set payload length */ + SET_CHECKED(rf69_set_mode(spi, standby)); /* this clears the fifo */ + SET_CHECKED(rf69_set_fifo_threshold(spi, FIFO_THRESHOLD)); + if (tx_cfg.enable_length_byte == optionOn) + { + SET_CHECKED(rf69_set_payload_length(spi, size * tx_cfg.repetitions)); + } + else + { + SET_CHECKED(rf69_set_payload_length(spi, 0)); + } + + /* configure the rf chip */ + rf69_set_tx_cfg(device, &tx_cfg); + + /* enable fifo level interrupt */ + SET_CHECKED(rf69_set_dio_mapping(spi, DIO1, DIO_FifoLevel)); + device->irq_state[DIO1] = DIO_FifoLevel; + irq_set_irq_type(device->irq_num[DIO1], IRQ_TYPE_EDGE_FALLING); + + /* enable packet sent interrupt */ + SET_CHECKED(rf69_set_dio_mapping(spi, DIO0, DIO_PacketSent)); + device->irq_state[DIO0] = DIO_PacketSent; + irq_set_irq_type(device->irq_num[DIO0], IRQ_TYPE_EDGE_RISING); + enable_irq(device->irq_num[DIO0]); /* was disabled by rx active check */ + + /* enable transmission */ + SET_CHECKED(rf69_set_mode(spi, transmit)); + + /* transfer this msg (and repetitions) to chip fifo */ + device->free_in_fifo = FIFO_SIZE; + position = 0; + repetitions = tx_cfg.repetitions; + while( (repetitions > 0) && (size > position) ) + { + if ( (size - position) > device->free_in_fifo) + { /* msg to big for fifo - take a part */ + int temp = device->free_in_fifo; + device->free_in_fifo = 0; + rf69_write_fifo(spi, + &buffer[position], + temp); + position +=temp; + } + else + { /* msg fits into fifo - take all */ + device->free_in_fifo -= size; + repetitions--; + 
rf69_write_fifo(spi, + &buffer[position], + (size - position) ); + position = 0; /* reset for next repetition */ + } + + retval = wait_event_interruptible(device->fifo_wait_queue, + device->free_in_fifo > 0); + if (retval) { printk("ABORT\n"); goto abort; } + } + + /* we are done. Wait for packet to get sent */ + dev_dbg(device->dev, "thread: wait for packet to get sent/fifo to be empty"); + wait_event_interruptible(device->fifo_wait_queue, + device->free_in_fifo == FIFO_SIZE || + kthread_should_stop() ); + if ( kthread_should_stop() ) printk("ABORT\n"); + + + /* STOP_TRANSMISSION */ + dev_dbg(device->dev, "thread: Packet sent. Set mode to stby."); + SET_CHECKED(rf69_set_mode(spi, standby)); + + /* everything sent? */ + if ( kfifo_is_empty(&device->tx_fifo) ) + { +abort: + if (rx_interrupted) + { + rx_interrupted = false; + pi433_start_rx(device); + } + device->tx_active = false; + wake_up_interruptible(&device->rx_wait_queue); + } + } +} + +/*-------------------------------------------------------------------------*/ + +static ssize_t +pi433_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos) +{ + struct pi433_instance *instance; + struct pi433_device *device; + int bytes_received; + ssize_t retval; + + /* check, whether internal buffer is big enough for requested size */ + if (size > MAX_MSG_SIZE) + return -EMSGSIZE; + + instance = filp->private_data; + device = instance->device; + + /* just one read request at a time */ + mutex_lock(&device->rx_lock); + if (device->rx_active) + { + mutex_unlock(&device->rx_lock); + return -EAGAIN; + } + else + { + device->rx_active = true; + mutex_unlock(&device->rx_lock); + } + + /* start receiving */ + /* will block until something was received*/ + device->rx_buffer_size = size; + bytes_received = pi433_receive(device); + + /* release rx */ + mutex_lock(&device->rx_lock); + device->rx_active = false; + mutex_unlock(&device->rx_lock); + + /* if read was successful copy to user space*/ + if (bytes_received > 0) + { + retval = copy_to_user(buf, device->rx_buffer, bytes_received); + if (retval) + return -EFAULT; + } + + return bytes_received; +} + + +static ssize_t +pi433_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct pi433_instance *instance; + struct pi433_device *device; + int copied, retval; + + instance = filp->private_data; + device = instance->device; + + /* check, whether internal buffer (tx thread) is big enough for requested size */ + if (count > MAX_MSG_SIZE) + return -EMSGSIZE; + + /* write the following sequence into fifo: + * - tx_cfg + * - size of message + * - message + */ + mutex_lock(&device->tx_fifo_lock); + retval = kfifo_in(&device->tx_fifo, &instance->tx_cfg, sizeof(instance->tx_cfg)); + if ( retval != sizeof(instance->tx_cfg) ) + goto abort; + + retval = kfifo_in (&device->tx_fifo, &count, sizeof(size_t)); + if ( retval != sizeof(size_t) ) + goto abort; + + retval = kfifo_from_user(&device->tx_fifo, buf, count, &copied); + if (retval || copied != count) + goto abort; + + mutex_unlock(&device->tx_fifo_lock); + + /* start transfer */ + wake_up_interruptible(&device->tx_wait_queue); + dev_dbg(device->dev, "write: generated new msg with %d bytes.", copied); + + return 0; + +abort: + dev_dbg(device->dev, "write to fifo failed: 0x%x", retval); + kfifo_reset(&device->tx_fifo); // TODO: maybe find a solution, not to discard already stored, valid entries + mutex_unlock(&device->tx_fifo_lock); + return -EAGAIN; +} + + +static long +pi433_ioctl(struct file *filp, unsigned int cmd, 
unsigned long arg) +{ + int err = 0; + int retval = 0; + struct pi433_instance *instance; + struct pi433_device *device; + u32 tmp; + + /* Check type and command number */ + if (_IOC_TYPE(cmd) != PI433_IOC_MAGIC) + return -ENOTTY; + + /* Check access direction once here; don't repeat below. + * IOC_DIR is from the user perspective, while access_ok is + * from the kernel perspective; so they look reversed. + */ + if (_IOC_DIR(cmd) & _IOC_READ) + err = !access_ok(VERIFY_WRITE, + (void __user *)arg, + _IOC_SIZE(cmd)); + + if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE) + err = !access_ok(VERIFY_READ, + (void __user *)arg, + _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + + /* TODO? guard against device removal before, or while, + * we issue this ioctl. --> device_get() + */ + instance = filp->private_data; + device = instance->device; + + if (device == NULL) + return -ESHUTDOWN; + + switch (cmd) { + case PI433_IOC_RD_TX_CFG: + tmp = _IOC_SIZE(cmd); + if ( (tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0) ) + { + retval = -EINVAL; + break; + } + + if (__copy_to_user((void __user *)arg, + &instance->tx_cfg, + tmp)) + { + retval = -EFAULT; + break; + } + + break; + case PI433_IOC_WR_TX_CFG: + tmp = _IOC_SIZE(cmd); + if ( (tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0) ) + { + retval = -EINVAL; + break; + } + + if (__copy_from_user(&instance->tx_cfg, + (void __user *)arg, + tmp)) + { + retval = -EFAULT; + break; + } + + break; + + case PI433_IOC_RD_RX_CFG: + tmp = _IOC_SIZE(cmd); + if ( (tmp == 0) || ((tmp % sizeof(struct pi433_rx_cfg)) != 0) ) { + retval = -EINVAL; + break; + } + + if (__copy_to_user((void __user *)arg, + &device->rx_cfg, + tmp)) + { + retval = -EFAULT; + break; + } + + break; + case PI433_IOC_WR_RX_CFG: + tmp = _IOC_SIZE(cmd); + mutex_lock(&device->rx_lock); + + /* during pendig read request, change of config not allowed */ + if (device->rx_active) { + retval = -EAGAIN; + mutex_unlock(&device->rx_lock); + break; + } + + if ( (tmp == 0) || ((tmp % sizeof(struct pi433_rx_cfg)) != 0) ) { + retval = -EINVAL; + mutex_unlock(&device->rx_lock); + break; + } + + if (__copy_from_user(&device->rx_cfg, + (void __user *)arg, + tmp)) + { + retval = -EFAULT; + mutex_unlock(&device->rx_lock); + break; + } + + mutex_unlock(&device->rx_lock); + break; + default: + retval = -EINVAL; + } + + return retval; +} + +#ifdef CONFIG_COMPAT +static long +pi433_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + return pi433_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#else +#define pi433_compat_ioctl NULL +#endif /* CONFIG_COMPAT */ + +/*-------------------------------------------------------------------------*/ + +static int pi433_open(struct inode *inode, struct file *filp) +{ + struct pi433_device *device; + struct pi433_instance *instance; + + mutex_lock(&minor_lock); + device = idr_find(&pi433_idr, iminor(inode)); + mutex_unlock(&minor_lock); + if (!device) { + pr_debug("device: minor %d unknown.\n", iminor(inode)); + return -ENODEV; + } + + if (!device->rx_buffer) { + device->rx_buffer = kmalloc(MAX_MSG_SIZE, GFP_KERNEL); + if (!device->rx_buffer) + { + dev_dbg(device->dev, "open/ENOMEM\n"); + return -ENOMEM; + } + } + + device->users++; + instance = kzalloc(sizeof(*instance), GFP_KERNEL); + if (!instance) + { + kfree(device->rx_buffer); + device->rx_buffer = NULL; + return -ENOMEM; + } + + /* setup instance data*/ + instance->device = device; + instance->tx_cfg.bit_rate = 4711; + // TODO: fill instance->tx_cfg; + + /* instance data as context */ + 
filp->private_data = instance; + nonseekable_open(inode, filp); + + return 0; +} + +static int pi433_release(struct inode *inode, struct file *filp) +{ + struct pi433_instance *instance; + struct pi433_device *device; + + instance = filp->private_data; + device = instance->device; + kfree(instance); + filp->private_data = NULL; + + /* last close? */ + device->users--; + + if (!device->users) { + kfree(device->rx_buffer); + device->rx_buffer = NULL; + if (device->spi == NULL) + kfree(device); + } + + return 0; +} + + +/*-------------------------------------------------------------------------*/ + +static int setup_GPIOs(struct pi433_device *device) +{ + char name[5]; + int retval; + int i; + + for (i=0; i<NUM_DIO; i++) + { + /* "construct" name and get the gpio descriptor */ + snprintf(name, sizeof(name), "DIO%d", i); + device->gpiod[i] = gpiod_get(&device->spi->dev, name, 0 /*GPIOD_IN*/); + + if (device->gpiod[i] == ERR_PTR(-ENOENT)) + { + dev_dbg(&device->spi->dev, "Could not find entry for %s. Ignoring.", name); + continue; + } + + if (device->gpiod[i] == ERR_PTR(-EBUSY)) + dev_dbg(&device->spi->dev, "%s is busy.", name); + + if ( IS_ERR(device->gpiod[i]) ) + { + retval = PTR_ERR(device->gpiod[i]); + /* release already allocated gpios */ + for (i--; i>=0; i--) + { + free_irq(device->irq_num[i], device); + gpiod_put(device->gpiod[i]); + } + return retval; + } + + + /* configure the pin */ + gpiod_unexport(device->gpiod[i]); + retval = gpiod_direction_input(device->gpiod[i]); + if (retval) return retval; + + + /* configure irq */ + device->irq_num[i] = gpiod_to_irq(device->gpiod[i]); + if (device->irq_num[i] < 0) + { + device->gpiod[i] = ERR_PTR(-EINVAL);//(struct gpio_desc *)device->irq_num[i]; + return device->irq_num[i]; + } + retval = request_irq(device->irq_num[i], + DIO_irq_handler[i], + 0, /* flags */ + name, + device); + + if (retval) + return retval; + + dev_dbg(&device->spi->dev, "%s succesfully configured", name); + } + + return 0; +} + +static void free_GPIOs(struct pi433_device *device) +{ + int i; + + for (i=0; i<NUM_DIO; i++) + { + /* check if gpiod is valid */ + if ( IS_ERR(device->gpiod[i]) ) + continue; + + free_irq(device->irq_num[i], device); + gpiod_put(device->gpiod[i]); + } + return; +} + +static int pi433_get_minor(struct pi433_device *device) +{ + int retval = -ENOMEM; + + mutex_lock(&minor_lock); + retval = idr_alloc(&pi433_idr, device, 0, N_PI433_MINORS, GFP_KERNEL); + if (retval >= 0) { + device->minor = retval; + retval = 0; + } else if (retval == -ENOSPC) { + dev_err(device->dev, "too many pi433 devices\n"); + retval = -EINVAL; + } + mutex_unlock(&minor_lock); + return retval; +} + +static void pi433_free_minor(struct pi433_device *dev) +{ + mutex_lock(&minor_lock); + idr_remove(&pi433_idr, dev->minor); + mutex_unlock(&minor_lock); +} +/*-------------------------------------------------------------------------*/ + +static const struct file_operations pi433_fops = { + .owner = THIS_MODULE, + /* REVISIT switch to aio primitives, so that userspace + * gets more complete API coverage. It'll simplify things + * too, except for the locking. 
+ */ + .write = pi433_write, + .read = pi433_read, + .unlocked_ioctl = pi433_ioctl, + .compat_ioctl = pi433_compat_ioctl, + .open = pi433_open, + .release = pi433_release, + .llseek = no_llseek, +}; + +/*-------------------------------------------------------------------------*/ + +static int pi433_probe(struct spi_device *spi) +{ + struct pi433_device *device; + int retval; + + /* setup spi parameters */ + spi->mode = 0x00; + spi->bits_per_word = 8; + /* spi->max_speed_hz = 10000000; 1MHz already set by device tree overlay */ + + retval = spi_setup(spi); + if (retval) + { + dev_dbg(&spi->dev, "configuration of SPI interface failed!\n"); + return retval; + } + else + { + dev_dbg(&spi->dev, + "spi interface setup: mode 0x%2x, %d bits per word, %dhz max speed", + spi->mode, spi->bits_per_word, spi->max_speed_hz); + } + + /* Ping the chip by reading the version register */ + retval = spi_w8r8(spi, 0x10); + if (retval < 0) + return retval; + + switch(retval) + { + case 0x24: + dev_dbg(&spi->dev, "found pi433 (ver. 0x%x)", retval); + break; + default: + dev_dbg(&spi->dev, "unknown chip version: 0x%x", retval); + return -ENODEV; + } + + /* Allocate driver data */ + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return -ENOMEM; + + /* Initialize the driver data */ + device->spi = spi; + device->rx_active = false; + device->tx_active = false; + device->interrupt_rx_allowed = false; + + /* init wait queues */ + init_waitqueue_head(&device->tx_wait_queue); + init_waitqueue_head(&device->rx_wait_queue); + init_waitqueue_head(&device->fifo_wait_queue); + + /* init fifo */ + INIT_KFIFO(device->tx_fifo); + + /* init mutexes and locks */ + mutex_init(&device->tx_fifo_lock); + mutex_init(&device->rx_lock); + + /* setup GPIO (including irq_handler) for the different DIOs */ + retval = setup_GPIOs(device); + if (retval) + { + dev_dbg(&spi->dev, "setup of GPIOs failed"); + goto GPIO_failed; + } + + /* setup the radio module */ + SET_CHECKED(rf69_set_mode (spi, standby)); + SET_CHECKED(rf69_set_data_mode (spi, packet)); + SET_CHECKED(rf69_set_amplifier_0 (spi, optionOn)); + SET_CHECKED(rf69_set_amplifier_1 (spi, optionOff)); + SET_CHECKED(rf69_set_amplifier_2 (spi, optionOff)); + SET_CHECKED(rf69_set_output_power_level (spi, 13)); + SET_CHECKED(rf69_set_antenna_impedance (spi, fiftyOhm)); + + /* start tx thread */ + device->tx_task_struct = kthread_run(pi433_tx_thread, + device, + "pi433_tx_task"); + if (IS_ERR(device->tx_task_struct)) + { + dev_dbg(device->dev, "start of send thread failed"); + goto send_thread_failed; + } + + /* determ minor number */ + retval = pi433_get_minor(device); + if (retval) + { + dev_dbg(device->dev, "get of minor number failed"); + goto minor_failed; + } + + /* create device */ + device->devt = MKDEV(MAJOR(pi433_dev), device->minor); + device->dev = device_create(pi433_class, + &spi->dev, + device->devt, + device, + "pi433"); + if (IS_ERR(device->dev)) { + pr_err("pi433: device register failed\n"); + retval = PTR_ERR(device->dev); + goto device_create_failed; + } + else { + dev_dbg(device->dev, + "created device for major %d, minor %d\n", + MAJOR(pi433_dev), + device->minor); + } + + /* create cdev */ + device->cdev = cdev_alloc(); + device->cdev->owner = THIS_MODULE; + cdev_init(device->cdev, &pi433_fops); + retval = cdev_add(device->cdev, device->devt, 1); + if (retval) + { + dev_dbg(device->dev, "register of cdev failed"); + goto cdev_failed; + } + + /* spi setup */ + spi_set_drvdata(spi, device); + + return 0; + +cdev_failed: + device_destroy(pi433_class, 
device->devt); +device_create_failed: + pi433_free_minor(device); +minor_failed: + kthread_stop(device->tx_task_struct); +send_thread_failed: + free_GPIOs(device); +GPIO_failed: + kfree(device); + + return retval; +} + +static int pi433_remove(struct spi_device *spi) +{ + struct pi433_device *device = spi_get_drvdata(spi); + + /* free GPIOs */ + free_GPIOs(device); + + /* make sure ops on existing fds can abort cleanly */ + device->spi = NULL; + + kthread_stop(device->tx_task_struct); + + device_destroy(pi433_class, device->devt); + + cdev_del(device->cdev); + + pi433_free_minor(device); + + if (device->users == 0) + kfree(device); + + return 0; +} + +static const struct of_device_id pi433_dt_ids[] = { + { .compatible = "Smarthome-Wolf,pi433" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, pi433_dt_ids); + +static struct spi_driver pi433_spi_driver = { + .driver = { + .name = "pi433", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(pi433_dt_ids), + }, + .probe = pi433_probe, + .remove = pi433_remove, + + /* NOTE: suspend/resume methods are not necessary here. + * We don't do anything except pass the requests to/from + * the underlying controller. The refrigerator handles + * most issues; the controller driver handles the rest. + */ +}; + +/*-------------------------------------------------------------------------*/ + +static int __init pi433_init(void) +{ + int status; + + /* If MAX_MSG_SIZE is smaller then FIFO_SIZE, the driver won't + * work stable - risk of buffer overflow + */ + if (MAX_MSG_SIZE < FIFO_SIZE) + return -EINVAL; + + /* Claim device numbers. Then register a class + * that will key udev/mdev to add/remove /dev nodes. Last, register + * Last, register the driver which manages those device numbers. + */ + status = alloc_chrdev_region(&pi433_dev, 0 /*firstminor*/, N_PI433_MINORS /*count*/, "pi433" /*name*/); + if (status < 0) + return status; + + pi433_class = class_create(THIS_MODULE, "pi433"); + if (IS_ERR(pi433_class)) + { + unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name); + return PTR_ERR(pi433_class); + } + + status = spi_register_driver(&pi433_spi_driver); + if (status < 0) + { + class_destroy(pi433_class); + unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name); + } + + return status; +} + +module_init(pi433_init); + +static void __exit pi433_exit(void) +{ + spi_unregister_driver(&pi433_spi_driver); + class_destroy(pi433_class); + unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name); +} +module_exit(pi433_exit); + +MODULE_AUTHOR("Marcus Wolf, <linux@wolf-entwicklungen.de>"); +MODULE_DESCRIPTION("Driver for Pi433"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:pi433"); diff --git a/drivers/staging/pi433/pi433_if.h b/drivers/staging/pi433/pi433_if.h new file mode 100644 index 000000000000..e6ed3cd9b2e2 --- /dev/null +++ b/drivers/staging/pi433/pi433_if.h @@ -0,0 +1,152 @@ +/* + * include/linux/TODO + * + * userspace interface for pi433 radio module + * + * Pi433 is a 433MHz radio module for the Raspberry Pi. + * It is based on the HopeRf Module RFM69CW. Therefore inside of this + * driver, you'll find an abstraction of the rf69 chip. + * + * If needed, this driver could be extended, to also support other + * devices, basing on HopeRfs rf69. + * + * The driver can also be extended, to support other modules of + * HopeRf with a similar interace - e. g. RFM69HCW, RFM12, RFM95, ... 
+ * Copyright (C) 2016 Wolf-Entwicklungen + * Marcus Wolf <linux@wolf-entwicklungen.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef PI433_H +#define PI433_H + +#include <linux/types.h> +#include "rf69_enum.h" + +/*---------------------------------------------------------------------------*/ + + +/*---------------------------------------------------------------------------*/ + +/* IOCTL structs and commands */ + +/** + * struct pi433_tx_config - describes the configuration of the radio module for sending + * @frequency: + * @bit_rate: + * @modulation: + * @data_mode: + * @preamble_length: + * @sync_pattern: + * @tx_start_condition: + * @payload_length: + * @repetitions: + * + * ATTENTION: + * If the contents of 'pi433_tx_config' ever change + * incompatibly, then the ioctl number (see define below) must change. + * + * NOTE: struct layout is the same in 64bit and 32bit userspace. + */ +#define PI433_TX_CFG_IOCTL_NR 0 +struct pi433_tx_cfg +{ + __u32 frequency; + __u16 bit_rate; + __u32 dev_frequency; + enum modulation modulation; + enum modShaping modShaping; + + enum paRamp pa_ramp; + + enum txStartCondition tx_start_condition; + + __u16 repetitions; + + + /* packet format */ + enum optionOnOff enable_preamble; + enum optionOnOff enable_sync; + enum optionOnOff enable_length_byte; + enum optionOnOff enable_address_byte; + enum optionOnOff enable_crc; + + __u16 preamble_length; + __u8 sync_length; + __u8 fixed_message_length; + + __u8 sync_pattern[8]; + __u8 address_byte; +}; + + +/** + * struct pi433_rx_config - describes the configuration of the radio module for sending + * @frequency: + * @bit_rate: + * @modulation: + * @data_mode: + * @preamble_length: + * @sync_pattern: + * @tx_start_condition: + * @payload_length: + * @repetitions: + * + * ATTENTION: + * If the contents of 'pi433_rx_config' ever change + * incompatibly, then the ioctl number (see define below) must change + * + * NOTE: struct layout is the same in 64bit and 32bit userspace. 
+ */ +#define PI433_RX_CFG_IOCTL_NR 1 +struct pi433_rx_cfg { + __u32 frequency; + __u16 bit_rate; + __u32 dev_frequency; + + enum modulation modulation; + + __u8 rssi_threshold; + enum thresholdDecrement thresholdDecrement; + enum antennaImpedance antenna_impedance; + enum lnaGain lna_gain; + enum mantisse bw_mantisse; /* normal: 0x50 */ + __u8 bw_exponent; /* during AFC: 0x8b */ + enum dagc dagc; + + + + /* packet format */ + enum optionOnOff enable_sync; + enum optionOnOff enable_length_byte; /* should be used in combination with sync, only */ + enum addressFiltering enable_address_filtering; /* operational with sync, only */ + enum optionOnOff enable_crc; /* only operational, if sync on and fixed length or length byte is used */ + + __u8 sync_length; + __u8 fixed_message_length; + __u32 bytes_to_drop; + + __u8 sync_pattern[8]; + __u8 node_address; + __u8 broadcast_address; +}; + + +#define PI433_IOC_MAGIC 'r' + +#define PI433_IOC_RD_TX_CFG _IOR(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)]) +#define PI433_IOC_WR_TX_CFG _IOW(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)]) + +#define PI433_IOC_RD_RX_CFG _IOR(PI433_IOC_MAGIC, PI433_RX_CFG_IOCTL_NR, char[sizeof(struct pi433_rx_cfg)]) +#define PI433_IOC_WR_RX_CFG _IOW(PI433_IOC_MAGIC, PI433_RX_CFG_IOCTL_NR, char[sizeof(struct pi433_rx_cfg)]) + +#endif /* PI433_H */ diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c new file mode 100644 index 000000000000..f83523e3395d --- /dev/null +++ b/drivers/staging/pi433/rf69.c @@ -0,0 +1,985 @@ +/* + * abstraction of the spi interface of HopeRf rf69 radio module + * + * Copyright (C) 2016 Wolf-Entwicklungen + * Marcus Wolf <linux@wolf-entwicklungen.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/* enable prosa debug info */ +#undef DEBUG +/* enable print of values on reg access */ +#undef DEBUG_VALUES +/* enable print of values on fifo access */ +#undef DEBUG_FIFO_ACCESS + +#include <linux/types.h> +#include <linux/spi/spi.h> + +#include "rf69.h" +#include "rf69_registers.h" + +#define F_OSC 32000000 /* in Hz */ +#define FIFO_SIZE 66 /* in byte */ + +/*-------------------------------------------------------------------------*/ + +#define READ_REG(x) rf69_read_reg (spi, x) +#define WRITE_REG(x,y) rf69_write_reg(spi, x, y) +#define INVALID_PARAM \ + { \ + dev_dbg(&spi->dev, "set: illegal input param"); \ + return -EINVAL; \ + } + +/*-------------------------------------------------------------------------*/ + +int rf69_set_mode(struct spi_device *spi, enum mode mode) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: mode"); + #endif + + switch (mode){ + case transmit: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_TRANSMIT); + case receive: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_RECEIVE); + case synthesizer: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_SYNTHESIZER); + case standby: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_STANDBY); + case mode_sleep: return WRITE_REG(REG_OPMODE, (READ_REG(REG_OPMODE) & ~MASK_OPMODE_MODE) | OPMODE_MODE_SLEEP); + default: INVALID_PARAM; + } + + // we are using packet mode, so this check is not really needed + // but waiting for mode ready is necessary when going from sleep because the FIFO may not be immediately available from previous mode + //while (_mode == RF69_MODE_SLEEP && (READ_REG(REG_IRQFLAGS1) & RF_IRQFLAGS1_MODEREADY) == 0x00); // Wait for ModeReady + +} + +int rf69_set_data_mode(struct spi_device *spi, enum dataMode dataMode) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: data mode"); + #endif + + switch (dataMode) { + case packet: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_PACKET); + case continuous: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_CONTINUOUS); + case continuousNoSync: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODE) | DATAMODUL_MODE_CONTINUOUS_NOSYNC); + default: INVALID_PARAM; + } +} + +int rf69_set_modulation(struct spi_device *spi, enum modulation modulation) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: modulation"); + #endif + + switch (modulation) { + case OOK: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_TYPE) | DATAMODUL_MODULATION_TYPE_OOK); + case FSK: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_TYPE) | DATAMODUL_MODULATION_TYPE_FSK); + default: INVALID_PARAM; + } +} + +enum modulation rf69_get_modulation(struct spi_device *spi) +{ + u8 currentValue; + + #ifdef DEBUG + dev_dbg(&spi->dev, "get: mode"); + #endif + + currentValue = READ_REG(REG_DATAMODUL); + + switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) // TODO improvement: change 3 to define + { + case DATAMODUL_MODULATION_TYPE_OOK: return OOK; + case DATAMODUL_MODULATION_TYPE_FSK: return FSK; + default: return undefined; + } +} + +int rf69_set_modulation_shaping(struct spi_device *spi, enum modShaping modShaping) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: mod shaping"); + #endif + + if (rf69_get_modulation(spi) == FSK) + { + switch (modShaping) { + case 
shapingOff: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_NONE);
+		case shaping1_0: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_1_0);
+		case shaping0_5: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_0_5);
+		case shaping0_3: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_0_3);
+		default: INVALID_PARAM;
+		}
+	}
+	else
+	{
+		switch (modShaping) {
+		case shapingOff: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_NONE);
+		case shapingBR:  return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_BR);
+		case shaping2BR: return WRITE_REG(REG_DATAMODUL, (READ_REG(REG_DATAMODUL) & ~MASK_DATAMODUL_MODULATION_SHAPE) | DATAMODUL_MODULATION_SHAPE_2BR);
+		default: INVALID_PARAM;
+		}
+	}
+}
+
+int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate)
+{
+	int retval;
+	u32 bitRate_min;
+	u32 bitRate_reg;
+	u8 msb;
+	u8 lsb;
+
+	#ifdef DEBUG
+		dev_dbg(&spi->dev, "set: bit rate");
+	#endif
+
+	// check input value
+	bitRate_min = F_OSC / 8388608; // 8388608 = 2^23;
+	if (bitRate < bitRate_min)
+	{
+		dev_dbg(&spi->dev, "setBitRate: illegal input param");
+		INVALID_PARAM;
+	}
+
+	// calculate reg settings
+	bitRate_reg = (F_OSC / bitRate);
+
+	msb = (bitRate_reg&0xff00) >> 8;
+	lsb = (bitRate_reg&0xff);
+
+	// transmit to RF 69
+	retval = WRITE_REG(REG_BITRATE_MSB, msb);
+	if (retval) return retval;
+	retval = WRITE_REG(REG_BITRATE_LSB, lsb);
+	if (retval) return retval;
+
+	return 0;
+}
+
+int rf69_set_deviation(struct spi_device *spi, u32 deviation)
+{
+	int retval;
+//	u32 f_max; TODO: take the dependency on the bit rate into account!!
+	u64 f_reg;
+	u64 f_step;
+	u8 msb;
+	u8 lsb;
+	u64 factor = 1000000; // to improve precision of calculation
+
+	#ifdef DEBUG
+		dev_dbg(&spi->dev, "set: deviation");
+	#endif
+
+	if (deviation < 600 || deviation > 500000) //TODO: take the dependency on the bit rate into account!!
+	{
+		dev_dbg(&spi->dev, "set_deviation: illegal input param");
+		INVALID_PARAM;
+	}
+
+	// calculate f step
+	f_step = F_OSC * factor;
+	do_div(f_step, 524288); //  524288 = 2^19
+
+	// calculate register settings
+	f_reg = deviation * factor;
+	do_div(f_reg, f_step);
+
+	msb = (f_reg&0xff00) >> 8;
+	lsb = (f_reg&0xff);
+
+	// check msb
+	if (msb & ~FDEVMASB_MASK)
+	{
+		dev_dbg(&spi->dev, "set_deviation: err in calc of msb");
+		INVALID_PARAM;
+	}
+
+	// write to chip
+	retval = WRITE_REG(REG_FDEV_MSB, msb);
+	if (retval) return retval;
+	retval = WRITE_REG(REG_FDEV_LSB, lsb);
+	if (retval) return retval;
+
+	return 0;
+}
+
+int rf69_set_frequency(struct spi_device *spi, u32 frequency)
+{
+	int retval;
+	u32 f_max;
+	u64 f_reg;
+	u64 f_step;
+	u8 msb;
+	u8 mid;
+	u8 lsb;
+	u64 factor = 1000000; // to improve precision of calculation
+
+	#ifdef DEBUG
+		dev_dbg(&spi->dev, "set: frequency");
+	#endif
+
+	// calculate f step
+	f_step = F_OSC * factor;
+	do_div(f_step, 524288); //  524288 = 2^19
+
+	// check input value
+	f_max = div_u64(f_step * 8388608, factor);
+	if (frequency > f_max)
+	{
+		dev_dbg(&spi->dev, "setFrequency: illegal input param");
+		INVALID_PARAM;
+	}
+
+	// calculate reg settings
+	f_reg = frequency * factor;
+	do_div(f_reg, f_step);
+
+	msb = (f_reg&0xff0000) >> 16;
+	mid = (f_reg&0xff00)   >> 8;
+	lsb = (f_reg&0xff);
+
+	// write to chip
+	retval = WRITE_REG(REG_FRF_MSB, msb);
+	if (retval) return retval;
+	retval = WRITE_REG(REG_FRF_MID, mid);
+	if (retval) return retval;
+	retval = WRITE_REG(REG_FRF_LSB, lsb);
+	if (retval) return retval;
+
+	return 0;
+}
+
+int rf69_set_amplifier_0(struct spi_device *spi, enum optionOnOff optionOnOff)
+{
+	#ifdef DEBUG
+		dev_dbg(&spi->dev, "set: amp #0");
+	#endif
+
+	switch(optionOnOff) {
+	case optionOn:  return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) |  MASK_PALEVEL_PA0) );
+	case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA0) );
+	default: INVALID_PARAM;
+	}
+}
+
+int rf69_set_amplifier_1(struct spi_device *spi, enum optionOnOff optionOnOff)
+{
+	#ifdef DEBUG
+		dev_dbg(&spi->dev, "set: amp #1");
+	#endif
+
+	switch(optionOnOff) {
+	case optionOn:  return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) |  MASK_PALEVEL_PA1) );
+	case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA1) );
+	default: INVALID_PARAM;
+	}
+}
+
+int rf69_set_amplifier_2(struct spi_device *spi, enum optionOnOff optionOnOff)
+{
+	#ifdef DEBUG
+		dev_dbg(&spi->dev, "set: amp #2");
+	#endif
+
+	switch(optionOnOff) {
+	case optionOn:  return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) |  MASK_PALEVEL_PA2) );
+	case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA2) );
+	default: INVALID_PARAM;
+	}
+}
+
+int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel)
+{
+	#ifdef DEBUG
+		dev_dbg(&spi->dev, "set: power level");
+	#endif
+
+	powerLevel += 18; // TODO: take the PA0,1,2 setting into account
+
+	// check input value
+	if (powerLevel > 0x1f)
+		INVALID_PARAM;
+
+	// write value
+	return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_OUTPUT_POWER) | powerLevel);
+}
+
+int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp)
+{
+	#ifdef DEBUG
+		dev_dbg(&spi->dev, "set: pa ramp");
+	#endif
+
+	switch(paRamp) {
+	case ramp3400: return WRITE_REG(REG_PARAMP, PARAMP_3400);
+	case ramp2000: return WRITE_REG(REG_PARAMP, PARAMP_2000);
+	case ramp1000: return WRITE_REG(REG_PARAMP, PARAMP_1000);
+	case ramp500:  return WRITE_REG(REG_PARAMP,
PARAMP_500); + case ramp250: return WRITE_REG(REG_PARAMP, PARAMP_250); + case ramp125: return WRITE_REG(REG_PARAMP, PARAMP_125); + case ramp100: return WRITE_REG(REG_PARAMP, PARAMP_100); + case ramp62: return WRITE_REG(REG_PARAMP, PARAMP_62); + case ramp50: return WRITE_REG(REG_PARAMP, PARAMP_50); + case ramp40: return WRITE_REG(REG_PARAMP, PARAMP_40); + case ramp31: return WRITE_REG(REG_PARAMP, PARAMP_31); + case ramp25: return WRITE_REG(REG_PARAMP, PARAMP_25); + case ramp20: return WRITE_REG(REG_PARAMP, PARAMP_20); + case ramp15: return WRITE_REG(REG_PARAMP, PARAMP_15); + case ramp12: return WRITE_REG(REG_PARAMP, PARAMP_12); + case ramp10: return WRITE_REG(REG_PARAMP, PARAMP_10); + default: INVALID_PARAM; + } +} + +int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance antennaImpedance) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: antenna impedance"); + #endif + + switch(antennaImpedance) { + case fiftyOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) & ~MASK_LNA_ZIN) ); + case twohundretOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) | MASK_LNA_ZIN) ); + default: INVALID_PARAM; + } +} + +int rf69_set_lna_gain(struct spi_device *spi, enum lnaGain lnaGain) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: lna gain"); + #endif + + switch(lnaGain) { + case automatic: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_AUTO) ); + case max: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX) ); + case maxMinus6: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_6) ); + case maxMinus12: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_12) ); + case maxMinus24: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_24) ); + case maxMinus36: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_36) ); + case maxMinus48: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_48) ); + default: INVALID_PARAM; + } +} + +enum lnaGain rf69_get_lna_gain(struct spi_device *spi) +{ + u8 currentValue; + + #ifdef DEBUG + dev_dbg(&spi->dev, "get: lna gain"); + #endif + + currentValue = READ_REG(REG_LNA); + + switch (currentValue & MASK_LNA_CURRENT_GAIN >> 3) // improvement: change 3 to define + { + case LNA_GAIN_AUTO: return automatic; + case LNA_GAIN_MAX: return max; + case LNA_GAIN_MAX_MINUS_6: return maxMinus6; + case LNA_GAIN_MAX_MINUS_12: return maxMinus12; + case LNA_GAIN_MAX_MINUS_24: return maxMinus24; + case LNA_GAIN_MAX_MINUS_36: return maxMinus36; + case LNA_GAIN_MAX_MINUS_48: return maxMinus48; + default: return undefined; + } +} + +int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi ,u8 reg, enum dccPercent dccPercent) +{ + switch (dccPercent) { + case dcc16Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_16_PERCENT) ); + case dcc8Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_8_PERCENT) ); + case dcc4Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_4_PERCENT) ); + case dcc2Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_2_PERCENT) ); + case dcc1Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_1_PERCENT) ); + case dcc0_5Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_5_PERCENT) ); + case dcc0_25Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | 
BW_DCC_0_25_PERCENT) ); + case dcc0_125Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_125_PERCENT) ); + default: INVALID_PARAM; + } +} + +int rf69_set_dc_cut_off_frequency(struct spi_device *spi, enum dccPercent dccPercent) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: cut off freq"); + #endif + + return rf69_set_dc_cut_off_frequency_intern(spi, REG_RXBW, dccPercent); +} + +int rf69_set_dc_cut_off_frequency_during_afc(struct spi_device *spi, enum dccPercent dccPercent) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: cut off freq during afc"); + #endif + + return rf69_set_dc_cut_off_frequency_intern(spi, REG_AFCBW, dccPercent); +} + +static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg, + enum mantisse mantisse, u8 exponent) +{ + u8 newValue; + + // check value for mantisse and exponent + if (exponent > 7) INVALID_PARAM; + if ( (mantisse!=mantisse16) && + (mantisse!=mantisse20) && + (mantisse!=mantisse24) ) INVALID_PARAM; + + // read old value + newValue = READ_REG(reg); + + // "delete" mantisse and exponent = just keep the DCC setting + newValue = newValue & MASK_BW_DCC_FREQ; + + // add new mantisse + switch(mantisse) { + case mantisse16: newValue = newValue | BW_MANT_16; break; + case mantisse20: newValue = newValue | BW_MANT_20; break; + case mantisse24: newValue = newValue | BW_MANT_24; break; + } + + // add new exponent + newValue = newValue | exponent; + + // write back + return WRITE_REG(reg, newValue); +} + +int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse, u8 exponent) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: band width"); + #endif + + return rf69_set_bandwidth_intern(spi, REG_RXBW, mantisse, exponent); +} + +int rf69_set_bandwidth_during_afc(struct spi_device *spi, enum mantisse mantisse, u8 exponent) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: band width during afc"); + #endif + + return rf69_set_bandwidth_intern(spi, REG_AFCBW, mantisse, exponent); +} + +int rf69_set_ook_threshold_type(struct spi_device *spi, enum thresholdType thresholdType) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: threshold type"); + #endif + + switch (thresholdType) + { + case fixed: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_FIXED) ); + case peak: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_PEAK) ); + case average: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_AVERAGE) ); + default: INVALID_PARAM; + } +} + +int rf69_set_ook_threshold_step(struct spi_device *spi, enum thresholdStep thresholdStep) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: threshold step"); + #endif + + switch (thresholdStep) { + case step_0_5db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_0_5_DB) ); + case step_1_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_0_DB) ); + case step_1_5db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_5_DB) ); + case step_2_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_2_0_DB) ); + case step_3_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_3_0_DB) ); + case step_4_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | 
OOKPEAK_THRESHSTEP_4_0_DB) ); + case step_5_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_5_0_DB) ); + case step_6_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_6_0_DB) ); + default: INVALID_PARAM; + } +} + +int rf69_set_ook_threshold_dec(struct spi_device *spi, enum thresholdDecrement thresholdDecrement) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: threshold decrement"); + #endif + + switch (thresholdDecrement) { + case dec_every8th: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_8TH) ); + case dec_every4th: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_4TH) ); + case dec_every2nd: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_2ND) ); + case dec_once: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_ONCE) ); + case dec_twice: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_TWICE) ); + case dec_4times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_4_TIMES) ); + case dec_8times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_8_TIMES) ); + case dec_16times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_16_TIMES) ); + default: INVALID_PARAM; + } +} + +int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value) +{ + u8 mask; + u8 shift; + u8 regaddr; + u8 regValue; + + #ifdef DEBUG + dev_dbg(&spi->dev, "set: DIO mapping"); + #endif + + // check DIO number + if (DIONumber > 5) INVALID_PARAM; + + switch (DIONumber) { + case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break; + case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break; + case 2: mask=MASK_DIO2; shift=SHIFT_DIO2; regaddr=REG_DIOMAPPING1; break; + case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break; + case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break; + case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break; + } + + // read reg + regValue=READ_REG(regaddr); + // delete old value + regValue = regValue & ~mask; + // add new value + regValue = regValue | value << shift; + // write back + return WRITE_REG(regaddr,regValue); +} + +bool rf69_get_flag(struct spi_device *spi, enum flag flag) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "get: flag"); + #endif + + switch(flag) { + case modeSwitchCompleted: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_MODE_READY); + case readyToReceive: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_RX_READY); + case readyToSend: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_TX_READY); + case pllLocked: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_PLL_LOCK); + case rssiExceededThreshold: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_RSSI); + case timeout: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_TIMEOUT); + case automode: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_AUTOMODE); + case syncAddressMatch: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH); + case fifoFull: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_FULL); +/* case fifoNotEmpty: return (READ_REG(REG_IRQFLAGS2) & 
MASK_IRQFLAGS2_FIFO_NOT_EMPTY); */ + case fifoEmpty: return !(READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_NOT_EMPTY); + case fifoLevelBelowThreshold: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_LEVEL); + case fifoOverrun: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_FIFO_OVERRUN); + case packetSent: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_PACKET_SENT); + case payloadReady: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_PAYLOAD_READY); + case crcOk: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_CRC_OK); + case batteryLow: return (READ_REG(REG_IRQFLAGS2) & MASK_IRQFLAGS2_LOW_BAT); + default: return false; + } +} + +int rf69_reset_flag(struct spi_device *spi, enum flag flag) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "reset: flag"); + #endif + + switch(flag) { + case rssiExceededThreshold: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_RSSI); + case syncAddressMatch: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH); + case fifoOverrun: return WRITE_REG(REG_IRQFLAGS2, MASK_IRQFLAGS2_FIFO_OVERRUN); + default: INVALID_PARAM; + } +} + +int rf69_set_rssi_threshold(struct spi_device *spi, u8 threshold) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: rssi threshold"); + #endif + + /* no value check needed - u8 exactly matches register size */ + + return WRITE_REG(REG_RSSITHRESH, threshold); +} + +int rf69_set_rx_start_timeout(struct spi_device *spi, u8 timeout) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: start timeout"); + #endif + + /* no value check needed - u8 exactly matches register size */ + + return WRITE_REG(REG_RXTIMEOUT1, timeout); +} + +int rf69_set_rssi_timeout(struct spi_device *spi, u8 timeout) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: rssi timeout"); + #endif + + /* no value check needed - u8 exactly matches register size */ + + return WRITE_REG(REG_RXTIMEOUT2, timeout); +} + +int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength) +{ + int retval; + u8 msb, lsb; + + #ifdef DEBUG + dev_dbg(&spi->dev, "set: preample length"); + #endif + + /* no value check needed - u16 exactly matches register size */ + + /* calculate reg settings */ + msb = (preambleLength&0xff00) >> 8; + lsb = (preambleLength&0xff); + + /* transmit to chip */ + retval = WRITE_REG(REG_PREAMBLE_MSB, msb); + if (retval) return retval; + retval = WRITE_REG(REG_PREAMBLE_LSB, lsb); + + return retval; +} + +int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: sync enable"); + #endif + + switch(optionOnOff) { + case optionOn: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_SYNC_ON) ); + case optionOff: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_ON) ); + default: INVALID_PARAM; + } +} + +int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition fifoFillCondition) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: fifo fill condition"); + #endif + + switch(fifoFillCondition) { + case always: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_FIFO_FILL_CONDITION) ); + case afterSyncInterrupt: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_FIFO_FILL_CONDITION) ); + default: INVALID_PARAM; + } +} + +int rf69_set_sync_size(struct spi_device *spi, u8 syncSize) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: sync size"); + #endif + + // check input value + if (syncSize > 0x07) + INVALID_PARAM; + + // write value + return WRITE_REG(REG_SYNC_CONFIG, 
(READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | (syncSize << 3) ); +} + +int rf69_set_sync_tolerance(struct spi_device *spi, u8 syncTolerance) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: sync tolerance"); + #endif + + // check input value + if (syncTolerance > 0x07) + INVALID_PARAM; + + // write value + return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | syncTolerance); +} + +int rf69_set_sync_values(struct spi_device *spi, u8 syncValues[8]) +{ + int retval = 0; + + #ifdef DEBUG + dev_dbg(&spi->dev, "set: sync values"); + #endif + + retval += WRITE_REG(REG_SYNCVALUE1, syncValues[0]); + retval += WRITE_REG(REG_SYNCVALUE2, syncValues[1]); + retval += WRITE_REG(REG_SYNCVALUE3, syncValues[2]); + retval += WRITE_REG(REG_SYNCVALUE4, syncValues[3]); + retval += WRITE_REG(REG_SYNCVALUE5, syncValues[4]); + retval += WRITE_REG(REG_SYNCVALUE6, syncValues[5]); + retval += WRITE_REG(REG_SYNCVALUE7, syncValues[6]); + retval += WRITE_REG(REG_SYNCVALUE8, syncValues[7]); + + return retval; +} + +int rf69_set_packet_format(struct spi_device * spi, enum packetFormat packetFormat) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: packet format"); + #endif + + switch(packetFormat) { + case packetLengthVar: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE) ); + case packetLengthFix: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE) ); + default: INVALID_PARAM; + } +} + +int rf69_set_crc_enable(struct spi_device *spi, enum optionOnOff optionOnOff) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: crc enable"); + #endif + + switch(optionOnOff) { + case optionOn: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_CRC_ON) ); + case optionOff: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_CRC_ON) ); + default: INVALID_PARAM; + } +} + +int rf69_set_adressFiltering(struct spi_device *spi, enum addressFiltering addressFiltering) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: address filtering"); + #endif + + switch (addressFiltering) { + case filteringOff: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_OFF) ); + case nodeAddress: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODE) ); + case nodeOrBroadcastAddress: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST) ); + default: INVALID_PARAM; + } +} + +int rf69_set_payload_length(struct spi_device *spi, u8 payloadLength) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: payload length"); + #endif + + return WRITE_REG(REG_PAYLOAD_LENGTH, payloadLength); +} + +u8 rf69_get_payload_length(struct spi_device *spi) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "get: payload length"); + #endif + + return (u8) READ_REG(REG_PAYLOAD_LENGTH); +} + +int rf69_set_node_address(struct spi_device *spi, u8 nodeAddress) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: node address"); + #endif + + return WRITE_REG(REG_NODEADRS, nodeAddress); +} + +int rf69_set_broadcast_address(struct spi_device *spi, u8 broadcastAddress) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: broadcast address"); + #endif + + return WRITE_REG(REG_BROADCASTADRS, broadcastAddress); +} + +int 
rf69_set_tx_start_condition(struct spi_device *spi, enum txStartCondition txStartCondition) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: start condition"); + #endif + + switch(txStartCondition) { + case fifoLevel: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_TXSTART) ); + case fifoNotEmpty: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) | MASK_FIFO_THRESH_TXSTART) ); + default: INVALID_PARAM; + } +} + +int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold) +{ + int retval; + + #ifdef DEBUG + dev_dbg(&spi->dev, "set: fifo threshold"); + #endif + + // check input value + if (threshold & 0x80) + INVALID_PARAM; + + // write value + retval = WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_VALUE) | threshold); + if (retval) + return retval; + + // access the fifo to activate new threshold + return rf69_read_fifo (spi, (u8*) &retval, 1); // retval used as buffer +} + +int rf69_set_dagc(struct spi_device *spi, enum dagc dagc) +{ + #ifdef DEBUG + dev_dbg(&spi->dev, "set: dagc"); + #endif + + switch(dagc) { + case normalMode: return WRITE_REG(REG_TESTDAGC, DAGC_NORMAL); + case improve: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA0); + case improve4LowModulationIndex: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA1); + default: INVALID_PARAM; + } +} + +/*-------------------------------------------------------------------------*/ + +int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size) +{ + #ifdef DEBUG_FIFO_ACCESS + int i; + #endif + struct spi_transfer transfer; + u8 local_buffer[FIFO_SIZE + 1]; + int retval; + + if (size > FIFO_SIZE) + { + #ifdef DEBUG + dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer \n"); + #endif + return -EMSGSIZE; + } + + /* prepare a bidirectional transfer */ + local_buffer[0] = REG_FIFO; + memset(&transfer, 0, sizeof(transfer)); + transfer.tx_buf = local_buffer; + transfer.rx_buf = local_buffer; + transfer.len = size+1; + + retval = spi_sync_transfer(spi, &transfer, 1); + + #ifdef DEBUG_FIFO_ACCESS + for (i=0; i<size; i++) + dev_dbg(&spi->dev, "%d - 0x%x\n", i, local_buffer[i+1]); + #endif + + memcpy(buffer, &local_buffer[1], size); // TODO: ohne memcopy wäre schöner + + return retval; +} + +int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size) +{ + #ifdef DEBUG_FIFO_ACCESS + int i; + #endif + char spi_address = REG_FIFO | WRITE_BIT; + u8 local_buffer[FIFO_SIZE + 1]; + + if (size > FIFO_SIZE) + { + #ifdef DEBUG + dev_dbg(&spi->dev, "read fifo: passed in buffer bigger then internal buffer \n"); + #endif + return -EMSGSIZE; + } + + local_buffer[0] = spi_address; + memcpy(&local_buffer[1], buffer, size); // TODO: ohne memcopy wäre schöner + + #ifdef DEBUG_FIFO_ACCESS + for (i=0; i<size; i++) + dev_dbg(&spi->dev, "0x%x\n",buffer[i]); + #endif + + return spi_write (spi, local_buffer, size + 1); +} + +/*-------------------------------------------------------------------------*/ + +u8 rf69_read_reg(struct spi_device *spi, u8 addr) +{ + int retval; + + retval = spi_w8r8(spi, addr); + + #ifdef DEBUG_VALUES + if (retval < 0) + /* should never happen, since we already checked, + * that module is connected. Therefore no error + * handling, just an optional error message... 
+ */ + dev_dbg(&spi->dev, "read 0x%x FAILED\n", + addr); + else + dev_dbg(&spi->dev, "read 0x%x from reg 0x%x\n", + retval, + addr); + #endif + + return retval; +} + +int rf69_write_reg(struct spi_device *spi, u8 addr, u8 value) +{ + int retval; + char buffer[2]; + + buffer[0] = addr | WRITE_BIT; + buffer[1] = value; + + retval = spi_write(spi, &buffer, 2); + + #ifdef DEBUG_VALUES + if (retval < 0) + /* should never happen, since we already checked, + * that module is connected. Therefore no error + * handling, just an optional error message... + */ + dev_dbg(&spi->dev, "write 0x%x to 0x%x FAILED\n", + value, + addr); + else + dev_dbg(&spi->dev, "wrote 0x%x to reg 0x%x\n", + value, + addr); + #endif + + return retval; +} + + diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h new file mode 100644 index 000000000000..b81e0762032e --- /dev/null +++ b/drivers/staging/pi433/rf69.h @@ -0,0 +1,82 @@ +/* + * hardware abstraction/register access for HopeRf rf69 radio module + * + * Copyright (C) 2016 Wolf-Entwicklungen + * Marcus Wolf <linux@wolf-entwicklungen.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef RF69_H +#define RF69_H + +#include "rf69_enum.h" +#include "rf69_registers.h" + +#define F_OSC 32000000 /* in Hz */ +#define FREQUENCY 433920000 /* in Hz, modifying this value impacts CE certification */ +#define FIFO_SIZE 66 /* in byte */ +#define FIFO_THRESHOLD 15 /* in byte */ + +int rf69_set_mode(struct spi_device *spi, enum mode mode); +int rf69_set_data_mode(struct spi_device *spi, enum dataMode dataMode); +int rf69_set_modulation(struct spi_device *spi, enum modulation modulation); +enum modulation rf69_get_modulation(struct spi_device *spi); +int rf69_set_modulation_shaping(struct spi_device *spi, enum modShaping modShaping); +int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate); +int rf69_set_deviation(struct spi_device *spi, u32 deviation); +int rf69_set_frequency(struct spi_device *spi, u32 frequency); +int rf69_set_amplifier_0(struct spi_device *spi, enum optionOnOff optionOnOff); +int rf69_set_amplifier_1(struct spi_device *spi, enum optionOnOff optionOnOff); +int rf69_set_amplifier_2(struct spi_device *spi, enum optionOnOff optionOnOff); +int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel); +int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp); +int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance antennaImpedance); +int rf69_set_lna_gain(struct spi_device *spi, enum lnaGain lnaGain); +enum lnaGain rf69_get_lna_gain(struct spi_device *spi); +int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi, u8 reg, enum dccPercent dccPercent); +int rf69_set_dc_cut_off_frequency(struct spi_device *spi, enum dccPercent dccPercent); +int rf69_set_dc_cut_off_frequency_during_afc(struct spi_device *spi, enum dccPercent dccPercent); +int rf69_set_bandwidth(struct spi_device *spi, enum mantisse mantisse, u8 exponent); +int rf69_set_bandwidth_during_afc(struct spi_device *spi, enum mantisse mantisse, u8 exponent); +int 
rf69_set_ook_threshold_type(struct spi_device *spi, enum thresholdType thresholdType); +int rf69_set_ook_threshold_step(struct spi_device *spi, enum thresholdStep thresholdStep); +int rf69_set_ook_threshold_dec(struct spi_device *spi, enum thresholdDecrement thresholdDecrement); +int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value); +bool rf69_get_flag(struct spi_device *spi, enum flag flag); +int rf69_reset_flag(struct spi_device *spi, enum flag flag); +int rf69_set_rssi_threshold(struct spi_device *spi, u8 threshold); +int rf69_set_rx_start_timeout(struct spi_device *spi, u8 timeout); +int rf69_set_rssi_timeout(struct spi_device *spi, u8 timeout); +int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength); +int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff); +int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition fifoFillCondition); +int rf69_set_sync_size(struct spi_device *spi, u8 sync_size); +int rf69_set_sync_tolerance(struct spi_device *spi, u8 syncTolerance); +int rf69_set_sync_values(struct spi_device *spi, u8 syncValues[8]); +int rf69_set_packet_format(struct spi_device * spi, enum packetFormat packetFormat); +int rf69_set_crc_enable(struct spi_device *spi, enum optionOnOff optionOnOff); +int rf69_set_adressFiltering(struct spi_device *spi, enum addressFiltering addressFiltering); +int rf69_set_payload_length(struct spi_device *spi, u8 payloadLength); +u8 rf69_get_payload_length(struct spi_device *spi); +int rf69_set_node_address(struct spi_device *spi, u8 nodeAddress); +int rf69_set_broadcast_address(struct spi_device *spi, u8 broadcastAddress); +int rf69_set_tx_start_condition(struct spi_device *spi, enum txStartCondition txStartCondition); +int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold); +int rf69_set_dagc(struct spi_device *spi, enum dagc dagc); + +int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size); +int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size); + +u8 rf69_read_reg (struct spi_device *spi, u8 addr); +int rf69_write_reg(struct spi_device *spi, u8 addr, u8 value); + + +#endif diff --git a/drivers/staging/pi433/rf69_enum.h b/drivers/staging/pi433/rf69_enum.h new file mode 100644 index 000000000000..fbfb59bd3f3d --- /dev/null +++ b/drivers/staging/pi433/rf69_enum.h @@ -0,0 +1,201 @@ +/* + * enumerations for HopeRf rf69 radio module + * + * Copyright (C) 2016 Wolf-Entwicklungen + * Marcus Wolf <linux@wolf-entwicklungen.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef RF69_ENUM_H +#define RF69_ENUM_H + +enum optionOnOff +{ + optionOff, + optionOn +}; + +enum mode +{ + mode_sleep, + standby, + synthesizer, + transmit, + receive +}; + +enum dataMode +{ + packet, + continuous, + continuousNoSync +}; + +enum modulation +{ + OOK, + FSK +}; + +enum modShaping +{ + shapingOff, + shaping1_0, + shaping0_5, + shaping0_3, + shapingBR, + shaping2BR +}; + +enum paRamp +{ + ramp3400, + ramp2000, + ramp1000, + ramp500, + ramp250, + ramp125, + ramp100, + ramp62, + ramp50, + ramp40, + ramp31, + ramp25, + ramp20, + ramp15, + ramp12, + ramp10 +}; + +enum antennaImpedance +{ + fiftyOhm, + twohundretOhm +}; + +enum lnaGain +{ + automatic, + max, + maxMinus6, + maxMinus12, + maxMinus24, + maxMinus36, + maxMinus48, + undefined +}; + +enum dccPercent +{ + dcc16Percent, + dcc8Percent, + dcc4Percent, + dcc2Percent, + dcc1Percent, + dcc0_5Percent, + dcc0_25Percent, + dcc0_125Percent +}; + +enum mantisse +{ + mantisse16, + mantisse20, + mantisse24 +}; + +enum thresholdType +{ + fixed, + peak, + average +}; + +enum thresholdStep +{ + step_0_5db, + step_1_0db, + step_1_5db, + step_2_0db, + step_3_0db, + step_4_0db, + step_5_0db, + step_6_0db +}; + +enum thresholdDecrement +{ + dec_every8th, + dec_every4th, + dec_every2nd, + dec_once, + dec_twice, + dec_4times, + dec_8times, + dec_16times +}; + +enum flag +{ + modeSwitchCompleted, + readyToReceive, + readyToSend, + pllLocked, + rssiExceededThreshold, + timeout, + automode, + syncAddressMatch, + fifoFull, +// fifoNotEmpty, collision with next enum; replaced by following enum... + fifoEmpty, + fifoLevelBelowThreshold, + fifoOverrun, + packetSent, + payloadReady, + crcOk, + batteryLow +}; + +enum fifoFillCondition +{ + afterSyncInterrupt, + always +}; + +enum packetFormat +{ + packetLengthFix, + packetLengthVar +}; + +enum txStartCondition +{ + fifoLevel, + fifoNotEmpty +}; + +enum addressFiltering +{ + filteringOff, + nodeAddress, + nodeOrBroadcastAddress +}; + +enum dagc +{ + normalMode, + improve, + improve4LowModulationIndex +}; + + +#endif diff --git a/drivers/staging/pi433/rf69_registers.h b/drivers/staging/pi433/rf69_registers.h new file mode 100644 index 000000000000..6335d42142fe --- /dev/null +++ b/drivers/staging/pi433/rf69_registers.h @@ -0,0 +1,489 @@ +/* + * register description for HopeRf rf69 radio module + * + * Copyright (C) 2016 Wolf-Entwicklungen + * Marcus Wolf <linux@wolf-entwicklungen.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/*******************************************/ +/* RF69 register addresses */ +/*******************************************/ +#define REG_FIFO 0x00 +#define REG_OPMODE 0x01 +#define REG_DATAMODUL 0x02 +#define REG_BITRATE_MSB 0x03 +#define REG_BITRATE_LSB 0x04 +#define REG_FDEV_MSB 0x05 +#define REG_FDEV_LSB 0x06 +#define REG_FRF_MSB 0x07 +#define REG_FRF_MID 0x08 +#define REG_FRF_LSB 0x09 +#define REG_OSC1 0x0A +#define REG_AFCCTRL 0x0B +#define REG_LOWBAT 0x0C +#define REG_LISTEN1 0x0D +#define REG_LISTEN2 0x0E +#define REG_LISTEN3 0x0F +#define REG_VERSION 0x10 +#define REG_PALEVEL 0x11 +#define REG_PARAMP 0x12 +#define REG_OCP 0x13 +#define REG_AGCREF 0x14 /* not available on RF69 */ +#define REG_AGCTHRESH1 0x15 /* not available on RF69 */ +#define REG_AGCTHRESH2 0x16 /* not available on RF69 */ +#define REG_AGCTHRESH3 0x17 /* not available on RF69 */ +#define REG_LNA 0x18 +#define REG_RXBW 0x19 +#define REG_AFCBW 0x1A +#define REG_OOKPEAK 0x1B +#define REG_OOKAVG 0x1C +#define REG_OOKFIX 0x1D +#define REG_AFCFEI 0x1E +#define REG_AFCMSB 0x1F +#define REG_AFCLSB 0x20 +#define REG_FEIMSB 0x21 +#define REG_FEILSB 0x22 +#define REG_RSSICONFIG 0x23 +#define REG_RSSIVALUE 0x24 +#define REG_DIOMAPPING1 0x25 +#define REG_DIOMAPPING2 0x26 +#define REG_IRQFLAGS1 0x27 +#define REG_IRQFLAGS2 0x28 +#define REG_RSSITHRESH 0x29 +#define REG_RXTIMEOUT1 0x2A +#define REG_RXTIMEOUT2 0x2B +#define REG_PREAMBLE_MSB 0x2C +#define REG_PREAMBLE_LSB 0x2D +#define REG_SYNC_CONFIG 0x2E +#define REG_SYNCVALUE1 0x2F +#define REG_SYNCVALUE2 0x30 +#define REG_SYNCVALUE3 0x31 +#define REG_SYNCVALUE4 0x32 +#define REG_SYNCVALUE5 0x33 +#define REG_SYNCVALUE6 0x34 +#define REG_SYNCVALUE7 0x35 +#define REG_SYNCVALUE8 0x36 +#define REG_PACKETCONFIG1 0x37 +#define REG_PAYLOAD_LENGTH 0x38 +#define REG_NODEADRS 0x39 +#define REG_BROADCASTADRS 0x3A +#define REG_AUTOMODES 0x3B +#define REG_FIFO_THRESH 0x3C +#define REG_PACKETCONFIG2 0x3D +#define REG_AESKEY1 0x3E +#define REG_AESKEY2 0x3F +#define REG_AESKEY3 0x40 +#define REG_AESKEY4 0x41 +#define REG_AESKEY5 0x42 +#define REG_AESKEY6 0x43 +#define REG_AESKEY7 0x44 +#define REG_AESKEY8 0x45 +#define REG_AESKEY9 0x46 +#define REG_AESKEY10 0x47 +#define REG_AESKEY11 0x48 +#define REG_AESKEY12 0x49 +#define REG_AESKEY13 0x4A +#define REG_AESKEY14 0x4B +#define REG_AESKEY15 0x4C +#define REG_AESKEY16 0x4D +#define REG_TEMP1 0x4E +#define REG_TEMP2 0x4F +#define REG_TESTPA1 0x5A /* only present on RFM69HW */ +#define REG_TESTPA2 0x5C /* only present on RFM69HW */ +#define REG_TESTDAGC 0x6F + +/******************************************************/ +/* RF69/SX1231 bit definition */ +/******************************************************/ +/* write bit */ +#define WRITE_BIT 0x80 + +/* RegOpMode */ +#define MASK_OPMODE_SEQUENCER_OFF 0x80 +#define MASK_OPMODE_LISTEN_ON 0x40 +#define MASK_OPMODE_LISTEN_ABORT 0x20 +#define MASK_OPMODE_MODE 0x1C + +#define OPMODE_MODE_SLEEP 0x00 +#define OPMODE_MODE_STANDBY 0x04 /* default */ +#define OPMODE_MODE_SYNTHESIZER 0x08 +#define OPMODE_MODE_TRANSMIT 0x0C +#define OPMODE_MODE_RECEIVE 0x10 + +/* RegDataModul */ +#define MASK_DATAMODUL_MODE 0x06 +#define MASK_DATAMODUL_MODULATION_TYPE 0x18 +#define MASK_DATAMODUL_MODULATION_SHAPE 0x03 + +#define DATAMODUL_MODE_PACKET 0x00 /* default */ +#define DATAMODUL_MODE_CONTINUOUS 0x40 +#define DATAMODUL_MODE_CONTINUOUS_NOSYNC 0x60 + +#define DATAMODUL_MODULATION_TYPE_FSK 0x00 /* default */ +#define DATAMODUL_MODULATION_TYPE_OOK 0x08 + +#define DATAMODUL_MODULATION_SHAPE_NONE 0x00 /* default */ 
+#define DATAMODUL_MODULATION_SHAPE_1_0 0x01 +#define DATAMODUL_MODULATION_SHAPE_0_5 0x02 +#define DATAMODUL_MODULATION_SHAPE_0_3 0x03 +#define DATAMODUL_MODULATION_SHAPE_BR 0x01 +#define DATAMODUL_MODULATION_SHAPE_2BR 0x02 + +/* RegFDevMsb (0x05)*/ +#define FDEVMASB_MASK 0x3f + +/* + * // RegOsc1 + * #define OSC1_RCCAL_START 0x80 + * #define OSC1_RCCAL_DONE 0x40 + * + * // RegLowBat + * #define LOWBAT_MONITOR 0x10 + * #define LOWBAT_ON 0x08 + * #define LOWBAT_OFF 0x00 // Default + * + * #define LOWBAT_TRIM_1695 0x00 + * #define LOWBAT_TRIM_1764 0x01 + * #define LOWBAT_TRIM_1835 0x02 // Default + * #define LOWBAT_TRIM_1905 0x03 + * #define LOWBAT_TRIM_1976 0x04 + * #define LOWBAT_TRIM_2045 0x05 + * #define LOWBAT_TRIM_2116 0x06 + * #define LOWBAT_TRIM_2185 0x07 + * + * + * // RegListen1 + * #define LISTEN1_RESOL_64 0x50 + * #define LISTEN1_RESOL_4100 0xA0 // Default + * #define LISTEN1_RESOL_262000 0xF0 + * + * #define LISTEN1_CRITERIA_RSSI 0x00 // Default + * #define LISTEN1_CRITERIA_RSSIANDSYNC 0x08 + * + * #define LISTEN1_END_00 0x00 + * #define LISTEN1_END_01 0x02 // Default + * #define LISTEN1_END_10 0x04 + * + * + * // RegListen2 + * #define LISTEN2_COEFIDLE_VALUE 0xF5 // Default + * + * // RegListen3 + * #define LISTEN3_COEFRX_VALUE 0x20 // Default + */ + +// RegPaLevel +#define MASK_PALEVEL_PA0 0x80 +#define MASK_PALEVEL_PA1 0x40 +#define MASK_PALEVEL_PA2 0x20 +#define MASK_PALEVEL_OUTPUT_POWER 0x1F + + + +// RegPaRamp +#define PARAMP_3400 0x00 +#define PARAMP_2000 0x01 +#define PARAMP_1000 0x02 +#define PARAMP_500 0x03 +#define PARAMP_250 0x04 +#define PARAMP_125 0x05 +#define PARAMP_100 0x06 +#define PARAMP_62 0x07 +#define PARAMP_50 0x08 +#define PARAMP_40 0x09 /* default */ +#define PARAMP_31 0x0A +#define PARAMP_25 0x0B +#define PARAMP_20 0x0C +#define PARAMP_15 0x0D +#define PARAMP_12 0x0E +#define PARAMP_10 0x0F + +#define MASK_PARAMP 0x0F + +/* + * // RegOcp + * #define OCP_OFF 0x0F + * #define OCP_ON 0x1A // Default + * + * #define OCP_TRIM_45 0x00 + * #define OCP_TRIM_50 0x01 + * #define OCP_TRIM_55 0x02 + * #define OCP_TRIM_60 0x03 + * #define OCP_TRIM_65 0x04 + * #define OCP_TRIM_70 0x05 + * #define OCP_TRIM_75 0x06 + * #define OCP_TRIM_80 0x07 + * #define OCP_TRIM_85 0x08 + * #define OCP_TRIM_90 0x09 + * #define OCP_TRIM_95 0x0A + * #define OCP_TRIM_100 0x0B // Default + * #define OCP_TRIM_105 0x0C + * #define OCP_TRIM_110 0x0D + * #define OCP_TRIM_115 0x0E + * #define OCP_TRIM_120 0x0F + */ + +/* RegLna (0x18) */ +#define MASK_LNA_ZIN 0x80 +#define MASK_LNA_CURRENT_GAIN 0x38 +#define MASK_LNA_GAIN 0x07 + +#define LNA_GAIN_AUTO 0x00 /* default */ +#define LNA_GAIN_MAX 0x01 +#define LNA_GAIN_MAX_MINUS_6 0x02 +#define LNA_GAIN_MAX_MINUS_12 0x03 +#define LNA_GAIN_MAX_MINUS_24 0x04 +#define LNA_GAIN_MAX_MINUS_36 0x05 +#define LNA_GAIN_MAX_MINUS_48 0x06 + + +/* RegRxBw (0x19) and RegAfcBw (0x1A) */ +#define MASK_BW_DCC_FREQ 0xE0 +#define MASK_BW_MANTISSE 0x18 +#define MASK_BW_EXPONENT 0x07 + +#define BW_DCC_16_PERCENT 0x00 +#define BW_DCC_8_PERCENT 0x20 +#define BW_DCC_4_PERCENT 0x40 /* default */ +#define BW_DCC_2_PERCENT 0x60 +#define BW_DCC_1_PERCENT 0x80 +#define BW_DCC_0_5_PERCENT 0xA0 +#define BW_DCC_0_25_PERCENT 0xC0 +#define BW_DCC_0_125_PERCENT 0xE0 + +#define BW_MANT_16 0x00 +#define BW_MANT_20 0x08 +#define BW_MANT_24 0x10 /* default */ + + +/* RegOokPeak (0x1B) */ +#define MASK_OOKPEAK_THRESTYPE 0xc0 +#define MASK_OOKPEAK_THRESSTEP 0x38 +#define MASK_OOKPEAK_THRESDEC 0x07 + +#define OOKPEAK_THRESHTYPE_FIXED 0x00 +#define OOKPEAK_THRESHTYPE_PEAK 0x40 /* default 
*/ +#define OOKPEAK_THRESHTYPE_AVERAGE 0x80 + +#define OOKPEAK_THRESHSTEP_0_5_DB 0x00 /* default */ +#define OOKPEAK_THRESHSTEP_1_0_DB 0x08 +#define OOKPEAK_THRESHSTEP_1_5_DB 0x10 +#define OOKPEAK_THRESHSTEP_2_0_DB 0x18 +#define OOKPEAK_THRESHSTEP_3_0_DB 0x20 +#define OOKPEAK_THRESHSTEP_4_0_DB 0x28 +#define OOKPEAK_THRESHSTEP_5_0_DB 0x30 +#define OOKPEAK_THRESHSTEP_6_0_DB 0x38 + +#define OOKPEAK_THRESHDEC_ONCE 0x00 /* default */ +#define OOKPEAK_THRESHDEC_EVERY_2ND 0x01 +#define OOKPEAK_THRESHDEC_EVERY_4TH 0x02 +#define OOKPEAK_THRESHDEC_EVERY_8TH 0x03 +#define OOKPEAK_THRESHDEC_TWICE 0x04 +#define OOKPEAK_THRESHDEC_4_TIMES 0x05 +#define OOKPEAK_THRESHDEC_8_TIMES 0x06 +#define OOKPEAK_THRESHDEC_16_TIMES 0x07 + +/* + * // RegOokAvg + * #define OOKAVG_AVERAGETHRESHFILT_00 0x00 + * #define OOKAVG_AVERAGETHRESHFILT_01 0x40 + * #define OOKAVG_AVERAGETHRESHFILT_10 0x80 // Default + * #define OOKAVG_AVERAGETHRESHFILT_11 0xC0 + * + * + * // RegAfcFei + * #define AFCFEI_FEI_DONE 0x40 + * #define AFCFEI_FEI_START 0x20 + * #define AFCFEI_AFC_DONE 0x10 + * #define AFCFEI_AFCAUTOCLEAR_ON 0x08 + * #define AFCFEI_AFCAUTOCLEAR_OFF 0x00 // Default + * + * #define AFCFEI_AFCAUTO_ON 0x04 + * #define AFCFEI_AFCAUTO_OFF 0x00 // Default + * + * #define AFCFEI_AFC_CLEAR 0x02 + * #define AFCFEI_AFC_START 0x01 + * + * // RegRssiConfig + * #define RSSI_FASTRX_ON 0x08 + * #define RSSI_FASTRX_OFF 0x00 // Default + * #define RSSI_DONE 0x02 + * #define RSSI_START 0x01 + */ + +/* RegDioMapping1 */ +#define MASK_DIO0 0xC0 +#define MASK_DIO1 0x30 +#define MASK_DIO2 0x0C +#define MASK_DIO3 0x03 +#define SHIFT_DIO0 6 +#define SHIFT_DIO1 4 +#define SHIFT_DIO2 2 +#define SHIFT_DIO3 0 + +/* RegDioMapping2 */ +#define MASK_DIO4 0xC0 +#define MASK_DIO5 0x30 +#define SHIFT_DIO4 6 +#define SHIFT_DIO5 4 + +/* DIO numbers */ +#define DIO0 0 +#define DIO1 1 +#define DIO2 2 +#define DIO3 3 +#define DIO4 4 +#define DIO5 5 + +/* DIO Mapping values (packet mode) */ +#define DIO_ModeReady_DIO4 0x00 +#define DIO_ModeReady_DIO5 0x03 +#define DIO_ClkOut 0x00 +#define DIO_Data 0x01 +#define DIO_TimeOut_DIO1 0x03 +#define DIO_TimeOut_DIO4 0x00 +#define DIO_Rssi_DIO0 0x03 +#define DIO_Rssi_DIO3_4 0x01 +#define DIO_RxReady 0x02 +#define DIO_PLLLock 0x03 +#define DIO_TxReady 0x01 +#define DIO_FifoFull_DIO1 0x01 +#define DIO_FifoFull_DIO3 0x00 +#define DIO_SyncAddress 0x02 +#define DIO_FifoNotEmpty_DIO1 0x02 +#define DIO_FifoNotEmpty_FIO2 0x00 +#define DIO_Automode 0x04 +#define DIO_FifoLevel 0x00 +#define DIO_CrcOk 0x00 +#define DIO_PayloadReady 0x01 +#define DIO_PacketSent 0x00 +#define DIO_Dclk 0x00 + +/* RegDioMapping2 CLK_OUT part */ +#define MASK_DIOMAPPING2_CLK_OUT 0x07 + +#define DIOMAPPING2_CLK_OUT_NO_DIV 0x00 +#define DIOMAPPING2_CLK_OUT_DIV_2 0x01 +#define DIOMAPPING2_CLK_OUT_DIV_4 0x02 +#define DIOMAPPING2_CLK_OUT_DIV_8 0x03 +#define DIOMAPPING2_CLK_OUT_DIV_16 0x04 +#define DIOMAPPING2_CLK_OUT_DIV_32 0x05 +#define DIOMAPPING2_CLK_OUT_RC 0x06 +#define DIOMAPPING2_CLK_OUT_OFF 0x07 /* default */ + +/* RegIrqFlags1 */ +#define MASK_IRQFLAGS1_MODE_READY 0x80 +#define MASK_IRQFLAGS1_RX_READY 0x40 +#define MASK_IRQFLAGS1_TX_READY 0x20 +#define MASK_IRQFLAGS1_PLL_LOCK 0x10 +#define MASK_IRQFLAGS1_RSSI 0x08 +#define MASK_IRQFLAGS1_TIMEOUT 0x04 +#define MASK_IRQFLAGS1_AUTOMODE 0x02 +#define MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH 0x01 + +/* RegIrqFlags2 */ +#define MASK_IRQFLAGS2_FIFO_FULL 0x80 +#define MASK_IRQFLAGS2_FIFO_NOT_EMPTY 0x40 +#define MASK_IRQFLAGS2_FIFO_LEVEL 0x20 +#define MASK_IRQFLAGS2_FIFO_OVERRUN 0x10 +#define 
MASK_IRQFLAGS2_PACKET_SENT 0x08 +#define MASK_IRQFLAGS2_PAYLOAD_READY 0x04 +#define MASK_IRQFLAGS2_CRC_OK 0x02 +#define MASK_IRQFLAGS2_LOW_BAT 0x01 + +/* RegSyncConfig */ +#define MASK_SYNC_CONFIG_SYNC_ON 0x80 /* default */ +#define MASK_SYNC_CONFIG_FIFO_FILL_CONDITION 0x40 +#define MASK_SYNC_CONFIG_SYNC_SIZE 0x38 +#define MASK_SYNC_CONFIG_SYNC_TOLERANCE 0x07 + +/* RegPacketConfig1 */ +#define MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE 0x80 +#define MASK_PACKETCONFIG1_DCFREE 0x60 +#define MASK_PACKETCONFIG1_CRC_ON 0x10 /* default */ +#define MASK_PACKETCONFIG1_CRCAUTOCLEAR_OFF 0x08 +#define MASK_PACKETCONFIG1_ADDRESSFILTERING 0x06 + +#define PACKETCONFIG1_DCFREE_OFF 0x00 /* default */ +#define PACKETCONFIG1_DCFREE_MANCHESTER 0x20 +#define PACKETCONFIG1_DCFREE_WHITENING 0x40 +#define PACKETCONFIG1_ADDRESSFILTERING_OFF 0x00 /* default */ +#define PACKETCONFIG1_ADDRESSFILTERING_NODE 0x02 +#define PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST 0x04 + +/* + * // RegAutoModes + * #define AUTOMODES_ENTER_OFF 0x00 // Default + * #define AUTOMODES_ENTER_FIFONOTEMPTY 0x20 + * #define AUTOMODES_ENTER_FIFOLEVEL 0x40 + * #define AUTOMODES_ENTER_CRCOK 0x60 + * #define AUTOMODES_ENTER_PAYLOADREADY 0x80 + * #define AUTOMODES_ENTER_SYNCADRSMATCH 0xA0 + * #define AUTOMODES_ENTER_PACKETSENT 0xC0 + * #define AUTOMODES_ENTER_FIFOEMPTY 0xE0 + * + * #define AUTOMODES_EXIT_OFF 0x00 // Default + * #define AUTOMODES_EXIT_FIFOEMPTY 0x04 + * #define AUTOMODES_EXIT_FIFOLEVEL 0x08 + * #define AUTOMODES_EXIT_CRCOK 0x0C + * #define AUTOMODES_EXIT_PAYLOADREADY 0x10 + * #define AUTOMODES_EXIT_SYNCADRSMATCH 0x14 + * #define AUTOMODES_EXIT_PACKETSENT 0x18 + * #define AUTOMODES_EXIT_RXTIMEOUT 0x1C + * + * #define AUTOMODES_INTERMEDIATE_SLEEP 0x00 // Default + * #define AUTOMODES_INTERMEDIATE_STANDBY 0x01 + * #define AUTOMODES_INTERMEDIATE_RECEIVER 0x02 + * #define AUTOMODES_INTERMEDIATE_TRANSMITTER 0x03 + * + */ +/* RegFifoThresh (0x3c) */ +#define MASK_FIFO_THRESH_TXSTART 0x80 +#define MASK_FIFO_THRESH_VALUE 0x7F + +/* + * + * // RegPacketConfig2 + * #define PACKET2_RXRESTARTDELAY_1BIT 0x00 // Default + * #define PACKET2_RXRESTARTDELAY_2BITS 0x10 + * #define PACKET2_RXRESTARTDELAY_4BITS 0x20 + * #define PACKET2_RXRESTARTDELAY_8BITS 0x30 + * #define PACKET2_RXRESTARTDELAY_16BITS 0x40 + * #define PACKET2_RXRESTARTDELAY_32BITS 0x50 + * #define PACKET2_RXRESTARTDELAY_64BITS 0x60 + * #define PACKET2_RXRESTARTDELAY_128BITS 0x70 + * #define PACKET2_RXRESTARTDELAY_256BITS 0x80 + * #define PACKET2_RXRESTARTDELAY_512BITS 0x90 + * #define PACKET2_RXRESTARTDELAY_1024BITS 0xA0 + * #define PACKET2_RXRESTARTDELAY_2048BITS 0xB0 + * #define PACKET2_RXRESTARTDELAY_NONE 0xC0 + * #define PACKET2_RXRESTART 0x04 + * + * #define PACKET2_AUTORXRESTART_ON 0x02 // Default + * #define PACKET2_AUTORXRESTART_OFF 0x00 + * + * #define PACKET2_AES_ON 0x01 + * #define PACKET2_AES_OFF 0x00 // Default + * + * + * // RegTemp1 + * #define TEMP1_MEAS_START 0x08 + * #define TEMP1_MEAS_RUNNING 0x04 + * #define TEMP1_ADCLOWPOWER_ON 0x01 // Default + * #define TEMP1_ADCLOWPOWER_OFF 0x00 + */ + +// RegTestDagc (0x6F) +#define DAGC_NORMAL 0x00 /* Reset value */ +#define DAGC_IMPROVED_LOWBETA1 0x20 +#define DAGC_IMPROVED_LOWBETA0 0x30 /* Recommended val */ diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c index 88a3a2b9c144..aae8d716a513 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c @@ -3961,7 +3961,8 @@ static void init_mlme_ext_priv_value(struct 
adapter *padapter) static int has_channel(struct rt_channel_info *channel_set, u8 chanset_size, - u8 chan) { + u8 chan) +{ int i; for (i = 0; i < chanset_size; i++) { diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h index 0fa78ed2c1ab..4c925e610997 100644 --- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h +++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h @@ -75,7 +75,8 @@ struct oid_par_priv { }; #if defined(_RTW_MP_IOCTL_C_) -static int oid_null_function(struct oid_par_priv *poid_par_priv) { +static int oid_null_function(struct oid_par_priv *poid_par_priv) +{ return NDIS_STATUS_SUCCESS; } #endif diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index a4aedb489e92..cbf8eb4a049d 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -2385,7 +2385,7 @@ static inline void ieee80211_process_probe_response( struct ieee80211_probe_response *beacon, struct ieee80211_rx_stats *stats) { - struct ieee80211_network network; + struct ieee80211_network *network; struct ieee80211_network *target; struct ieee80211_network *oldest = NULL; #ifdef CONFIG_IEEE80211_DEBUG @@ -2397,7 +2397,10 @@ static inline void ieee80211_process_probe_response( u16 capability; //u8 wmm_info; - memset(&network, 0, sizeof(struct ieee80211_network)); + network = kzalloc(sizeof(*network), GFP_ATOMIC); + if (!network) + goto out; + capability = le16_to_cpu(beacon->capability); IEEE80211_DEBUG_SCAN( "'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", @@ -2420,14 +2423,14 @@ static inline void ieee80211_process_probe_response( (capability & (1 << 0x1)) ? '1' : '0', (capability & (1 << 0x0)) ? '1' : '0'); - if (ieee80211_network_init(ieee, beacon, &network, stats)) { + if (ieee80211_network_init(ieee, beacon, network, stats)) { IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, fc == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); - return; + goto out; } // For Asus EeePc request, @@ -2437,8 +2440,8 @@ static inline void ieee80211_process_probe_response( // then wireless adapter should do active scan from ch1~11 and // passive scan from ch12~14 - if (!IsLegalChannel(ieee, network.channel)) - return; + if (!IsLegalChannel(ieee, network->channel)) + goto out; if (ieee->bGlobalDomain) { if (fc == IEEE80211_STYPE_PROBE_RESP) @@ -2446,19 +2449,19 @@ static inline void ieee80211_process_probe_response( // Case 1: Country code if(IS_COUNTRY_IE_VALID(ieee) ) { - if (!IsLegalChannel(ieee, network.channel)) { - printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel); - return; + if (!IsLegalChannel(ieee, network->channel)) { + printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network->channel); + goto out; } } // Case 2: No any country code. 
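The probe-response hunk above replaces a large on-stack struct ieee80211_network with a kzalloc(GFP_ATOMIC) allocation and funnels every early return through a single out: label so the buffer is always released. A minimal sketch of that conversion, with invented names standing in for the driver's types and helpers:

#include <linux/slab.h>
#include <linux/string.h>

struct big_scan_state {			/* stands in for struct ieee80211_network */
	u8 ssid[32];
	int channel;
};

static void process_beacon_sketch(const u8 *data, size_t len)
{
	struct big_scan_state *state;

	/* GFP_ATOMIC: the RX path may run in softirq context */
	state = kzalloc(sizeof(*state), GFP_ATOMIC);
	if (!state)
		goto out;

	if (len < sizeof(state->ssid))	/* stand-in for ieee80211_network_init() */
		goto out;		/* early exits no longer just "return" */

	memcpy(state->ssid, data, sizeof(state->ssid));
out:
	kfree(state);			/* kfree(NULL) is a no-op */
}
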
else { // Filter over channel ch12~14 - if (network.channel > 11) + if (network->channel > 11) { - printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network.channel); - return; + printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network->channel); + goto out; } } } @@ -2467,19 +2470,19 @@ static inline void ieee80211_process_probe_response( // Case 1: Country code if(IS_COUNTRY_IE_VALID(ieee) ) { - if (!IsLegalChannel(ieee, network.channel)) { - printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel); - return; + if (!IsLegalChannel(ieee, network->channel)) { + printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network->channel); + goto out; } } // Case 2: No any country code. else { // Filter over channel ch12~14 - if (network.channel > 14) + if (network->channel > 14) { - printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network.channel); - return; + printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network->channel); + goto out; } } } @@ -2497,8 +2500,8 @@ static inline void ieee80211_process_probe_response( spin_lock_irqsave(&ieee->lock, flags); - if (is_same_network(&ieee->current_network, &network, ieee)) { - update_network(&ieee->current_network, &network); + if (is_same_network(&ieee->current_network, network, ieee)) { + update_network(&ieee->current_network, network); if ((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G) && ieee->current_network.berp_info_valid){ if(ieee->current_network.erp_value& ERP_UseProtection) @@ -2512,11 +2515,11 @@ static inline void ieee80211_process_probe_response( ieee->LinkDetectInfo.NumRecvBcnInPeriod++; } else //hidden AP - network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags); + network->flags = (~NETWORK_EMPTY_ESSID & network->flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags); } list_for_each_entry(target, &ieee->network_list, list) { - if (is_same_network(target, &network, ieee)) + if (is_same_network(target, network, ieee)) break; if ((oldest == NULL) || (target->last_scanned < oldest->last_scanned)) @@ -2545,16 +2548,16 @@ static inline void ieee80211_process_probe_response( #ifdef CONFIG_IEEE80211_DEBUG IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n", - escape_essid(network.ssid, - network.ssid_len), - network.bssid, + escape_essid(network->ssid, + network->ssid_len), + network->bssid, fc == IEEE80211_STYPE_PROBE_RESP ? 
"PROBE RESPONSE" : "BEACON"); #endif - memcpy(target, &network, sizeof(*target)); + memcpy(target, network, sizeof(*target)); list_add_tail(&target->list, &ieee->network_list); if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) - ieee80211_softmac_new_net(ieee,&network); + ieee80211_softmac_new_net(ieee,network); } else { IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n", escape_essid(target->ssid, @@ -2570,27 +2573,30 @@ static inline void ieee80211_process_probe_response( renew = !time_after(target->last_scanned + ieee->scan_age, jiffies); //YJ,add,080819,for hidden ap if(is_beacon(beacon->header.frame_ctl) == 0) - network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags); - //if(strncmp(network.ssid, "linksys-c",9) == 0) - // printk("====>2 network.ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network.ssid, network.flags, target->ssid, target->flags); - if(((network.flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \ - && (((network.ssid_len > 0) && (strncmp(target->ssid, network.ssid, network.ssid_len)))\ - ||((ieee->current_network.ssid_len == network.ssid_len)&&(strncmp(ieee->current_network.ssid, network.ssid, network.ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK)))) + network->flags = (~NETWORK_EMPTY_ESSID & network->flags)|(NETWORK_EMPTY_ESSID & target->flags); + //if(strncmp(network->ssid, "linksys-c",9) == 0) + // printk("====>2 network->ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network->ssid, network->flags, target->ssid, target->flags); + if(((network->flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \ + && (((network->ssid_len > 0) && (strncmp(target->ssid, network->ssid, network->ssid_len)))\ + ||((ieee->current_network.ssid_len == network->ssid_len)&&(strncmp(ieee->current_network.ssid, network->ssid, network->ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK)))) renew = 1; //YJ,add,080819,for hidden ap,end - update_network(target, &network); + update_network(target, network); if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)) - ieee80211_softmac_new_net(ieee,&network); + ieee80211_softmac_new_net(ieee,network); } spin_unlock_irqrestore(&ieee->lock, flags); - if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, &network, ieee)&&\ + if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, network, ieee)&&\ (ieee->state == IEEE80211_LINKED)) { if (ieee->handle_beacon != NULL) { ieee->handle_beacon(ieee->dev,beacon,&ieee->current_network); } } + +out: + kfree(network); } void ieee80211_rx_mgt(struct ieee80211_device *ieee, diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c index 60720997784b..9248dbcf3370 100644 --- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c +++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c @@ -176,7 +176,7 @@ void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString) IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Information Element>. 
Called by %s\n", TitleString); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl); - IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSenondary channel ="); + IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSecondary channel ="); switch (pHTInfoEle->ExtChlOffset) { case 0: diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 779ecdbc4e17..46b3f19e0878 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -270,8 +270,7 @@ int write_nic_byte_E(struct net_device *dev, int indx, u8 data) kfree(usbdata); if (status < 0) { - netdev_err(dev, "write_nic_byte_E TimeOut! status: %d\n", - status); + netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status); return status; } return 0; @@ -321,7 +320,7 @@ int write_nic_byte(struct net_device *dev, int indx, u8 data) kfree(usbdata); if (status < 0) { - netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status); + netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status); return status; } @@ -348,7 +347,7 @@ int write_nic_word(struct net_device *dev, int indx, u16 data) kfree(usbdata); if (status < 0) { - netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status); + netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status); return status; } @@ -376,8 +375,7 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data) if (status < 0) { - netdev_err(dev, "write_nic_dword TimeOut! status: %d\n", - status); + netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status); return status; } @@ -3095,7 +3093,8 @@ static RESET_TYPE TxCheckStuck(struct net_device *dev) if (bCheckFwTxCnt) { if (HalTxCheckStuck819xUsb(dev)) { RT_TRACE(COMP_RESET, - "TxCheckStuck(): Fw indicates no Tx condition!\n"); + "%s: Fw indicates no Tx condition!\n", + __func__); return RESET_TYPE_SILENT; } } @@ -3237,7 +3236,7 @@ static void CamRestoreAllEntry(struct net_device *dev) static u8 CAM_CONST_BROAD[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - RT_TRACE(COMP_SEC, "CamRestoreAllEntry:\n"); + RT_TRACE(COMP_SEC, "%s:\n", __func__); if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) || @@ -3835,8 +3834,8 @@ static u8 HwRateToMRate90(bool bIsHT, u8 rate) default: ret_rate = 0xff; RT_TRACE(COMP_RECV, - "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n", - rate, bIsHT); + "%s: Non supported Rate [%x], bIsHT = %d!!!\n", + __func__, rate, bIsHT); break; } @@ -3897,8 +3896,8 @@ static u8 HwRateToMRate90(bool bIsHT, u8 rate) default: ret_rate = 0xff; RT_TRACE(COMP_RECV, - "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n", - rate, bIsHT); + "%s: Non supported Rate [%x], bIsHT = %d!!!\n", + __func__, rate, bIsHT); break; } } diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h index 174ccf618d3e..00a123d44207 100644 --- a/drivers/staging/rtl8192u/r8192U_hw.h +++ b/drivers/staging/rtl8192u/r8192U_hw.h @@ -20,25 +20,24 @@ #ifndef R8192_HW #define R8192_HW -typedef enum _VERSION_819xU{ +typedef enum _VERSION_819xU { VERSION_819xU_A, // A-cut VERSION_819xU_B, // B-cut VERSION_819xU_C,// C-cut } VERSION_819xU, *PVERSION_819xU; //added for different RF type -typedef enum _RT_RF_TYPE_DEF -{ +typedef enum _RT_RF_TYPE_DEF { RF_1T2R = 0, RF_2T4R, RF_819X_MAX_TYPE -}RT_RF_TYPE_DEF; +} RT_RF_TYPE_DEF; -typedef enum _BaseBand_Config_Type{ +typedef enum _BaseBand_Config_Type { BaseBand_Config_PHY_REG = 0, //Radio Path A BaseBand_Config_AGC_TAB = 1, //Radio Path B -}BaseBand_Config_Type, *PBaseBand_Config_Type; +} 
BaseBand_Config_Type, *PBaseBand_Config_Type; #define RTL8187_REQT_READ 0xc0 #define RTL8187_REQT_WRITE 0x40 #define RTL8187_REQ_GET_REGS 0x05 diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c index 87ab3ba760fc..ae9a4f1ac8fd 100644 --- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c +++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c @@ -294,7 +294,7 @@ static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg) * windows OS. So we have to read the content byte by byte or transfer * endian type before copy the message copy. */ - rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000) >> 31; + rx_query_cfg.cfg_action = (pmsg[4] & 0x80) >> 7; rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5; rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3; rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0; diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c index 20372659d15d..a077069d6227 100644 --- a/drivers/staging/rtl8712/mlme_linux.c +++ b/drivers/staging/rtl8712/mlme_linux.c @@ -111,8 +111,8 @@ void r8712_os_indicate_disconnect(struct _adapter *adapter) */ memcpy(&backupPMKIDList[0], - &adapter->securitypriv.PMKIDList[0], - sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE); + &adapter->securitypriv.PMKIDList[0], + sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE); backupPMKIDIndex = adapter->securitypriv.PMKIDIndex; backupTKIPCountermeasure = adapter->securitypriv.btkip_countermeasure; diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index 5346c657485d..0104aced113e 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c @@ -385,7 +385,7 @@ _next: if (blnPending) wr_sz += 8; /* Append 8 bytes */ r8712_write_mem(padapter, RTL8712_DMA_H2CCMD, wr_sz, - (u8 *)pdesc); + (u8 *)pdesc); pcmdpriv->cmd_seq++; if (pcmd->cmdcode == GEN_CMD_CODE(_CreateBss)) { pcmd->res = H2C_SUCCESS; diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c index 205298e23656..d90213eb5e20 100644 --- a/drivers/staging/rtl8712/rtl8712_efuse.c +++ b/drivers/staging/rtl8712/rtl8712_efuse.c @@ -347,7 +347,7 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr) ret = false; if (value == 0xFF) /* write again */ efuse_one_byte_write(padapter, addr, - pkt.data[i * 2]); + pkt.data[i * 2]); } if (!efuse_one_byte_read(padapter, addr + 1, &value)) { ret = false; diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c index 7fe626583c8a..42d014007764 100644 --- a/drivers/staging/rtl8712/rtl8712_xmit.c +++ b/drivers/staging/rtl8712/rtl8712_xmit.c @@ -640,7 +640,7 @@ int r8712_xmitframe_complete(struct _adapter *padapter, /* 1st frame dequeued */ pxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry); /* need to remember the 1st frame */ - if (pxmitframe != NULL) { + if (pxmitframe) { #ifdef CONFIG_R8712_TX_AGGR /* 1. 
dequeue 2nd frame @@ -653,13 +653,13 @@ int r8712_xmitframe_complete(struct _adapter *padapter, r8712_free_xmitbuf(pxmitpriv, pxmitbuf); return false; } - if (p2ndxmitframe != NULL) + if (p2ndxmitframe) if (p2ndxmitframe->frame_tag != DATA_FRAMETAG) { r8712_free_xmitbuf(pxmitpriv, pxmitbuf); return false; } r8712_xmitframe_aggr_1st(pxmitbuf, pxmitframe); - if (p2ndxmitframe != NULL) { + if (p2ndxmitframe) { u16 total_length; total_length = r8712_xmitframe_aggr_next( @@ -667,7 +667,7 @@ int r8712_xmitframe_complete(struct _adapter *padapter, do { p2ndxmitframe = dequeue_xframe_ex( pxmitpriv, phwxmits, hwentry); - if (p2ndxmitframe != NULL) + if (p2ndxmitframe) total_length = r8712_xmitframe_aggr_next( pxmitbuf, diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c index 3c5cb78b52ea..01f78d1671de 100644 --- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c +++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c @@ -55,7 +55,7 @@ void rtw_btcoex_ConnectNotify(struct adapter *padapter, u8 action) void rtw_btcoex_MediaStatusNotify(struct adapter *padapter, u8 mediaStatus) { - if ((RT_MEDIA_CONNECT == mediaStatus) + if ((mediaStatus == RT_MEDIA_CONNECT) && (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)) { rtw_hal_set_hwreg(padapter, HW_VAR_DL_RSVD_PAGE, NULL); } diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c index 8e29802fc67f..44b92ef5db92 100644 --- a/drivers/staging/rtl8723bs/core/rtw_efuse.c +++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c @@ -163,7 +163,7 @@ Efuse_CalculateWordCnts(u8 word_en) /* Description: */ /* 1. Execute E-Fuse read byte operation according as map offset and */ /* save to E-Fuse table. */ -/* 2. Refered from SD1 Richard. */ +/* 2. Referred from SD1 Richard. */ /* */ /* Assumption: */ /* 1. Boot from E-Fuse and successfully auto-load. 
*/ diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c index d5ab12305e59..9f44dd020467 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c @@ -1384,7 +1384,7 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net /* define REJOIN */ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf) { - static u8 retry = 0; + static u8 retry; u8 timer_cancelled; struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL; struct sta_priv *pstapriv = &adapter->stapriv; @@ -2498,8 +2498,7 @@ sint rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in uint ndisauthmode = psecuritypriv->ndisauthtype; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, - ("+rtw_restruct_sec_ie: ndisauthmode =%d ndissecuritytype =%d\n", - ndisauthmode, ndissecuritytype)); + ("+rtw_restruct_sec_ie: ndisauthmode =%d\n", ndisauthmode)); /* copy fixed ie only */ memcpy(out_ie, in_ie, 12); diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 17d881d66910..99e3b682c834 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -2392,7 +2392,7 @@ s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntfr s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe) { - static u8 seq_no = 0; + static u8 seq_no; s32 ret = _FAIL; u32 timeout_ms = 500;/* 500ms */ struct xmit_priv *pxmitpriv = &padapter->xmitpriv; diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c index f708dbf5bfd4..f060e54e11f5 100644 --- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c +++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c @@ -210,8 +210,8 @@ void pwr_state_check_handler(RTW_TIMER_HDL_ARGS) void traffic_check_for_leave_lps(struct adapter *padapter, u8 tx, u32 tx_packets) { - static unsigned long start_time = 0; - static u32 xmit_cnt = 0; + static unsigned long start_time; + static u32 xmit_cnt; u8 bLeaveLPS = false; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 695a5c958c80..c6018f07d712 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -1005,7 +1005,7 @@ sint ap2sta_data_frame( if (*psta == NULL) { /* for AP multicast issue , modify by yiwei */ - static unsigned long send_issue_deauth_time = 0; + static unsigned long send_issue_deauth_time; /* DBG_871X("After send deauth , %u ms has elapsed.\n", jiffies_to_msecs(jiffies - send_issue_deauth_time)); */ diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c index e832f16997b7..06a7e4059fbb 100644 --- a/drivers/staging/rtl8723bs/core/rtw_security.c +++ b/drivers/staging/rtl8723bs/core/rtw_security.c @@ -162,7 +162,7 @@ static void arcfour_encrypt( dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx); } -static sint bcrc32initialized = 0; +static sint bcrc32initialized; static u32 crc32_table[256]; @@ -791,9 +791,9 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe) stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]); if (stainfo != NULL) { if (IS_MCAST(prxattrib->ra)) { - static unsigned long start = 0; - static u32 no_gkey_bc_cnt = 0; - static u32 no_gkey_mc_cnt = 0; + static unsigned long start; + 
static u32 no_gkey_bc_cnt; + static u32 no_gkey_mc_cnt; if (psecuritypriv->binstallGrpkey == false) { res = _FAIL; @@ -1882,9 +1882,9 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe) RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n")); if (IS_MCAST(prxattrib->ra)) { - static unsigned long start = 0; - static u32 no_gkey_bc_cnt = 0; - static u32 no_gkey_mc_cnt = 0; + static unsigned long start; + static u32 no_gkey_bc_cnt; + static u32 no_gkey_mc_cnt; /* DBG_871X("rx bc/mc packets, to perform sw rtw_aes_decrypt\n"); */ /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */ diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index 8f2c9a6658bf..a0d79985f6b9 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -2301,8 +2301,8 @@ static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib */ s32 rtw_xmit(struct adapter *padapter, _pkt **ppkt) { - static unsigned long start = 0; - static u32 drop_cnt = 0; + static unsigned long start; + static u32 drop_cnt; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct xmit_frame *pxmitframe = NULL; diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c index 482a29dd06f8..7cdce87f3051 100644 --- a/drivers/staging/rts5208/ms.c +++ b/drivers/staging/rts5208/ms.c @@ -3064,7 +3064,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb, if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { chip->rw_need_retry = 0; - dev_dbg(rtsx_dev(chip), "No card exist, exit mspro_rw_multi_sector\n"); + dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n", + __func__); rtsx_trace(chip); return STATUS_FAIL; } @@ -3101,7 +3102,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip, u8 cnt, tmp; u8 data[8]; - dev_dbg(rtsx_dev(chip), "mspro_read_format_progress, short_data_len = %d\n", + dev_dbg(rtsx_dev(chip), "%s, short_data_len = %d\n", __func__, short_data_len); retval = ms_switch_clock(chip); diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index b8177f50fabc..53748d61e20e 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -999,7 +999,7 @@ static int rtsx_probe(struct pci_dev *pci, /* We come here if there are any problems */ errout: - dev_err(&pci->dev, "rtsx_probe() failed\n"); + dev_err(&pci->dev, "%s failed\n", __func__); release_everything(dev); return err; @@ -1009,7 +1009,7 @@ static void rtsx_remove(struct pci_dev *pci) { struct rtsx_dev *dev = pci_get_drvdata(pci); - dev_info(&pci->dev, "rtsx_remove() called\n"); + dev_info(&pci->dev, "%s called\n", __func__); quiesce_and_remove_host(dev); release_everything(dev); diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c index 7f4107bffe31..4ad472dd9daf 100644 --- a/drivers/staging/rts5208/rtsx_chip.c +++ b/drivers/staging/rts5208/rtsx_chip.c @@ -616,8 +616,8 @@ int rtsx_reset_chip(struct rtsx_chip *chip) else retval = rtsx_pre_handle_sdio_new(chip); - dev_dbg(rtsx_dev(chip), "chip->need_reset = 0x%x (rtsx_reset_chip)\n", - (unsigned int)(chip->need_reset)); + dev_dbg(rtsx_dev(chip), "chip->need_reset = 0x%x (%s)\n", + (unsigned int)(chip->need_reset), __func__); #else /* HW_AUTO_SWITCH_SD_BUS */ retval = rtsx_pre_handle_sdio_old(chip); #endif /* HW_AUTO_SWITCH_SD_BUS */ diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c index c2eb072cbe1d..b1fd0a10d065 100644 
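The rts5208 and rtl8192u hunks above and below replace hard-coded function names in log strings with a "%s" format and __func__, so the messages cannot drift out of sync if a function is renamed. A minimal sketch of the idiom (the probe function here is made up, not taken from these drivers):

#include <linux/device.h>

static int my_example_probe(struct device *dev)
{
	/* before: the literal name silently goes stale after a rename */
	dev_dbg(dev, "my_example_probe: starting\n");

	/* after: __func__ always expands to the enclosing function's name */
	dev_dbg(dev, "%s: starting\n", __func__);

	return 0;
}
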
--- a/drivers/staging/rts5208/sd.c +++ b/drivers/staging/rts5208/sd.c @@ -910,8 +910,8 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir) int retval; bool ddr_rx = false; - dev_dbg(rtsx_dev(chip), "sd_change_phase (sample_point = %d, tune_dir = %d)\n", - sample_point, tune_dir); + dev_dbg(rtsx_dev(chip), "%s (sample_point = %d, tune_dir = %d)\n", + __func__, sample_point, tune_dir); if (tune_dir == TUNE_RX) { SD_VP_CTL = SD_VPRX_CTL; @@ -1225,8 +1225,8 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group, int retval; u8 cmd[5], buf[64]; - dev_dbg(rtsx_dev(chip), "sd_check_switch_mode (mode = %d, func_group = %d, func_to_switch = %d)\n", - mode, func_group, func_to_switch); + dev_dbg(rtsx_dev(chip), "%s (mode = %d, func_group = %d, func_to_switch = %d)\n", + __func__, mode, func_group, func_to_switch); cmd[0] = 0x40 | SWITCH; cmd[1] = mode; @@ -3575,8 +3575,8 @@ static int reset_mmc_only(struct rtsx_chip *chip) return STATUS_FAIL; } - dev_dbg(rtsx_dev(chip), "In reset_mmc_only, sd_card->sd_type = 0x%x\n", - sd_card->sd_type); + dev_dbg(rtsx_dev(chip), "In %s, sd_card->sd_type = 0x%x\n", + __func__, sd_card->sd_type); return STATUS_SUCCESS; } @@ -3699,11 +3699,11 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, int retval; if (srb->sc_data_direction == DMA_FROM_DEVICE) { - dev_dbg(rtsx_dev(chip), "sd_rw: Read %d %s from 0x%x\n", + dev_dbg(rtsx_dev(chip), "%s: Read %d %s from 0x%x\n", __func__, sector_cnt, (sector_cnt > 1) ? "sectors" : "sector", start_sector); } else { - dev_dbg(rtsx_dev(chip), "sd_rw: Write %d %s to 0x%x\n", + dev_dbg(rtsx_dev(chip), "%s: Write %d %s to 0x%x\n", __func__, sector_cnt, (sector_cnt > 1) ? "sectors" : "sector", start_sector); } @@ -3921,7 +3921,8 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, rtsx_clear_sd_error(chip); if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { chip->rw_need_retry = 0; - dev_dbg(rtsx_dev(chip), "No card exist, exit sd_rw\n"); + dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n", + __func__); rtsx_trace(chip); return STATUS_FAIL; } @@ -3964,7 +3965,7 @@ RW_FAIL: if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) { chip->rw_need_retry = 0; - dev_dbg(rtsx_dev(chip), "No card exist, exit sd_rw\n"); + dev_dbg(rtsx_dev(chip), "No card exist, exit %s\n", __func__); rtsx_trace(chip); return STATUS_FAIL; } diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c index 8b8cd955dfeb..b5646b62ec9e 100644 --- a/drivers/staging/rts5208/spi.c +++ b/drivers/staging/rts5208/spi.c @@ -520,7 +520,7 @@ int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct spi_info *spi = &chip->spi; - dev_dbg(rtsx_dev(chip), "spi_get_status: err_code = 0x%x\n", + dev_dbg(rtsx_dev(chip), "%s: err_code = 0x%x\n", __func__, spi->err_code); rtsx_stor_set_xfer_buf(&spi->err_code, min_t(int, scsi_bufflen(srb), 1), srb); @@ -543,8 +543,10 @@ int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip) spi->clk_div = ((u16)(srb->cmnd[4]) << 8) | srb->cmnd[5]; spi->write_en = srb->cmnd[6]; - dev_dbg(rtsx_dev(chip), "spi_set_parameter: spi_clock = %d, clk_div = %d, write_en = %d\n", - spi->spi_clock, spi->clk_div, spi->write_en); + dev_dbg(rtsx_dev(chip), "%s: ", __func__); + dev_dbg(rtsx_dev(chip), "spi_clock = %d, ", spi->spi_clock); + dev_dbg(rtsx_dev(chip), "clk_div = %d, ", spi->clk_div); + dev_dbg(rtsx_dev(chip), "write_en = %d\n", spi->write_en); return STATUS_SUCCESS; } diff --git 
a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c index 74d36f9a4c1d..11ea0c658e28 100644 --- a/drivers/staging/rts5208/xd.c +++ b/drivers/staging/rts5208/xd.c @@ -885,7 +885,7 @@ static int xd_init_l2p_tbl(struct rtsx_chip *chip) struct xd_info *xd_card = &chip->xd_card; int size, i; - dev_dbg(rtsx_dev(chip), "xd_init_l2p_tbl: zone_cnt = %d\n", + dev_dbg(rtsx_dev(chip), "%s: zone_cnt = %d\n", __func__, xd_card->zone_cnt); if (xd_card->zone_cnt < 1) { @@ -1026,7 +1026,8 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off) #ifdef XD_DELAY_WRITE retval = xd_delay_write(chip); if (retval != STATUS_SUCCESS) { - dev_dbg(rtsx_dev(chip), "In xd_get_l2p_tbl, delay write fail!\n"); + dev_dbg(rtsx_dev(chip), "In %s, delay write fail!\n", + __func__); return BLK_NOT_FOUND; } #endif @@ -1434,7 +1435,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) u16 cur_lst_page_logoff, ent_lst_page_logoff; u8 redunt[11]; - dev_dbg(rtsx_dev(chip), "xd_build_l2p_tbl: %d\n", zone_no); + dev_dbg(rtsx_dev(chip), "%s: %d\n", __func__, zone_no); if (!xd_card->zone) { retval = xd_init_l2p_tbl(chip); @@ -1774,8 +1775,10 @@ static int xd_finish_write(struct rtsx_chip *chip, int retval, zone_no; u16 log_off; - dev_dbg(rtsx_dev(chip), "xd_finish_write, old_blk = 0x%x, new_blk = 0x%x, log_blk = 0x%x\n", - old_blk, new_blk, log_blk); + dev_dbg(rtsx_dev(chip), "%s ", __func__); + dev_dbg(rtsx_dev(chip), "old_blk = 0x%x, ", old_blk); + dev_dbg(rtsx_dev(chip), "new_blk = 0x%x, ", new_blk); + dev_dbg(rtsx_dev(chip), "log_blk = 0x%x\n", log_blk); if (page_off > xd_card->page_off) { rtsx_trace(chip); @@ -1960,7 +1963,7 @@ int xd_delay_write(struct rtsx_chip *chip) int retval; if (delay_write->delay_write_flag) { - dev_dbg(rtsx_dev(chip), "xd_delay_write\n"); + dev_dbg(rtsx_dev(chip), "%s\n", __func__); retval = xd_switch_clock(chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -2002,7 +2005,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, xd_card->cleanup_counter = 0; - dev_dbg(rtsx_dev(chip), "xd_rw: scsi_sg_count = %d\n", + dev_dbg(rtsx_dev(chip), "%s: scsi_sg_count = %d\n", __func__, scsi_sg_count(srb)); ptr = (u8 *)scsi_sglist(srb); diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c index 256657077a46..3bc25e149034 100644 --- a/drivers/staging/skein/skein_block.c +++ b/drivers/staging/skein/skein_block.c @@ -21,329 +21,6 @@ #include "skein_base.h" #include "skein_block.h" -#ifndef SKEIN_USE_ASM -#define SKEIN_USE_ASM (0) /* default is all C code (no ASM) */ -#endif - -#ifndef SKEIN_LOOP -#define SKEIN_LOOP 001 /* default: unroll 256 and 512, but not 1024 */ -#endif - -#define BLK_BITS (WCNT * 64) /* some useful definitions for code here */ -#define KW_TWK_BASE (0) -#define KW_KEY_BASE (3) -#define ks (kw + KW_KEY_BASE) -#define ts (kw + KW_TWK_BASE) - -#ifdef SKEIN_DEBUG -#define debug_save_tweak(ctx) \ -{ \ - ctx->h.tweak[0] = ts[0]; \ - ctx->h.tweak[1] = ts[1]; \ -} -#else -#define debug_save_tweak(ctx) -#endif - -#if !(SKEIN_USE_ASM & 256) -#undef RCNT -#define RCNT (SKEIN_256_ROUNDS_TOTAL / 8) -#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ -#define SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10) -#else -#define SKEIN_UNROLL_256 (0) -#endif - -#if SKEIN_UNROLL_256 -#if (RCNT % SKEIN_UNROLL_256) -#error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */ -#endif -#endif -#define ROUND256(p0, p1, p2, p3, ROT, r_num) \ - do { \ - X##p0 += X##p1; \ - X##p1 = rol64(X##p1, ROT##_0); \ - X##p1 ^= 
X##p0; \ - X##p2 += X##p3; \ - X##p3 = rol64(X##p3, ROT##_1); \ - X##p3 ^= X##p2; \ - } while (0) - -#if SKEIN_UNROLL_256 == 0 -#define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \ - ROUND256(p0, p1, p2, p3, ROT, r_num) - -#define I256(R) \ - do { \ - /* inject the key schedule value */ \ - X0 += ks[((R) + 1) % 5]; \ - X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \ - X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \ - X3 += ks[((R) + 4) % 5] + (R) + 1; \ - } while (0) -#else -/* looping version */ -#define R256(p0, p1, p2, p3, ROT, r_num) ROUND256(p0, p1, p2, p3, ROT, r_num) - -#define I256(R) \ - do { \ - /* inject the key schedule value */ \ - X0 += ks[r + (R) + 0]; \ - X1 += ks[r + (R) + 1] + ts[r + (R) + 0];\ - X2 += ks[r + (R) + 2] + ts[r + (R) + 1];\ - X3 += ks[r + (R) + 3] + r + (R); \ - /* rotate key schedule */ \ - ks[r + (R) + 4] = ks[r + (R) - 1]; \ - ts[r + (R) + 2] = ts[r + (R) - 1]; \ - } while (0) -#endif -#define R256_8_ROUNDS(R) \ - do { \ - R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \ - R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \ - R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \ - R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \ - I256(2 * (R)); \ - R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \ - R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \ - R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \ - R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \ - I256(2 * (R) + 1); \ - } while (0) - -#define R256_UNROLL_R(NN) \ - ((SKEIN_UNROLL_256 == 0 && \ - SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \ - (SKEIN_UNROLL_256 > (NN))) - -#if (SKEIN_UNROLL_256 > 14) -#error "need more unrolling in skein_256_process_block" -#endif -#endif - -#if !(SKEIN_USE_ASM & 512) -#undef RCNT -#define RCNT (SKEIN_512_ROUNDS_TOTAL / 8) - -#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ -#define SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10) -#else -#define SKEIN_UNROLL_512 (0) -#endif - -#if SKEIN_UNROLL_512 -#if (RCNT % SKEIN_UNROLL_512) -#error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */ -#endif -#endif -#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \ - do { \ - X##p0 += X##p1; \ - X##p1 = rol64(X##p1, ROT##_0); \ - X##p1 ^= X##p0; \ - X##p2 += X##p3; \ - X##p3 = rol64(X##p3, ROT##_1); \ - X##p3 ^= X##p2; \ - X##p4 += X##p5; \ - X##p5 = rol64(X##p5, ROT##_2); \ - X##p5 ^= X##p4; \ - X##p6 += X##p7; \ - X##p7 = rol64(X##p7, ROT##_3); \ - X##p7 ^= X##p6; \ - } while (0) - -#if SKEIN_UNROLL_512 == 0 -#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \ - ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) - -#define I512(R) \ - do { \ - /* inject the key schedule value */ \ - X0 += ks[((R) + 1) % 9]; \ - X1 += ks[((R) + 2) % 9]; \ - X2 += ks[((R) + 3) % 9]; \ - X3 += ks[((R) + 4) % 9]; \ - X4 += ks[((R) + 5) % 9]; \ - X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \ - X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \ - X7 += ks[((R) + 8) % 9] + (R) + 1; \ - } while (0) - -#else /* looping version */ -#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \ - ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \ - -#define I512(R) \ - do { \ - /* inject the key schedule value */ \ - X0 += ks[r + (R) + 0]; \ - X1 += ks[r + (R) + 1]; \ - X2 += ks[r + (R) + 2]; \ - X3 += ks[r + (R) + 3]; \ - X4 += ks[r + (R) + 4]; \ - X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \ - X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \ - X7 += ks[r + (R) + 7] + r + (R); \ - /* rotate key schedule */ \ - ks[r + (R) + 8] = ks[r + (R) - 1]; \ - ts[r + (R) + 2] = ts[r + (R) - 1]; \ - } while (0) -#endif /* end of looped 
code definitions */ -#define R512_8_ROUNDS(R) /* do 8 full rounds */ \ - do { \ - R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \ - R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \ - R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \ - R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \ - I512(2 * (R)); \ - R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \ - R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \ - R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \ - R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \ - I512(2 * (R) + 1); /* and key injection */ \ - } while (0) -#define R512_UNROLL_R(NN) \ - ((SKEIN_UNROLL_512 == 0 && \ - SKEIN_512_ROUNDS_TOTAL / 8 > (NN)) || \ - (SKEIN_UNROLL_512 > (NN))) - -#if (SKEIN_UNROLL_512 > 14) -#error "need more unrolling in skein_512_process_block" -#endif -#endif - -#if !(SKEIN_USE_ASM & 1024) -#undef RCNT -#define RCNT (SKEIN_1024_ROUNDS_TOTAL / 8) -#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ -#define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10) -#else -#define SKEIN_UNROLL_1024 (0) -#endif - -#if (SKEIN_UNROLL_1024 != 0) -#if (RCNT % SKEIN_UNROLL_1024) -#error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */ -#endif -#endif -#define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \ - pF, ROT, r_num) \ - do { \ - X##p0 += X##p1; \ - X##p1 = rol64(X##p1, ROT##_0); \ - X##p1 ^= X##p0; \ - X##p2 += X##p3; \ - X##p3 = rol64(X##p3, ROT##_1); \ - X##p3 ^= X##p2; \ - X##p4 += X##p5; \ - X##p5 = rol64(X##p5, ROT##_2); \ - X##p5 ^= X##p4; \ - X##p6 += X##p7; \ - X##p7 = rol64(X##p7, ROT##_3); \ - X##p7 ^= X##p6; \ - X##p8 += X##p9; \ - X##p9 = rol64(X##p9, ROT##_4); \ - X##p9 ^= X##p8; \ - X##pA += X##pB; \ - X##pB = rol64(X##pB, ROT##_5); \ - X##pB ^= X##pA; \ - X##pC += X##pD; \ - X##pD = rol64(X##pD, ROT##_6); \ - X##pD ^= X##pC; \ - X##pE += X##pF; \ - X##pF = rol64(X##pF, ROT##_7); \ - X##pF ^= X##pE; \ - } while (0) - -#if SKEIN_UNROLL_1024 == 0 -#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \ - ROT, rn) \ - ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \ - pF, ROT, rn) \ - -#define I1024(R) \ - do { \ - /* inject the key schedule value */ \ - X00 += ks[((R) + 1) % 17]; \ - X01 += ks[((R) + 2) % 17]; \ - X02 += ks[((R) + 3) % 17]; \ - X03 += ks[((R) + 4) % 17]; \ - X04 += ks[((R) + 5) % 17]; \ - X05 += ks[((R) + 6) % 17]; \ - X06 += ks[((R) + 7) % 17]; \ - X07 += ks[((R) + 8) % 17]; \ - X08 += ks[((R) + 9) % 17]; \ - X09 += ks[((R) + 10) % 17]; \ - X10 += ks[((R) + 11) % 17]; \ - X11 += ks[((R) + 12) % 17]; \ - X12 += ks[((R) + 13) % 17]; \ - X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \ - X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \ - X15 += ks[((R) + 16) % 17] + (R) + 1; \ - } while (0) -#else /* looping version */ -#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \ - ROT, rn) \ - ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \ - pF, ROT, rn) \ - -#define I1024(R) \ - do { \ - /* inject the key schedule value */ \ - X00 += ks[r + (R) + 0]; \ - X01 += ks[r + (R) + 1]; \ - X02 += ks[r + (R) + 2]; \ - X03 += ks[r + (R) + 3]; \ - X04 += ks[r + (R) + 4]; \ - X05 += ks[r + (R) + 5]; \ - X06 += ks[r + (R) + 6]; \ - X07 += ks[r + (R) + 7]; \ - X08 += ks[r + (R) + 8]; \ - X09 += ks[r + (R) + 9]; \ - X10 += ks[r + (R) + 10]; \ - X11 += ks[r + (R) + 11]; \ - X12 += ks[r + (R) + 12]; \ - X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \ - X14 += ks[r + (R) + 14] + ts[r + (R) + 
1]; \ - X15 += ks[r + (R) + 15] + r + (R); \ - /* rotate key schedule */ \ - ks[r + (R) + 16] = ks[r + (R) - 1]; \ - ts[r + (R) + 2] = ts[r + (R) - 1]; \ - } while (0) - -#endif -#define R1024_8_ROUNDS(R) \ - do { \ - R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \ - 13, 14, 15, R1024_0, 8 * (R) + 1); \ - R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \ - 05, 08, 01, R1024_1, 8 * (R) + 2); \ - R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \ - 11, 10, 09, R1024_2, 8 * (R) + 3); \ - R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \ - 03, 12, 07, R1024_3, 8 * (R) + 4); \ - I1024(2 * (R)); \ - R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \ - 13, 14, 15, R1024_4, 8 * (R) + 5); \ - R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \ - 05, 08, 01, R1024_5, 8 * (R) + 6); \ - R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \ - 11, 10, 09, R1024_6, 8 * (R) + 7); \ - R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \ - 03, 12, 07, R1024_7, 8 * (R) + 8); \ - I1024(2 * (R) + 1); \ - } while (0) - -#define R1024_UNROLL_R(NN) \ - ((SKEIN_UNROLL_1024 == 0 && \ - SKEIN_1024_ROUNDS_TOTAL / 8 > (NN)) || \ - (SKEIN_UNROLL_1024 > (NN))) - -#if (SKEIN_UNROLL_1024 > 14) -#error "need more unrolling in Skein_1024_Process_Block" -#endif -#endif - /***************************** SKEIN_256 ******************************/ #if !(SKEIN_USE_ASM & 256) void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr, diff --git a/drivers/staging/skein/skein_block.h b/drivers/staging/skein/skein_block.h index ec1baea25c9e..b3bb3d24273b 100644 --- a/drivers/staging/skein/skein_block.h +++ b/drivers/staging/skein/skein_block.h @@ -14,6 +14,329 @@ #include "skein_base.h" /* get the Skein API definitions */ +#ifndef SKEIN_USE_ASM +#define SKEIN_USE_ASM (0) /* default is all C code (no ASM) */ +#endif + +#ifndef SKEIN_LOOP +#define SKEIN_LOOP 001 /* default: unroll 256 and 512, but not 1024 */ +#endif + +#define BLK_BITS (WCNT * 64) /* some useful definitions for code here */ +#define KW_TWK_BASE (0) +#define KW_KEY_BASE (3) +#define ks (kw + KW_KEY_BASE) +#define ts (kw + KW_TWK_BASE) + +#ifdef SKEIN_DEBUG +#define debug_save_tweak(ctx) \ +{ \ + ctx->h.tweak[0] = ts[0]; \ + ctx->h.tweak[1] = ts[1]; \ +} +#else +#define debug_save_tweak(ctx) +#endif + +#if !(SKEIN_USE_ASM & 256) +#undef RCNT +#define RCNT (SKEIN_256_ROUNDS_TOTAL / 8) +#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ +#define SKEIN_UNROLL_256 (((SKEIN_LOOP) / 100) % 10) +#else +#define SKEIN_UNROLL_256 (0) +#endif + +#if SKEIN_UNROLL_256 +#if (RCNT % SKEIN_UNROLL_256) +#error "Invalid SKEIN_UNROLL_256" /* sanity check on unroll count */ +#endif +#endif +#define ROUND256(p0, p1, p2, p3, ROT, r_num) \ + do { \ + X##p0 += X##p1; \ + X##p1 = rol64(X##p1, ROT##_0); \ + X##p1 ^= X##p0; \ + X##p2 += X##p3; \ + X##p3 = rol64(X##p3, ROT##_1); \ + X##p3 ^= X##p2; \ + } while (0) + +#if SKEIN_UNROLL_256 == 0 +#define R256(p0, p1, p2, p3, ROT, r_num) /* fully unrolled */ \ + ROUND256(p0, p1, p2, p3, ROT, r_num) + +#define I256(R) \ + do { \ + /* inject the key schedule value */ \ + X0 += ks[((R) + 1) % 5]; \ + X1 += ks[((R) + 2) % 5] + ts[((R) + 1) % 3]; \ + X2 += ks[((R) + 3) % 5] + ts[((R) + 2) % 3]; \ + X3 += ks[((R) + 4) % 5] + (R) + 1; \ + } while (0) +#else +/* looping version */ +#define R256(p0, p1, p2, p3, ROT, r_num) ROUND256(p0, p1, p2, p3, ROT, r_num) + +#define I256(R) \ + do { \ + /* inject the key schedule value */ \ + X0 += ks[r + (R) + 0]; \ + X1 += 
ks[r + (R) + 1] + ts[r + (R) + 0];\ + X2 += ks[r + (R) + 2] + ts[r + (R) + 1];\ + X3 += ks[r + (R) + 3] + r + (R); \ + /* rotate key schedule */ \ + ks[r + (R) + 4] = ks[r + (R) - 1]; \ + ts[r + (R) + 2] = ts[r + (R) - 1]; \ + } while (0) +#endif +#define R256_8_ROUNDS(R) \ + do { \ + R256(0, 1, 2, 3, R_256_0, 8 * (R) + 1); \ + R256(0, 3, 2, 1, R_256_1, 8 * (R) + 2); \ + R256(0, 1, 2, 3, R_256_2, 8 * (R) + 3); \ + R256(0, 3, 2, 1, R_256_3, 8 * (R) + 4); \ + I256(2 * (R)); \ + R256(0, 1, 2, 3, R_256_4, 8 * (R) + 5); \ + R256(0, 3, 2, 1, R_256_5, 8 * (R) + 6); \ + R256(0, 1, 2, 3, R_256_6, 8 * (R) + 7); \ + R256(0, 3, 2, 1, R_256_7, 8 * (R) + 8); \ + I256(2 * (R) + 1); \ + } while (0) + +#define R256_UNROLL_R(NN) \ + ((SKEIN_UNROLL_256 == 0 && \ + SKEIN_256_ROUNDS_TOTAL / 8 > (NN)) || \ + (SKEIN_UNROLL_256 > (NN))) + +#if (SKEIN_UNROLL_256 > 14) +#error "need more unrolling in skein_256_process_block" +#endif +#endif + +#if !(SKEIN_USE_ASM & 512) +#undef RCNT +#define RCNT (SKEIN_512_ROUNDS_TOTAL / 8) + +#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ +#define SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10) +#else +#define SKEIN_UNROLL_512 (0) +#endif + +#if SKEIN_UNROLL_512 +#if (RCNT % SKEIN_UNROLL_512) +#error "Invalid SKEIN_UNROLL_512" /* sanity check on unroll count */ +#endif +#endif +#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \ + do { \ + X##p0 += X##p1; \ + X##p1 = rol64(X##p1, ROT##_0); \ + X##p1 ^= X##p0; \ + X##p2 += X##p3; \ + X##p3 = rol64(X##p3, ROT##_1); \ + X##p3 ^= X##p2; \ + X##p4 += X##p5; \ + X##p5 = rol64(X##p5, ROT##_2); \ + X##p5 ^= X##p4; \ + X##p6 += X##p7; \ + X##p7 = rol64(X##p7, ROT##_3); \ + X##p7 ^= X##p6; \ + } while (0) + +#if SKEIN_UNROLL_512 == 0 +#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) /* unrolled */ \ + ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) + +#define I512(R) \ + do { \ + /* inject the key schedule value */ \ + X0 += ks[((R) + 1) % 9]; \ + X1 += ks[((R) + 2) % 9]; \ + X2 += ks[((R) + 3) % 9]; \ + X3 += ks[((R) + 4) % 9]; \ + X4 += ks[((R) + 5) % 9]; \ + X5 += ks[((R) + 6) % 9] + ts[((R) + 1) % 3]; \ + X6 += ks[((R) + 7) % 9] + ts[((R) + 2) % 3]; \ + X7 += ks[((R) + 8) % 9] + (R) + 1; \ + } while (0) + +#else /* looping version */ +#define R512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \ + ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \ + +#define I512(R) \ + do { \ + /* inject the key schedule value */ \ + X0 += ks[r + (R) + 0]; \ + X1 += ks[r + (R) + 1]; \ + X2 += ks[r + (R) + 2]; \ + X3 += ks[r + (R) + 3]; \ + X4 += ks[r + (R) + 4]; \ + X5 += ks[r + (R) + 5] + ts[r + (R) + 0]; \ + X6 += ks[r + (R) + 6] + ts[r + (R) + 1]; \ + X7 += ks[r + (R) + 7] + r + (R); \ + /* rotate key schedule */ \ + ks[r + (R) + 8] = ks[r + (R) - 1]; \ + ts[r + (R) + 2] = ts[r + (R) - 1]; \ + } while (0) +#endif /* end of looped code definitions */ +#define R512_8_ROUNDS(R) /* do 8 full rounds */ \ + do { \ + R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_0, 8 * (R) + 1); \ + R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_1, 8 * (R) + 2); \ + R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_2, 8 * (R) + 3); \ + R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_3, 8 * (R) + 4); \ + I512(2 * (R)); \ + R512(0, 1, 2, 3, 4, 5, 6, 7, R_512_4, 8 * (R) + 5); \ + R512(2, 1, 4, 7, 6, 5, 0, 3, R_512_5, 8 * (R) + 6); \ + R512(4, 1, 6, 3, 0, 5, 2, 7, R_512_6, 8 * (R) + 7); \ + R512(6, 1, 0, 7, 2, 5, 4, 3, R_512_7, 8 * (R) + 8); \ + I512(2 * (R) + 1); /* and key injection */ \ + } while (0) +#define R512_UNROLL_R(NN) \ + ((SKEIN_UNROLL_512 == 0 && \ + SKEIN_512_ROUNDS_TOTAL / 8 
> (NN)) || \ + (SKEIN_UNROLL_512 > (NN))) + +#if (SKEIN_UNROLL_512 > 14) +#error "need more unrolling in skein_512_process_block" +#endif +#endif + +#if !(SKEIN_USE_ASM & 1024) +#undef RCNT +#define RCNT (SKEIN_1024_ROUNDS_TOTAL / 8) +#ifdef SKEIN_LOOP /* configure how much to unroll the loop */ +#define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10) +#else +#define SKEIN_UNROLL_1024 (0) +#endif + +#if (SKEIN_UNROLL_1024 != 0) +#if (RCNT % SKEIN_UNROLL_1024) +#error "Invalid SKEIN_UNROLL_1024" /* sanity check on unroll count */ +#endif +#endif +#define ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \ + pF, ROT, r_num) \ + do { \ + X##p0 += X##p1; \ + X##p1 = rol64(X##p1, ROT##_0); \ + X##p1 ^= X##p0; \ + X##p2 += X##p3; \ + X##p3 = rol64(X##p3, ROT##_1); \ + X##p3 ^= X##p2; \ + X##p4 += X##p5; \ + X##p5 = rol64(X##p5, ROT##_2); \ + X##p5 ^= X##p4; \ + X##p6 += X##p7; \ + X##p7 = rol64(X##p7, ROT##_3); \ + X##p7 ^= X##p6; \ + X##p8 += X##p9; \ + X##p9 = rol64(X##p9, ROT##_4); \ + X##p9 ^= X##p8; \ + X##pA += X##pB; \ + X##pB = rol64(X##pB, ROT##_5); \ + X##pB ^= X##pA; \ + X##pC += X##pD; \ + X##pD = rol64(X##pD, ROT##_6); \ + X##pD ^= X##pC; \ + X##pE += X##pF; \ + X##pF = rol64(X##pF, ROT##_7); \ + X##pF ^= X##pE; \ + } while (0) + +#if SKEIN_UNROLL_1024 == 0 +#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \ + ROT, rn) \ + ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \ + pF, ROT, rn) \ + +#define I1024(R) \ + do { \ + /* inject the key schedule value */ \ + X00 += ks[((R) + 1) % 17]; \ + X01 += ks[((R) + 2) % 17]; \ + X02 += ks[((R) + 3) % 17]; \ + X03 += ks[((R) + 4) % 17]; \ + X04 += ks[((R) + 5) % 17]; \ + X05 += ks[((R) + 6) % 17]; \ + X06 += ks[((R) + 7) % 17]; \ + X07 += ks[((R) + 8) % 17]; \ + X08 += ks[((R) + 9) % 17]; \ + X09 += ks[((R) + 10) % 17]; \ + X10 += ks[((R) + 11) % 17]; \ + X11 += ks[((R) + 12) % 17]; \ + X12 += ks[((R) + 13) % 17]; \ + X13 += ks[((R) + 14) % 17] + ts[((R) + 1) % 3]; \ + X14 += ks[((R) + 15) % 17] + ts[((R) + 2) % 3]; \ + X15 += ks[((R) + 16) % 17] + (R) + 1; \ + } while (0) +#else /* looping version */ +#define R1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, pF, \ + ROT, rn) \ + ROUND1024(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pA, pB, pC, pD, pE, \ + pF, ROT, rn) \ + +#define I1024(R) \ + do { \ + /* inject the key schedule value */ \ + X00 += ks[r + (R) + 0]; \ + X01 += ks[r + (R) + 1]; \ + X02 += ks[r + (R) + 2]; \ + X03 += ks[r + (R) + 3]; \ + X04 += ks[r + (R) + 4]; \ + X05 += ks[r + (R) + 5]; \ + X06 += ks[r + (R) + 6]; \ + X07 += ks[r + (R) + 7]; \ + X08 += ks[r + (R) + 8]; \ + X09 += ks[r + (R) + 9]; \ + X10 += ks[r + (R) + 10]; \ + X11 += ks[r + (R) + 11]; \ + X12 += ks[r + (R) + 12]; \ + X13 += ks[r + (R) + 13] + ts[r + (R) + 0]; \ + X14 += ks[r + (R) + 14] + ts[r + (R) + 1]; \ + X15 += ks[r + (R) + 15] + r + (R); \ + /* rotate key schedule */ \ + ks[r + (R) + 16] = ks[r + (R) - 1]; \ + ts[r + (R) + 2] = ts[r + (R) - 1]; \ + } while (0) + +#endif +#define R1024_8_ROUNDS(R) \ + do { \ + R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \ + 13, 14, 15, R1024_0, 8 * (R) + 1); \ + R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \ + 05, 08, 01, R1024_1, 8 * (R) + 2); \ + R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \ + 11, 10, 09, R1024_2, 8 * (R) + 3); \ + R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \ + 03, 12, 07, R1024_3, 8 * (R) + 4); \ + I1024(2 * (R)); \ + R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 
12, \ + 13, 14, 15, R1024_4, 8 * (R) + 5); \ + R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \ + 05, 08, 01, R1024_5, 8 * (R) + 6); \ + R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \ + 11, 10, 09, R1024_6, 8 * (R) + 7); \ + R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \ + 03, 12, 07, R1024_7, 8 * (R) + 8); \ + I1024(2 * (R) + 1); \ + } while (0) + +#define R1024_UNROLL_R(NN) \ + ((SKEIN_UNROLL_1024 == 0 && \ + SKEIN_1024_ROUNDS_TOTAL / 8 > (NN)) || \ + (SKEIN_UNROLL_1024 > (NN))) + +#if (SKEIN_UNROLL_1024 > 14) +#error "need more unrolling in Skein_1024_Process_Block" +#endif +#endif + void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr, size_t blk_cnt, size_t byte_cnt_add); void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr, diff --git a/drivers/staging/typec/tcpm.c b/drivers/staging/typec/tcpm.c index 20eb4ebcf8c3..2195c80235a1 100644 --- a/drivers/staging/typec/tcpm.c +++ b/drivers/staging/typec/tcpm.c @@ -1015,8 +1015,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt, if (port->data_role == TYPEC_DEVICE && port->nr_snk_vdo) { for (i = 0; i < port->nr_snk_vdo; i++) - response[i + 1] - = cpu_to_le32(port->snk_vdo[i]); + response[i + 1] = port->snk_vdo[i]; rlen = port->nr_snk_vdo + 1; } break; diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h index 692efcb38245..d6d99cc33a33 100644 --- a/drivers/staging/unisys/include/channel.h +++ b/drivers/staging/unisys/include/channel.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2010 - 2013 UNISYS CORPORATION +/* + * Copyright (C) 2010 - 2013 UNISYS CORPORATION * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -20,87 +21,67 @@ #include <linux/io.h> #include <linux/uuid.h> -#define __SUPERVISOR_CHANNEL_H__ - #define SIGNATURE_16(A, B) ((A) | ((B) << 8)) #define SIGNATURE_32(A, B, C, D) \ (SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16)) -#define SIGNATURE_64(A, B, C, D, E, F, G, H) \ - (SIGNATURE_32(A, B, C, D) | ((u64)(SIGNATURE_32(E, F, G, H)) << 32)) - -#ifndef COVER -#define COVER(v, d) ((d) * DIV_ROUND_UP(v, d)) -#endif #define VISOR_CHANNEL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L') +/* + * enum channel_serverstate + * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state. + * @CHANNELSRV_READY: Channel has been initialized by server. + */ enum channel_serverstate { - CHANNELSRV_UNINITIALIZED = 0, /* channel is in an undefined state */ - CHANNELSRV_READY = 1 /* channel has been initialized by server */ + CHANNELSRV_UNINITIALIZED = 0, + CHANNELSRV_READY = 1 }; +/* + * enum channel_clientstate + * @CHANNELCLI_DETACHED: + * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it + * unless given TBD* explicit request + * (should actually be < DETACHED). + * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach. + * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time. + * @CHANNELCLI_BUSY: Client either wants to use or is using channel. + * @CHANNELCLI_OWNED: "No worries" state - client can access channel + * anytime. 
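Stepping back to the Skein block-processing macros at the top of this hunk: each ROUND1024() step (and the unrolled R1024 variants) is the Threefish MIX primitive applied to eight word pairs, with the key schedule injected by I1024() after every four rounds. A minimal userspace sketch of one MIX step follows; rotl64() stands in for the kernel's rol64(), and the rotation constant is a placeholder rather than one of the real R1024_* values from the Skein specification.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's rol64(); assumes 0 < r < 64. */
static uint64_t rotl64(uint64_t x, unsigned int r)
{
	return (x << r) | (x >> (64 - r));
}

/* One Threefish MIX: the add/rotate/xor applied to each word pair in ROUND1024(). */
static void mix64(uint64_t *a, uint64_t *b, unsigned int rot)
{
	*a += *b;
	*b = rotl64(*b, rot);
	*b ^= *a;
}

int main(void)
{
	uint64_t x0 = 0x0123456789abcdefULL, x1 = 0xfedcba9876543210ULL;

	mix64(&x0, &x1, 24); /* 24 is a made-up rotation constant */
	printf("%016llx %016llx\n", (unsigned long long)x0,
	       (unsigned long long)x1);
	return 0;
}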
+ */ enum channel_clientstate { CHANNELCLI_DETACHED = 0, - CHANNELCLI_DISABLED = 1, /* client can see channel but is NOT - * allowed to use it unless given TBD - * explicit request (should actually be - * < DETACHED) - */ - CHANNELCLI_ATTACHING = 2, /* legacy EFI client request - * for EFI server to attach - */ - CHANNELCLI_ATTACHED = 3, /* idle, but client may want - * to use channel any time - */ - CHANNELCLI_BUSY = 4, /* client either wants to use or is - * using channel - */ - CHANNELCLI_OWNED = 5 /* "no worries" state - client can */ - /* access channel anytime */ + CHANNELCLI_DISABLED = 1, + CHANNELCLI_ATTACHING = 2, + CHANNELCLI_ATTACHED = 3, + CHANNELCLI_BUSY = 4, + CHANNELCLI_OWNED = 5 }; -#define VISOR_CHANNEL_SERVER_READY(ch) \ - (readl(&(ch)->srv_state) == CHANNELSRV_READY) - -#define VISOR_VALID_CHANNELCLI_TRANSITION(o, n) \ - (((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \ - (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DISABLED)) || \ - (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DISABLED)) || \ - (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DETACHED)) || \ - (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DETACHED)) || \ - (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHING)) || \ - (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_ATTACHED)) || \ - (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_ATTACHED)) || \ - (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_ATTACHED)) || \ - (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_BUSY)) || \ - (((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_OWNED)) || \ - (((o) == CHANNELCLI_DISABLED) && ((n) == CHANNELCLI_OWNED)) || \ - (((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_OWNED)) || \ - (((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_OWNED)) || \ - (((o) == CHANNELCLI_BUSY) && ((n) == CHANNELCLI_OWNED)) || (0)) \ - ? (1) : (0)) - /* Values for VISORA_CHANNEL_PROTOCOL.CliErrorBoot: */ -/* throttling invalid boot channel statetransition error due to client - * disabled + +/* + * Throttling invalid boot channel statetransition error due to client + * disabled. */ #define VISOR_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01 -/* throttling invalid boot channel statetransition error due to client - * not attached +/* + * Throttling invalid boot channel statetransition error due to client + * not attached. */ #define VISOR_CLIERRORBOOT_THROTTLEMSG_NOTATTACHED 0x02 -/* throttling invalid boot channel statetransition error due to busy channel */ +/* Throttling invalid boot channel statetransition error due to busy channel */ #define VISOR_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04 -/* Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so - * that windows guest can look at the FeatureFlags in the io channel, - * and configure the windows driver to use interrupts or not based on - * this setting. This flag is set in uislib after the - * VISOR_VHBA_init_channel is called. All feature bits for all - * channels should be defined here. The io channel feature bits are - * defined right here +/* + * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that + * windows guest can look at the FeatureFlags in the io channel, and configure + * the windows driver to use interrupts or not based on this setting. This flag + * is set in uislib after the VISOR_VHBA_init_channel is called. All feature + * bits for all channels should be defined here. 
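Since the comment above describes how a guest inspects the FeatureFlags word to choose between interrupts and polling, here is a compact sketch of that kind of test against the feature bits defined just below; channel_should_poll() is a hypothetical helper written for illustration, not part of the driver.

#include <stdbool.h>
#include <stdint.h>

/* Bit positions as defined in channel.h below. */
#define VISOR_DRIVER_ENABLES_INTS	(0x1ULL << 1)
#define VISOR_CHANNEL_IS_POLLING	(0x1ULL << 3)

/* Poll when the channel says so, or when interrupt support was never advertised. */
static bool channel_should_poll(uint64_t features)
{
	if (features & VISOR_CHANNEL_IS_POLLING)
		return true;
	return !(features & VISOR_DRIVER_ENABLES_INTS);
}

int main(void)
{
	return channel_should_poll(VISOR_CHANNEL_IS_POLLING) ? 0 : 1;
}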
The io channel feature bits + * are defined right here */ #define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1) #define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3) @@ -108,151 +89,120 @@ enum channel_clientstate { #define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5) #define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6) -/* Common Channel Header */ +/* + * struct channel_header - Common Channel Header + * @signature: Signature. + * @legacy_state: DEPRECATED - being replaced by. + * @header_size: sizeof(struct channel_header). + * @size: Total size of this channel in bytes. + * @features: Flags to modify behavior. + * @chtype: Channel type: data, bus, control, etc.. + * @partition_handle: ID of guest partition. + * @handle: Device number of this channel in client. + * @ch_space_offset: Offset in bytes to channel specific area. + * @version_id: Struct channel_header Version ID. + * @partition_index: Index of guest partition. + * @zone_uuid: Guid of Channel's zone. + * @cli_str_offset: Offset from channel header to null-terminated + * ClientString (0 if ClientString not present). + * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this + * channel. + * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see + * ServerStateUp, ServerStateDown, etc). + * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel. + * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>. + * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see + * ServerStateUp, ServerStateDown, etc). + * @srv_state: CHANNEL_SERVERSTATE. + * @cli_error_boot: Bits to indicate err states for boot clients, so err + * messages can be throttled. + * @cli_error_os: Bits to indicate err states for OS clients, so err + * messages can be throttled. + * @filler: Pad out to 128 byte cacheline. + * @recover_channel: Please add all new single-byte values below here. + */ struct channel_header { - u64 signature; /* Signature */ - u32 legacy_state; /* DEPRECATED - being replaced by */ - /* SrvState, CliStateBoot, and CliStateOS below */ - u32 header_size; /* sizeof(struct channel_header) */ - u64 size; /* Total size of this channel in bytes */ - u64 features; /* Flags to modify behavior */ - uuid_le chtype; /* Channel type: data, bus, control, etc. 
*/ - u64 partition_handle; /* ID of guest partition */ - u64 handle; /* Device number of this channel in client */ - u64 ch_space_offset; /* Offset in bytes to channel specific area */ - u32 version_id; /* struct channel_header Version ID */ - u32 partition_index; /* Index of guest partition */ - uuid_le zone_uuid; /* Guid of Channel's zone */ - u32 cli_str_offset; /* offset from channel header to - * nul-terminated ClientString (0 if - * ClientString not present) - */ - u32 cli_state_boot; /* CHANNEL_CLIENTSTATE of pre-boot - * EFI client of this channel - */ - u32 cmd_state_cli; /* CHANNEL_COMMANDSTATE (overloaded in - * Windows drivers, see ServerStateUp, - * ServerStateDown, etc) - */ - u32 cli_state_os; /* CHANNEL_CLIENTSTATE of Guest OS - * client of this channel - */ - u32 ch_characteristic; /* CHANNEL_CHARACTERISTIC_<xxx> */ - u32 cmd_state_srv; /* CHANNEL_COMMANDSTATE (overloaded in - * Windows drivers, see ServerStateUp, - * ServerStateDown, etc) - */ - u32 srv_state; /* CHANNEL_SERVERSTATE */ - u8 cli_error_boot; /* bits to indicate err states for - * boot clients, so err messages can - * be throttled - */ - u8 cli_error_os; /* bits to indicate err states for OS - * clients, so err messages can be - * throttled - */ - u8 filler[1]; /* Pad out to 128 byte cacheline */ - /* Please add all new single-byte values below here */ + u64 signature; + u32 legacy_state; + /* SrvState, CliStateBoot, and CliStateOS below */ + u32 header_size; + u64 size; + u64 features; + uuid_le chtype; + u64 partition_handle; + u64 handle; + u64 ch_space_offset; + u32 version_id; + u32 partition_index; + uuid_le zone_uuid; + u32 cli_str_offset; + u32 cli_state_boot; + u32 cmd_state_cli; + u32 cli_state_os; + u32 ch_characteristic; + u32 cmd_state_srv; + u32 srv_state; + u8 cli_error_boot; + u8 cli_error_os; + u8 filler[1]; u8 recover_channel; } __packed; #define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0) -/* Subheader for the Signal Type variation of the Common Channel */ +/* + * struct signal_queue_header - Subheader for the Signal Type variation of the + * Common Channel. + * @version: SIGNAL_QUEUE_HEADER Version ID. + * @chtype: Queue type: storage, network. + * @size: Total size of this queue in bytes. + * @sig_base_offset: Offset to signal queue area. + * @features: Flags to modify behavior. + * @num_sent: Total # of signals placed in this queue. + * @num_overflows: Total # of inserts failed due to full queue. + * @signal_size: Total size of a signal for this queue. + * @max_slots: Max # of slots in queue, 1 slot is always empty. + * @max_signals: Max # of signals in queue (MaxSignalSlots-1). + * @head: Queue head signal #. + * @num_received: Total # of signals removed from this queue. + * @tail: Queue tail signal. + * @reserved1: Reserved field. + * @reserved2: Reserved field. + * @client_queue: + * @num_irq_received: Total # of Interrupts received. This is incremented by the + * ISR in the guest windows driver. + * @num_empty: Number of times that visor_signal_remove is called and + * returned Empty Status. + * @errorflags: Error bits set during SignalReinit to denote trouble with + * client's fields. + * @filler: Pad out to 64 byte cacheline. 
+ */ struct signal_queue_header { /* 1st cache line */ - u32 version; /* SIGNAL_QUEUE_HEADER Version ID */ - u32 chtype; /* Queue type: storage, network */ - u64 size; /* Total size of this queue in bytes */ - u64 sig_base_offset; /* Offset to signal queue area */ - u64 features; /* Flags to modify behavior */ - u64 num_sent; /* Total # of signals placed in this queue */ - u64 num_overflows; /* Total # of inserts failed due to - * full queue - */ - u32 signal_size; /* Total size of a signal for this queue */ - u32 max_slots; /* Max # of slots in queue, 1 slot is - * always empty - */ - u32 max_signals; /* Max # of signals in queue - * (MaxSignalSlots-1) - */ - u32 head; /* Queue head signal # */ + u32 version; + u32 chtype; + u64 size; + u64 sig_base_offset; + u64 features; + u64 num_sent; + u64 num_overflows; + u32 signal_size; + u32 max_slots; + u32 max_signals; + u32 head; /* 2nd cache line */ - u64 num_received; /* Total # of signals removed from this queue */ - u32 tail; /* Queue tail signal */ - u32 reserved1; /* Reserved field */ - u64 reserved2; /* Reserved field */ + u64 num_received; + u32 tail; + u32 reserved1; + u64 reserved2; u64 client_queue; - u64 num_irq_received; /* Total # of Interrupts received. This - * is incremented by the ISR in the - * guest windows driver - */ - u64 num_empty; /* Number of times that visor_signal_remove - * is called and returned Empty Status. - */ - u32 errorflags; /* Error bits set during SignalReinit - * to denote trouble with client's - * fields - */ - u8 filler[12]; /* Pad out to 64 byte cacheline */ + u64 num_irq_received; + u64 num_empty; + u32 errorflags; + u8 filler[12]; } __packed; -/* Generic function useful for validating any type of channel when it is - * received by the client that will be accessing the channel. - * Note that <logCtx> is only needed for callers in the EFI environment, and - * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages. 
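The counters documented for struct signal_queue_header above suggest simple producer/consumer bookkeeping: num_sent and num_received grow monotonically, max_slots leaves one slot empty, and max_signals is max_slots - 1. Under that assumption, occupancy and the full condition can be derived as in this sketch; sigq_pending() and sigq_full() are hypothetical helpers, not part of the header.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sigq_counters {
	uint64_t num_sent;	/* total signals ever inserted */
	uint64_t num_received;	/* total signals ever removed */
	uint32_t max_signals;	/* max_slots - 1: one slot always stays empty */
};

static uint64_t sigq_pending(const struct sigq_counters *q)
{
	return q->num_sent - q->num_received;
}

static bool sigq_full(const struct sigq_counters *q)
{
	return sigq_pending(q) >= q->max_signals;
}

int main(void)
{
	struct sigq_counters q = { .num_sent = 70, .num_received = 66,
				   .max_signals = 7 };

	printf("pending=%llu full=%d\n",
	       (unsigned long long)sigq_pending(&q), sigq_full(&q));
	return 0;
}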
- */ -static inline int -visor_check_channel(struct channel_header *ch, - uuid_le expected_uuid, - char *chname, - u64 expected_min_bytes, - u32 expected_version, - u64 expected_signature) -{ - if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) { - /* caller wants us to verify type GUID */ - if (uuid_le_cmp(ch->chtype, expected_uuid) != 0) { - pr_err("Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n", - chname, &expected_uuid, - &expected_uuid, &ch->chtype); - return 0; - } - } - if (expected_min_bytes > 0) { /* verify channel size */ - if (ch->size < expected_min_bytes) { - pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n", - chname, &expected_uuid, - (unsigned long long)expected_min_bytes, - ch->size); - return 0; - } - } - if (expected_version > 0) { /* verify channel version */ - if (ch->version_id != expected_version) { - pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n", - chname, &expected_uuid, - (unsigned long)expected_version, - ch->version_id); - return 0; - } - } - if (expected_signature > 0) { /* verify channel signature */ - if (ch->signature != expected_signature) { - pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n", - chname, &expected_uuid, - expected_signature, ch->signature); - return 0; - } - } - return 1; -} - -/* - * CHANNEL Guids - */ - +/* CHANNEL Guids */ /* {414815ed-c58c-11da-95a9-00e08161165f} */ #define VISOR_VHBA_CHANNEL_UUID \ UUID_LE(0x414815ed, 0xc58c, 0x11da, \ @@ -260,7 +210,6 @@ visor_check_channel(struct channel_header *ch, static const uuid_le visor_vhba_channel_uuid = VISOR_VHBA_CHANNEL_UUID; #define VISOR_VHBA_CHANNEL_UUID_STR \ "414815ed-c58c-11da-95a9-00e08161165f" - /* {8cd5994d-c58e-11da-95a9-00e08161165f} */ #define VISOR_VNIC_CHANNEL_UUID \ UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \ @@ -268,7 +217,6 @@ static const uuid_le visor_vhba_channel_uuid = VISOR_VHBA_CHANNEL_UUID; static const uuid_le visor_vnic_channel_uuid = VISOR_VNIC_CHANNEL_UUID; #define VISOR_VNIC_CHANNEL_UUID_STR \ "8cd5994d-c58e-11da-95a9-00e08161165f" - /* {72120008-4AAB-11DC-8530-444553544200} */ #define VISOR_SIOVM_UUID \ UUID_LE(0x72120008, 0x4AAB, 0x11DC, \ diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h index c7cb3fbde7b2..80b9ef3ceb0c 100644 --- a/drivers/staging/unisys/include/iochannel.h +++ b/drivers/staging/unisys/include/iochannel.h @@ -7,9 +7,7 @@ * Everything needed for IOPart-GuestPart communication is define in * this file. Note: Everything is OS-independent because this file is * used by Windows, Linux and possible EFI drivers. - */ - -/* + * * Communication flow between the IOPart and GuestPart uses the channel headers * channel state. The following states are currently being used: * UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED @@ -30,14 +28,9 @@ */ #include <linux/uuid.h> - #include <linux/dma-direction.h> #include "channel.h" -#define VISOR_VHBA_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE -#define VISOR_VNIC_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE -#define VISOR_VSWITCH_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE - /* * Must increment these whenever you insert or delete fields within this channel * struct. 
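The visor_check_channel() helper removed just above (and re-declared later in visorbus.h) only compares a handful of header fields against expected values. A self-contained userspace mirror of that logic, minus the NULL-UUID skip and the pr_err() reporting, is sketched below; struct hdr and check_channel() are illustrative stand-ins, and 0x4c4e4345 is SIGNATURE_32('E', 'C', 'N', 'L').

#include <stdint.h>
#include <string.h>

/* Trimmed-down stand-in for struct channel_header. */
struct hdr {
	uint64_t signature;
	uint64_t size;
	uint32_t version_id;
	uint8_t chtype[16];
};

/* Mirrors the removed inline helper: returns 1 when the channel looks sane. */
static int check_channel(const struct hdr *ch, const uint8_t expected_uuid[16],
			 uint64_t min_bytes, uint32_t version, uint64_t signature)
{
	if (memcmp(ch->chtype, expected_uuid, 16) != 0)
		return 0;
	if (min_bytes > 0 && ch->size < min_bytes)
		return 0;
	if (version > 0 && ch->version_id != version)
		return 0;
	if (signature > 0 && ch->signature != signature)
		return 0;
	return 1;
}

int main(void)
{
	struct hdr ch = { .signature = 0x4c4e4345, .size = 4096,
			  .version_id = 2, .chtype = { 0 } };
	uint8_t expected[16] = { 0 };

	return check_channel(&ch, expected, 2048, 2, 0x4c4e4345) ? 0 : 1;
}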
Also increment whenever you change the meaning of fields within this @@ -47,19 +40,6 @@ */ #define VISOR_VHBA_CHANNEL_VERSIONID 2 #define VISOR_VNIC_CHANNEL_VERSIONID 2 -#define VISOR_VSWITCH_CHANNEL_VERSIONID 1 - -#define VISOR_VHBA_CHANNEL_OK_CLIENT(ch) \ - (visor_check_channel(ch, visor_vhba_channel_uuid, \ - "vhba", MIN_IO_CHANNEL_SIZE, \ - VISOR_VHBA_CHANNEL_VERSIONID, \ - VISOR_VHBA_CHANNEL_SIGNATURE)) - -#define VISOR_VNIC_CHANNEL_OK_CLIENT(ch) \ - (visor_check_channel(ch, visor_vnic_channel_uuid, \ - "vnic", MIN_IO_CHANNEL_SIZE, \ - VISOR_VNIC_CHANNEL_VERSIONID, \ - VISOR_VNIC_CHANNEL_SIGNATURE)) /* * Everything necessary to handle SCSI & NIC traffic between Guest Partition and @@ -71,58 +51,57 @@ #define MAXNUM(a, b) (((a) > (b)) ? (a) : (b)) /* Define the two queues per data channel between iopart and ioguestparts. */ + /* Used by ioguestpart to 'insert' signals to iopart. */ #define IOCHAN_TO_IOPART 0 + /* Used by ioguestpart to 'remove' signals from iopart, same previous queue. */ #define IOCHAN_FROM_IOPART 1 /* Size of cdb - i.e., SCSI cmnd */ #define MAX_CMND_SIZE 16 - #define MAX_SENSE_SIZE 64 - #define MAX_PHYS_INFO 64 -/* Various types of network packets that can be sent in cmdrsp. */ +/* + * enum net_types - Various types of network packets that can be sent in cmdrsp. + * @NET_RCV_POST: Submit buffer to hold receiving incoming packet. + * @NET_RCV: visornic -> uisnic. Incoming packet received. + * @NET_XMIT: uisnic -> visornic. For outgoing packet. + * @NET_XMIT_DONE: visornic -> uisnic. Outgoing packet xmitted. + * @NET_RCV_ENBDIS: uisnic -> visornic. Enable/Disable packet reception. + * @NET_RCV_ENBDIS_ACK: visornic -> uisnic. Acknowledge enable/disable packet. + * @NET_RCV_PROMISC: uisnic -> visornic. Enable/Disable promiscuous mode. + * @NET_CONNECT_STATUS: visornic -> uisnic. Indicate the loss or restoration of + * a network connection. + * @NET_MACADDR: uisnic -> visornic. Indicates the client has requested + * to update it's MAC address. + * @NET_MACADDR_ACK: MAC address acknowledge. 
+ */ enum net_types { - NET_RCV_POST = 0, /* - * Submit buffer to hold receiving - * incoming packet - */ - /* visornic -> uisnic */ - NET_RCV, /* incoming packet received */ - /* uisnic -> visornic */ - NET_XMIT, /* for outgoing net packets */ - /* visornic -> uisnic */ - NET_XMIT_DONE, /* outgoing packet xmitted */ - /* uisnic -> visornic */ - NET_RCV_ENBDIS, /* enable/disable packet reception */ - /* visornic -> uisnic */ - NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet */ - /* reception */ - /* uisnic -> visornic */ - NET_RCV_PROMISC, /* enable/disable promiscuous mode */ - /* visornic -> uisnic */ - NET_CONNECT_STATUS, /* - * indicate the loss or restoration of a network - * connection - */ - /* uisnic -> visornic */ - NET_MACADDR, /* - * Indicates the client has requested to update - * it's MAC address - */ - NET_MACADDR_ACK, /* MAC address acknowledge */ - + NET_RCV_POST = 0, + NET_RCV, + NET_XMIT, + NET_XMIT_DONE, + NET_RCV_ENBDIS, + NET_RCV_ENBDIS_ACK, + /* Reception */ + NET_RCV_PROMISC, + NET_CONNECT_STATUS, + NET_MACADDR, + NET_MACADDR_ACK, }; -#define ETH_MIN_DATA_SIZE 46 /* minimum eth data size */ +/* Minimum eth data size */ +#define ETH_MIN_DATA_SIZE 46 #define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE) -#define VISOR_ETH_MAX_MTU 16384 /* maximum data size */ +/* Maximum data size */ +#define VISOR_ETH_MAX_MTU 16384 #ifndef MAX_MACADDR_LEN -#define MAX_MACADDR_LEN 6 /* number of bytes in MAC address */ +/* Number of bytes in MAC address */ +#define MAX_MACADDR_LEN 6 #endif /* Various types of scsi task mgmt commands. */ @@ -156,10 +135,16 @@ struct guest_phys_info { #define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info)) +/* + * struct uisscsi_dest + * @channel: Bus number. + * @id: Target number. + * @lun: Logical unit number. + */ struct uisscsi_dest { - u32 channel; /* channel == bus number */ - u32 id; /* id == target number */ - u32 lun; /* lun == logical unit number */ + u32 channel; + u32 id; + u32 lun; } __packed; struct vhba_wwnn { @@ -168,61 +153,77 @@ struct vhba_wwnn { } __packed; /* - * WARNING: Values stired in this structure must contain maximum counts (not + * struct vhba_config_max + * @max_channel: Maximum channel for devices attached to this bus. + * @max_id: Maximum SCSI ID for devices attached to bus. + * @max_lun: Maximum SCSI LUN for devices attached to bus. + * @cmd_per_lun: Maximum number of outstanding commands per LUN. + * @max_io_size: Maximum io size for devices attached to this bus. Max io size + * is often determined by the resource of the hba. + * e.g Max scatter gather list length * page size / sector size. + * + * WARNING: Values stored in this structure must contain maximum counts (not * maximum values). + * + * 20 bytes */ -struct vhba_config_max {/* 20 bytes */ - u32 max_channel;/* maximum channel for devices attached to this bus */ - u32 max_id; /* maximum SCSI ID for devices attached to bus */ - u32 max_lun; /* maximum SCSI LUN for devices attached to bus */ - u32 cmd_per_lun;/* maximum number of outstanding commands per LUN */ - u32 max_io_size;/* maximum io size for devices attached to this bus */ - /* max io size is often determined by the resource of the hba. e.g */ - /* max scatter gather list length * page size / sector size */ +struct vhba_config_max { + u32 max_channel; + u32 max_id; + u32 max_lun; + u32 cmd_per_lun; + u32 max_io_size; } __packed; +/* + * struct uiscmdrsp_scsi + * + * @handle: The handle to the cmd that was received. Send it back as + * is in the rsp packet. 
+ * @cmnd: The cdb for the command. + * @bufflen: Length of data to be transferred out or in. + * @guest_phys_entries: Number of entries in scatter-gather list. + * @struct gpi_list: Physical address information for each fragment. + * @enum data_dir: Direction of the data, if any. + * @struct vdest: Identifies the virtual hba, id, channel, lun to which + * cmd was sent. + * @linuxstat: Original Linux status used by Linux vdisk. + * @scsistat: The scsi status. + * @addlstat: Non-scsi status. + * @sensebuf: Sense info in case cmd failed. sensebuf holds the + * sense_data struct. See sense_data struct for more + * details. + * @*vdisk: Pointer to the vdisk to clean up when IO completes. + * @no_disk_result: Used to return no disk inquiry result when + * no_disk_result is set to 1 + * scsi.scsistat is SAM_STAT_GOOD + * scsi.addlstat is 0 + * scsi.linuxstat is SAM_STAT_GOOD + * That is, there is NO error. + */ struct uiscmdrsp_scsi { - u64 handle; /* the handle to the cmd that was received */ - /* send it back as is in the rsp packet. */ - u8 cmnd[MAX_CMND_SIZE]; /* the cdb for the command */ - u32 bufflen; /* length of data to be transferred out or in */ - u16 guest_phys_entries; /* Number of entries in scatter-gather list */ - struct guest_phys_info gpi_list[MAX_PHYS_INFO]; /* physical address - * information for each - * fragment - */ - enum dma_data_direction data_dir; /* direction of the data, if any */ - struct uisscsi_dest vdest; /* identifies the virtual hba, id, */ - /* channel, lun to which cmd was sent */ - + u64 handle; + u8 cmnd[MAX_CMND_SIZE]; + u32 bufflen; + u16 guest_phys_entries; + struct guest_phys_info gpi_list[MAX_PHYS_INFO]; + enum dma_data_direction data_dir; + struct uisscsi_dest vdest; /* Needed to queue the rsp back to cmd originator. */ - int linuxstat; /* original Linux status used by Linux vdisk */ - u8 scsistat; /* the scsi status */ - u8 addlstat; /* non-scsi status */ + int linuxstat; + u8 scsistat; + u8 addlstat; #define ADDL_SEL_TIMEOUT 4 - /* The following fields are need to determine the result of command. */ - u8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */ - /* sensebuf holds the sense_data struct; */ - /* See sense_data struct for more details. */ - void *vdisk; /* Pointer to the vdisk to clean up when IO completes. */ + u8 sensebuf[MAX_SENSE_SIZE]; + void *vdisk; int no_disk_result; - /* - * Used to return no disk inquiry result - * when no_disk_result is set to 1, - * scsi.scsistat is SAM_STAT_GOOD - * scsi.addlstat is 0 - * scsi.linuxstat is SAM_STAT_GOOD - * That is, there is NO error. - */ } __packed; /* * Defines to support sending correct inquiry result when no disk is * configured. - */ - -/* + * * From SCSI SPC2 - * * If the target is not capable of supporting a device on this logical unit, the @@ -234,22 +235,35 @@ struct uiscmdrsp_scsi { * connected to this logical unit. 
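The DEV_* encodings defined right after this comment follow the standard INQUIRY data layout: byte 0 is (peripheral qualifier << 5) | peripheral device type, and the HiSup flag is bit 4 of byte 3 (per SPC). A quick check of the arithmetic behind the three constants:

#include <assert.h>

#define INQ_BYTE0(qual, type)	(((qual) << 5) | (type))

int main(void)
{
	/* qualifier 0x3, type 0x1f: no device, but target present */
	assert(INQ_BYTE0(0x3, 0x1f) == 0x7f);	/* DEV_NOT_CAPABLE */
	/* qualifier 0x1, type 0x00 (disk): device capable, not present */
	assert(INQ_BYTE0(0x1, 0x00) == 0x20);	/* DEV_DISK_CAPABLE_NOT_PRESENT */
	/* HiSup lives in byte 3 */
	assert((1 << 4) == 0x10);		/* DEV_HISUPPORT */
	return 0;
}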
*/ -#define DEV_NOT_CAPABLE 0x7f /* - * peripheral qualifier of 0x3 - * peripheral type of 0x1f - * specifies no device but target present - */ - -#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 - * peripheral type of 0 - disk - * Specifies device capable, but - * not present - */ +/* + * Peripheral qualifier of 0x3 + * Peripheral type of 0x1f + * Specifies no device but target present + */ +#define DEV_NOT_CAPABLE 0x7f +/* + * Peripheral qualifier of 0x1 + * Peripheral type of 0 - disk + * Specifies device capable, but not present + */ +#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 +/* HiSup = 1; shows support for report luns must be returned for lun 0. */ +#define DEV_HISUPPORT 0x10 -#define DEV_HISUPPORT 0x10 /* - * HiSup = 1; shows support for report luns - * must be returned for lun 0. - */ /* * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length @@ -258,11 +272,12 @@ struct uiscmdrsp_scsi { * inquiry result. */ #define NO_DISK_INQUIRY_RESULT_LEN 36 - -#define MIN_INQUIRY_RESULT_LEN 5 /* 5 bytes minimum for inquiry result */ +/* 5 bytes minimum for inquiry result */ +#define MIN_INQUIRY_RESULT_LEN 5 /* SCSI device version for no disk inquiry result */ -#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */ +/* indicates SCSI SPC2 (SPC3 is 5) */ +#define SCSI_SPC2_VER 4 /* Struct and Defines to support sense information. */ @@ -297,35 +312,48 @@ struct sense_data { u8 sense_key_specific[3]; } __packed; +/* + * struct net_pkt_xmt + * @len: Full length of data in the packet. + * @num_frags: Number of fragments in frags containing data. + * @struct phys_info frags: Physical page information. + * @ethhdr: The ethernet header. + * @struct lincsum: These are needed for csum at uisnic end. + * @valid: 1 = struct is valid - else ignore. + * @hrawoffv: 1 = hwrafoff is valid. + * @nhrawoffv: 1 = nhwrafoff is valid. + * @protocol: Specifies packet protocol. + * @csum: Value used to set skb->csum at IOPart. + * @hrawoff: Value used to set skb->h.raw at IOPart. hrawoff points to + * the start of the TRANSPORT LAYER HEADER. + * @nhrawoff: Value used to set skb->nh.raw at IOPart. nhrawoff points to + * the start of the NETWORK LAYER HEADER. + * + * NOTE: + * The full packet is described in frags but the ethernet header is + * separately kept in ethhdr so that uisnic doesn't have "MAP" the + * guest memory to get to the header. uisnic needs ethhdr to + * determine how to route the packet.
+ */ struct net_pkt_xmt { - int len; /* full length of data in the packet */ - int num_frags; /* number of fragments in frags containing data */ - struct phys_info frags[MAX_PHYS_INFO]; /* physical page information */ - char ethhdr[ETH_HLEN]; /* the ethernet header */ + int len; + int num_frags; + struct phys_info frags[MAX_PHYS_INFO]; + char ethhdr[ETH_HLEN]; struct { - /* These are needed for csum at uisnic end */ - u8 valid; /* 1 = struct is valid - else ignore */ - u8 hrawoffv; /* 1 = hwrafoff is valid */ - u8 nhrawoffv; /* 1 = nhwrafoff is valid */ - __be16 protocol; /* specifies packet protocol */ - __wsum csum; /* value used to set skb->csum at IOPart */ - u32 hrawoff; /* value used to set skb->h.raw at IOPart */ - /* hrawoff points to the start of the TRANSPORT LAYER HEADER */ - u32 nhrawoff; /* value used to set skb->nh.raw at IOPart */ - /* nhrawoff points to the start of the NETWORK LAYER HEADER */ + u8 valid; + u8 hrawoffv; + u8 nhrawoffv; + __be16 protocol; + __wsum csum; + u32 hrawoff; + u32 nhrawoff; } lincsum; - - /* - * NOTE: - * The full packet is described in frags but the ethernet header is - * separately kept in ethhdr so that uisnic doesn't have "MAP" the - * guest memory to get to the header. uisnic needs ethhdr to - * determine how to route the packet. - */ } __packed; struct net_pkt_xmtdone { - u32 xmt_done_result; /* result of NET_XMIT */ + /* Result of NET_XMIT */ + u32 xmt_done_result; } __packed; /* @@ -341,14 +369,12 @@ struct net_pkt_xmtdone { ((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \ / RCVPOST_BUF_SIZE) -/* - * rcv buf size must be large enough to include ethernet data len + ethernet +/* rcv buf size must be large enough to include ethernet data len + ethernet * header len - we are choosing 2K because it is guaranteed to be describable. */ struct net_pkt_rcvpost { /* Physical page information for the single fragment 2K rcv buf */ struct phys_info frag; - /* * Ensures that receive posts are returned to the adapter which we sent * them from originally. @@ -358,143 +384,148 @@ struct net_pkt_rcvpost { } __packed; /* + * struct net_pkt_rcv + * @rcv_done_len: Length of the received data. + * @numrcvbufs: Contains the incoming data. Guest side MUST chain these + * together. + * @*rcvbuf: List of chained rcvbufa. Each entry is a receive buffer + * provided by NET_RCV_POST. NOTE: First rcvbuf in the + * chain will also be provided in net.buf. + * @unique_num: + * @rcvs_dropped_delta: + * * The number of rcvbuf that can be chained is based on max mtu and size of each * rcvbuf. */ struct net_pkt_rcv { - u32 rcv_done_len; /* length of received data */ - - /* - * numrcvbufs: contain the incoming data; guest side MUST chain these - * together. - */ + u32 rcv_done_len; u8 numrcvbufs; - - void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */ - - /* Each entry is a receive buffer provided by NET_RCV_POST. */ - /* NOTE: first rcvbuf in the chain will also be provided in net.buf. */ + void *rcvbuf[MAX_NET_RCV_CHAIN]; u64 unique_num; u32 rcvs_dropped_delta; } __packed; struct net_pkt_enbdis { void *context; - u16 enable; /* 1 = enable, 0 = disable */ + /* 1 = enable, 0 = disable */ + u16 enable; } __packed; struct net_pkt_macaddr { void *context; - u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */ + /* 6 bytes */ + u8 macaddr[MAX_MACADDR_LEN]; } __packed; -/* cmd rsp packet used for VNIC network traffic */ +/* + * struct uiscmdrsp_net - cmd rsp packet used for VNIC network traffic. + * @enum type: + * @*buf: + * @union: + * @struct xmt: Used for NET_XMIT. 
+ * @struct xmtdone: Used for NET_XMIT_DONE. + * @struct rcvpost: Used for NET_RCV_POST. + * @struct rcv: Used for NET_RCV. + * @struct enbdis: Used for NET_RCV_ENBDIS, NET_RCV_ENBDIS_ACK, + * NET_RCV_PROMSIC, and NET_CONNECT_STATUS. + * @struct macaddr: + */ struct uiscmdrsp_net { enum net_types type; void *buf; union { - struct net_pkt_xmt xmt; /* used for NET_XMIT */ - struct net_pkt_xmtdone xmtdone; /* used for NET_XMIT_DONE */ - struct net_pkt_rcvpost rcvpost; /* used for NET_RCV_POST */ - struct net_pkt_rcv rcv; /* used for NET_RCV */ - struct net_pkt_enbdis enbdis; /* used for NET_RCV_ENBDIS, */ - /* NET_RCV_ENBDIS_ACK, */ - /* NET_RCV_PROMSIC, */ - /* and NET_CONNECT_STATUS */ + struct net_pkt_xmt xmt; + struct net_pkt_xmtdone xmtdone; + struct net_pkt_rcvpost rcvpost; + struct net_pkt_rcv rcv; + struct net_pkt_enbdis enbdis; struct net_pkt_macaddr macaddr; }; } __packed; +/* + * struct uiscmdrsp_scsitaskmgmt + * @enum tasktype: The type of task. + * @struct vdest: The vdisk for which this task mgmt is generated. + * @handle: This is a handle that the guest has saved off for its + * own use. The handle value is preserved by iopart and + * returned as in task mgmt rsp. + * @notify_handle: For Linux guests, this is a pointer to wait_queue_head + * that a thread is waiting on to see if the taskmgmt + * command has completed. When the rsp is received by + * guest, the thread receiving the response uses this to + * notify the thread waiting for taskmgmt command + * completion. It's value is preserved by iopart and + * returned as in the task mgmt rsp. + * @notifyresult_handle: This is a handle to the location in the guest where + * the result of the taskmgmt command (result field) is + * saved to when the response is handled. It's value is + * preserved by iopart and returned as is in the task mgmt + * rsp. + * @result: Result of taskmgmt command - set by IOPart. + */ struct uiscmdrsp_scsitaskmgmt { - /* The type of task. */ enum task_mgmt_types tasktype; - - /* The vdisk for which this task mgmt is generated. */ struct uisscsi_dest vdest; - - /* - * This is a handle that the guest has saved off for its own use. - * The handle value is preserved by iopart and returned as in task - * mgmt rsp. - */ u64 handle; - - /* - * For Linux guests, this is a pointer to wait_queue_head that a - * thread is waiting on to see if the taskmgmt command has completed. - * When the rsp is received by guest, the thread receiving the - * response uses this to notify the thread waiting for taskmgmt - * command completion. It's value is preserved by iopart and returned - * as in the task mgmt rsp. - */ u64 notify_handle; - - /* - * This is a handle to the location in the guest where the result of - * the taskmgmt command (result field) is saved to when the response - * is handled. It's value is preserved by iopart and returned as in - * the task mgmt rsp. - */ u64 notifyresult_handle; - - /* Result of taskmgmt command - set by IOPart - values are: */ char result; #define TASK_MGMT_FAILED 0 } __packed; -/* Used by uissd to send disk add/remove notifications to Guest. */ -/* Note that the vHba pointer is not used by the Client/Guest side. */ +/* + * struct uiscmdrsp_disknotify - Used by uissd to send disk add/remove + * notifications to Guest. + * @add: 0-remove, 1-add. + * @*v_hba: Channel info to route msg. + * @channel: SCSI Path of Disk to added or removed. + * @id: SCSI Path of Disk to added or removed. + * @lun: SCSI Path of Disk to added or removed. 
+ * + * Note that the vHba pointer is not used by the Client/Guest side. + */ struct uiscmdrsp_disknotify { - u8 add; /* 0-remove, 1-add */ - void *v_hba; /* channel info to route msg */ - u32 channel, id, lun; /* SCSI Path of Disk to added or removed */ + u8 add; + void *v_hba; + u32 channel, id, lun; } __packed; /* - * The following is used by virthba/vSCSI to send the Acquire/Release commands - * to the IOVM. + * struct uiscmdrsp_vdiskmgmt - The following is used by virthba/vSCSI to send + * the Acquire/Release commands to the IOVM. + * @enum vdisktype: The type of task. + * @struct vdest: The vdisk for which this task mgmt is generated. + * @handle: This is a handle that the guest has saved off for its + * own use. It's value is preserved by iopart and returned + * as in the task mgmt rsp. + * @notify_handle: For Linux guests, this is a pointer to wait_queue_head + * that a thread is waiting on to see if the tskmgmt + * command has completed. When the rsp is received by + * guest, the thread receiving the response uses this to + * notify the thread waiting for taskmgmt command + * completion. It's value is preserved by iopart and + * returned as in the task mgmt rsp. + * @notifyresult_handle: Handle to the location in guest where the result of the + * taskmgmt command (result field) is saved to when the + * response is handled. It's value is preserved by iopart + * and returned as in the task mgmt rsp. + * @result: Result of taskmgmt command - set by IOPart. */ struct uiscmdrsp_vdiskmgmt { - /* The type of task */ enum vdisk_mgmt_types vdisktype; - - /* The vdisk for which this task mgmt is generated */ struct uisscsi_dest vdest; - - /* - * This is a handle that the guest has saved off for its own use. It's - * value is preserved by iopart and returned as in the task mgmt rsp. - */ u64 handle; - - /* - * For Linux guests, this is a pointer to wait_queue_head that a - * thread is waiting on to see if the tskmgmt command has completed. - * When the rsp is received by guest, the thread receiving the - * response uses this to notify the thread waiting for taskmgmt - * command completion. It's value is preserved by iopart and returned - * as in the task mgmt rsp. - */ u64 notify_handle; - - /* - * Handle to the location in guest where the result of the - * taskmgmt command (result field) is saved to when the response - * is handled. It's value is preserved by iopart and returned as in - * the task mgmt rsp. - */ u64 notifyresult_handle; - - /* Result of taskmgmt command - set by IOPart - values are: */ char result; } __packed; /* Keeping cmd and rsp info in one structure for now cmd rsp packet for SCSI */ struct uiscmdrsp { char cmdtype; - -/* Describes what type of information is in the struct */ + /* Describes what type of information is in the struct */ #define CMD_SCSI_TYPE 1 #define CMD_NET_TYPE 2 #define CMD_SCSITASKMGMT_TYPE 3 @@ -509,21 +540,33 @@ struct uiscmdrsp { }; /* Send the response when the cmd is done (scsi and scsittaskmgmt). 
*/ void *private_data; - struct uiscmdrsp *next; /* General Purpose Queue Link */ - struct uiscmdrsp *activeQ_next; /* Pointer to the nextactive commands */ - struct uiscmdrsp *activeQ_prev; /* Pointer to the prevactive commands */ + /* General Purpose Queue Link */ + struct uiscmdrsp *next; + /* Pointer to the nextactive commands */ + struct uiscmdrsp *activeQ_next; + /* Pointer to the prevactive commands */ + struct uiscmdrsp *activeQ_prev; } __packed; +/* total = 28 bytes */ struct iochannel_vhba { - struct vhba_wwnn wwnn; /* 8 bytes */ - struct vhba_config_max max; /* 20 bytes */ -} __packed; /* total = 28 bytes */ + /* 8 bytes */ + struct vhba_wwnn wwnn; + /* 20 bytes */ + struct vhba_config_max max; +} __packed; + struct iochannel_vnic { - u8 macaddr[6]; /* 6 bytes */ - u32 num_rcv_bufs; /* 4 bytes */ - u32 mtu; /* 4 bytes */ - uuid_le zone_uuid; /* 16 bytes */ + /* 6 bytes */ + u8 macaddr[6]; + /* 4 bytes */ + u32 num_rcv_bufs; + /* 4 bytes */ + u32 mtu; + /* 16 bytes */ + uuid_le zone_uuid; } __packed; + /* * This is just the header of the IO channel. It is assumed that directly after * this header there is a large region of memory which contains the command and @@ -544,10 +587,11 @@ struct visor_io_channel { } __packed; /* INLINE functions for initializing and accessing I/O data channels. */ -#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64)) +#define SIZEOF_CMDRSP (64 * DIV_ROUND_UP(sizeof(struct uiscmdrsp), 64)) /* Use 4K page sizes when passing page info between Guest and IOPartition. */ #define PI_PAGE_SIZE 0x1000 #define PI_PAGE_MASK 0x0FFF -#endif /* __IOCHANNEL_H__ */ +/* __IOCHANNEL_H__ */ +#endif diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h index de0635542fbd..17c92294b4d1 100644 --- a/drivers/staging/unisys/include/visorbus.h +++ b/drivers/staging/unisys/include/visorbus.h @@ -23,7 +23,6 @@ * * There should be nothing in this file that is private to the visorbus * bus implementation itself. - * */ #ifndef __VISORBUS_H__ @@ -45,6 +44,7 @@ extern struct bus_type visorbus_type; typedef void (*visorbus_state_complete_func) (struct visor_device *dev, int status); + struct visorchipset_state { u32 created:1; u32 attached:1; @@ -172,6 +172,10 @@ struct visor_device { #define to_visor_device(x) container_of(x, struct visor_device, device) +int visor_check_channel(struct channel_header *ch, uuid_le expected_uuid, + char *chname, u64 expected_min_bytes, + u32 expected_version, u64 expected_signature); + int visorbus_register_visor_driver(struct visor_driver *drv); void visorbus_unregister_visor_driver(struct visor_driver *drv); int visorbus_read_channel(struct visor_device *dev, diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h index ed045eff0e33..96ac574ef1e7 100644 --- a/drivers/staging/unisys/visorbus/controlvmchannel.h +++ b/drivers/staging/unisys/visorbus/controlvmchannel.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2010 - 2015 UNISYS CORPORATION +/* + * Copyright (C) 2010 - 2015 UNISYS CORPORATION * All rights reserved. * * This program is free software; you can redistribute it and/or modify it @@ -23,56 +24,51 @@ UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \ 0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d) -#define VISOR_CONTROLVM_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE #define CONTROLVM_MESSAGE_MAX 64 -/* Must increment this whenever you insert or delete fields within - * this channel struct. 
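The SIZEOF_CMDRSP rewrite earlier in this hunk is a pure substitution for the COVER() helper that this series removes from channel.h: COVER(v, d) expanded to d * DIV_ROUND_UP(v, d), so both the old and the new definition round sizeof(struct uiscmdrsp) up to the next multiple of 64 bytes. A quick userspace check of that identity, with DIV_ROUND_UP spelled out locally:

#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define COVER(v, d)		((d) * DIV_ROUND_UP(v, d))

int main(void)
{
	assert(COVER(100, 64) == 64 * DIV_ROUND_UP(100, 64));	/* both 128 */
	assert(COVER(128, 64) == 64 * DIV_ROUND_UP(128, 64));	/* both 128 */
	assert(COVER(129, 64) == 64 * DIV_ROUND_UP(129, 64));	/* both 192 */
	return 0;
}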
Also increment whenever you change the meaning - * of fields within this channel struct so as to break pre-existing - * software. Note that you can usually add fields to the END of the - * channel struct withOUT needing to increment this. +/* + * Must increment this whenever you insert or delete fields within this channel + * struct. Also increment whenever you change the meaning of fields within this + * channel struct so as to break pre-existing software. Note that you can + * usually add fields to the END of the channel struct withOUT needing to + * increment this. */ #define VISOR_CONTROLVM_CHANNEL_VERSIONID 1 -#define VISOR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \ - (visor_check_channel(ch, \ - VISOR_CONTROLVM_CHANNEL_UUID, \ - "controlvm", \ - sizeof(struct visor_controlvm_channel), \ - VISOR_CONTROLVM_CHANNEL_VERSIONID, \ - VISOR_CONTROLVM_CHANNEL_SIGNATURE)) - /* Defines for various channel queues */ -#define CONTROLVM_QUEUE_REQUEST 0 -#define CONTROLVM_QUEUE_RESPONSE 1 -#define CONTROLVM_QUEUE_EVENT 2 -#define CONTROLVM_QUEUE_ACK 3 +#define CONTROLVM_QUEUE_REQUEST 0 +#define CONTROLVM_QUEUE_RESPONSE 1 +#define CONTROLVM_QUEUE_EVENT 2 +#define CONTROLVM_QUEUE_ACK 3 /* Max num of messages stored during IOVM creation to be reused after crash */ #define CONTROLVM_CRASHMSG_MAX 2 +/* + * struct visor_segment_state + * @enabled: May enter other states. + * @active: Assigned to active partition. + * @alive: Configure message sent to service/server. + * @revoked: Similar to partition state ShuttingDown. + * @allocated: Memory (device/port number) has been selected by Command. + * @known: Has been introduced to the service/guest partition. + * @ready: Service/Guest partition has responded to introduction. + * @operating: Resource is configured and operating. + * @reserved: Natural alignment. + * + * Note: Don't use high bit unless we need to switch to ushort which is + * non-compliant. + */ struct visor_segment_state { - /* Bit 0: May enter other states */ u16 enabled:1; - /* Bit 1: Assigned to active partition */ u16 active:1; - /* Bit 2: Configure message sent to service/server */ u16 alive:1; - /* Bit 3: similar to partition state ShuttingDown */ u16 revoked:1; - /* Bit 4: memory (device/port number) has been selected by Command */ u16 allocated:1; - /* Bit 5: has been introduced to the service/guest partition */ u16 known:1; - /* Bit 6: service/Guest partition has responded to introduction */ u16 ready:1; - /* Bit 7: resource is configured and operating */ u16 operating:1; - /* Natural alignment*/ u16 reserved:8; -/* Note: don't use high bit unless we need to switch to ushort - * which is non-compliant - */ } __packed; static const struct visor_segment_state segment_state_running = { @@ -87,74 +83,101 @@ static const struct visor_segment_state segment_state_standby = { 1, 1, 0, 0, 1, 1, 1, 0 }; -/* Ids for commands that may appear in either queue of a ControlVm channel. +/* + * enum controlvm_id + * @CONTROLVM_INVALID: + * @CONTROLVM_BUS_CREATE: CP --> SP, GP. + * @CONTROLVM_BUS_DESTROY: CP --> SP, GP. + * @CONTROLVM_BUS_CONFIGURE: CP --> SP. + * @CONTROLVM_BUS_CHANGESTATE: CP --> SP, GP. + * @CONTROLVM_BUS_CHANGESTATE_EVENT: SP, GP --> CP. + * @CONTROLVM_DEVICE_CREATE: CP --> SP, GP. + * @CONTROLVM_DEVICE_DESTROY: CP --> SP, GP. + * @CONTROLVM_DEVICE_CONFIGURE: CP --> SP. + * @CONTROLVM_DEVICE_CHANGESTATE: CP --> SP, GP. + * @CONTROLVM_DEVICE_CHANGESTATE_EVENT: SP, GP --> CP. + * @CONTROLVM_DEVICE_RECONFIGURE: CP --> Boot. + * @CONTROLVM_CHIPSET_INIT: CP --> SP, GP. 
+ * @CONTROLVM_CHIPSET_STOP: CP --> SP, GP. + * @CONTROLVM_CHIPSET_READY: CP --> SP. + * @CONTROLVM_CHIPSET_SELFTEST: CP --> SP. * - * Commands that are initiated by the command partition (CP), by an IO or - * console service partition (SP), or by a guest partition (GP)are: - * - issued on the RequestQueue queue (q #0) in the ControlVm channel - * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel + * Ids for commands that may appear in either queue of a ControlVm channel. * - * Events that are initiated by an IO or console service partition (SP) or - * by a guest partition (GP) are: - * - issued on the EventQueue queue (q #2) in the ControlVm channel - * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel + * Commands that are initiated by the command partition (CP), by an IO or + * console service partition (SP), or by a guest partition (GP) are: + * - issued on the RequestQueue queue (q #0) in the ControlVm channel + * - responded to on the ResponseQueue queue (q #1) in the ControlVm channel + * + * Events that are initiated by an IO or console service partition (SP) or + * by a guest partition (GP) are: + * - issued on the EventQueue queue (q #2) in the ControlVm channel + * - responded to on the EventAckQueue queue (q #3) in the ControlVm channel */ enum controlvm_id { CONTROLVM_INVALID = 0, - /* SWITCH commands required Parameter: SwitchNumber */ - /* BUS commands required Parameter: BusNumber */ - CONTROLVM_BUS_CREATE = 0x101, /* CP --> SP, GP */ - CONTROLVM_BUS_DESTROY = 0x102, /* CP --> SP, GP */ - CONTROLVM_BUS_CONFIGURE = 0x104, /* CP --> SP */ - CONTROLVM_BUS_CHANGESTATE = 0x105, /* CP --> SP, GP */ - CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106, /* SP, GP --> CP */ -/* DEVICE commands required Parameter: BusNumber, DeviceNumber */ - - CONTROLVM_DEVICE_CREATE = 0x201, /* CP --> SP, GP */ - CONTROLVM_DEVICE_DESTROY = 0x202, /* CP --> SP, GP */ - CONTROLVM_DEVICE_CONFIGURE = 0x203, /* CP --> SP */ - CONTROLVM_DEVICE_CHANGESTATE = 0x204, /* CP --> SP, GP */ - CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205, /* SP, GP --> CP */ - CONTROLVM_DEVICE_RECONFIGURE = 0x206, /* CP --> Boot */ -/* CHIPSET commands */ - CONTROLVM_CHIPSET_INIT = 0x301, /* CP --> SP, GP */ - CONTROLVM_CHIPSET_STOP = 0x302, /* CP --> SP, GP */ - CONTROLVM_CHIPSET_READY = 0x304, /* CP --> SP */ - CONTROLVM_CHIPSET_SELFTEST = 0x305, /* CP --> SP */ - + /* + * SWITCH commands required Parameter: SwitchNumber. + * BUS commands required Parameter: BusNumber + */ + CONTROLVM_BUS_CREATE = 0x101, + CONTROLVM_BUS_DESTROY = 0x102, + CONTROLVM_BUS_CONFIGURE = 0x104, + CONTROLVM_BUS_CHANGESTATE = 0x105, + CONTROLVM_BUS_CHANGESTATE_EVENT = 0x106, + /* DEVICE commands required Parameter: BusNumber, DeviceNumber */ + CONTROLVM_DEVICE_CREATE = 0x201, + CONTROLVM_DEVICE_DESTROY = 0x202, + CONTROLVM_DEVICE_CONFIGURE = 0x203, + CONTROLVM_DEVICE_CHANGESTATE = 0x204, + CONTROLVM_DEVICE_CHANGESTATE_EVENT = 0x205, + CONTROLVM_DEVICE_RECONFIGURE = 0x206, + /* CHIPSET commands */ + CONTROLVM_CHIPSET_INIT = 0x301, + CONTROLVM_CHIPSET_STOP = 0x302, + CONTROLVM_CHIPSET_READY = 0x304, + CONTROLVM_CHIPSET_SELFTEST = 0x305, }; +/* + * struct irq_info + * @reserved1: Natural alignment purposes + * @recv_irq_handle: Specifies interrupt handle. It is used to retrieve the + * corresponding interrupt pin from Monitor; and the interrupt + * pin is used to connect to the corresponding interrupt. + * Used by IOPart-GP only. + * @recv_irq_vector: Specifies interrupt vector. 
It, interrupt pin, and shared + * are used to connect to the corresponding interrupt. + * Used by IOPart-GP only. + * @recv_irq_shared: Specifies if the recvInterrupt is shared. It, interrupt + * pin and vector are used to connect to 0 = not shared; + * 1 = shared the corresponding interrupt. + * Used by IOPart-GP only. + * @reserved: Natural alignment purposes + */ struct irq_info { u64 reserved1; - - /* specifies interrupt handle. It is used to retrieve the - * corresponding interrupt pin from Monitor; and the - * interrupt pin is used to connect to the corresponding - * interrupt. Used by IOPart-GP only. - */ u64 recv_irq_handle; - - /* specifies interrupt vector. It, interrupt pin, and shared are - * used to connect to the corresponding interrupt. Used by - * IOPart-GP only. - */ u32 recv_irq_vector; - - /* specifies if the recvInterrupt is shared. It, interrupt pin - * and vector are used to connect to 0 = not shared; 1 = shared. - * the corresponding interrupt. Used by IOPart-GP only. - */ u8 recv_irq_shared; - u8 reserved[3]; /* Natural alignment purposes */ + u8 reserved[3]; } __packed; +/* + * struct efi_visor_indication + * @boot_to_fw_ui: Stop in UEFI UI + * @clear_nvram: Clear NVRAM + * @clear_cmos: Clear CMOS + * @boot_to_tool: Run install tool + * @reserved: Natural alignment + */ struct efi_visor_indication { - u64 boot_to_fw_ui:1; /* Bit 0: Stop in uefi ui */ - u64 clear_nvram:1; /* Bit 1: Clear NVRAM */ - u64 clear_cmos:1; /* Bit 2: Clear CMOS */ - u64 boot_to_tool:1; /* Bit 3: Run install tool */ - /* remaining bits are available */ - u64 reserved:60; /* Natural alignment */ + u64 boot_to_fw_ui:1; + u64 clear_nvram:1; + u64 clear_cmos:1; + u64 boot_to_tool:1; + /* Remaining bits are available */ + u64 reserved:60; } __packed; enum visor_chipset_feature { @@ -162,182 +185,249 @@ enum visor_chipset_feature { VISOR_CHIPSET_FEATURE_PARA_HOTPLUG = 0x00000002, }; -/* This is the common structure that is at the beginning of every - * ControlVm message (both commands and responses) in any ControlVm - * queue. Commands are easily distinguished from responses by - * looking at the flags.response field. +/* + * struct controlvm_message_header + * @id: See CONTROLVM_ID. + * @message_size: Includes size of this struct + size of message. + * @segment_index: Index of segment containing Vm message/information. + * @completion_status: Error status code or result of message completion. + * @struct flags: + * @failed: =1 in a response to signify failure. + * @response_expected: =1 in all messages that expect a response. + * @server: =1 in all bus & device-related messages where the + * message receiver is to act as the bus or device + * server. + * @test_message: =1 for testing use only (Control and Command + * ignore this). + * @partial_completion: =1 if there are forthcoming responses/acks + * associated with this message. + * @preserve: =1 this is to let us know to preserve channel + * contents. + * @writer_in_diag: =1 the DiagWriter is active in the Diagnostic + * Partition. + * @reserve: Natural alignment. + * @reserved: Natural alignment. + * @message_handle: Identifies the particular message instance. + * @payload_vm_offset: Offset of payload area from start of this instance. + * @payload_max_bytes: Maximum bytes allocated in payload area of ControlVm + * segment. + * @payload_bytes: Actual number of bytes of payload area to copy between + * IO/Command. If non-zero, there is a payload to copy. 
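Taken together, payload_vm_offset, payload_max_bytes and payload_bytes describe a variable-length blob that trails the fixed ControlVm header. Below is a sketch of how a consumer might locate and bounds-check that payload; msg_base, struct payload_desc and payload_ptr() are illustrative names, and the offset is assumed to be relative to the start of the message instance, as the field description above says.

#include <stddef.h>
#include <stdint.h>

struct payload_desc {
	uint64_t payload_vm_offset;	/* offset of the payload area */
	uint32_t payload_max_bytes;	/* space allocated for the payload */
	uint32_t payload_bytes;		/* bytes actually valid; 0 = no payload */
};

/* Return a pointer to the payload, or NULL if absent or oversized. */
static const void *payload_ptr(const void *msg_base,
			       const struct payload_desc *d)
{
	if (d->payload_bytes == 0 || d->payload_bytes > d->payload_max_bytes)
		return NULL;
	return (const uint8_t *)msg_base + d->payload_vm_offset;
}

int main(void)
{
	uint8_t buf[256] = { 0 };
	struct payload_desc d = { .payload_vm_offset = 128,
				  .payload_max_bytes = 64,
				  .payload_bytes = 16 };

	return payload_ptr(buf, &d) ? 0 : 1;
}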
+ * + * This is the common structure that is at the beginning of every + * ControlVm message (both commands and responses) in any ControlVm + * queue. Commands are easily distinguished from responses by + * looking at the flags.response field. */ struct controlvm_message_header { - u32 id; /* See CONTROLVM_ID. */ - /* For requests, indicates the message type. */ - /* For responses, indicates the type of message we are responding to. */ - - /* Includes size of this struct + size of message */ + u32 id; + /* + * For requests, indicates the message type. For responses, indicates + * the type of message we are responding to. + */ u32 message_size; - /* Index of segment containing Vm message/information */ u32 segment_index; - /* Error status code or result of message completion */ u32 completion_status; struct { - /* =1 in a response to signify failure */ u32 failed:1; - /* =1 in all messages that expect a response */ u32 response_expected:1; - /* =1 in all bus & device-related messages where the message - * receiver is to act as the bus or device server - */ u32 server:1; - /* =1 for testing use only (Control and Command ignore this */ u32 test_message:1; - /* =1 if there are forthcoming responses/acks associated - * with this message - */ u32 partial_completion:1; - /* =1 this is to let us know to preserve channel contents */ u32 preserve:1; - /* =1 the DiagWriter is active in the Diagnostic Partition */ u32 writer_in_diag:1; - /* Natural alignment */ u32 reserve:25; } __packed flags; - /* Natural alignment */ u32 reserved; - /* Identifies the particular message instance */ u64 message_handle; - /* request instances with the corresponding response instance. */ - /* Offset of payload area from start of this instance */ u64 payload_vm_offset; - /* Maximum bytes allocated in payload area of ControlVm segment */ u32 payload_max_bytes; - /* Actual number of bytes of payload area to copy between IO/Command */ u32 payload_bytes; - /* if non-zero, there is a payload to copy. */ } __packed; +/* + * struct controlvm_packet_device_create - For CONTROLVM_DEVICE_CREATE + * @bus_no: Bus # (0..n-1) from the msg receiver's end. + * @dev_no: Bus-relative (0..n-1) device number. + * @channel_addr: Guest physical address of the channel, which can be + * dereferenced by the receiver of this ControlVm command. + * @channel_bytes: Specifies size of the channel in bytes. + * @data_type_uuid: Specifies format of data in channel. + * @dev_inst_uuid: Instance guid for the device. + * @irq_info intr: Specifies interrupt information. + */ struct controlvm_packet_device_create { - u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */ - u32 dev_no; /* bus-relative (0..n-1) device number */ - /* Guest physical address of the channel, which can be dereferenced by - * the receiver of this ControlVm command - */ + u32 bus_no; + u32 dev_no; u64 channel_addr; - u64 channel_bytes; /* specifies size of the channel in bytes */ - uuid_le data_type_uuid; /* specifies format of data in channel */ - uuid_le dev_inst_uuid; /* instance guid for the device */ - struct irq_info intr; /* specifies interrupt information */ -} __packed; /* for CONTROLVM_DEVICE_CREATE */ + u64 channel_bytes; + uuid_le data_type_uuid; + uuid_le dev_inst_uuid; + struct irq_info intr; +} __packed; +/* + * struct controlvm_packet_device_configure - For CONTROLVM_DEVICE_CONFIGURE + * @bus_no: Bus number (0..n-1) from the msg receiver's perspective. + * @dev_no: Bus-relative (0..n-1) device number. 
+ */ struct controlvm_packet_device_configure { - /* bus # (0..n-1) from the msg receiver's perspective */ u32 bus_no; - /* Control uses header SegmentIndex field to access bus number... */ - u32 dev_no; /* bus-relative (0..n-1) device number */ -} __packed; /* for CONTROLVM_DEVICE_CONFIGURE */ + /* Control uses header SegmentIndex field to access bus number. */ + u32 dev_no; +} __packed; +/* Total 128 bytes */ struct controlvm_message_device_create { struct controlvm_message_header header; struct controlvm_packet_device_create packet; -} __packed; /* total 128 bytes */ +} __packed; +/* Total 56 bytes */ struct controlvm_message_device_configure { struct controlvm_message_header header; struct controlvm_packet_device_configure packet; -} __packed; /* total 56 bytes */ +} __packed; -/* This is the format for a message in any ControlVm queue. */ +/* + * struct controlvm_message_packet - This is the format for a message in any + * ControlVm queue. + * @struct create_bus: For CONTROLVM_BUS_CREATE. + * @bus_no: Bus # (0..n-1) from the msg receiver's perspective. + * @dev_count: Indicates the max number of devices on this bus. + * @channel_addr: Guest physical address of the channel, which can be + * dereferenced by the receiver of this ControlVM + * command. + * @channel_bytes: Size of the channel. + * @bus_data_type_uuid: Indicates format of data in bus channel. + * @bus_inst_uuid: Instance uuid for the bus. + * + * @struct destroy_bus: For CONTROLVM_BUS_DESTROY. + * @bus_no: Bus # (0..n-1) from the msg receiver's perspective. + * @reserved: Natural alignment purposes. + * + * @struct configure_bus: For CONTROLVM_BUS_CONFIGURE. + * @bus_no: Bus # (0..n-1) from the receiver's perspective. + * @reserved1: For alignment purposes. + * @guest_handle: This is used to convert guest physical address to + * physical address. + * @recv_bus_irq_handle: Specifies interrupt info. It is used by SP to + * register to receive interrupts from the CP. This + * interrupt is used for bus level notifications. + * The corresponding sendBusInterruptHandle is kept + * in CP. + * + * @struct create_device: For CONTROLVM_DEVICE_CREATE. + * + * @struct destroy_device: For CONTROLVM_DEVICE_DESTROY. + * @bus_no: Bus # (0..n-1) from the msg receiver's perspective. + * @dev_no: Bus-relative (0..n-1) device number. + * + * @struct configure_device: For CONTROLVM_DEVICE_CONFIGURE. + * + * @struct reconfigure_device: For CONTROLVM_DEVICE_RECONFIGURE. + * @bus_no: Bus # (0..n-1) from the msg receiver's perspective. + * @dev_no: Bus-relative (0..n-1) device number. + * + * @struct bus_change_state: For CONTROLVM_BUS_CHANGESTATE. + * @bus_no: + * @struct state: + * @reserved: Natural alignment purposes. + * + * @struct device_change_state: For CONTROLVM_DEVICE_CHANGESTATE. + * @bus_no: + * @dev_no: + * @struct state: + * @struct flags: + * @phys_device: =1 if message is for a physical device. + * @reserved: Natural alignment. + * @reserved1: Natural alignment. + * @reserved: Natural alignment purposes. + * + * @struct device_change_state_event: For CONTROLVM_DEVICE_CHANGESTATE_EVENT. + * @bus_no: + * @dev_no: + * @struct state: + * @reserved: Natural alignment purposes. + * + * @struct init_chipset: For CONTROLVM_CHIPSET_INIT. + * @bus_count: Indicates the max number of busses. + * @switch_count: Indicates the max number of switches. + * @enum features: + * @platform_number: + * + * @struct chipset_selftest: For CONTROLVM_CHIPSET_SELFTEST. + * @options: Reserved. + * @test: Bit 0 set to run embedded selftest. 
+ * + * @addr: A physical address of something, that can be dereferenced by the + * receiver of this ControlVm command. + * + * @handle: A handle of something (depends on command id). + */ struct controlvm_message_packet { union { struct { - /* bus # (0..n-1) from the msg receiver's perspective */ u32 bus_no; - /* indicates the max number of devices on this bus */ u32 dev_count; - /* Guest physical address of the channel, which can be - * dereferenced by the receiver of this ControlVm command - */ u64 channel_addr; - u64 channel_bytes; /* size of the channel */ - /* indicates format of data in bus channel*/ + u64 channel_bytes; uuid_le bus_data_type_uuid; - uuid_le bus_inst_uuid; /* instance uuid for the bus */ - } __packed create_bus; /* for CONTROLVM_BUS_CREATE */ + uuid_le bus_inst_uuid; + } __packed create_bus; struct { - /* bus # (0..n-1) from the msg receiver's perspective */ u32 bus_no; - u32 reserved; /* Natural alignment purposes */ - } __packed destroy_bus; /* for CONTROLVM_BUS_DESTROY */ + u32 reserved; + } __packed destroy_bus; struct { - /* bus # (0..n-1) from the receiver's perspective */ u32 bus_no; - u32 reserved1; /* for alignment purposes */ - /* This is used to convert guest physical address to physical address */ + u32 reserved1; u64 guest_handle; u64 recv_bus_irq_handle; - /* specifies interrupt info. It is used by SP - * to register to receive interrupts from the - * CP. This interrupt is used for bus level - * notifications. The corresponding - * sendBusInterruptHandle is kept in CP. - */ - } __packed configure_bus; /* for CONTROLVM_BUS_CONFIGURE */ - /* for CONTROLVM_DEVICE_CREATE */ + } __packed configure_bus; struct controlvm_packet_device_create create_device; struct { - /* bus # (0..n-1) from the msg receiver's perspective */ u32 bus_no; - u32 dev_no; /* bus-relative (0..n-1) device # */ - } __packed destroy_device; /* for CONTROLVM_DEVICE_DESTROY */ - /* for CONTROLVM_DEVICE_CONFIGURE */ + u32 dev_no; + } __packed destroy_device; struct controlvm_packet_device_configure configure_device; struct { - /* bus # (0..n-1) from the msg receiver's perspective */ u32 bus_no; - u32 dev_no; /* bus-relative (0..n-1) device # */ + u32 dev_no; } __packed reconfigure_device; - /* for CONTROLVM_DEVICE_RECONFIGURE */ struct { u32 bus_no; struct visor_segment_state state; - u8 reserved[2]; /* Natural alignment purposes */ - } __packed bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */ + u8 reserved[2]; + } __packed bus_change_state; struct { u32 bus_no; u32 dev_no; struct visor_segment_state state; struct { - /* =1 if message is for a physical device */ u32 phys_device:1; - u32 reserved:31; /* Natural alignment */ - u32 reserved1; /* Natural alignment */ + u32 reserved:31; + u32 reserved1; } __packed flags; - u8 reserved[2]; /* Natural alignment purposes */ + u8 reserved[2]; } __packed device_change_state; - /* for CONTROLVM_DEVICE_CHANGESTATE */ struct { u32 bus_no; u32 dev_no; struct visor_segment_state state; - u8 reserved[6]; /* Natural alignment purposes */ + u8 reserved[6]; } __packed device_change_state_event; - /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */ struct { - /* indicates the max number of busses */ u32 bus_count; - /* indicates the max number of switches */ u32 switch_count; enum visor_chipset_feature features; - u32 platform_number; /* Platform Number */ - } __packed init_chipset; /* for CONTROLVM_CHIPSET_INIT */ + u32 platform_number; + } __packed init_chipset; struct { - u32 options; /* reserved */ - u32 test; /* bit 0 set to run embedded selftest */ + u32 
options; + u32 test; } __packed chipset_selftest; - /* for CONTROLVM_CHIPSET_SELFTEST */ - /* a physical address of something, that can be dereferenced - * by the receiver of this ControlVm command - */ u64 addr; - /* a handle of something (depends on command id) */ u64 handle; }; } __packed; @@ -348,93 +438,139 @@ struct controlvm_message { struct controlvm_message_packet cmd; } __packed; +/* + * struct visor_controlvm_channel + * @struct header: + * @gp_controlvm: Guest phys addr of this channel. + * @gp_partition_tables: Guest phys addr of partition tables. + * @gp_diag_guest: Guest phys addr of diagnostic channel. + * @gp_boot_romdisk: Guest phys addr of (read* only) Boot + * ROM disk. + * @gp_boot_ramdisk: Guest phys addr of writable Boot RAM + * disk. + * @gp_acpi_table: Guest phys addr of acpi table. + * @gp_control_channel: Guest phys addr of control channel. + * @gp_diag_romdisk: Guest phys addr of diagnostic ROM disk. + * @gp_nvram: Guest phys addr of NVRAM channel. + * @request_payload_offset: Offset to request payload area. + * @event_payload_offset: Offset to event payload area. + * @request_payload_bytes: Bytes available in request payload area. + * @event_payload_bytes: Bytes available in event payload area. + * @control_channel_bytes: + * @nvram_channel_bytes: Bytes in PartitionNvram segment. + * @message_bytes: sizeof(CONTROLVM_MESSAGE). + * @message_count: CONTROLVM_MESSAGE_MAX. + * @gp_smbios_table: Guest phys addr of SMBIOS tables. + * @gp_physical_smbios_table: Guest phys addr of SMBIOS table. + * @gp_reserved: VISOR_MAX_GUESTS_PER_SERVICE. + * @virtual_guest_firmware_image_base: Guest physical address of EFI firmware + * image base. + * @virtual_guest_firmware_entry_point: Guest physical address of EFI firmware + * entry point. + * @virtual_guest_firmware_image_size: Guest EFI firmware image size. + * @virtual_guest_firmware_boot_base: GPA = 1MB where EFI firmware image is + * copied to. + * @virtual_guest_image_base: + * @virtual_guest_image_size: + * @prototype_control_channel_offset: + * @virtual_guest_partition_handle: + * @restore_action: Restore Action field to restore the + * guest partition. + * @dump_action: For Windows guests it shows if the + * visordisk is in dump mode. + * @nvram_fail_count: + * @saved_crash_message_count: = CONTROLVM_CRASHMSG_MAX. + * @saved_crash_message_offset: Offset to request payload area needed + * for crash dump. + * @installation_error: Type of error encountered during + * installation. + * @installation_text_id: Id of string to display. + * @installation_remaining_steps: Number of remaining installation steps + * (for progress bars). + * @tool_action: VISOR_TOOL_ACTIONS Installation Action + * field. + * @reserved: Alignment. + * @struct efi_visor_ind: + * @sp_reserved: + * @reserved2: Force signals to begin on 128-byte + * cache line. + * @struct request_queue: Guest partition uses this queue to send + * requests to Control. + * @struct response_queue: Control uses this queue to respond to + * service or guest partition request. + * @struct event_queue: Control uses this queue to send events + * to guest partition. + * @struct event_ack_queue: Service or guest partition uses this + * queue to ack Control events. + * @struct request_msg: Request fixed-size message pool - + * does not include payload. + * @struct response_msg: Response fixed-size message pool - + * does not include payload. + * @struct event_msg: Event fixed-size message pool - + * does not include payload. 
+ * @struct event_ack_msg: Ack fixed-size message pool - + * does not include payload. + * @struct saved_crash_msg: Message stored during IOVM creation to + * be reused after crash. + */ struct visor_controlvm_channel { struct channel_header header; - u64 gp_controlvm; /* guest phys addr of this channel */ - u64 gp_partition_tables;/* guest phys addr of partition tables */ - u64 gp_diag_guest; /* guest phys addr of diagnostic channel */ - u64 gp_boot_romdisk;/* guest phys addr of (read* only) Boot ROM disk */ - u64 gp_boot_ramdisk;/* guest phys addr of writable Boot RAM disk */ - u64 gp_acpi_table; /* guest phys addr of acpi table */ - u64 gp_control_channel;/* guest phys addr of control channel */ - u64 gp_diag_romdisk;/* guest phys addr of diagnostic ROM disk */ - u64 gp_nvram; /* guest phys addr of NVRAM channel */ - u64 request_payload_offset; /* Offset to request payload area */ - u64 event_payload_offset; /* Offset to event payload area */ - /* Bytes available in request payload area */ + u64 gp_controlvm; + u64 gp_partition_tables; + u64 gp_diag_guest; + u64 gp_boot_romdisk; + u64 gp_boot_ramdisk; + u64 gp_acpi_table; + u64 gp_control_channel; + u64 gp_diag_romdisk; + u64 gp_nvram; + u64 request_payload_offset; + u64 event_payload_offset; u32 request_payload_bytes; - u32 event_payload_bytes;/* Bytes available in event payload area */ + u32 event_payload_bytes; u32 control_channel_bytes; - u32 nvram_channel_bytes; /* Bytes in PartitionNvram segment */ - u32 message_bytes; /* sizeof(CONTROLVM_MESSAGE) */ - u32 message_count; /* CONTROLVM_MESSAGE_MAX */ - u64 gp_smbios_table; /* guest phys addr of SMBIOS tables */ - u64 gp_physical_smbios_table; /* guest phys addr of SMBIOS table */ - /* VISOR_MAX_GUESTS_PER_SERVICE */ + u32 nvram_channel_bytes; + u32 message_bytes; + u32 message_count; + u64 gp_smbios_table; + u64 gp_physical_smbios_table; char gp_reserved[2688]; - - /* guest physical address of EFI firmware image base */ u64 virtual_guest_firmware_image_base; - - /* guest physical address of EFI firmware entry point */ u64 virtual_guest_firmware_entry_point; - - /* guest EFI firmware image size */ u64 virtual_guest_firmware_image_size; - - /* GPA = 1MB where EFI firmware image is copied to */ u64 virtual_guest_firmware_boot_base; u64 virtual_guest_image_base; u64 virtual_guest_image_size; u64 prototype_control_channel_offset; u64 virtual_guest_partition_handle; - /* Restore Action field to restore the guest partition */ u16 restore_action; - /* For Windows guests it shows if the visordisk is in dump mode */ u16 dump_action; u16 nvram_fail_count; - u16 saved_crash_message_count; /* = CONTROLVM_CRASHMSG_MAX */ - /* Offset to request payload area needed for crash dump */ + u16 saved_crash_message_count; u32 saved_crash_message_offset; - /* Type of error encountered during installation */ u32 installation_error; - u32 installation_text_id; /* Id of string to display */ - /* Number of remaining installation steps (for progress bars) */ + u32 installation_text_id; u16 installation_remaining_steps; - /* VISOR_TOOL_ACTIONS Installation Action field */ u8 tool_action; - u8 reserved; /* alignment */ + u8 reserved; struct efi_visor_indication efi_visor_ind; u32 sp_reserved; - /* Force signals to begin on 128-byte cache line */ u8 reserved2[28]; - /* guest partition uses this queue to send requests to Control */ struct signal_queue_header request_queue; - /* Control uses this queue to respond to service or guest - * partition requests - */ struct signal_queue_header response_queue; - /* Control 
uses this queue to send events to guest partition */ struct signal_queue_header event_queue; - /* Service or guest partition uses this queue to ack Control events */ struct signal_queue_header event_ack_queue; - /* Request fixed-size message pool - does not include payload */ - struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX]; - - /* Response fixed-size message pool - does not include payload */ - struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX]; - - /* Event fixed-size message pool - does not include payload */ - struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX]; - - /* Ack fixed-size message pool - does not include payload */ - struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX]; - - /* Message stored during IOVM creation to be reused after crash */ - struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX]; + struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX]; + struct controlvm_message response_msg[CONTROLVM_MESSAGE_MAX]; + struct controlvm_message event_msg[CONTROLVM_MESSAGE_MAX]; + struct controlvm_message event_ack_msg[CONTROLVM_MESSAGE_MAX]; + struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX]; } __packed; -/* The following header will be located at the beginning of PayloadVmOffset for +/* + * struct visor_controlvm_parameters_header + * + * The following header will be located at the beginning of PayloadVmOffset for * various ControlVm commands. The receiver of a ControlVm command with a * PayloadVmOffset will dereference this address and then use connection_offset, * initiator_offset, and target_offset to get the location of UTF-8 formatted @@ -457,7 +593,8 @@ struct visor_controlvm_parameters_header { u32 name_length; uuid_le id; u32 revision; - u32 reserved; /* Natural alignment */ + /* Natural alignment */ + u32 reserved; } __packed; /* General Errors------------------------------------------------------[0-99] */ @@ -467,72 +604,57 @@ struct visor_controlvm_parameters_header { #define CONTROLVM_RESP_KMALLOC_FAILED 3 #define CONTROLVM_RESP_ID_UNKNOWN 4 #define CONTROLVM_RESP_ID_INVALID_FOR_CLIENT 5 - /* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */ #define CONTROLVM_RESP_CLIENT_SWITCHCOUNT_NONZERO 100 #define CONTROLVM_RESP_EXPECTED_CHIPSET_INIT 101 - /* Maximum Limit----------------------------------------------------[200-299] */ -#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */ -#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */ +/* BUS_CREATE */ +#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 +/* DEVICE_CREATE */ +#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* Payload and Parameter Related------------------------------------[400-499] */ -#define CONTROLVM_RESP_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT, - * DEVICE_CONFIGURE - */ -#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401 /* Multiple */ -#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */ -#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */ -/* Specified[Packet Structure] Value-------------------------------[500-599] */ -#define CONTROLVM_RESP_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT, - * BUS_CONFIGURE, - * DEVICE_CREATE, - * DEVICE_CONFIG - * DEVICE_DESTROY - */ -#define CONTROLVM_RESP_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT*/ - /* DEVICE_CREATE, - * DEVICE_CONFIGURE, - * DEVICE_DESTROY - */ -#define CONTROLVM_RESP_CHANNEL_INVALID 502 /* DEVICE_CREATE, - * DEVICE_CONFIGURE - */ -/* Partition Driver Callback 
Interface----------------------[600-699] */ -#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE, - * BUS_DESTROY, - * DEVICE_CREATE, - * DEVICE_DESTROY - */ -/* Unable to invoke VIRTPCI callback */ -#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR 605 /* BUS_CREATE, - * BUS_DESTROY, - * DEVICE_CREATE, - * DEVICE_DESTROY - */ -/* VIRTPCI Callback returned error */ +/* SWITCH_ATTACHEXTPORT, DEVICE_CONFIGURE */ +#define CONTROLVM_RESP_PAYLOAD_INVALID 400 +/* Multiple */ +#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401 +/* DEVICE_CONFIGURE */ +#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID 402 +/* DEVICE_CONFIGURE */ +#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID 403 +/* Specified[Packet Structure] Value--------------------------------[500-599] */ +/* SWITCH_ATTACHINTPORT */ +/* BUS_CONFIGURE, DEVICE_CREATE, DEVICE_CONFIG, DEVICE_DESTROY */ +#define CONTROLVM_RESP_BUS_INVALID 500 +/* SWITCH_ATTACHINTPORT*/ +/* DEVICE_CREATE, DEVICE_CONFIGURE, DEVICE_DESTROY */ +#define CONTROLVM_RESP_DEVICE_INVALID 501 +/* DEVICE_CREATE, DEVICE_CONFIGURE */ +#define CONTROLVM_RESP_CHANNEL_INVALID 502 +/* Partition Driver Callback Interface------------------------------[600-699] */ +/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */ +#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE 604 +/* Unable to invoke VIRTPCI callback. VIRTPCI Callback returned error. */ +/* BUS_CREATE, BUS_DESTROY, DEVICE_CREATE, DEVICE_DESTROY */ +#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR 605 +/* Generic device callback returned error. */ +/* SWITCH_ATTACHEXTPORT, SWITCH_DETACHEXTPORT, DEVICE_CONFIGURE */ #define CONTROLVM_RESP_GENERIC_DRIVER_CALLBACK_ERROR 606 - /* SWITCH_ATTACHEXTPORT, - * SWITCH_DETACHEXTPORT - * DEVICE_CONFIGURE - */ - -/* generic device callback returned error */ /* Bus Related------------------------------------------------------[700-799] */ -#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */ +/* BUS_DESTROY */ +#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* Channel Related--------------------------------------------------[800-899] */ -#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO, - * DEVICE_DESTROY - */ -#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */ +/* GET_CHANNELINFO, DEVICE_DESTROY */ +#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN 800 +/* DEVICE_CREATE */ +#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL 801 /* Chipset Shutdown Related---------------------------------------[1000-1099] */ #define CONTROLVM_RESP_CHIPSET_SHUTDOWN_FAILED 1000 #define CONTROLVM_RESP_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001 - /* Chipset Stop Related-------------------------------------------[1100-1199] */ #define CONTROLVM_RESP_CHIPSET_STOP_FAILED_BUS 1100 #define CONTROLVM_RESP_CHIPSET_STOP_FAILED_SWITCH 1101 - /* Device Related-------------------------------------------------[1400-1499] */ #define CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT 1400 -#endif /* __CONTROLVMCHANNEL_H__ */ +/* __CONTROLVMCHANNEL_H__ */ +#endif diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h index 01d7d517dba7..2c820e21f1b7 100644 --- a/drivers/staging/unisys/visorbus/vbuschannel.h +++ b/drivers/staging/unisys/visorbus/vbuschannel.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2010 - 2015 UNISYS CORPORATION +/* + * Copyright (C) 2010 - 2015 UNISYS CORPORATION * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it @@ -15,13 +16,15 @@ #ifndef __VBUSCHANNEL_H__ #define __VBUSCHANNEL_H__ -/* The vbus channel is the channel area provided via the BUS_CREATE controlvm - * message for each virtual bus. This channel area is provided to both server - * and client ends of the bus. The channel header area is initialized by - * the server, and the remaining information is filled in by the client. - * We currently use this for the client to provide various information about - * the client devices and client drivers for the server end to see. +/* + * The vbus channel is the channel area provided via the BUS_CREATE controlvm + * message for each virtual bus. This channel area is provided to both server + * and client ends of the bus. The channel header area is initialized by + * the server, and the remaining information is filled in by the client. + * We currently use this for the client to provide various information about + * the client devices and client drivers for the server end to see. */ + #include <linux/uuid.h> #include <linux/ctype.h> #include "channel.h" @@ -30,11 +33,9 @@ #define VISOR_VBUS_CHANNEL_UUID \ UUID_LE(0x193b331b, 0xc58f, 0x11da, \ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f) -static const uuid_le visor_vbus_channel_uuid = VISOR_VBUS_CHANNEL_UUID; -#define VISOR_VBUS_CHANNEL_SIGNATURE VISOR_CHANNEL_SIGNATURE - -/* Must increment this whenever you insert or delete fields within this channel +/* + * Must increment this whenever you insert or delete fields within this channel * struct. Also increment whenever you change the meaning of fields within this * channel struct so as to break pre-existing software. Note that you can * usually add fields to the END of the channel struct withOUT needing to @@ -42,43 +43,65 @@ static const uuid_le visor_vbus_channel_uuid = VISOR_VBUS_CHANNEL_UUID; */ #define VISOR_VBUS_CHANNEL_VERSIONID 1 -/* +static const uuid_le visor_vbus_channel_uuid = VISOR_VBUS_CHANNEL_UUID; + +/* struct visor_vbus_deviceinfo + * @devtype: Short string identifying the device type. + * @drvname: Driver .sys file name. + * @infostrs: Kernel version. + * @reserved: Pad size to 256 bytes. + * * An array of this struct is present in the channel area for each vbus. - * (See vbuschannel.h.) - * It is filled in by the client side to provide info about the device - * and driver from the client's perspective. + * (See vbuschannel.h.) It is filled in by the client side to provide info + * about the device and driver from the client's perspective. */ struct visor_vbus_deviceinfo { - u8 devtype[16]; /* short string identifying the device type */ - u8 drvname[16]; /* driver .sys file name */ - u8 infostrs[96]; /* kernel version */ - u8 reserved[128]; /* pad size to 256 bytes */ + u8 devtype[16]; + u8 drvname[16]; + u8 infostrs[96]; + u8 reserved[128]; } __packed; +/* + * struct visor_vbus_headerinfo + * @struct_bytes: Size of this struct in bytes. + * @device_info_struct_bytes: Size of VISOR_VBUS_DEVICEINFO. + * @dev_info_count: Num of items in DevInfo member. This is the + * allocated size. + * @chp_info_offset: Byte offset from beginning of this struct to the + * ChpInfo struct. + * @bus_info_offset: Byte offset from beginning of this struct to the + * BusInfo struct. + * @dev_info_offset: Byte offset from beginning of this struct to the + * DevInfo array.
+ * @reserved: Natural alignment. + */ struct visor_vbus_headerinfo { - u32 struct_bytes; /* size of this struct in bytes */ - u32 device_info_struct_bytes; /* sizeof(VISOR_VBUS_DEVICEINFO) */ - u32 dev_info_count; /* num of items in DevInfo member */ - /* (this is the allocated size) */ - u32 chp_info_offset; /* byte offset from beginning of this struct */ - /* to the ChpInfo struct (below) */ - u32 bus_info_offset; /* byte offset from beginning of this struct */ - /* to the BusInfo struct (below) */ - u32 dev_info_offset; /* byte offset from beginning of this struct */ - /* to the DevInfo array (below) */ + u32 struct_bytes; + u32 device_info_struct_bytes; + u32 dev_info_count; + u32 chp_info_offset; + u32 bus_info_offset; + u32 dev_info_offset; u8 reserved[104]; } __packed; +/* + * struct visor_vbus_channel + * @channel_header: Initialized by server. + * @hdr_info: Initialized by server. + * @chp_info: Describes client chipset device and driver. + * @bus_info: Describes client bus device and driver. + * @dev_info: Describes client device and driver for each device on the + * bus. + */ struct visor_vbus_channel { - struct channel_header channel_header; /* initialized by server */ - struct visor_vbus_headerinfo hdr_info; /* initialized by server */ - /* the remainder of this channel is filled in by the client */ + struct channel_header channel_header; + struct visor_vbus_headerinfo hdr_info; + /* The remainder of this channel is filled in by the client */ struct visor_vbus_deviceinfo chp_info; - /* describes client chipset device and driver */ struct visor_vbus_deviceinfo bus_info; - /* describes client bus device and driver */ struct visor_vbus_deviceinfo dev_info[0]; - /* describes client device and driver for each device on the bus */ } __packed; #endif diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c index 1c785dd19ddd..ff80ab248a35 100644 --- a/drivers/staging/unisys/visorbus/visorbus_main.c +++ b/drivers/staging/unisys/visorbus/visorbus_main.c @@ -1,4 +1,5 @@ -/* visorbus_main.c +/* + * visorbus_main.c * * Copyright (C) 2010 - 2015 UNISYS CORPORATION * All rights reserved. @@ -20,15 +21,12 @@ #include "visorbus.h" #include "visorbus_private.h" -#define MYDRVNAME "visorbus" - -/* Display string that is guaranteed to be no longer the 99 characters*/ +/* Display string that is guaranteed to be no longer than 99 characters */ #define LINESIZE 99 - -#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c #define POLLJIFFIES_NORMALCHANNEL 10 -static bool initialized; /* stores whether bus_registration was successful */ +/* stores whether bus_registration was successful */ +static bool initialized; static struct dentry *visorbus_debugfs_dir; /* @@ -73,8 +71,62 @@ static LIST_HEAD(list_all_bus_instances); /* list of visor_device structs, linked via .list_all */ static LIST_HEAD(list_all_device_instances); -static int -visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env) +/* + * Generic function useful for validating any type of channel when it is + * received by the client that will be accessing the channel. + * Note that <logCtx> is only needed for callers in the EFI environment, and + * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
+ */ +int visor_check_channel(struct channel_header *ch, + uuid_le expected_uuid, + char *chname, + u64 expected_min_bytes, + u32 expected_version, + u64 expected_signature) +{ + if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) { + /* caller wants us to verify type GUID */ + if (uuid_le_cmp(ch->chtype, expected_uuid) != 0) { + pr_err("Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n", + chname, &expected_uuid, + &expected_uuid, &ch->chtype); + return 0; + } + } + /* verify channel size */ + if (expected_min_bytes > 0) { + if (ch->size < expected_min_bytes) { + pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n", + chname, &expected_uuid, + (unsigned long long)expected_min_bytes, + ch->size); + return 0; + } + } + /* verify channel version */ + if (expected_version > 0) { + if (ch->version_id != expected_version) { + pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n", + chname, &expected_uuid, + (unsigned long)expected_version, + ch->version_id); + return 0; + } + } + /* verify channel signature */ + if (expected_signature > 0) { + if (ch->signature != expected_signature) { + pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n", + chname, &expected_uuid, + expected_signature, ch->signature); + return 0; + } + } + return 1; +} +EXPORT_SYMBOL_GPL(visor_check_channel); + +static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env) { struct visor_device *dev; uuid_le guid; @@ -94,8 +146,7 @@ visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env) * * Return: 1 iff the provided driver can control the specified device */ -static int -visorbus_match(struct device *xdev, struct device_driver *xdrv) +static int visorbus_match(struct device *xdev, struct device_driver *xdrv) { uuid_le channel_type; int i; @@ -103,9 +154,8 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv) struct visor_driver *drv; dev = to_visor_device(xdev); - drv = to_visor_driver(xdrv); channel_type = visorchannel_get_uuid(dev->visorchannel); - + drv = to_visor_driver(xdrv); if (!drv->channel_types) return 0; @@ -122,7 +172,7 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv) /* * This describes the TYPE of bus. - * (Don't confuse this with an INSTANCE of the bus.) + * (Don't confuse this with an INSTANCE of the bus.) 
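To make the exported helper above concrete, here is a minimal usage sketch in the style of the get_vbus_header_info() caller that appears later in this file. MYCHAN_GUID, MYCHAN_VERSIONID and struct mychan_channel are hypothetical placeholders for a driver-specific channel type; only visor_check_channel() and VISOR_CHANNEL_SIGNATURE come from the code itself.

/* Illustrative caller: refuse a mapped channel whose header does not match. */
static int mychan_validate(struct channel_header *ch)
{
	if (!visor_check_channel(ch, MYCHAN_GUID, "mychan",
				 sizeof(struct mychan_channel),
				 MYCHAN_VERSIONID,
				 VISOR_CHANNEL_SIGNATURE))
		return -EINVAL;	/* mismatch details already logged via pr_err() */
	return 0;
}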
*/ struct bus_type visorbus_type = { .name = "visorbus", @@ -137,8 +187,7 @@ struct bus_type visorbus_type = { * involved with destroying the dev are complete * @xdev: struct device for the bus being released */ -static void -visorbus_release_busdevice(struct device *xdev) +static void visorbus_release_busdevice(struct device *xdev) { struct visor_device *dev = dev_get_drvdata(xdev); @@ -152,15 +201,11 @@ visorbus_release_busdevice(struct device *xdev) * each child device instance * @xdev: struct device for the visor device being released */ -static void -visorbus_release_device(struct device *xdev) +static void visorbus_release_device(struct device *xdev) { struct visor_device *dev = to_visor_device(xdev); - if (dev->visorchannel) { - visorchannel_destroy(dev->visorchannel); - dev->visorchannel = NULL; - } + visorchannel_destroy(dev->visorchannel); kfree(dev); } @@ -229,7 +274,7 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr, struct device_driver *xdrv = dev->driver; struct visor_driver *drv = NULL; - if (!xbus || !xdrv) + if (!xdrv) return 0; i = xbus->match(dev, xdrv); if (!i) @@ -240,23 +285,23 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(typename); static struct attribute *channel_attrs[] = { - &dev_attr_physaddr.attr, - &dev_attr_nbytes.attr, - &dev_attr_clientpartition.attr, - &dev_attr_typeguid.attr, - &dev_attr_zoneguid.attr, - &dev_attr_typename.attr, - NULL + &dev_attr_physaddr.attr, + &dev_attr_nbytes.attr, + &dev_attr_clientpartition.attr, + &dev_attr_typeguid.attr, + &dev_attr_zoneguid.attr, + &dev_attr_typename.attr, + NULL }; -static struct attribute_group channel_attr_grp = { - .name = "channel", - .attrs = channel_attrs, +static const struct attribute_group channel_attr_grp = { + .name = "channel", + .attrs = channel_attrs, }; static const struct attribute_group *visorbus_channel_groups[] = { - &channel_attr_grp, - NULL + &channel_attr_grp, + NULL }; /* end implementation of specific channel attributes */ @@ -270,7 +315,8 @@ static const struct attribute_group *visorbus_channel_groups[] = { static ssize_t partition_handle_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct visor_device *vdev = to_visor_device(dev); u64 handle = visorchannel_get_clientpartition(vdev->visorchannel); @@ -280,7 +326,8 @@ static DEVICE_ATTR_RO(partition_handle); static ssize_t partition_guid_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct visor_device *vdev = to_visor_device(dev); return sprintf(buf, "{%pUb}\n", &vdev->partition_uuid); @@ -289,7 +336,8 @@ static DEVICE_ATTR_RO(partition_guid); static ssize_t partition_name_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct visor_device *vdev = to_visor_device(dev); return sprintf(buf, "%s\n", vdev->name); @@ -298,7 +346,8 @@ static DEVICE_ATTR_RO(partition_name); static ssize_t channel_addr_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct visor_device *vdev = to_visor_device(dev); u64 addr = visorchannel_get_physaddr(vdev->visorchannel); @@ -308,7 +357,8 @@ static DEVICE_ATTR_RO(channel_addr); static ssize_t channel_bytes_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct visor_device *vdev = to_visor_device(dev); u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel); @@ -318,7 +368,8 @@ static DEVICE_ATTR_RO(channel_bytes); static 
ssize_t channel_id_show(struct device *dev, struct device_attribute *attr, - char *buf) { + char *buf) +{ struct visor_device *vdev = to_visor_device(dev); int len = 0; @@ -331,22 +382,22 @@ static ssize_t channel_id_show(struct device *dev, static DEVICE_ATTR_RO(channel_id); static struct attribute *dev_attrs[] = { - &dev_attr_partition_handle.attr, - &dev_attr_partition_guid.attr, - &dev_attr_partition_name.attr, - &dev_attr_channel_addr.attr, - &dev_attr_channel_bytes.attr, - &dev_attr_channel_id.attr, - NULL + &dev_attr_partition_handle.attr, + &dev_attr_partition_guid.attr, + &dev_attr_partition_name.attr, + &dev_attr_channel_addr.attr, + &dev_attr_channel_bytes.attr, + &dev_attr_channel_id.attr, + NULL }; -static struct attribute_group dev_attr_grp = { - .attrs = dev_attrs, +static const struct attribute_group dev_attr_grp = { + .attrs = dev_attrs, }; static const struct attribute_group *visorbus_groups[] = { - &dev_attr_grp, - NULL + &dev_attr_grp, + NULL }; /* @@ -355,6 +406,7 @@ static const struct attribute_group *visorbus_groups[] = { * define & implement display of debugfs attributes under * /sys/kernel/debug/visorbus/visorbus<n>. */ + /* * vbuschannel_print_devinfo() - format a struct visor_vbus_deviceinfo * and write it to a seq_file @@ -365,12 +417,12 @@ static const struct attribute_group *visorbus_groups[] = { * * Reads @devInfo, and writes it in human-readable notation to @seq. */ -static void -vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo, - struct seq_file *seq, int devix) +static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo, + struct seq_file *seq, int devix) { + /* uninitialized vbus device entry */ if (!isprint(devinfo->devtype[0])) - return; /* uninitialized vbus device entry */ + return; if (devix >= 0) seq_printf(seq, "[%d]", devix); @@ -392,12 +444,11 @@ vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo, static int client_bus_info_debugfs_show(struct seq_file *seq, void *v) { - struct visor_device *vdev = seq->private; - struct visorchannel *channel = vdev->visorchannel; - - int i; + int i = 0; unsigned long off; struct visor_vbus_deviceinfo dev_info; + struct visor_device *vdev = seq->private; + struct visorchannel *channel = vdev->visorchannel; if (!channel) return 0; @@ -406,6 +457,7 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v) "Client device / client driver info for %s partition (vbus #%u):\n", ((vdev->name) ? 
(char *)(vdev->name) : ""), vdev->chipset_bus_no); + if (visorchannel_read(channel, offsetof(struct visor_vbus_channel, chp_info), &dev_info, sizeof(dev_info)) >= 0) @@ -414,8 +466,8 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v) offsetof(struct visor_vbus_channel, bus_info), &dev_info, sizeof(dev_info)) >= 0) vbuschannel_print_devinfo(&dev_info, seq, -1); + off = offsetof(struct visor_vbus_channel, dev_info); - i = 0; while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) { if (visorchannel_read(channel, off, &dev_info, sizeof(dev_info)) >= 0) @@ -441,8 +493,7 @@ static const struct file_operations client_bus_info_debugfs_fops = { .release = single_release, }; -static void -dev_periodic_work(unsigned long __opaque) +static void dev_periodic_work(unsigned long __opaque) { struct visor_device *dev = (struct visor_device *)__opaque; struct visor_driver *drv = to_visor_driver(dev->device.driver); @@ -451,8 +502,7 @@ dev_periodic_work(unsigned long __opaque) mod_timer(&dev->timer, jiffies + POLLJIFFIES_NORMALCHANNEL); } -static int -dev_start_periodic_work(struct visor_device *dev) +static int dev_start_periodic_work(struct visor_device *dev) { if (dev->being_removed || dev->timer_active) return -EINVAL; @@ -464,8 +514,7 @@ dev_start_periodic_work(struct visor_device *dev) return 0; } -static void -dev_stop_periodic_work(struct visor_device *dev) +static void dev_stop_periodic_work(struct visor_device *dev) { if (!dev->timer_active) return; @@ -484,40 +533,39 @@ dev_stop_periodic_work(struct visor_device *dev) * * Return: 0 iff successful */ -static int -visordriver_remove_device(struct device *xdev) +static int visordriver_remove_device(struct device *xdev) { struct visor_device *dev; struct visor_driver *drv; dev = to_visor_device(xdev); drv = to_visor_driver(xdev->driver); + mutex_lock(&dev->visordriver_callback_lock); dev->being_removed = true; - if (drv->remove) - drv->remove(dev); + drv->remove(dev); mutex_unlock(&dev->visordriver_callback_lock); - dev_stop_periodic_work(dev); + dev_stop_periodic_work(dev); put_device(&dev->device); + return 0; } -/** +/* * visorbus_unregister_visor_driver() - unregisters the provided driver * @drv: the driver to unregister * * A visor function driver calls this function to unregister the driver, * i.e., within its module_exit function. 
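The comment above describes the pairing a visor function driver is expected to follow: register in module_init(), unregister in module_exit(). A rough sketch of that pairing is shown below, reflecting that visorbus_register_visor_driver() (see the checks added further down in this file) now rejects a visor_driver that does not supply probe, remove, pause and resume callbacks. All mydrv_* names, the channel GUID and the channel-type table layout are illustrative assumptions rather than code from this series.

/* Sketch only; assumes "visorbus.h" for struct visor_driver et al. */
static struct visor_channeltype_descriptor mydrv_channel_types[] = {
	{ MYDRV_CHANNEL_GUID, "mychan" },	/* hypothetical GUID/name pair */
	{ NULL_UUID_LE, NULL }
};

/* mydrv_probe/mydrv_remove/mydrv_pause/mydrv_resume definitions omitted. */
static struct visor_driver mydrv_driver = {
	.name = "mydrv",
	.channel_types = mydrv_channel_types,
	.probe = mydrv_probe,	/* all four callbacks are now mandatory */
	.remove = mydrv_remove,
	.pause = mydrv_pause,
	.resume = mydrv_resume,
};

static int __init mydrv_init(void)
{
	return visorbus_register_visor_driver(&mydrv_driver);
}

static void __exit mydrv_exit(void)
{
	visorbus_unregister_visor_driver(&mydrv_driver);
}

module_init(mydrv_init);
module_exit(mydrv_exit);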
*/ -void -visorbus_unregister_visor_driver(struct visor_driver *drv) +void visorbus_unregister_visor_driver(struct visor_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver); -/** +/* * visorbus_read_channel() - reads from the designated channel into * the provided buffer * @dev: the device whose channel is read from @@ -530,15 +578,14 @@ EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver); * * Return: integer indicating success (zero) or failure (non-zero) */ -int -visorbus_read_channel(struct visor_device *dev, unsigned long offset, - void *dest, unsigned long nbytes) +int visorbus_read_channel(struct visor_device *dev, unsigned long offset, + void *dest, unsigned long nbytes) { return visorchannel_read(dev->visorchannel, offset, dest, nbytes); } EXPORT_SYMBOL_GPL(visorbus_read_channel); -/** +/* * visorbus_write_channel() - writes the provided buffer into the designated * channel * @dev: the device whose channel is written to @@ -551,15 +598,14 @@ EXPORT_SYMBOL_GPL(visorbus_read_channel); * * Return: integer indicating success (zero) or failure (non-zero) */ -int -visorbus_write_channel(struct visor_device *dev, unsigned long offset, - void *src, unsigned long nbytes) +int visorbus_write_channel(struct visor_device *dev, unsigned long offset, + void *src, unsigned long nbytes) { return visorchannel_write(dev->visorchannel, offset, src, nbytes); } EXPORT_SYMBOL_GPL(visorbus_write_channel); -/** +/* * visorbus_enable_channel_interrupts() - enables interrupts on the * designated device * @dev: the device on which to enable interrupts @@ -567,8 +613,7 @@ EXPORT_SYMBOL_GPL(visorbus_write_channel); * Currently we don't yet have a real interrupt, so for now we just call the * interrupt function periodically via a timer. 
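As a small usage sketch for the two channel accessors documented above: a function driver that knows its channel layout can read or write a field relative to the channel start from its probe or channel_interrupt path. struct mychan_hdr and its features member are assumed names used only for illustration.

/* Illustrative: pull one 64-bit field out of this device's channel. */
static int mydrv_read_features(struct visor_device *dev, u64 *features)
{
	/* Returns 0 on success, non-zero on failure (see kernel-doc above). */
	return visorbus_read_channel(dev,
				     offsetof(struct mychan_hdr, features),
				     features, sizeof(*features));
}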
*/ -int -visorbus_enable_channel_interrupts(struct visor_device *dev) +int visorbus_enable_channel_interrupts(struct visor_device *dev) { struct visor_driver *drv = to_visor_driver(dev->device.driver); @@ -581,13 +626,12 @@ visorbus_enable_channel_interrupts(struct visor_device *dev) } EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts); -/** +/* * visorbus_disable_channel_interrupts() - disables interrupts on the * designated device * @dev: the device on which to disable interrupts */ -void -visorbus_disable_channel_interrupts(struct visor_device *dev) +void visorbus_disable_channel_interrupts(struct visor_device *dev) { dev_stop_periodic_work(dev); } @@ -616,8 +660,7 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts); * Return: 0 if successful, otherwise the negative value returned by * device_add() indicating the reason for failure */ -static int -create_visor_device(struct visor_device *dev) +static int create_visor_device(struct visor_device *dev) { int err; u32 chipset_bus_no = dev->chipset_bus_no; @@ -664,7 +707,8 @@ create_visor_device(struct visor_device *dev) goto err_put; list_add_tail(&dev->list_all, &list_all_device_instances); - return 0; /* success: reference kept via unmatched get_device() */ + /* success: reference kept via unmatched get_device() */ + return 0; err_put: put_device(&dev->device); @@ -672,17 +716,15 @@ err_put: return err; } -static void -remove_visor_device(struct visor_device *dev) +static void remove_visor_device(struct visor_device *dev) { list_del(&dev->list_all); put_device(&dev->device); device_unregister(&dev->device); } -static int -get_vbus_header_info(struct visorchannel *chan, - struct visor_vbus_headerinfo *hdr_info) +static int get_vbus_header_info(struct visorchannel *chan, + struct visor_vbus_headerinfo *hdr_info) { int err; @@ -691,7 +733,7 @@ get_vbus_header_info(struct visorchannel *chan, "vbus", sizeof(struct visor_vbus_channel), VISOR_VBUS_CHANNEL_VERSIONID, - VISOR_VBUS_CHANNEL_SIGNATURE)) + VISOR_CHANNEL_SIGNATURE)) return -EINVAL; err = visorchannel_read(chan, sizeof(struct channel_header), hdr_info, @@ -722,10 +764,9 @@ get_vbus_header_info(struct visorchannel *chan, * Returns no value since this is debug information and not needed for * device functionality. */ -static void -write_vbus_chp_info(struct visorchannel *chan, - struct visor_vbus_headerinfo *hdr_info, - struct visor_vbus_deviceinfo *info) +static void write_vbus_chp_info(struct visorchannel *chan, + struct visor_vbus_headerinfo *hdr_info, + struct visor_vbus_deviceinfo *info) { int off = sizeof(struct channel_header) + hdr_info->chp_info_offset; @@ -748,10 +789,9 @@ write_vbus_chp_info(struct visorchannel *chan, * Returns no value since this is debug information and not needed for * device functionality. */ -static void -write_vbus_bus_info(struct visorchannel *chan, - struct visor_vbus_headerinfo *hdr_info, - struct visor_vbus_deviceinfo *info) +static void write_vbus_bus_info(struct visorchannel *chan, + struct visor_vbus_headerinfo *hdr_info, + struct visor_vbus_deviceinfo *info) { int off = sizeof(struct channel_header) + hdr_info->bus_info_offset; @@ -775,10 +815,10 @@ write_vbus_bus_info(struct visorchannel *chan, * Returns no value since this is debug information and not needed for * device functionality. 
*/ -static void -write_vbus_dev_info(struct visorchannel *chan, - struct visor_vbus_headerinfo *hdr_info, - struct visor_vbus_deviceinfo *info, unsigned int devix) +static void write_vbus_dev_info(struct visorchannel *chan, + struct visor_vbus_headerinfo *hdr_info, + struct visor_vbus_deviceinfo *info, + unsigned int devix) { int off = (sizeof(struct channel_header) + hdr_info->dev_info_offset) + @@ -807,14 +847,13 @@ static void bus_device_info_init( } /* - * fix_vbus_dev_info() - for a child device just created on a client bus, fill - * in information about the driver that is controlling - * this device into the appropriate slot within the - * vbus channel of the bus instance + * publish_vbus_dev_info() - for a child device just created on a client bus, + * fill in information about the driver that is + * controlling this device into the appropriate slot + * within the vbus channel of the bus instance * @visordev: struct visor_device for the desired device */ -static void -fix_vbus_dev_info(struct visor_device *visordev) +static void publish_vbus_dev_info(struct visor_device *visordev) { int i; struct visor_device *bdev; @@ -853,7 +892,6 @@ fix_vbus_dev_info(struct visor_device *visordev) bus_device_info_init(&dev_info, chan_type_name, visordrv->name); write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no); - write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo); write_vbus_bus_info(bdev->visorchannel, hdr_info, &clientbus_driverinfo); @@ -874,18 +912,14 @@ fix_vbus_dev_info(struct visor_device *visordev) * was successful with this device, otherwise a negative errno * value indicating failure reason */ -static int -visordriver_probe_device(struct device *xdev) +static int visordriver_probe_device(struct device *xdev) { int res; struct visor_driver *drv; struct visor_device *dev; - drv = to_visor_driver(xdev->driver); dev = to_visor_device(xdev); - - if (!drv->probe) - return -ENODEV; + drv = to_visor_driver(xdev->driver); mutex_lock(&dev->visordriver_callback_lock); dev->being_removed = false; @@ -894,14 +928,14 @@ visordriver_probe_device(struct device *xdev) if (res >= 0) { /* success: reference kept via unmatched get_device() */ get_device(&dev->device); - fix_vbus_dev_info(dev); + publish_vbus_dev_info(dev); } mutex_unlock(&dev->visordriver_callback_lock); return res; } -/** +/* * visorbus_register_visor_driver() - registers the provided visor driver * for handling one or more visor device * types (channel_types) @@ -952,8 +986,21 @@ visordriver_probe_device(struct device *xdev) */ int visorbus_register_visor_driver(struct visor_driver *drv) { + /* can't register on a nonexistent bus */ if (!initialized) - return -ENODEV; /* can't register on a nonexistent bus */ + return -ENODEV; + + if (!drv->probe) + return -ENODEV; + + if (!drv->remove) + return -ENODEV; + + if (!drv->pause) + return -ENODEV; + + if (!drv->resume) + return -ENODEV; drv->driver.name = drv->name; drv->driver.bus = &visorbus_type; @@ -985,8 +1032,7 @@ EXPORT_SYMBOL_GPL(visorbus_register_visor_driver); * Return: 0 for success, otherwise negative errno value indicating reason for * failure */ -static int -visorbus_create_instance(struct visor_device *dev) +static int visorbus_create_instance(struct visor_device *dev) { int id = dev->chipset_bus_no; int err; @@ -1030,7 +1076,7 @@ visorbus_create_instance(struct visor_device *dev) err_debugfs_dir: debugfs_remove_recursive(dev->debugfs_dir); kfree(hdr_info); - dev_err(&dev->device, "visorbus_create_instance failed: %d\n", err); + 
dev_err(&dev->device, "%s failed: %d\n", __func__, err); return err; } @@ -1038,8 +1084,7 @@ err_debugfs_dir: * visorbus_remove_instance() - remove a device instance for the visorbus itself * @dev: struct visor_device indentifying the bus to remove */ -static void -visorbus_remove_instance(struct visor_device *dev) +static void visorbus_remove_instance(struct visor_device *dev) { /* * Note that this will result in the release method for @@ -1049,10 +1094,7 @@ visorbus_remove_instance(struct visor_device *dev) * successfully been able to trace thru the code to see where/how * release() gets called. But I know it does. */ - if (dev->visorchannel) { - visorchannel_destroy(dev->visorchannel); - dev->visorchannel = NULL; - } + visorchannel_destroy(dev->visorchannel); kfree(dev->vbus_hdr_info); list_del(&dev->list_all); device_unregister(&dev->device); @@ -1061,8 +1103,7 @@ visorbus_remove_instance(struct visor_device *dev) /* * remove_all_visor_devices() - remove all child visorbus device instances */ -static void -remove_all_visor_devices(void) +static void remove_all_visor_devices(void) { struct list_head *listentry, *listtmp; @@ -1074,13 +1115,11 @@ remove_all_visor_devices(void) } } -int -visorchipset_bus_create(struct visor_device *dev) +int visorchipset_bus_create(struct visor_device *dev) { int err; err = visorbus_create_instance(dev); - if (err < 0) return err; @@ -1089,15 +1128,13 @@ visorchipset_bus_create(struct visor_device *dev) return 0; } -void -visorchipset_bus_destroy(struct visor_device *dev) +void visorchipset_bus_destroy(struct visor_device *dev) { visorbus_remove_instance(dev); visorbus_destroy_response(dev, 0); } -int -visorchipset_device_create(struct visor_device *dev_info) +int visorchipset_device_create(struct visor_device *dev_info) { int err; @@ -1110,11 +1147,9 @@ visorchipset_device_create(struct visor_device *dev_info) return 0; } -void -visorchipset_device_destroy(struct visor_device *dev_info) +void visorchipset_device_destroy(struct visor_device *dev_info) { remove_visor_device(dev_info); - visorbus_device_destroy_response(dev_info, 0); } @@ -1127,14 +1162,12 @@ visorchipset_device_destroy(struct visor_device *dev_info) * @status: 0 iff the pause state change completed successfully, otherwise * a negative errno value indicating the reason for failure */ -static void -pause_state_change_complete(struct visor_device *dev, int status) +static void pause_state_change_complete(struct visor_device *dev, int status) { if (!dev->pausing) return; dev->pausing = false; - visorbus_device_pause_response(dev, status); } @@ -1147,8 +1180,7 @@ pause_state_change_complete(struct visor_device *dev, int status) * @status: 0 iff the resume state change completed successfully, otherwise * a negative errno value indicating the reason for failure */ -static void -resume_state_change_complete(struct visor_device *dev, int status) +static void resume_state_change_complete(struct visor_device *dev, int status) { if (!dev->resuming) return; @@ -1174,9 +1206,8 @@ resume_state_change_complete(struct visor_device *dev, int status) * via a callback function; see pause_state_change_complete() and * resume_state_change_complete(). 
*/ -static int -visorchipset_initiate_device_pause_resume(struct visor_device *dev, - bool is_pause) +static int visorchipset_initiate_device_pause_resume(struct visor_device *dev, + bool is_pause) { int err; struct visor_driver *drv = NULL; @@ -1189,19 +1220,14 @@ visorchipset_initiate_device_pause_resume(struct visor_device *dev, return -EBUSY; if (is_pause) { - if (!drv->pause) - return -EINVAL; - dev->pausing = true; err = drv->pause(dev, pause_state_change_complete); } else { - /* The vbus_dev_info structure in the channel was been - * cleared, make sure it is valid. + /* + * The vbus_dev_info structure in the channel has been cleared, + * make sure it is valid. */ - fix_vbus_dev_info(dev); - if (!drv->resume) - return -EINVAL; - + publish_vbus_dev_info(dev); dev->resuming = true; err = drv->resume(dev, resume_state_change_complete); } @@ -1209,7 +1235,7 @@ visorchipset_initiate_device_pause_resume(struct visor_device *dev, return err; } -/** +/* * visorchipset_device_pause() - start a pause operation for a visor device * @dev_info: struct visor_device identifying the device being paused * @@ -1217,13 +1243,11 @@ visorchipset_initiate_device_pause_resume(struct visor_device *dev, * that device. Success/failure result is returned asynchronously * via a callback function; see pause_state_change_complete(). */ -int -visorchipset_device_pause(struct visor_device *dev_info) +int visorchipset_device_pause(struct visor_device *dev_info) { int err; err = visorchipset_initiate_device_pause_resume(dev_info, true); - if (err < 0) { dev_info->pausing = false; return err; @@ -1232,7 +1256,7 @@ visorchipset_device_pause(struct visor_device *dev_info) return 0; } -/** +/* * visorchipset_device_resume() - start a resume operation for a visor device * @dev_info: struct visor_device identifying the device being resumed * @@ -1240,13 +1264,11 @@ visorchipset_device_pause(struct visor_device *dev_info) * that device. Success/failure result is returned asynchronously * via a callback function; see resume_state_change_complete().
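Because the pause/resume plumbing above hands the driver a completion callback instead of waiting synchronously, a driver-side pause implementation might look roughly like the sketch below. The callback parameter is written out as an inline function-pointer type to avoid guessing at the typedef used in visorbus.h, and mydrv_pause with its quiesce step is purely illustrative.

/* Illustrative driver pause callback: quiesce, then report completion. */
static int mydrv_pause(struct visor_device *dev,
		       void (*complete_func)(struct visor_device *, int))
{
	/* Stop submitting new work to the channel here, then ... */
	complete_func(dev, 0);	/* ends up in pause_state_change_complete() */
	return 0;
}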
*/ -int -visorchipset_device_resume(struct visor_device *dev_info) +int visorchipset_device_resume(struct visor_device *dev_info) { int err; err = visorchipset_initiate_device_pause_resume(dev_info, false); - if (err < 0) { dev_info->resuming = false; return err; @@ -1255,8 +1277,7 @@ visorchipset_device_resume(struct visor_device *dev_info) return 0; } -int -visorbus_init(void) +int visorbus_init(void) { int err; @@ -1271,14 +1292,12 @@ visorbus_init(void) return err; initialized = true; - bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset"); return 0; } -void -visorbus_exit(void) +void visorbus_exit(void) { struct list_head *listentry, *listtmp; diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h index 98a5af19189d..7ccf7565eb2c 100644 --- a/drivers/staging/unisys/visorbus/visorbus_private.h +++ b/drivers/staging/unisys/visorbus/visorbus_private.h @@ -23,10 +23,6 @@ #include "controlvmchannel.h" #include "vbuschannel.h" -/* TARGET_HOSTNAME specified as -DTARGET_HOSTNAME=\"thename\" on the - * command line - */ - int visorchipset_bus_create(struct visor_device *bus_info); void visorchipset_bus_destroy(struct visor_device *bus_info); int visorchipset_device_create(struct visor_device *dev_info); diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c index 6885c2cb7135..c7eea655a86e 100644 --- a/drivers/staging/unisys/visorbus/visorchannel.c +++ b/drivers/staging/unisys/visorbus/visorchannel.c @@ -26,7 +26,7 @@ #include "visorbus_private.h" #include "controlvmchannel.h" -#define MYDRVNAME "visorchannel" +#define VISOR_DRV_NAME "visorchannel" #define VISOR_CONSOLEVIDEO_CHANNEL_GUID \ UUID_LE(0x3cd6e705, 0xd6a2, 0x4aa5, \ @@ -41,17 +41,20 @@ struct visorchannel { bool requested; struct channel_header chan_hdr; uuid_le guid; - bool needs_lock; /* channel creator knows if more than one */ - /* thread will be inserting or removing */ - spinlock_t insert_lock; /* protect head writes in chan_hdr */ - spinlock_t remove_lock; /* protect tail writes in chan_hdr */ - + /* + * channel creator knows if more than one + * thread will be inserting or removing + */ + bool needs_lock; + /* protect head writes in chan_hdr */ + spinlock_t insert_lock; + /* protect tail writes in chan_hdr */ + spinlock_t remove_lock; uuid_le type; uuid_le inst; }; -void -visorchannel_destroy(struct visorchannel *channel) +void visorchannel_destroy(struct visorchannel *channel) { if (!channel) return; @@ -63,46 +66,39 @@ visorchannel_destroy(struct visorchannel *channel) kfree(channel); } -u64 -visorchannel_get_physaddr(struct visorchannel *channel) +u64 visorchannel_get_physaddr(struct visorchannel *channel) { return channel->physaddr; } -ulong -visorchannel_get_nbytes(struct visorchannel *channel) +ulong visorchannel_get_nbytes(struct visorchannel *channel) { return channel->nbytes; } -char * -visorchannel_uuid_id(uuid_le *guid, char *s) +char *visorchannel_uuid_id(uuid_le *guid, char *s) { sprintf(s, "%pUL", guid); return s; } -char * -visorchannel_id(struct visorchannel *channel, char *s) +char *visorchannel_id(struct visorchannel *channel, char *s) { return visorchannel_uuid_id(&channel->guid, s); } -char * -visorchannel_zoneid(struct visorchannel *channel, char *s) +char *visorchannel_zoneid(struct visorchannel *channel, char *s) { return visorchannel_uuid_id(&channel->chan_hdr.zone_uuid, s); } -u64 -visorchannel_get_clientpartition(struct visorchannel *channel) +u64 
visorchannel_get_clientpartition(struct visorchannel *channel) { return channel->chan_hdr.partition_handle; } -int -visorchannel_set_clientpartition(struct visorchannel *channel, - u64 partition_handle) +int visorchannel_set_clientpartition(struct visorchannel *channel, + u64 partition_handle) { channel->chan_hdr.partition_handle = partition_handle; return 0; @@ -114,16 +110,14 @@ visorchannel_set_clientpartition(struct visorchannel *channel, * * Return: the UUID of the provided channel */ -uuid_le -visorchannel_get_uuid(struct visorchannel *channel) +uuid_le visorchannel_get_uuid(struct visorchannel *channel) { return channel->guid; } EXPORT_SYMBOL_GPL(visorchannel_get_uuid); -int -visorchannel_read(struct visorchannel *channel, ulong offset, - void *dest, ulong nbytes) +int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest, + ulong nbytes) { if (offset + nbytes > channel->nbytes) return -EIO; @@ -133,9 +127,8 @@ visorchannel_read(struct visorchannel *channel, ulong offset, return 0; } -int -visorchannel_write(struct visorchannel *channel, ulong offset, - void *dest, ulong nbytes) +int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest, + ulong nbytes) { size_t chdr_size = sizeof(struct channel_header); size_t copy_size; @@ -154,8 +147,7 @@ visorchannel_write(struct visorchannel *channel, ulong offset, return 0; } -void * -visorchannel_get_header(struct visorchannel *channel) +void *visorchannel_get_header(struct visorchannel *channel) { return &channel->chan_hdr; } @@ -187,9 +179,8 @@ visorchannel_get_header(struct visorchannel *channel) &((sig_hdr)->FIELD), \ sizeof((sig_hdr)->FIELD)) -static int -sig_read_header(struct visorchannel *channel, u32 queue, - struct signal_queue_header *sig_hdr) +static int sig_read_header(struct visorchannel *channel, u32 queue, + struct signal_queue_header *sig_hdr) { if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header)) return -EINVAL; @@ -200,9 +191,9 @@ sig_read_header(struct visorchannel *channel, u32 queue, sig_hdr, sizeof(struct signal_queue_header)); } -static int -sig_read_data(struct visorchannel *channel, u32 queue, - struct signal_queue_header *sig_hdr, u32 slot, void *data) +static int sig_read_data(struct visorchannel *channel, u32 queue, + struct signal_queue_header *sig_hdr, u32 slot, + void *data) { int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue, sig_hdr, slot); @@ -211,9 +202,9 @@ sig_read_data(struct visorchannel *channel, u32 queue, data, sig_hdr->signal_size); } -static int -sig_write_data(struct visorchannel *channel, u32 queue, - struct signal_queue_header *sig_hdr, u32 slot, void *data) +static int sig_write_data(struct visorchannel *channel, u32 queue, + struct signal_queue_header *sig_hdr, u32 slot, + void *data) { int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue, sig_hdr, slot); @@ -222,8 +213,8 @@ sig_write_data(struct visorchannel *channel, u32 queue, data, sig_hdr->signal_size); } -static int -signalremove_inner(struct visorchannel *channel, u32 queue, void *msg) +static int signalremove_inner(struct visorchannel *channel, u32 queue, + void *msg) { struct signal_queue_header sig_hdr; int error; @@ -246,9 +237,9 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg) /* * For each data field in SIGNAL_QUEUE_HEADER that was modified, - * update host memory. + * update host memory. Required for channel sync. 
*/ - mb(); /* required for channel synch */ + mb(); error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail); if (error) @@ -269,8 +260,8 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg) * * Return: integer error code indicating the status of the removal */ -int -visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg) +int visorchannel_signalremove(struct visorchannel *channel, u32 queue, + void *msg) { int rc; unsigned long flags; @@ -287,8 +278,7 @@ visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg) } EXPORT_SYMBOL_GPL(visorchannel_signalremove); -static bool -queue_empty(struct visorchannel *channel, u32 queue) +static bool queue_empty(struct visorchannel *channel, u32 queue) { struct signal_queue_header sig_hdr; @@ -307,8 +297,7 @@ queue_empty(struct visorchannel *channel, u32 queue) * Return: boolean indicating whether any messages in the designated * channel/queue are present */ -bool -visorchannel_signalempty(struct visorchannel *channel, u32 queue) +bool visorchannel_signalempty(struct visorchannel *channel, u32 queue) { bool rc; unsigned long flags; @@ -324,8 +313,8 @@ visorchannel_signalempty(struct visorchannel *channel, u32 queue) } EXPORT_SYMBOL_GPL(visorchannel_signalempty); -static int -signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg) +static int signalinsert_inner(struct visorchannel *channel, u32 queue, + void *msg) { struct signal_queue_header sig_hdr; int err; @@ -351,9 +340,9 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg) /* * For each data field in SIGNAL_QUEUE_HEADER that was modified, - * update host memory. + * update host memory. Required for channel sync. */ - mb(); /* required for channel synch */ + mb(); err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head); if (err) @@ -388,9 +377,11 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg) * Return: pointer to visorchannel that was created if successful, * otherwise NULL */ -static struct visorchannel * -visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, - gfp_t gfp, uuid_le guid, bool needs_lock) +static struct visorchannel *visorchannel_create_guts( + u64 physaddr, + unsigned long channel_bytes, + gfp_t gfp, uuid_le guid, + bool needs_lock) { struct visorchannel *channel; int err; @@ -414,7 +405,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, * this. Remember that we haven't requested it so we don't try to * release later on. 
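visorchannel_signalremove() and visorchannel_signalempty() above form the consumer side of a channel's signal queues; a consumer built on the exported wrappers might drain a queue as in the sketch below. struct mydrv_msg and MYDRV_RESP_QUEUE are assumed, illustrative names, and the queue argument simply selects one of the channel's signal queues.

/* Illustrative consumer: drain one signal queue until it reports empty. */
static void mydrv_drain_queue(struct visorchannel *channel)
{
	struct mydrv_msg msg;

	while (!visorchannel_signalempty(channel, MYDRV_RESP_QUEUE)) {
		if (visorchannel_signalremove(channel, MYDRV_RESP_QUEUE, &msg))
			break;	/* non-zero: nothing removed or a read error */
		/* ... process msg ... */
	}
}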
*/ - channel->requested = request_mem_region(physaddr, size, MYDRVNAME); + channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME); if (!channel->requested && uuid_le_cmp(guid, visor_video_guid)) /* we only care about errors if this is not the video channel */ goto err_destroy_channel; @@ -444,7 +435,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, release_mem_region(channel->physaddr, channel->nbytes); channel->mapped = NULL; channel->requested = request_mem_region(channel->physaddr, - channel_bytes, MYDRVNAME); + channel_bytes, VISOR_DRV_NAME); if (!channel->requested && uuid_le_cmp(guid, visor_video_guid)) /* we only care about errors if this is not the video channel */ goto err_destroy_channel; @@ -465,17 +456,17 @@ err_destroy_channel: return NULL; } -struct visorchannel * -visorchannel_create(u64 physaddr, unsigned long channel_bytes, - gfp_t gfp, uuid_le guid) +struct visorchannel *visorchannel_create(u64 physaddr, + unsigned long channel_bytes, + gfp_t gfp, uuid_le guid) { return visorchannel_create_guts(physaddr, channel_bytes, gfp, guid, false); } -struct visorchannel * -visorchannel_create_with_lock(u64 physaddr, unsigned long channel_bytes, - gfp_t gfp, uuid_le guid) +struct visorchannel *visorchannel_create_with_lock(u64 physaddr, + unsigned long channel_bytes, + gfp_t gfp, uuid_le guid) { return visorchannel_create_guts(physaddr, channel_bytes, gfp, guid, true); @@ -490,8 +481,8 @@ visorchannel_create_with_lock(u64 physaddr, unsigned long channel_bytes, * * Return: integer error code indicating the status of the insertion */ -int -visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg) +int visorchannel_signalinsert(struct visorchannel *channel, u32 queue, + void *msg) { int rc; unsigned long flags; diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c index 22150564b4fb..6d4498f4de66 100644 --- a/drivers/staging/unisys/visorbus/visorchipset.c +++ b/drivers/staging/unisys/visorbus/visorchipset.c @@ -15,11 +15,7 @@ */ #include <linux/acpi.h> -#include <linux/ctype.h> -#include <linux/fs.h> #include <linux/mm.h> -#include <linux/nls.h> -#include <linux/netdevice.h> #include <linux/uuid.h> #include <linux/crash_dump.h> @@ -27,8 +23,6 @@ #include "visorbus_private.h" #include "vmcallinterface.h" -#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c - #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100 @@ -124,7 +118,6 @@ static ssize_t toolaction_store(struct device *dev, offsetof(struct visor_controlvm_channel, tool_action), &tool_action, sizeof(u8)); - if (err) return err; return count; @@ -143,7 +136,6 @@ static ssize_t boottotool_show(struct device *dev, efi_visor_ind), &efi_visor_indication, sizeof(struct efi_visor_indication)); - if (err) return err; return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool); @@ -165,7 +157,6 @@ static ssize_t boottotool_store(struct device *dev, efi_visor_ind), &(efi_visor_indication), sizeof(struct efi_visor_indication)); - if (err) return err; return count; @@ -302,18 +293,21 @@ parser_string_get(struct parser_context *ctx) int i; pscan = ctx->curr; + if (!pscan) + return NULL; nscan = ctx->bytes_remaining; if (nscan == 0) return NULL; - if (!pscan) - return NULL; + for (i = 0, value_length = -1; i < nscan; i++) if (pscan[i] == '\0') { value_length = i; break; } - if (value_length < 0) /* '\0' was not included in the length */ + /* '\0' was not included in the length */ + if 
(value_length < 0) value_length = nscan; + value = kmalloc(value_length + 1, GFP_KERNEL); if (!value) return NULL; @@ -364,9 +358,9 @@ struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no, struct device *dev_start = NULL; struct visor_device *vdev = NULL; struct visor_busdev id = { - .bus_no = bus_no, - .dev_no = dev_no - }; + .bus_no = bus_no, + .dev_no = dev_no + }; if (from) dev_start = &from->device; @@ -580,7 +574,7 @@ visorbus_create(struct controlvm_message *inmsg) bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); if (bus_info && (bus_info->state.created == 1)) { dev_err(&chipset_dev->acpi_device->dev, - "failed visorbus_create: already exists\n"); + "failed %s: already exists\n", __func__); err = -EEXIST; goto err_respond; } @@ -618,11 +612,11 @@ visorbus_create(struct controlvm_message *inmsg) cmd->create_bus.channel_bytes, GFP_KERNEL, cmd->create_bus.bus_data_type_uuid); - if (!visorchannel) { err = -ENOMEM; goto err_free_pending_msg; } + bus_info->visorchannel = visorchannel; /* Response will be handled by visorchipset_bus_create */ @@ -757,7 +751,6 @@ visorbus_device_create(struct controlvm_message *inmsg) err = -ENODEV; goto err_respond; } - if (bus_info->state.created == 0) { dev_err(&chipset_dev->acpi_device->dev, "bus not created, id: %d\n", bus_no); @@ -791,7 +784,6 @@ visorbus_device_create(struct controlvm_message *inmsg) cmd->create_device.channel_bytes, GFP_KERNEL, cmd->create_device.data_type_uuid); - if (!visorchannel) { dev_err(&chipset_dev->acpi_device->dev, "failed to create visorchannel: %d/%d\n", @@ -918,7 +910,6 @@ visorbus_device_destroy(struct controlvm_message *inmsg) err = -EINVAL; goto err_respond; } - if (dev_info->pending_msg_hdr) { /* only non-NULL if dev is still waiting on a response */ err = -EIO; @@ -936,6 +927,7 @@ visorbus_device_destroy(struct controlvm_message *inmsg) dev_info->pending_msg_hdr = pmsg_hdr; } + kfree(dev_info->name); visorchipset_device_destroy(dev_info); return 0; @@ -954,8 +946,7 @@ err_respond: * disable the specified device. The udev script then writes to * /sys/devices/platform/visorchipset/parahotplug, which causes the * parahotplug store functions to get called, at which point the - * appropriate CONTROLVM message is retrieved from the list and responded - * to. + * appropriate CONTROLVM message is retrieved from the list and responded to. 
*/ #define PARAHOTPLUG_TIMEOUT_MS 2000 @@ -1023,7 +1014,8 @@ parahotplug_request_destroy(struct parahotplug_request *req) } static LIST_HEAD(parahotplug_request_list); -static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */ +/* lock for above */ +static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* * parahotplug_request_complete() - mark request as complete @@ -1146,7 +1138,7 @@ static struct attribute *visorchipset_parahotplug_attrs[] = { NULL }; -static struct attribute_group visorchipset_parahotplug_group = { +static const struct attribute_group visorchipset_parahotplug_group = { .name = "parahotplug", .attrs = visorchipset_parahotplug_attrs }; @@ -1201,7 +1193,6 @@ parahotplug_process_message(struct controlvm_message *inmsg) int err; req = parahotplug_request_create(inmsg); - if (!req) return -ENOMEM; @@ -1295,10 +1286,9 @@ chipset_selftest_uevent(struct controlvm_message_header *msg_hdr) static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr) { - int res; - - res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, + int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_OFFLINE); + if (msg_hdr->flags.response_expected) controlvm_respond(msg_hdr, res, NULL); @@ -1321,13 +1311,12 @@ static int unisys_vmcall(unsigned long tuple, unsigned long param) __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) : "a"(tuple), "b"(reg_ebx), "c"(reg_ecx)); - if (result) goto error; return 0; - -error: /* Need to convert from VMCALL error codes to Linux */ +/* Need to convert from VMCALL error codes to Linux */ +error: switch (result) { case VMCALL_RESULT_INVALID_PARAM: return -EINVAL; @@ -1513,10 +1502,11 @@ visorbus_device_resume_response(struct visor_device *dev_info, int response) } static struct parser_context * -parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry) +parser_init_byte_stream(u64 addr, u32 bytes, bool *retry) { int allocbytes = sizeof(struct parser_context) + bytes; struct parser_context *ctx; + void *mapping; *retry = false; @@ -1541,22 +1531,11 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry) ctx->curr = NULL; ctx->bytes_remaining = 0; ctx->byte_stream = false; - if (local) { - void *p; - - if (addr > virt_to_phys(high_memory - 1)) - goto err_finish_ctx; - p = __va((unsigned long)(addr)); - memcpy(ctx->data, p, bytes); - } else { - void *mapping = memremap(addr, bytes, MEMREMAP_WB); - - if (!mapping) - goto err_finish_ctx; - memcpy(ctx->data, mapping, bytes); - memunmap(mapping); - } - + mapping = memremap(addr, bytes, MEMREMAP_WB); + if (!mapping) + goto err_finish_ctx; + memcpy(ctx->data, mapping, bytes); + memunmap(mapping); ctx->byte_stream = true; chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes; @@ -1587,12 +1566,10 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr) u64 parm_addr; u32 parm_bytes; struct parser_context *parser_ctx = NULL; - bool local_addr; struct controlvm_message ackmsg; int err = 0; /* create parsing context if necessary */ - local_addr = (inmsg.hdr.flags.test_message == 1); parm_addr = channel_addr + inmsg.hdr.payload_vm_offset; parm_bytes = inmsg.hdr.payload_bytes; @@ -1605,21 +1582,16 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr) bool retry = false; parser_ctx = - parser_init_byte_stream(parm_addr, parm_bytes, - local_addr, &retry); + parser_init_byte_stream(parm_addr, parm_bytes, &retry); if (!parser_ctx && retry) return -EAGAIN; } + controlvm_init_response(&ackmsg, &inmsg.hdr, 
CONTROLVM_RESP_SUCCESS); + err = visorchannel_signalinsert(chipset_dev->controlvm_channel, + CONTROLVM_QUEUE_ACK, &ackmsg); + if (err) + return err; - if (!local_addr) { - controlvm_init_response(&ackmsg, &inmsg.hdr, - CONTROLVM_RESP_SUCCESS); - err = visorchannel_signalinsert(chipset_dev->controlvm_channel, - CONTROLVM_QUEUE_ACK, - &ackmsg); - if (err) - return err; - } switch (inmsg.hdr.id) { case CONTROLVM_CHIPSET_INIT: err = chipset_init(&inmsg); @@ -1692,9 +1664,7 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr) static int read_controlvm_event(struct controlvm_message *msg) { - int err; - - err = visorchannel_signalremove(chipset_dev->controlvm_channel, + int err = visorchannel_signalremove(chipset_dev->controlvm_channel, CONTROLVM_QUEUE_EVENT, msg); if (err) return err; @@ -1829,12 +1799,10 @@ visorchipset_init(struct acpi_device *acpi_device) goto error; acpi_device->driver_data = chipset_dev; - chipset_dev->acpi_device = acpi_device; chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST; controlvm_channel = visorchannel_create_with_lock(addr, 0, GFP_KERNEL, uuid); - if (!controlvm_channel) goto error_free_chipset_dev; @@ -1845,8 +1813,12 @@ visorchipset_init(struct acpi_device *acpi_device) if (err < 0) goto error_destroy_channel; - if (!VISOR_CONTROLVM_CHANNEL_OK_CLIENT( - visorchannel_get_header(controlvm_channel))) + if (!visor_check_channel(visorchannel_get_header(controlvm_channel), + VISOR_CONTROLVM_CHANNEL_UUID, + "controlvm", + sizeof(struct visor_controlvm_channel), + VISOR_CONTROLVM_CHANNEL_VERSIONID, + VISOR_CHANNEL_SIGNATURE)) goto error_delete_groups; /* if booting in a crash kernel */ diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h index cc70e1b16bda..541911bcb6f0 100644 --- a/drivers/staging/unisys/visorbus/vmcallinterface.h +++ b/drivers/staging/unisys/visorbus/vmcallinterface.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2010 - 2015 UNISYS CORPORATION +/* + * Copyright (C) 2010 - 2015 UNISYS CORPORATION * All rights reserved. * * This program is free software; you can redistribute it and/or modify it @@ -15,20 +16,19 @@ #ifndef __VMCALLINTERFACE_H__ #define __VMCALLINTERFACE_H__ -enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */ - /* Note: when a new VMCALL is added: - * - the 1st 2 hex digits correspond to one of the - * VMCALL_MONITOR_INTERFACE types and - * - the next 2 hex digits are the nth relative instance of within a - * type - * E.G. for VMCALL_VIRTPART_RECYCLE_PART, - * - the 0x02 identifies it as a VMCALL_VIRTPART type and - * - the 0x01 identifies it as the 1st instance of a VMCALL_VIRTPART - * type of VMCALL - */ - /* used by all Guests, not just IO */ - VMCALL_CONTROLVM_ADDR = 0x0501, -}; +/* + * VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. + * + * Note: When a new VMCALL is added: + * - The 1st 2 hex digits correspond to one of the VMCALL_MONITOR_INTERFACE + * types. + * - The next 2 hex digits are the nth relative instance of within a type. + * E.G. for VMCALL_VIRTPART_RECYCLE_PART, + * - The 0x02 identifies it as a VMCALL_VIRTPART type. + * - The 0x01 identifies it as the 1st instance of a VMCALL_VIRTPART type of + * VMCALL. 
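The tuple encoding described in the comment above (upper hex byte selects the VMCALL interface type, lower byte the instance within that type) can be illustrated with a small stand-alone decoder; the helper names below are made up and are not part of the driver:

#include <stdio.h>

/* Upper byte: VMCALL_MONITOR_INTERFACE type; lower byte: instance within it. */
static unsigned int vmcall_tuple_type(unsigned long tuple)
{
	return (tuple >> 8) & 0xff;
}

static unsigned int vmcall_tuple_instance(unsigned long tuple)
{
	return tuple & 0xff;
}

int main(void)
{
	unsigned long tuple = 0x0501;	/* VMCALL_CONTROLVM_ADDR */

	/* Prints "type 0x05, instance 0x01" per the comment above. */
	printf("type 0x%02x, instance 0x%02x\n",
	       vmcall_tuple_type(tuple), vmcall_tuple_instance(tuple));
	return 0;
}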
+ */ +#define VMCALL_CONTROLVM_ADDR 0x0501 enum vmcall_result { VMCALL_RESULT_SUCCESS = 0, @@ -39,16 +39,23 @@ enum vmcall_result { VMCALL_RESULT_DEVICE_NOT_READY = 5 }; -/* Structures for IO VMCALLs */ -/* Parameters to VMCALL_CONTROLVM_ADDR interface */ +/* + * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has + * parameters to VMCALL_CONTROLVM_ADDR + * interface. + * @address: The Guest-relative physical address of the ControlVm channel. + * This VMCall fills this in with the appropriate address. + * Contents provided by this VMCALL (OUT). + * @channel_bytes: The size of the ControlVm channel in bytes This VMCall fills + * this in with the appropriate address. Contents provided by + * this VMCALL (OUT). + * @unused: Unused Bytes in the 64-Bit Aligned Struct. + */ struct vmcall_io_controlvm_addr_params { - /* The Guest-relative physical address of the ControlVm channel. */ - /* This VMCall fills this in with the appropriate address. */ - u64 address; /* contents provided by this VMCALL (OUT) */ - /* the size of the ControlVm channel in bytes This VMCall fills this */ - /* in with the appropriate address. */ - u32 channel_bytes; /* contents provided by this VMCALL (OUT) */ - u8 unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */ + u64 address; + u32 channel_bytes; + u8 unused[4]; } __packed; -#endif /* __VMCALLINTERFACE_H__ */ +/* __VMCALLINTERFACE_H__ */ +#endif diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c index a6e7a6bbc428..178d0227aa80 100644 --- a/drivers/staging/unisys/visorhba/visorhba_main.c +++ b/drivers/staging/unisys/visorhba/visorhba_main.c @@ -48,7 +48,8 @@ MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_UUID_STR); struct visordisk_info { u32 valid; - u32 channel, id, lun; /* Disk Path */ + /* Disk Path */ + u32 channel, id, lun; atomic_t ios_threshold; atomic_t error_count; struct visordisk_info *next; @@ -56,8 +57,10 @@ struct visordisk_info { struct scsipending { struct uiscmdrsp cmdrsp; - void *sent; /* The Data being tracked */ - char cmdtype; /* Type of pointer that is being stored */ + /* The Data being tracked */ + void *sent; + /* Type of pointer that is being stored */ + char cmdtype; }; /* Each scsi_host has a host_data area that contains this struct. */ @@ -71,7 +74,8 @@ struct visorhba_devdata { struct scsipending pending[MAX_PENDING_REQUESTS]; /* Start search for next pending free slot here */ unsigned int nextinsert; - spinlock_t privlock; /* lock to protect data in devdata */ + /* lock to protect data in devdata */ + spinlock_t privlock; bool serverdown; bool serverchangingstate; unsigned long long acquire_failed_cnt; @@ -108,18 +112,18 @@ struct visorhba_devices_open { (iter->lun == match->lun)) /* - * visor_thread_start - starts a thread for the device - * @threadfn: Function the thread starts - * @thrcontext: Context to pass to the thread, i.e. devdata - * @name: string describing name of thread + * visor_thread_start - Starts a thread for the device + * @threadfn: Function the thread starts + * @thrcontext: Context to pass to the thread, i.e. devdata + * @name: String describing name of thread * - * Starts a thread for the device. + * Starts a thread for the device. 
* - * Return the task_struct * denoting the thread on success, - * or NULL on failure + * Return: The task_struct * denoting the thread on success, + * or NULL on failure */ -static struct task_struct *visor_thread_start -(int (*threadfn)(void *), void *thrcontext, char *name) +static struct task_struct *visor_thread_start(int (*threadfn)(void *), + void *thrcontext, char *name) { struct task_struct *task; @@ -132,27 +136,27 @@ static struct task_struct *visor_thread_start } /* - * visor_thread_stop - stops the thread if it is running + * visor_thread_stop - Stops the thread if it is running + * @task: Description of process to stop */ static void visor_thread_stop(struct task_struct *task) { - if (!task) - return; /* no thread running */ kthread_stop(task); } /* - * add_scsipending_entry - save off io command that is pending in - * Service Partition - * @devdata: Pointer to devdata - * @cmdtype: Specifies the type of command pending - * @new: The command to be saved + * add_scsipending_entry - Save off io command that is pending in + * Service Partition + * @devdata: Pointer to devdata + * @cmdtype: Specifies the type of command pending + * @new: The command to be saved + * + * Saves off the io command that is being handled by the Service + * Partition so that it can be handled when it completes. If new is + * NULL it is assumed the entry refers only to the cmdrsp. * - * Saves off the io command that is being handled by the Service - * Partition so that it can be handled when it completes. If new is - * NULL it is assumed the entry refers only to the cmdrsp. - * Returns insert_location where entry was added, - * -EBUSY if it can't + * Return: Insert_location where entry was added on success, + * -EBUSY if it can't */ static int add_scsipending_entry(struct visorhba_devdata *devdata, char cmdtype, void *new) @@ -176,7 +180,8 @@ static int add_scsipending_entry(struct visorhba_devdata *devdata, entry->cmdtype = cmdtype; if (new) entry->sent = new; - else /* wants to send cmdrsp */ + /* wants to send cmdrsp */ + else entry->sent = &entry->cmdrsp; devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS; spin_unlock_irqrestore(&devdata->privlock, flags); @@ -185,15 +190,15 @@ static int add_scsipending_entry(struct visorhba_devdata *devdata, } /* - * del_scsipending_ent - removes an entry from the pending array - * @devdata: Device holding the pending array - * @del: Entry to remove + * del_scsipending_ent - Removes an entry from the pending array + * @devdata: Device holding the pending array + * @del: Entry to remove * - * Removes the entry pointed at by del and returns it. - * Returns the scsipending entry pointed at + * Removes the entry pointed at by del and returns it. 
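add_scsipending_entry, shown above, walks the pending array circularly from nextinsert looking for a free slot while holding devdata->privlock. A stripped-down sketch of just that slot search, with the locking omitted and all names invented for illustration:

#define TOY_MAX_PENDING 16

struct toy_pending {
	void *sent;		/* NULL means the slot is free */
};

struct toy_devdata {
	struct toy_pending pending[TOY_MAX_PENDING];
	unsigned int nextinsert;
};

/* Returns the slot index used, or -1 if every slot is busy. */
static int toy_add_pending(struct toy_devdata *d, void *cmd)
{
	unsigned int slot = d->nextinsert;

	do {
		if (!d->pending[slot].sent) {
			d->pending[slot].sent = cmd;
			d->nextinsert = (slot + 1) % TOY_MAX_PENDING;
			return slot;
		}
		slot = (slot + 1) % TOY_MAX_PENDING;
	} while (slot != d->nextinsert);

	return -1;	/* the driver reports -EBUSY in this case */
}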
+ * + * Return: The scsipending entry pointed to on success, NULL on failure */ -static void *del_scsipending_ent(struct visorhba_devdata *devdata, - int del) +static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del) { unsigned long flags; void *sent; @@ -203,7 +208,6 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata, spin_lock_irqsave(&devdata->privlock, flags); sent = devdata->pending[del].sent; - devdata->pending[del].cmdtype = 0; devdata->pending[del].sent = NULL; spin_unlock_irqrestore(&devdata->privlock, flags); @@ -212,13 +216,14 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata, } /* - * get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry - * @ddata: Device holding the pending array - * @ent: Entry that stores the cmdrsp + * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry + * @ddata: Device holding the pending array + * @ent: Entry that stores the cmdrsp + * + * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid + * if the "sent" field is not NULL. * - * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid - * if the "sent" field is not NULL - * Returns a pointer to the cmdrsp. + * Return: A pointer to the cmdrsp, NULL on failure */ static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata, int ent) @@ -230,13 +235,15 @@ static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata, } /* - * simple_idr_get - associate a provided pointer with an int value - * 1 <= value <= INT_MAX, and return this int value; - * the pointer value can be obtained later by passing - * this int value to idr_find() - * @idrtable: the data object maintaining the pointer<-->int mappings - * @p: the pointer value to be remembered - * @lock: a spinlock used when exclusive access to idrtable is needed + * simple_idr_get - Associate a provided pointer with an int value + * 1 <= value <= INT_MAX, and return this int value; + * the pointer value can be obtained later by passing + * this int value to idr_find() + * @idrtable: The data object maintaining the pointer<-->int mappings + * @p: The pointer value to be remembered + * @lock: A spinlock used when exclusive access to idrtable is needed + * + * Return: The id number mapped to pointer 'p', 0 on failure */ static unsigned int simple_idr_get(struct idr *idrtable, void *p, spinlock_t *lock) @@ -249,16 +256,23 @@ static unsigned int simple_idr_get(struct idr *idrtable, void *p, id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT); spin_unlock_irqrestore(lock, flags); idr_preload_end(); + /* failure */ if (id < 0) - return 0; /* failure */ - return (unsigned int)(id); /* idr_alloc() guarantees > 0 */ + return 0; + /* idr_alloc() guarantees > 0 */ + return (unsigned int)(id); } /* - * setup_scsitaskmgmt_handles - stash the necessary handles so that the - * completion processing logic for a taskmgmt - * cmd will be able to find who to wake up - * and where to stash the result + * setup_scsitaskmgmt_handles - Stash the necessary handles so that the + * completion processing logic for a taskmgmt + * cmd will be able to find who to wake up + * and where to stash the result + * @idrtable: The data object maintaining the pointer<-->int mappings + * @lock: A spinlock used when exclusive access to idrtable is needed + * @cmdrsp: Response from the IOVM + * @event: The event handle to associate with an id + * @result: The location to place the result of the event handle into */ static void 
setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock, struct uiscmdrsp *cmdrsp, @@ -273,8 +287,10 @@ static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock, } /* - * cleanup_scsitaskmgmt_handles - forget handles created by - * setup_scsitaskmgmt_handles() + * cleanup_scsitaskmgmt_handles - Forget handles created by + * setup_scsitaskmgmt_handles() + * @idrtable: The data object maintaining the pointer<-->int mappings + * @cmdrsp: Response from the IOVM */ static void cleanup_scsitaskmgmt_handles(struct idr *idrtable, struct uiscmdrsp *cmdrsp) @@ -286,14 +302,15 @@ static void cleanup_scsitaskmgmt_handles(struct idr *idrtable, } /* - * forward_taskmgmt_command - send taskmegmt command to the Service - * Partition - * @tasktype: Type of taskmgmt command - * @scsidev: Scsidev that issued command + * forward_taskmgmt_command - Send taskmegmt command to the Service + * Partition + * @tasktype: Type of taskmgmt command + * @scsidev: Scsidev that issued command + * + * Create a cmdrsp packet and send it to the Serivce Partition + * that will service this request. * - * Create a cmdrsp packet and send it to the Serivce Partition - * that will service this request. - * Returns whether the command was queued successfully or not. + * Return: Int representing whether command was queued successfully or not */ static int forward_taskmgmt_command(enum task_mgmt_types tasktype, struct scsi_cmnd *scsicmd) @@ -365,11 +382,10 @@ err_del_scsipending_ent: } /* - * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK - * @scsicmd: The scsicmd that needs aborted - * - * Returns SUCCESS if inserted, failure otherwise + * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK + * @scsicmd: The scsicmd that needs aborted * + * Return: SUCCESS if inserted, FAILED otherwise */ static int visorhba_abort_handler(struct scsi_cmnd *scsicmd) { @@ -390,10 +406,10 @@ static int visorhba_abort_handler(struct scsi_cmnd *scsicmd) } /* - * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET - * @scsicmd: The scsicmd that needs aborted + * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET + * @scsicmd: The scsicmd that needs aborted * - * Returns SUCCESS if inserted, failure otherwise + * Return: SUCCESS if inserted, FAILED otherwise */ static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd) { @@ -414,11 +430,11 @@ static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd) } /* - * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each - * target on the bus - * @scsicmd: The scsicmd that needs aborted + * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each + * target on the bus + * @scsicmd: The scsicmd that needs aborted * - * Returns SUCCESS + * Return: SUCCESS if inserted, FAILED otherwise */ static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd) { @@ -438,24 +454,22 @@ static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd) } /* - * visorhba_host_reset_handler - Not supported - * @scsicmd: The scsicmd that needs aborted + * visorhba_host_reset_handler - Not supported + * @scsicmd: The scsicmd that needs to be aborted * - * Not supported, return SUCCESS - * Returns SUCCESS + * Return: Not supported, return SUCCESS */ -static int -visorhba_host_reset_handler(struct scsi_cmnd *scsicmd) +static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd) { /* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */ return SUCCESS; } /* - * visorhba_get_info - * @shp: Scsi host that is requesting information + * 
visorhba_get_info - Get information about SCSI device + * @shp: Scsi host that is requesting information * - * Returns string with info + * Return: String with visorhba information */ static const char *visorhba_get_info(struct Scsi_Host *shp) { @@ -464,19 +478,19 @@ static const char *visorhba_get_info(struct Scsi_Host *shp) } /* - * visorhba_queue_command_lck -- queues command to the Service Partition - * @scsicmd: Command to be queued - * @vsiorhba_cmnd_done: Done command to call when scsicmd is returned + * visorhba_queue_command_lck - Queues command to the Service Partition + * @scsicmd: Command to be queued + * @vsiorhba_cmnd_done: Done command to call when scsicmd is returned * - * Queues to scsicmd to the ServicePartition after converting it to a - * uiscmdrsp structure. + * Queues to scsicmd to the ServicePartition after converting it to a + * uiscmdrsp structure. * - * Returns success if queued to the Service Partition, otherwise - * failure. + * Return: 0 if successfully queued to the Service Partition, otherwise + * error code */ -static int -visorhba_queue_command_lck(struct scsi_cmnd *scsicmd, - void (*visorhba_cmnd_done)(struct scsi_cmnd *)) +static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd, + void (*visorhba_cmnd_done) + (struct scsi_cmnd *)) { struct uiscmdrsp *cmdrsp; struct scsi_device *scsidev = scsicmd->device; @@ -494,12 +508,10 @@ visorhba_queue_command_lck(struct scsi_cmnd *scsicmd, insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE, (void *)scsicmd); - if (insert_location < 0) return SCSI_MLQUEUE_DEVICE_BUSY; cmdrsp = get_scsipending_cmdrsp(devdata, insert_location); - cmdrsp->cmdtype = CMD_SCSI_TYPE; /* save the pending insertion location. Deletion from pending * will return the scsicmd pointer for completion @@ -515,7 +527,6 @@ visorhba_queue_command_lck(struct scsi_cmnd *scsicmd, /* save datadir */ cmdrsp->scsi.data_dir = scsicmd->sc_data_direction; memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE); - cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd); /* keep track of the max buffer length so far. */ @@ -555,13 +566,13 @@ static DEF_SCSI_QCMD(visorhba_queue_command) #endif /* - * visorhba_slave_alloc - called when new disk is discovered - * @scsidev: New disk + * visorhba_slave_alloc - Called when new disk is discovered + * @scsidev: New disk * - * Create a new visordisk_info structure and add it to our - * list of vdisks. + * Create a new visordisk_info structure and add it to our + * list of vdisks. * - * Returns success when created, otherwise error. + * Return: 0 on success, -ENOMEM on failure. */ static int visorhba_slave_alloc(struct scsi_device *scsidev) { @@ -573,12 +584,14 @@ static int visorhba_slave_alloc(struct scsi_device *scsidev) struct visorhba_devdata *devdata; struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host; + /* even though we errored, treat as success */ devdata = (struct visorhba_devdata *)scsihost->hostdata; if (!devdata) - return 0; /* even though we errored, treat as success */ + return 0; + /* already allocated return success */ for_each_vdisk_match(vdisk, devdata, scsidev) - return 0; /* already allocated return success */ + return 0; tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC); if (!tmpvdisk) @@ -592,11 +605,8 @@ static int visorhba_slave_alloc(struct scsi_device *scsidev) } /* - * visorhba_slave_destroy - disk is going away - * @scsidev: scsi device going away - * - * Disk is going away, clean up resources. - * Returns void. + * visorhba_slave_destroy - Disk is going away, clean up resources. 
+ * @scsidev: Scsi device to destroy */ static void visorhba_slave_destroy(struct scsi_device *scsidev) { @@ -635,10 +645,13 @@ static struct scsi_host_template visorhba_driver_template = { }; /* - * info_debugfs_show - debugfs interface to dump visorhba states + * info_debugfs_show - Debugfs interface to dump visorhba states + * @seq: The sequence file to write information to + * @v: Unused, but needed for use with seq file single_open invocation + * + * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info. * - * This presents a file in the debugfs tree named: - * /visorhba/vbus<x>:dev<y>/info + * Return: SUCCESS */ static int info_debugfs_show(struct seq_file *seq, void *v) { @@ -679,12 +692,13 @@ static const struct file_operations info_debugfs_fops = { }; /* - * complete_taskmgmt_command - complete task management - * @cmdrsp: Response from the IOVM + * complete_taskmgmt_command - Complete task management + * @idrtable: The data object maintaining the pointer<-->int mappings + * @cmdrsp: Response from the IOVM + * @result: The result of the task management command * - * Service Partition returned the result of the task management - * command. Wake up anyone waiting for it. - * Returns void + * Service Partition returned the result of the task management + * command. Wake up anyone waiting for it. */ static void complete_taskmgmt_command(struct idr *idrtable, struct uiscmdrsp *cmdrsp, int result) @@ -693,7 +707,6 @@ static void complete_taskmgmt_command(struct idr *idrtable, idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle); int *scsi_result_ptr = idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle); - if (unlikely(!(wq && scsi_result_ptr))) { pr_err("visorhba: no completion context; cmd will time out\n"); return; @@ -708,13 +721,12 @@ static void complete_taskmgmt_command(struct idr *idrtable, } /* - * visorhba_serverdown_complete - Called when we are done cleaning up - * from serverdown - * @work: work structure for this serverdown request + * visorhba_serverdown_complete - Called when we are done cleaning up + * from serverdown + * @devdata: Visorhba instance on which to complete serverdown * - * Called when we are done cleanning up from serverdown, stop processing - * queue, fail pending IOs. - * Returns void when finished cleaning up + * Called when we are done cleanning up from serverdown, stop processing + * queue, fail pending IOs. */ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata) { @@ -758,12 +770,13 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata) } /* - * visorhba_serverdown - Got notified that the IOVM is down - * @devdata: visorhba that is being serviced by downed IOVM. + * visorhba_serverdown - Got notified that the IOVM is down + * @devdata: Visorhba that is being serviced by downed IOVM * - * Something happened to the IOVM, return immediately and - * schedule work cleanup work. - * Return SUCCESS or EINVAL + * Something happened to the IOVM, return immediately and + * schedule cleanup work. + * + * Return: 0 on success, -EINVAL on failure */ static int visorhba_serverdown(struct visorhba_devdata *devdata) { @@ -777,15 +790,14 @@ static int visorhba_serverdown(struct visorhba_devdata *devdata) } /* - * do_scsi_linuxstat - scsi command returned linuxstat - * @cmdrsp: response from IOVM - * @scsicmd: Command issued. 
+ * do_scsi_linuxstat - Scsi command returned linuxstat + * @cmdrsp: Response from IOVM + * @scsicmd: Command issued * - * Don't log errors for disk-not-present inquiries - * Returns void + * Don't log errors for disk-not-present inquiries. */ -static void -do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) +static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, + struct scsi_cmnd *scsicmd) { struct visorhba_devdata *devdata; struct visordisk_info *vdisk; @@ -809,10 +821,10 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) } } -static int set_no_disk_inquiry_result(unsigned char *buf, - size_t len, bool is_lun0) +static int set_no_disk_inquiry_result(unsigned char *buf, size_t len, + bool is_lun0) { - if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN) + if (len < NO_DISK_INQUIRY_RESULT_LEN) return -EINVAL; memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN); buf[2] = SCSI_SPC2_VER; @@ -828,15 +840,14 @@ static int set_no_disk_inquiry_result(unsigned char *buf, } /* - * do_scsi_nolinuxstat - scsi command didn't have linuxstat - * @cmdrsp: response from IOVM - * @scsicmd: Command issued. + * do_scsi_nolinuxstat - Scsi command didn't have linuxstat + * @cmdrsp: Response from IOVM + * @scsicmd: Command issued * - * Handle response when no linuxstat was returned - * Returns void + * Handle response when no linuxstat was returned. */ -static void -do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) +static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, + struct scsi_cmnd *scsicmd) { struct scsi_device *scsidev; unsigned char *buf; @@ -895,16 +906,15 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) } /* - * complete_scsi_command - complete a scsi command - * @uiscmdrsp: Response from Service Partition - * @scsicmd: The scsi command + * complete_scsi_command - Complete a scsi command + * @uiscmdrsp: Response from Service Partition + * @scsicmd: The scsi command * - * Response returned by the Service Partition, finish it and send - * completion to the scsi midlayer. - * Returns void. + * Response was returned by the Service Partition. Finish it and send + * completion to the scsi midlayer. */ -static void -complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) +static void complete_scsi_command(struct uiscmdrsp *cmdrsp, + struct scsi_cmnd *scsicmd) { /* take what we need out of cmdrsp and complete the scsicmd */ scsicmd->result = cmdrsp->scsi.linuxstat; @@ -917,24 +927,23 @@ complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) } /* - * drain_queue - pull responses out of iochannel - * @cmdrsp: Response from the IOSP - * @devdata: device that owns this iochannel + * drain_queue - Pull responses out of iochannel + * @cmdrsp: Response from the IOSP + * @devdata: Device that owns this iochannel * - * Pulls responses out of the iochannel and process the responses. - * Restuns void + * Pulls responses out of the iochannel and process the responses. 
*/ -static void -drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata) +static void drain_queue(struct uiscmdrsp *cmdrsp, + struct visorhba_devdata *devdata) { struct scsi_cmnd *scsicmd; while (1) { + /* queue empty */ if (visorchannel_signalremove(devdata->dev->visorchannel, IOCHAN_FROM_IOPART, cmdrsp)) - break; /* queue empty */ - + break; if (cmdrsp->cmdtype == CMD_SCSI_TYPE) { /* scsicmd location is returned by the * deletion @@ -959,12 +968,14 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata) } /* - * process_incoming_rsps - Process responses from IOSP - * @v: void pointer to visorhba_devdata + * process_incoming_rsps - Process responses from IOSP + * @v: Void pointer to visorhba_devdata * - * Main function for the thread that processes the responses - * from the IO Service Partition. When the queue is empty, wait - * to check to see if it is full again. + * Main function for the thread that processes the responses + * from the IO Service Partition. When the queue is empty, wait + * to check to see if it is full again. + * + * Return: 0 on success, -ENOMEM on failure */ static int process_incoming_rsps(void *v) { @@ -991,14 +1002,15 @@ static int process_incoming_rsps(void *v) } /* - * visorhba_pause - function to handle visorbus pause messages - * @dev: device that is pausing. - * @complete_func: function to call when finished + * visorhba_pause - Function to handle visorbus pause messages + * @dev: Device that is pausing + * @complete_func: Function to call when finished + * + * Something has happened to the IO Service Partition that is + * handling this device. Quiet this device and reset commands + * so that the Service Partition can be corrected. * - * Something has happened to the IO Service Partition that is - * handling this device. Quiet this device and reset commands - * so that the Service Partition can be corrected. - * Returns SUCCESS + * Return: SUCCESS */ static int visorhba_pause(struct visor_device *dev, visorbus_state_complete_func complete_func) @@ -1011,13 +1023,14 @@ static int visorhba_pause(struct visor_device *dev, } /* - * visorhba_resume - function called when the IO Service Partition is back - * @dev: device that is pausing. - * @complete_func: function to call when finished + * visorhba_resume - Function called when the IO Service Partition is back + * @dev: Device that is pausing + * @complete_func: Function to call when finished + * + * Yay! The IO Service Partition is back, the channel has been wiped + * so lets re-establish connection and start processing responses. * - * Yay! The IO Service Partition is back, the channel has been wiped - * so lets re-establish connection and start processing responses. - * Returns 0 on success, error on failure. + * Return: 0 on success, -EINVAL on failure */ static int visorhba_resume(struct visor_device *dev, visorbus_state_complete_func complete_func) @@ -1033,7 +1046,6 @@ static int visorhba_resume(struct visor_device *dev, devdata->thread = visor_thread_start(process_incoming_rsps, devdata, "vhba_incming"); - devdata->serverdown = false; devdata->serverchangingstate = false; @@ -1041,11 +1053,12 @@ static int visorhba_resume(struct visor_device *dev, } /* - * visorhba_probe - device has been discovered, do acquire - * @dev: visor_device that was discovered + * visorhba_probe - Device has been discovered; do acquire + * @dev: visor_device that was discovered + * + * A new HBA was discovered; do the initial connections of it. 
* - * A new HBA was discovered, do the initial connections of it. - * Return 0 on success, otherwise error. + * Return: 0 on success, otherwise error code */ static int visorhba_probe(struct visor_device *dev) { @@ -1139,11 +1152,10 @@ err_scsi_host_put: } /* - * visorhba_remove - remove a visorhba device - * @dev: Device to remove + * visorhba_remove - Remove a visorhba device + * @dev: Device to remove * - * Removes the visorhba device. - * Returns void. + * Removes the visorhba device. */ static void visorhba_remove(struct visor_device *dev) { @@ -1181,10 +1193,12 @@ static struct visor_driver visorhba_driver = { }; /* - * visorhba_init - driver init routine + * visorhba_init - Driver init routine + * + * Initialize the visorhba driver and register it with visorbus + * to handle s-Par virtual host bus adapter. * - * Initialize the visorhba driver and register it with visorbus - * to handle s-Par virtual host bus adapter. + * Return: 0 on success, error code otherwise */ static int visorhba_init(void) { @@ -1207,9 +1221,9 @@ cleanup_debugfs: } /* - * visorhba_exit - driver exit routine + * visorhba_exit - Driver exit routine * - * Unregister driver from the bus and free up memory. + * Unregister driver from the bus and free up memory. */ static void visorhba_exit(void) { diff --git a/drivers/staging/unisys/visorinput/ultrainputreport.h b/drivers/staging/unisys/visorinput/ultrainputreport.h index a4baea53c518..3ffad83100c4 100644 --- a/drivers/staging/unisys/visorinput/ultrainputreport.h +++ b/drivers/staging/unisys/visorinput/ultrainputreport.h @@ -20,44 +20,35 @@ /* These defines identify mouse and keyboard activity which is specified by the * firmware to the host using the cmsimpleinput protocol. @ingroup coretypes */ -#define INPUTACTION_XY_MOTION 1 /* only motion; arg1=x, arg2=y */ -#define INPUTACTION_MOUSE_BUTTON_DOWN 2 /* arg1: 1=left,2=center,3=right */ -#define INPUTACTION_MOUSE_BUTTON_UP 3 /* arg1: 1=left,2=center,3=right */ -#define INPUTACTION_MOUSE_BUTTON_CLICK 4 /* arg1: 1=left,2=center,3=right */ -#define INPUTACTION_MOUSE_BUTTON_DCLICK 5 /* arg1: 1=left,2=center, - * 3=right - */ -#define INPUTACTION_WHEEL_ROTATE_AWAY 6 /* arg1: wheel rotation away from - * user - */ -#define INPUTACTION_WHEEL_ROTATE_TOWARD 7 /* arg1: wheel rotation toward - * user - */ -#define INPUTACTION_KEY_DOWN 64 /* arg1: scancode, as follows: - * If arg1 <= 0xff, it's a 1-byte - * scancode and arg1 is that scancode. - * If arg1 > 0xff, it's a 2-byte - * scanecode, with the 1st byte in the - * low 8 bits, and the 2nd byte in the - * high 8 bits. E.g., the right ALT key - * would appear as x'38e0'. - */ -#define INPUTACTION_KEY_UP 65 /* arg1: scancode (in same format as - * inputaction_keyDown) - */ + /* only motion; arg1=x, arg2=y */ +#define INPUTACTION_XY_MOTION 1 +/* arg1: 1=left,2=center,3=right */ +#define INPUTACTION_MOUSE_BUTTON_DOWN 2 +/* arg1: 1=left,2=center,3=right */ +#define INPUTACTION_MOUSE_BUTTON_UP 3 +/* arg1: 1=left,2=center,3=right */ +#define INPUTACTION_MOUSE_BUTTON_CLICK 4 +/* arg1: 1=left,2=center 3=right */ +#define INPUTACTION_MOUSE_BUTTON_DCLICK 5 +/* arg1: wheel rotation away from user */ +#define INPUTACTION_WHEEL_ROTATE_AWAY 6 +/* arg1: wheel rotation toward user */ +#define INPUTACTION_WHEEL_ROTATE_TOWARD 7 +/* arg1: scancode, as follows: If arg1 <= 0xff, it's a 1-byte scancode and arg1 + * is that scancode. If arg1 > 0xff, it's a 2-byte scanecode, with the 1st + * byte in the low 8 bits, and the 2nd byte in the high 8 bits. 
+ * E.g., the right ALT key would appear as x'38e0'. + */ +#define INPUTACTION_KEY_DOWN 64 +/* arg1: scancode (in same format as inputaction_keyDown) */ +#define INPUTACTION_KEY_UP 65 +/* arg1: scancode (in same format as inputaction_keyDown); MUST refer to one of + * the locking keys, like capslock, numlock, or scrolllock. + * arg2: 1 iff locking key should be in the LOCKED position (e.g., light is ON) + */ #define INPUTACTION_SET_LOCKING_KEY_STATE 66 - /* arg1: scancode (in same format - * as inputaction_keyDown); - * MUST refer to one of the - * locking keys, like capslock, - * numlock, or scrolllock - * arg2: 1 iff locking key should be - * in the LOCKED position - * (e.g., light is ON) - */ -#define INPUTACTION_KEY_DOWN_UP 67 /* arg1: scancode (in same format - * as inputaction_keyDown) - */ +/* arg1: scancode (in same format as inputaction_keyDown */ +#define INPUTACTION_KEY_DOWN_UP 67 struct visor_inputactivity { u16 action; diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c index 45bc340d4e9d..65060e9b6132 100644 --- a/drivers/staging/unisys/visorinput/visorinput.c +++ b/drivers/staging/unisys/visorinput/visorinput.c @@ -60,11 +60,13 @@ enum visorinput_device_type { */ struct visorinput_devdata { struct visor_device *dev; - struct mutex lock_visor_dev; /* lock for dev */ + /* lock for dev */ + struct mutex lock_visor_dev; struct input_dev *visorinput_dev; bool paused; bool interrupts_enabled; - unsigned int keycode_table_bytes; /* size of following array */ + /* size of following array */ + unsigned int keycode_table_bytes; /* for keyboard devices: visorkbd_keycode[] + visorkbd_ext_keycode[] */ unsigned char keycode_table[0]; }; @@ -162,9 +164,8 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = { [81] = KEY_KP3, [82] = KEY_KP0, [83] = KEY_KPDOT, - [86] = KEY_102ND, /* enables UK backslash+pipe key, - * and FR lessthan+greaterthan key - */ + /* enables UK backslash+pipe key and FR lessthan+greaterthan key */ + [86] = KEY_102ND, [87] = KEY_F11, [88] = KEY_F12, [90] = KEY_KPLEFTPAREN, @@ -260,7 +261,6 @@ static void visorinput_close(struct input_dev *visorinput_dev) * interrupts should be disabled so when we resume we will * not re-enable them. */ - mutex_lock(&devdata->lock_visor_dev); devdata->interrupts_enabled = false; if (devdata->paused) @@ -276,15 +276,13 @@ out_unlock: * we can use to deliver keyboard inputs to Linux. We of course do this when * we see keyboard inputs coming in on a keyboard channel. 
*/ -static struct input_dev * -setup_client_keyboard(void *devdata, /* opaque on purpose */ - unsigned char *keycode_table) +static struct input_dev *setup_client_keyboard(void *devdata, + unsigned char *keycode_table) { int i; - struct input_dev *visorinput_dev; + struct input_dev *visorinput_dev = input_allocate_device(); - visorinput_dev = input_allocate_device(); if (!visorinput_dev) return NULL; @@ -302,7 +300,8 @@ setup_client_keyboard(void *devdata, /* opaque on purpose */ BIT_MASK(LED_SCROLLL) | BIT_MASK(LED_NUML); visorinput_dev->keycode = keycode_table; - visorinput_dev->keycodesize = 1; /* sizeof(unsigned char) */ + /* sizeof(unsigned char) */ + visorinput_dev->keycodesize = 1; visorinput_dev->keycodemax = KEYCODE_TABLE_BYTES; for (i = 1; i < visorinput_dev->keycodemax; i++) @@ -313,19 +312,18 @@ setup_client_keyboard(void *devdata, /* opaque on purpose */ visorinput_dev->open = visorinput_open; visorinput_dev->close = visorinput_close; - input_set_drvdata(visorinput_dev, devdata); /* pre input_register! */ + /* pre input_register! */ + input_set_drvdata(visorinput_dev, devdata); return visorinput_dev; } -static struct input_dev * -setup_client_mouse(void *devdata /* opaque on purpose */) +static struct input_dev *setup_client_mouse(void *devdata) { - struct input_dev *visorinput_dev = NULL; int xres, yres; struct fb_info *fb0; + struct input_dev *visorinput_dev = input_allocate_device(); - visorinput_dev = input_allocate_device(); if (!visorinput_dev) return NULL; @@ -354,14 +352,16 @@ setup_client_mouse(void *devdata /* opaque on purpose */) visorinput_dev->open = visorinput_open; visorinput_dev->close = visorinput_close; - input_set_drvdata(visorinput_dev, devdata); /* pre input_register! */ + /* pre input_register! */ + input_set_drvdata(visorinput_dev, devdata); input_set_capability(visorinput_dev, EV_REL, REL_WHEEL); return visorinput_dev; } -static struct visorinput_devdata * -devdata_create(struct visor_device *dev, enum visorinput_device_type devtype) +static struct visorinput_devdata *devdata_create( + struct visor_device *dev, + enum visorinput_device_type devtype) { struct visorinput_devdata *devdata = NULL; unsigned int extra_bytes = 0; @@ -446,8 +446,7 @@ err_kfree_devdata: return NULL; } -static int -visorinput_probe(struct visor_device *dev) +static int visorinput_probe(struct visor_device *dev) { uuid_le guid; enum visorinput_device_type devtype; @@ -465,15 +464,13 @@ visorinput_probe(struct visor_device *dev) return 0; } -static void -unregister_client_input(struct input_dev *visorinput_dev) +static void unregister_client_input(struct input_dev *visorinput_dev) { if (visorinput_dev) input_unregister_device(visorinput_dev); } -static void -visorinput_remove(struct visor_device *dev) +static void visorinput_remove(struct visor_device *dev) { struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device); @@ -499,9 +496,8 @@ visorinput_remove(struct visor_device *dev) * Make it so the current locking state of the locking key indicated by * <keycode> is as indicated by <desired_state> (1=locked, 0=unlocked). */ -static void -handle_locking_key(struct input_dev *visorinput_dev, - int keycode, int desired_state) +static void handle_locking_key(struct input_dev *visorinput_dev, int keycode, + int desired_state) { int led; @@ -533,17 +529,15 @@ handle_locking_key(struct input_dev *visorinput_dev, * with 0xE0 in the low byte and the extended scancode value in the next * higher byte. 
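The two-byte scancode format described in the comment above (0xE0 prefix in the low byte, extended code in the high byte, e.g. right ALT arriving as 0x38e0) is what scancode_to_keycode below indexes on. A small stand-alone sketch of the same split, with placeholder tables rather than the driver's visorkbd arrays:

#include <stdio.h>

/* Placeholder lookup tables; the driver uses visorkbd_keycode[] and
 * visorkbd_ext_keycode[] instead.
 */
static unsigned char toy_keycode[256];
static unsigned char toy_ext_keycode[256];

static int toy_scancode_to_keycode(int scancode)
{
	/* 2-byte scancode: 0xE0 prefix in the low byte, code in the high byte */
	if (scancode > 0xff)
		return toy_ext_keycode[(scancode >> 8) & 0xff];
	return toy_keycode[scancode];
}

int main(void)
{
	/* Right ALT arrives as 0x38e0: prefix 0xe0, extended code 0x38. */
	printf("%d\n", toy_scancode_to_keycode(0x38e0));
	return 0;
}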
*/ -static int -scancode_to_keycode(int scancode) +static int scancode_to_keycode(int scancode) { if (scancode > 0xff) return visorkbd_ext_keycode[(scancode >> 8) & 0xff]; - return visorkbd_keycode[scancode]; + return visorkbd_keycode[scancode]; } -static int -calc_button(int x) +static int calc_button(int x) { switch (x) { case 1: @@ -562,15 +556,13 @@ calc_button(int x) * client guest partition. It is called periodically so we can obtain inputs * from the channel, and deliver them to the guest OS. */ -static void -visorinput_channel_interrupt(struct visor_device *dev) +static void visorinput_channel_interrupt(struct visor_device *dev) { struct visor_inputreport r; int scancode, keycode; struct input_dev *visorinput_dev; int xmotion, ymotion, button; int i; - struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device); if (!devdata) @@ -626,7 +618,6 @@ visorinput_channel_interrupt(struct visor_device *dev) if (button < 0) break; input_report_key(visorinput_dev, button, 1); - input_sync(visorinput_dev); input_report_key(visorinput_dev, button, 0); input_sync(visorinput_dev); @@ -657,9 +648,8 @@ visorinput_channel_interrupt(struct visor_device *dev) } } -static int -visorinput_pause(struct visor_device *dev, - visorbus_state_complete_func complete_func) +static int visorinput_pause(struct visor_device *dev, + visorbus_state_complete_func complete_func) { int rc; struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device); @@ -681,7 +671,6 @@ visorinput_pause(struct visor_device *dev, * due to above, at this time no thread of execution will be * in visorinput_channel_interrupt() */ - devdata->paused = true; complete_func(dev, 0); rc = 0; @@ -691,9 +680,8 @@ out: return rc; } -static int -visorinput_resume(struct visor_device *dev, - visorbus_state_complete_func complete_func) +static int visorinput_resume(struct visor_device *dev, + visorbus_state_complete_func complete_func) { int rc; struct visorinput_devdata *devdata = dev_get_drvdata(&dev->device); @@ -743,14 +731,12 @@ static struct visor_driver visorinput_driver = { .resume = visorinput_resume, }; -static int -visorinput_init(void) +static int visorinput_init(void) { return visorbus_register_visor_driver(&visorinput_driver); } -static void -visorinput_cleanup(void) +static void visorinput_cleanup(void) { visorbus_unregister_visor_driver(&visorinput_driver); } diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 2891622eef18..0b39676e0daf 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -84,7 +84,8 @@ struct visornic_devdata { u64 incarnation_id; /* flags as they were prior to set_multicast_list */ unsigned short old_flags; - atomic_t usage; /* count of users */ + /* count of users */ + atomic_t usage; /* number of rcv buffers the vnic will post */ int num_rcv_bufs; @@ -108,31 +109,43 @@ struct visornic_devdata { struct uiscmdrsp *cmdrsp_rcv; /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */ struct uiscmdrsp *xmit_cmdrsp; - - bool server_down; /* IOPART is down */ - bool server_change_state; /* Processing SERVER_CHANGESTATE msg */ - bool going_away; /* device is being torn down */ + /* IOPART is down */ + bool server_down; + /* Processing SERVER_CHANGESTATE msg */ + bool server_change_state; + /* device is being torn down */ + bool going_away; struct dentry *eth_debugfs_dir; u64 interrupts_rcvd; u64 interrupts_notme; u64 interrupts_disabled; u64 busy_cnt; - spinlock_t 
priv_lock; /* spinlock to access devdata structures */ + /* spinlock to access devdata structures */ + spinlock_t priv_lock; /* flow control counter */ u64 flow_control_upper_hits; u64 flow_control_lower_hits; /* debug counters */ - unsigned long n_rcv0; /* # rcvs of 0 buffers */ - unsigned long n_rcv1; /* # rcvs of 1 buffers */ - unsigned long n_rcv2; /* # rcvs of 2 buffers */ - unsigned long n_rcvx; /* # rcvs of >2 buffers */ - unsigned long found_repost_rcvbuf_cnt; /* # repost_rcvbuf_cnt */ - unsigned long repost_found_skb_cnt; /* # of found the skb */ - unsigned long n_repost_deficit; /* # of lost rcv buffers */ - unsigned long bad_rcv_buf; /* # of unknown rcv skb not freed */ - unsigned long n_rcv_packets_not_accepted;/* # bogs rcv packets */ + /* # rcvs of 0 buffers */ + unsigned long n_rcv0; + /* # rcvs of 1 buffers */ + unsigned long n_rcv1; + /* # rcvs of 2 buffers */ + unsigned long n_rcv2; + /* # rcvs of >2 buffers */ + unsigned long n_rcvx; + /* # repost_rcvbuf_cnt */ + unsigned long found_repost_rcvbuf_cnt; + /* # of found the skb */ + unsigned long repost_found_skb_cnt; + /* # of lost rcv buffers */ + unsigned long n_repost_deficit; + /* # of unknown rcv skb not freed */ + unsigned long bad_rcv_buf; + /* # bogs rcv packets */ + unsigned long n_rcv_packets_not_accepted; int queuefullmsg_logged; struct chanstat chstat; @@ -142,9 +155,9 @@ struct visornic_devdata { }; /* Returns next non-zero index on success or 0 on failure (i.e. out of room). */ -static u16 -add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index, - u16 max_pi_arr_entries, struct phys_info pi_arr[]) +static u16 add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, + u16 index, u16 max_pi_arr_entries, + struct phys_info pi_arr[]) { u32 len; u16 i, firstlen; @@ -190,10 +203,10 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index, * Return value indicates number of entries filled in frags * Negative values indicate an error. */ -static int -visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen, - unsigned int frags_max, - struct phys_info frags[]) +static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb, + unsigned int firstfraglen, + unsigned int frags_max, + struct phys_info frags[]) { unsigned int count = 0, frag, size, offset = 0, numfrags; unsigned int total_count; @@ -296,12 +309,9 @@ static const struct file_operations debugfs_enable_ints_fops = { * being down. * Returns void. */ -static void -visornic_serverdown_complete(struct visornic_devdata *devdata) +static void visornic_serverdown_complete(struct visornic_devdata *devdata) { - struct net_device *netdev; - - netdev = devdata->netdev; + struct net_device *netdev = devdata->netdev; /* Stop polling for interrupts */ del_timer_sync(&devdata->irq_poll_timer); @@ -330,9 +340,8 @@ visornic_serverdown_complete(struct visornic_devdata *devdata) * sure we haven't already handled the server change state event. * Returns 0 if we scheduled the work, -EINVAL on error. */ -static int -visornic_serverdown(struct visornic_devdata *devdata, - visorbus_state_complete_func complete_func) +static int visornic_serverdown(struct visornic_devdata *devdata, + visorbus_state_complete_func complete_func) { unsigned long flags; int err; @@ -377,8 +386,7 @@ err_unlock: * so that it can write rcv data into our memory space. 
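add_physinfo_entries, earlier in this file's diff, breaks one (pfn, offset, length) range into page-sized phys_info entries and returns the next free index, or 0 when the array runs out of room. A simplified user-space sketch of that splitting, assuming a 4 KiB page size and using invented type and field names:

#include <stdint.h>

#define TOY_PAGE_SIZE 4096u

struct toy_phys_info {
	uint64_t pi_pfn;
	uint32_t pi_off;
	uint32_t pi_len;
};

/* Returns the next free (non-zero) index, or 0 if pi_arr is out of room. */
static unsigned int toy_add_physinfo(uint64_t pfn, uint32_t off, uint32_t len,
				     unsigned int index, unsigned int max,
				     struct toy_phys_info *pi_arr)
{
	while (len) {
		uint32_t chunk = TOY_PAGE_SIZE - off;

		if (chunk > len)
			chunk = len;
		if (index >= max)
			return 0;		/* out of room */
		pi_arr[index].pi_pfn = pfn;
		pi_arr[index].pi_off = off;
		pi_arr[index].pi_len = chunk;
		index++;
		pfn++;
		off = 0;
		len -= chunk;
	}
	return index;
}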
* Return pointer to sk_buff */ -static struct sk_buff * -alloc_rcv_buf(struct net_device *netdev) +static struct sk_buff *alloc_rcv_buf(struct net_device *netdev) { struct sk_buff *skb; @@ -409,9 +417,8 @@ alloc_rcv_buf(struct net_device *netdev) * Send the skb to the IO Partition. * Returns 0 or error */ -static int -post_skb(struct uiscmdrsp *cmdrsp, - struct visornic_devdata *devdata, struct sk_buff *skb) +static int post_skb(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata, + struct sk_buff *skb) { int err; @@ -437,7 +444,6 @@ post_skb(struct uiscmdrsp *cmdrsp, atomic_inc(&devdata->num_rcvbuf_in_iovm); devdata->chstat.sent_post++; - return 0; } @@ -451,9 +457,8 @@ post_skb(struct uiscmdrsp *cmdrsp, * Send the enable/disable message to the IO Partition. * Returns 0 or error */ -static int -send_enbdis(struct net_device *netdev, int state, - struct visornic_devdata *devdata) +static int send_enbdis(struct net_device *netdev, int state, + struct visornic_devdata *devdata) { int err; @@ -479,10 +484,9 @@ send_enbdis(struct net_device *netdev, int state, * are disabled, reclaim memory from rcv bufs. * Returns 0 on success, negative for failure of IO Partition * responding. - * */ -static int -visornic_disable_with_timeout(struct net_device *netdev, const int timeout) +static int visornic_disable_with_timeout(struct net_device *netdev, + const int timeout) { struct visornic_devdata *devdata = netdev_priv(netdev); int i; @@ -493,7 +497,8 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout) /* send a msg telling the other end we are stopping incoming pkts */ spin_lock_irqsave(&devdata->priv_lock, flags); devdata->enabled = 0; - devdata->enab_dis_acked = 0; /* must wait for ack */ + /* must wait for ack */ + devdata->enab_dis_acked = 0; spin_unlock_irqrestore(&devdata->priv_lock, flags); /* send disable and wait for ack -- don't hold lock when sending @@ -568,8 +573,8 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout) * Allocate rcv buffers and post them to the IO Partition. * Return 0 for success, and negative for failure. */ -static int -init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata) +static int init_rcv_bufs(struct net_device *netdev, + struct visornic_devdata *devdata) { int i, j, count, err; @@ -578,10 +583,12 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata) */ for (i = 0; i < devdata->num_rcv_bufs; i++) { devdata->rcvbuf[i] = alloc_rcv_buf(netdev); + /* if we failed to allocate one let us stop */ if (!devdata->rcvbuf[i]) - break; /* if we failed to allocate one let us stop */ + break; } - if (i == 0) /* couldn't even allocate one -- bail out */ + /* couldn't even allocate one -- bail out */ + if (i == 0) return -ENOMEM; count = i; @@ -633,8 +640,8 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata) * timeout is defined in msecs (timeout of 0 specifies infinite wait) * Return 0 for success, negative for failure. 
*/ -static int -visornic_enable_with_timeout(struct net_device *netdev, const int timeout) +static int visornic_enable_with_timeout(struct net_device *netdev, + const int timeout) { int err = 0; struct visornic_devdata *devdata = netdev_priv(netdev); @@ -695,7 +702,6 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout) } netif_start_queue(netdev); - return 0; } @@ -707,8 +713,7 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout) * device for our virtual NIC we will send a Disable and Enable * to the IOVM. If it doesn't respond we will trigger a serverdown. */ -static void -visornic_timeout_reset(struct work_struct *work) +static void visornic_timeout_reset(struct work_struct *work) { struct visornic_devdata *devdata; struct net_device *netdev; @@ -749,11 +754,9 @@ call_serverdown: * Enable the device and start the transmit queue. * Return 0 for success */ -static int -visornic_open(struct net_device *netdev) +static int visornic_open(struct net_device *netdev) { visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT); - return 0; } @@ -764,11 +767,9 @@ visornic_open(struct net_device *netdev) * Disable the device and stop the transmit queue. * Return 0 for success */ -static int -visornic_close(struct net_device *netdev) +static int visornic_close(struct net_device *netdev) { visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT); - return 0; } @@ -830,8 +831,7 @@ static bool vnic_hit_low_watermark(struct visornic_devdata *devdata, * can be called again. * Returns NETDEV_TX_OK. */ -static int -visornic_xmit(struct sk_buff *skb, struct net_device *netdev) +static int visornic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct visornic_devdata *devdata; int len, firstfraglen, padlen; @@ -938,6 +938,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev) * - everything else will be pass in frags & DMA'ed */ memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HLEN); + /* copy frags info - from skb->data we need to only provide access * beyond eth header */ @@ -997,8 +998,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev) * * Returns the net_device_stats for the device */ -static struct net_device_stats * -visornic_get_stats(struct net_device *netdev) +static struct net_device_stats *visornic_get_stats(struct net_device *netdev) { struct visornic_devdata *devdata = netdev_priv(netdev); @@ -1016,8 +1016,7 @@ visornic_get_stats(struct net_device *netdev) * Currently not supported. * Returns EINVAL */ -static int -visornic_change_mtu(struct net_device *netdev, int new_mtu) +static int visornic_change_mtu(struct net_device *netdev, int new_mtu) { return -EINVAL; } @@ -1029,8 +1028,7 @@ visornic_change_mtu(struct net_device *netdev, int new_mtu) * Only flag we support currently is IFF_PROMISC * Returns void */ -static void -visornic_set_multi(struct net_device *netdev) +static void visornic_set_multi(struct net_device *netdev) { struct uiscmdrsp *cmdrsp; struct visornic_devdata *devdata = netdev_priv(netdev); @@ -1070,8 +1068,7 @@ out_save_flags: * been informed the IO Partition is gone, if it is gone * we will already timeout the xmits. */ -static void -visornic_xmit_timeout(struct net_device *netdev) +static void visornic_xmit_timeout(struct net_device *netdev) { struct visornic_devdata *devdata = netdev_priv(netdev); unsigned long flags; @@ -1108,9 +1105,9 @@ visornic_xmit_timeout(struct net_device *netdev) * we are finished with them. * Returns 0 for success, -1 for error. 
*/ -static int -repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata, - struct sk_buff *skb, struct net_device *netdev) +static int repost_return(struct uiscmdrsp *cmdrsp, + struct visornic_devdata *devdata, + struct sk_buff *skb, struct net_device *netdev) { struct net_pkt_rcv copy; int i = 0, cc, numreposted; @@ -1182,8 +1179,7 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata, * it up the stack. * Returns 1 iff an skb was received, otherwise 0 */ -static int -visornic_rx(struct uiscmdrsp *cmdrsp) +static int visornic_rx(struct uiscmdrsp *cmdrsp) { struct visornic_devdata *devdata; struct sk_buff *skb, *prev, *curr; @@ -1236,7 +1232,8 @@ visornic_rx(struct uiscmdrsp *cmdrsp) * firstfrag & set data_len to show rest see if we have to chain * frag_list. */ - if (skb->len > RCVPOST_BUF_SIZE) { /* do PRECAUTIONARY check */ + /* do PRECAUTIONARY check */ + if (skb->len > RCVPOST_BUF_SIZE) { if (cmdrsp->net.rcv.numrcvbufs < 2) { if (repost_return(cmdrsp, devdata, skb, netdev) < 0) dev_err(&devdata->netdev->dev, @@ -1244,23 +1241,24 @@ visornic_rx(struct uiscmdrsp *cmdrsp) return 0; } /* length rcvd is greater than firstfrag in this skb rcv buf */ - skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */ - skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that - * will be in - * frag_list - */ + /* amount in skb->data */ + skb->tail += RCVPOST_BUF_SIZE; + /* amount that will be in frag_list */ + skb->data_len = skb->len - RCVPOST_BUF_SIZE; } else { /* data fits in this skb - no chaining - do * PRECAUTIONARY check */ - if (cmdrsp->net.rcv.numrcvbufs != 1) { /* should be 1 */ + /* should be 1 */ + if (cmdrsp->net.rcv.numrcvbufs != 1) { if (repost_return(cmdrsp, devdata, skb, netdev) < 0) dev_err(&devdata->netdev->dev, "repost_return failed"); return 0; } skb->tail += skb->len; - skb->data_len = 0; /* nothing rcvd in frag_list */ + /* nothing rcvd in frag_list */ + skb->data_len = 0; } off = skb_tail_pointer(skb) - skb->data; @@ -1286,7 +1284,8 @@ visornic_rx(struct uiscmdrsp *cmdrsp) cc < cmdrsp->net.rcv.numrcvbufs; cc++) { curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc]; curr->next = NULL; - if (!prev) /* start of list- set head */ + /* start of list- set head */ + if (!prev) skb_shinfo(skb)->frag_list = curr; else prev->next = curr; @@ -1314,18 +1313,18 @@ visornic_rx(struct uiscmdrsp *cmdrsp) * sets up skb->pkt_type & it also PULLS out the eth header */ skb->protocol = eth_type_trans(skb, netdev); - eth = eth_hdr(skb); - skb->csum = 0; skb->ip_summed = CHECKSUM_NONE; do { + /* accept all packets */ if (netdev->flags & IFF_PROMISC) - break; /* accept all packets */ + break; if (skb->pkt_type == PACKET_BROADCAST) { + /* accept all broadcast packets */ if (netdev->flags & IFF_BROADCAST) - break; /* accept all broadcast packets */ + break; } else if (skb->pkt_type == PACKET_MULTICAST) { if ((netdev->flags & IFF_MULTICAST) && (netdev_mc_count(netdev))) { @@ -1385,8 +1384,9 @@ visornic_rx(struct uiscmdrsp *cmdrsp) * values. * Returns a pointer to the devdata structure */ -static struct visornic_devdata * -devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev) +static struct visornic_devdata *devdata_initialize( + struct visornic_devdata *devdata, + struct visor_device *dev) { devdata->dev = dev; devdata->incarnation_id = get_jiffies_64(); @@ -1577,8 +1577,7 @@ static const struct file_operations debugfs_info_fops = { * Send receive buffers to the IO Partition. 
* Returns void */ -static int -send_rcv_posts_if_needed(struct visornic_devdata *devdata) +static int send_rcv_posts_if_needed(struct visornic_devdata *devdata) { int i; struct net_device *netdev; @@ -1625,8 +1624,8 @@ send_rcv_posts_if_needed(struct visornic_devdata *devdata) * @cmdrsp: io channel command response message * @devdata: visornic device to drain */ -static void -drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata) +static void drain_resp_queue(struct uiscmdrsp *cmdrsp, + struct visornic_devdata *devdata) { while (!visorchannel_signalremove(devdata->dev->visorchannel, IOCHAN_FROM_IOPART, @@ -1643,21 +1642,22 @@ drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata) * Process the responses as we get them. * Returns when response queue is empty or when the thread stops. */ -static void -service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata, - int *rx_work_done, int budget) +static void service_resp_queue(struct uiscmdrsp *cmdrsp, + struct visornic_devdata *devdata, + int *rx_work_done, int budget) { unsigned long flags; struct net_device *netdev; while (*rx_work_done < budget) { - /* TODO: CLIENT ACQUIRE -- Don't really need this at the - * moment - */ + /* TODO: CLIENT ACQUIRE -- Don't really need this at the + * moment + */ + /* queue empty */ if (visorchannel_signalremove(devdata->dev->visorchannel, IOCHAN_FROM_IOPART, cmdrsp)) - break; /* queue empty */ + break; switch (cmdrsp->net.type) { case NET_RCV: @@ -1763,8 +1763,7 @@ static int visornic_poll(struct napi_struct *napi, int budget) * response queue and drain it if needed. * Returns when thread has stopped. */ -static void -poll_for_irq(unsigned long v) +static void poll_for_irq(unsigned long v) { struct visornic_devdata *devdata = (struct visornic_devdata *)v; @@ -1831,7 +1830,8 @@ static int visornic_probe(struct visor_device *dev) dev_set_drvdata(&dev->device, devdata); init_waitqueue_head(&devdata->rsp_queue); spin_lock_init(&devdata->priv_lock); - devdata->enabled = 0; /* not yet */ + /* not yet */ + devdata->enabled = 0; atomic_set(&devdata->usage, 1); /* Setup rcv bufs */ @@ -1984,7 +1984,8 @@ static void host_side_disappeared(struct visornic_devdata *devdata) unsigned long flags; spin_lock_irqsave(&devdata->priv_lock, flags); - devdata->dev = NULL; /* indicate device destroyed */ + /* indicate device destroyed */ + devdata->dev = NULL; spin_unlock_irqrestore(&devdata->priv_lock, flags); } @@ -2023,8 +2024,8 @@ static void visornic_remove(struct visor_device *dev) cancel_work_sync(&devdata->timeout_reset); debugfs_remove_recursive(devdata->eth_debugfs_dir); - - unregister_netdev(netdev); /* this will call visornic_close() */ + /* this will call visornic_close() */ + unregister_netdev(netdev); del_timer_sync(&devdata->irq_poll_timer); netif_napi_del(&devdata->napi); @@ -2160,7 +2161,6 @@ static int visornic_init(void) cleanup_debugfs: debugfs_remove_recursive(visornic_debugfs_dir); - return err; } @@ -2172,7 +2172,6 @@ cleanup_debugfs: static void visornic_cleanup(void) { visorbus_unregister_visor_driver(&visornic_driver); - debugfs_remove_recursive(visornic_debugfs_dir); } diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/staging/vboxvideo/Kconfig index a52746f9a670..f89f931f631b 100644 --- a/drivers/staging/vboxvideo/Kconfig +++ b/drivers/staging/vboxvideo/Kconfig @@ -2,11 +2,13 @@ config DRM_VBOXVIDEO tristate "Virtual Box Graphics Card" depends on DRM && X86 && PCI select DRM_KMS_HELPER + select GENERIC_ALLOCATOR help This is a KMS 
driver for the virtual Graphics Card used in Virtual Box virtual machines. - Although it is possible to builtin this module, it is advised - to build this driver as a module, so that it can be updated - independently of the kernel. Select M to built this driver as a - module and add support for these devices via drm/kms interfaces. + Although it is possible to build this driver built-in to the + kernel, it is advised to build it as a module, so that it can + be updated independently of the kernel. Select M to build this + driver as a module and add support for these devices via drm/kms + interfaces. diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c index 92ae1560a16d..5d9c65119914 100644 --- a/drivers/staging/vboxvideo/vbox_drv.c +++ b/drivers/staging/vboxvideo/vbox_drv.c @@ -36,7 +36,7 @@ #include "vbox_drv.h" -int vbox_modeset = -1; +static int vbox_modeset = -1; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, vbox_modeset, int, 0400); diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c index f2b85f3256fa..a7eea70a3804 100644 --- a/drivers/staging/vboxvideo/vbox_mode.c +++ b/drivers/staging/vboxvideo/vbox_mode.c @@ -54,14 +54,12 @@ static void vbox_do_modeset(struct drm_crtc *crtc, struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct vbox_private *vbox; int width, height, bpp, pitch; - unsigned int crtc_id; u16 flags; s32 x_offset, y_offset; vbox = crtc->dev->dev_private; width = mode->hdisplay ? mode->hdisplay : 640; height = mode->vdisplay ? mode->vdisplay : 480; - crtc_id = vbox_crtc->crtc_id; bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32; pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8; x_offset = vbox->single_framebuffer ? 
crtc->x : vbox_crtc->x_hint; @@ -578,9 +576,6 @@ static int vbox_mode_valid(struct drm_connector *connector, static void vbox_connector_destroy(struct drm_connector *connector) { - struct vbox_connector *vbox_connector; - - vbox_connector = to_vbox_connector(connector); drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c index 34a905d40735..4eb410a2a1a8 100644 --- a/drivers/staging/vboxvideo/vbox_ttm.c +++ b/drivers/staging/vboxvideo/vbox_ttm.c @@ -230,7 +230,7 @@ static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate(ttm); } -struct ttm_bo_driver vbox_bo_driver = { +static struct ttm_bo_driver vbox_bo_driver = { .ttm_tt_create = vbox_ttm_tt_create, .ttm_tt_populate = vbox_ttm_tt_populate, .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate, diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c index 3637ddf909a4..1bf34ce41b59 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c @@ -438,7 +438,7 @@ static int snd_bcm2835_pcm_lib_ioctl(struct snd_pcm_substream *substream, } /* operators */ -static struct snd_pcm_ops snd_bcm2835_playback_ops = { +static const struct snd_pcm_ops snd_bcm2835_playback_ops = { .open = snd_bcm2835_playback_open, .close = snd_bcm2835_playback_close, .ioctl = snd_bcm2835_pcm_lib_ioctl, @@ -450,7 +450,7 @@ static struct snd_pcm_ops snd_bcm2835_playback_ops = { .ack = snd_bcm2835_pcm_ack, }; -static struct snd_pcm_ops snd_bcm2835_playback_spdif_ops = { +static const struct snd_pcm_ops snd_bcm2835_playback_spdif_ops = { .open = snd_bcm2835_playback_spdif_open, .close = snd_bcm2835_playback_close, .ioctl = snd_bcm2835_pcm_lib_ioctl, diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c index 7fa0310e7b9e..2e52f07bbaa9 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c @@ -51,7 +51,7 @@ int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size) sema_init(&queue->pop, 0); sema_init(&queue->push, 0); - queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL); + queue->storage = kcalloc(size, sizeof(VCHIQ_HEADER_T *), GFP_KERNEL); if (!queue->storage) { vchiu_queue_delete(queue); return 0; diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c index f5db2b3d9045..14034e342aa6 100644 --- a/drivers/staging/vt6655/card.c +++ b/drivers/staging/vt6655/card.c @@ -649,19 +649,19 @@ static unsigned short CARDwGetOFDMControlRate(struct vnt_private *priv, pr_debug("BASIC RATE: %X\n", priv->basic_rates); if (!CARDbIsOFDMinBasicRate((void *)priv)) { - pr_debug("CARDwGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx); + pr_debug("%s:(NO OFDM) %d\n", __func__, wRateIdx); if (wRateIdx > RATE_24M) wRateIdx = RATE_24M; return wRateIdx; } while (ui > RATE_11M) { if (priv->basic_rates & ((u32)0x1 << ui)) { - pr_debug("CARDwGetOFDMControlRate : %d\n", ui); + pr_debug("%s : %d\n", __func__, ui); return (unsigned short)ui; } ui--; } - pr_debug("CARDwGetOFDMControlRate: 6M\n"); + pr_debug("%s: 6M\n", __func__); return (unsigned short)RATE_24M; } diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c index 4aaa99bafcda..f7550b215f72 100644 --- a/drivers/staging/vt6655/mac.c +++ 
b/drivers/staging/vt6655/mac.c @@ -809,7 +809,7 @@ void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl, if (byLocalID <= 1) return; - pr_debug("MACvSetKeyEntry\n"); + pr_debug("%s\n", __func__); offset = MISCFIFO_KEYETRY0; offset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE); diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h index 4832666cc580..74715c854856 100644 --- a/drivers/staging/vt6656/device.h +++ b/drivers/staging/vt6656/device.h @@ -83,7 +83,7 @@ #define CONFIG_PATH "/etc/vntconfiguration.dat" #define MAX_UINTS 8 -#define OPTION_DEFAULT { [0 ... MAX_UINTS-1] = -1} +#define OPTION_DEFAULT { [0 ... MAX_UINTS - 1] = -1} #define DUPLICATE_RX_CACHE_LENGTH 5 diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c index 282f665aacfa..093a6048bd22 100644 --- a/drivers/staging/vt6656/firmware.c +++ b/drivers/staging/vt6656/firmware.c @@ -65,7 +65,7 @@ int vnt_download_firmware(struct vnt_private *priv) status = vnt_control_out(priv, 0, - 0x1200+ii, + 0x1200 + ii, 0x0000, length, buffer); diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h index 906d3454591d..cfc6c2131536 100644 --- a/drivers/staging/vt6656/key.h +++ b/drivers/staging/vt6656/key.h @@ -46,6 +46,6 @@ int vnt_key_init_table(struct vnt_private *priv); int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, - struct ieee80211_vif *vif, struct ieee80211_key_conf *key); + struct ieee80211_vif *vif, struct ieee80211_key_conf *key); #endif /* __KEY_H__ */ diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 095b85567306..cc6d8778fe5b 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -419,8 +419,7 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) int ii; for (ii = 0; ii < priv->num_tx_context; ii++) { - tx_context = kmalloc(sizeof(struct vnt_usb_send_context), - GFP_KERNEL); + tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL); if (!tx_context) goto free_tx; @@ -437,7 +436,7 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) } for (ii = 0; ii < priv->num_rcb; ii++) { - priv->rcb[ii] = kzalloc(sizeof(struct vnt_rcb), GFP_KERNEL); + priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL); if (!priv->rcb[ii]) { dev_err(&priv->usb->dev, "failed to allocate rcb no %d\n", ii); diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c index e322b7d8c617..c466e0614bc4 100644 --- a/drivers/staging/vt6656/power.c +++ b/drivers/staging/vt6656/power.c @@ -74,16 +74,15 @@ void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval) vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_GO2DOZE); if (listen_interval >= 2) { - /* clear always listen beacon */ vnt_mac_reg_bits_off(priv, MAC_REG_PSCTL, PSCTL_ALBCN); /* first time set listen next beacon */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_LNBCN); - } else - + } else { /* always listen beacon */ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN); + } dev_dbg(&priv->usb->dev, "PS:Power Saving Mode Enable...\n"); } @@ -100,7 +99,6 @@ void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval) void vnt_disable_power_saving(struct vnt_private *priv) { - /* disable power saving hw function */ vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0, 0, 0, NULL); diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index 23581afb4211..3a9d19a0b842 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c @@ -611,7 +611,7 @@ int 
vnt_rf_write_embedded(struct vnt_private *priv, u32 data) reg_data[3] = (u8)(data >> 24); vnt_control_out(priv, MESSAGE_TYPE_WRITE_IFRF, - 0, 0, ARRAY_SIZE(reg_data), reg_data); + 0, 0, ARRAY_SIZE(reg_data), reg_data); return true; } @@ -643,9 +643,9 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel) case RATE_48M: case RATE_54M: if (channel > CB_MAX_CHANNEL_24G) - power = priv->ofdm_a_pwr_tbl[channel-15]; + power = priv->ofdm_a_pwr_tbl[channel - 15]; else - power = priv->ofdm_pwr_tbl[channel-1]; + power = priv->ofdm_pwr_tbl[channel - 1]; break; } diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index dc11a05be8c4..23eaef458556 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -44,7 +44,7 @@ #define USB_CTL_WAIT 500 /* ms */ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value, - u16 index, u16 length, u8 *buffer) + u16 index, u16 length, u8 *buffer) { int status = 0; u8 *usb_buffer; @@ -82,7 +82,7 @@ void vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data) } int vnt_control_in(struct vnt_private *priv, u8 request, u16 value, - u16 index, u16 length, u8 *buffer) + u16 index, u16 length, u8 *buffer) { int status; u8 *usb_buffer; diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index 2568dfc15181..7b620658ec38 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c @@ -1963,7 +1963,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif, wilc_get_vif_idx(vif)); if (result) { - netdev_err(vif->ndev, "Failed to SET incative time\n"); + netdev_err(vif->ndev, "Failed to SET inactive time\n"); return -EFAULT; } @@ -1976,7 +1976,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif, wilc_get_vif_idx(vif)); if (result) { - netdev_err(vif->ndev, "Failed to get incative time\n"); + netdev_err(vif->ndev, "Failed to get inactive time\n"); return -EFAULT; } diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index dbb3e24615be..119f3459b5bb 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c @@ -283,7 +283,8 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc) static int linux_wlan_txq_task(void *vp) { - int ret, txq_count; + int ret; + u32 txq_count; struct wilc_vif *vif; struct wilc *wl; struct net_device *dev = vp; @@ -812,7 +813,7 @@ _fail_wilc_wlan_: wilc_wlan_cleanup(dev); _fail_locks_: wlan_deinit_locks(dev); - netdev_err(dev, "WLAN Iinitialization FAILED\n"); + netdev_err(dev, "WLAN initialization FAILED\n"); } else { netdev_dbg(dev, "wilc1000 already initialized\n"); } diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index 68fd5b3b8b2d..ac5aaafa461c 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -214,48 +214,39 @@ static u32 get_rssi_avg(struct network_info *network_info) return rssi_v; } -static void refresh_scan(void *user_void, u8 all, bool direct_scan) +static void refresh_scan(struct wilc_priv *priv, bool direct_scan) { - struct wilc_priv *priv; - struct wiphy *wiphy; - struct cfg80211_bss *bss = NULL; + struct wiphy *wiphy = priv->dev->ieee80211_ptr->wiphy; int i; - int rssi = 0; - - priv = user_void; - wiphy = priv->dev->ieee80211_ptr->wiphy; for (i = 0; i < last_scanned_cnt; i++) { struct network_info *network_info; + s32 freq; + 
struct ieee80211_channel *channel; + int rssi; + struct cfg80211_bss *bss; network_info = &last_scanned_shadow[i]; - if (!network_info->found || all) { - s32 freq; - struct ieee80211_channel *channel; - - if (network_info) { - freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ); - channel = ieee80211_get_channel(wiphy, freq); - - rssi = get_rssi_avg(network_info); - if (memcmp("DIRECT-", network_info->ssid, 7) || - direct_scan) { - bss = cfg80211_inform_bss(wiphy, - channel, - CFG80211_BSS_FTYPE_UNKNOWN, - network_info->bssid, - network_info->tsf_hi, - network_info->cap_info, - network_info->beacon_period, - (const u8 *)network_info->ies, - (size_t)network_info->ies_len, - (s32)rssi * 100, - GFP_KERNEL); - cfg80211_put_bss(wiphy, bss); - } - } - } + if (!memcmp("DIRECT-", network_info->ssid, 7) && !direct_scan) + continue; + + freq = ieee80211_channel_to_frequency((s32)network_info->ch, + NL80211_BAND_2GHZ); + channel = ieee80211_get_channel(wiphy, freq); + rssi = get_rssi_avg(network_info); + bss = cfg80211_inform_bss(wiphy, + channel, + CFG80211_BSS_FTYPE_UNKNOWN, + network_info->bssid, + network_info->tsf_hi, + network_info->cap_info, + network_info->beacon_period, + (const u8 *)network_info->ies, + (size_t)network_info->ies_len, + (s32)rssi * 100, + GFP_KERNEL); + cfg80211_put_bss(wiphy, bss); } } @@ -442,7 +433,7 @@ static void CfgScanResult(enum scan_event scan_event, } } } else if (scan_event == SCAN_EVENT_DONE) { - refresh_scan(priv, 1, false); + refresh_scan(priv, false); mutex_lock(&priv->scan_req_lock); @@ -466,7 +457,7 @@ static void CfgScanResult(enum scan_event scan_event, }; update_scan_time(); - refresh_scan(priv, 1, false); + refresh_scan(priv, false); cfg80211_scan_done(priv->pstrScanReq, &info); priv->bCfgScanning = false; @@ -540,7 +531,7 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent, } if (bNeedScanRefresh) - refresh_scan(priv, 1, true); + refresh_scan(priv, true); } cfg80211_connect_result(dev, pstrConnectInfo->bssid, diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h index c89bf4301096..7a36561a599e 100644 --- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h +++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h @@ -227,8 +227,8 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif); void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset); void wilc_mac_indicate(struct wilc *wilc, int flag); void wilc_netdev_cleanup(struct wilc *wilc); -int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio, - const struct wilc_hif_func *ops); +int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, + int gpio, const struct wilc_hif_func *ops); void wilc1000_wlan_deinit(struct net_device *dev); void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size); int wilc_wlan_get_firmware(struct net_device *dev); diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h index 018db2299d0c..cb5afeffb234 100644 --- a/drivers/staging/wlan-ng/hfa384x.h +++ b/drivers/staging/wlan-ng/hfa384x.h @@ -413,8 +413,8 @@ struct hfa384x_join_request_data { /*-- Configuration Record: authenticateStation (data portion only) --*/ struct hfa384x_authenticate_station_data { u8 address[ETH_ALEN]; - u16 status; - u16 algorithm; + __le16 status; + __le16 algorithm; } __packed; /*-- Configuration Record: WPAData (data portion only) --*/ @@ -445,9 +445,9 @@ struct hfa384x_downloadbuffer { /*-- Information 
Record: commsquality --*/ struct hfa384x_commsquality { - u16 cq_curr_bss; - u16 asl_curr_bss; - u16 anl_curr_fc; + __le16 cq_curr_bss; + __le16 asl_curr_bss; + __le16 anl_curr_fc; } __packed; /*-- Information Record: dmbcommsquality --*/ @@ -598,51 +598,51 @@ struct hfa384x_rx_frame { /*-- Inquiry Frame, Diagnose: Communication Tallies --*/ struct hfa384x_comm_tallies_16 { - u16 txunicastframes; - u16 txmulticastframes; - u16 txfragments; - u16 txunicastoctets; - u16 txmulticastoctets; - u16 txdeferredtrans; - u16 txsingleretryframes; - u16 txmultipleretryframes; - u16 txretrylimitexceeded; - u16 txdiscards; - u16 rxunicastframes; - u16 rxmulticastframes; - u16 rxfragments; - u16 rxunicastoctets; - u16 rxmulticastoctets; - u16 rxfcserrors; - u16 rxdiscardsnobuffer; - u16 txdiscardswrongsa; - u16 rxdiscardswepundecr; - u16 rxmsginmsgfrag; - u16 rxmsginbadmsgfrag; + __le16 txunicastframes; + __le16 txmulticastframes; + __le16 txfragments; + __le16 txunicastoctets; + __le16 txmulticastoctets; + __le16 txdeferredtrans; + __le16 txsingleretryframes; + __le16 txmultipleretryframes; + __le16 txretrylimitexceeded; + __le16 txdiscards; + __le16 rxunicastframes; + __le16 rxmulticastframes; + __le16 rxfragments; + __le16 rxunicastoctets; + __le16 rxmulticastoctets; + __le16 rxfcserrors; + __le16 rxdiscardsnobuffer; + __le16 txdiscardswrongsa; + __le16 rxdiscardswepundecr; + __le16 rxmsginmsgfrag; + __le16 rxmsginbadmsgfrag; } __packed; struct hfa384x_comm_tallies_32 { - u32 txunicastframes; - u32 txmulticastframes; - u32 txfragments; - u32 txunicastoctets; - u32 txmulticastoctets; - u32 txdeferredtrans; - u32 txsingleretryframes; - u32 txmultipleretryframes; - u32 txretrylimitexceeded; - u32 txdiscards; - u32 rxunicastframes; - u32 rxmulticastframes; - u32 rxfragments; - u32 rxunicastoctets; - u32 rxmulticastoctets; - u32 rxfcserrors; - u32 rxdiscardsnobuffer; - u32 txdiscardswrongsa; - u32 rxdiscardswepundecr; - u32 rxmsginmsgfrag; - u32 rxmsginbadmsgfrag; + __le32 txunicastframes; + __le32 txmulticastframes; + __le32 txfragments; + __le32 txunicastoctets; + __le32 txmulticastoctets; + __le32 txdeferredtrans; + __le32 txsingleretryframes; + __le32 txmultipleretryframes; + __le32 txretrylimitexceeded; + __le32 txdiscards; + __le32 rxunicastframes; + __le32 rxmulticastframes; + __le32 rxfragments; + __le32 rxunicastoctets; + __le32 rxmulticastoctets; + __le32 rxfcserrors; + __le32 rxdiscardsnobuffer; + __le32 txdiscardswrongsa; + __le32 rxdiscardswepundecr; + __le32 rxmsginmsgfrag; + __le32 rxmsginbadmsgfrag; } __packed; /*-- Inquiry Frame, Diagnose: Scan Results & Subfields--*/ @@ -733,13 +733,13 @@ struct hfa384x_assoc_status { struct hfa384x_auth_request { u8 sta_addr[ETH_ALEN]; - u16 algorithm; + __le16 algorithm; } __packed; /*-- Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/ struct hfa384x_ps_user_count { - u16 usercnt; + __le16 usercnt; } __packed; struct hfa384x_key_id_changed { diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index 021fb23ae9ba..0f503652740f 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -258,7 +258,7 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev, return 0; } - netdev_dbg(wlandev->netdev, "p80211_convert_to_ether failed.\n"); + netdev_dbg(wlandev->netdev, "%s failed.\n", __func__); return CONV_TO_ETHER_FAILED; } diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c index 1a0c786c7616..344bec8cc31b 100644 --- 
a/drivers/staging/wlan-ng/prism2fw.c +++ b/drivers/staging/wlan-ng/prism2fw.c @@ -1016,7 +1016,8 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, kfree(rstmsg); kfree(rwrmsg); netdev_err(wlandev->netdev, - "writeimage: no memory for firmware download, aborting download\n"); + "%s: no memory for firmware download, aborting download\n", + __func__); return -ENOMEM; } @@ -1058,15 +1059,15 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, result = prism2mgmt_ramdl_state(wlandev, rstmsg); if (result) { netdev_err(wlandev->netdev, - "writeimage state enable failed w/ result=%d, aborting download\n", - result); + "%s state enable failed w/ result=%d, aborting download\n", + __func__, result); goto free_result; } resultcode = rstmsg->resultcode.data; if (resultcode != P80211ENUM_resultcode_success) { netdev_err(wlandev->netdev, - "writeimage()->xxxdl_state msg indicates failure, w/ resultcode=%d, aborting download.\n", - resultcode); + "%s()->xxxdl_state msg indicates failure, w/ resultcode=%d, aborting download.\n", + __func__, resultcode); result = 1; goto free_result; } @@ -1102,14 +1103,14 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, /* Check the results */ if (result) { netdev_err(wlandev->netdev, - "writeimage chunk write failed w/ result=%d, aborting download\n", - result); + "%s chunk write failed w/ result=%d, aborting download\n", + __func__, result); goto free_result; } resultcode = rstmsg->resultcode.data; if (resultcode != P80211ENUM_resultcode_success) { - pr_err("writeimage()->xxxdl_write msg indicates failure, w/ resultcode=%d, aborting download.\n", - resultcode); + pr_err("%s()->xxxdl_write msg indicates failure, w/ resultcode=%d, aborting download.\n", + __func__, resultcode); result = 1; goto free_result; } @@ -1124,15 +1125,15 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk, result = prism2mgmt_ramdl_state(wlandev, rstmsg); if (result) { netdev_err(wlandev->netdev, - "writeimage state disable failed w/ result=%d, aborting download\n", - result); + "%s state disable failed w/ result=%d, aborting download\n", + __func__, result); goto free_result; } resultcode = rstmsg->resultcode.data; if (resultcode != P80211ENUM_resultcode_success) { netdev_err(wlandev->netdev, - "writeimage()->xxxdl_state msg indicates failure, w/ resultcode=%d, aborting download.\n", - resultcode); + "%s()->xxxdl_state msg indicates failure, w/ resultcode=%d, aborting download.\n", + __func__, resultcode); result = 1; goto free_result; } diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c index e16da34389cd..8c347128e810 100644 --- a/drivers/staging/wlan-ng/prism2sta.c +++ b/drivers/staging/wlan-ng/prism2sta.c @@ -991,9 +991,9 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev, struct hfa384x_inf_frame *inf) { struct hfa384x *hw = wlandev->priv; - u16 *src16; + __le16 *src16; u32 *dst; - u32 *src32; + __le32 *src32; int i; int cnt; @@ -1005,12 +1005,12 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev, cnt = sizeof(struct hfa384x_comm_tallies_32) / sizeof(u32); if (inf->framelen > 22) { dst = (u32 *)&hw->tallies; - src32 = (u32 *)&inf->info.commtallies32; + src32 = (__le32 *)&inf->info.commtallies32; for (i = 0; i < cnt; i++, dst++, src32++) *dst += le32_to_cpu(*src32); } else { dst = (u32 *)&hw->tallies; - src16 = (u16 *)&inf->info.commtallies16; + src16 = (__le16 *)&inf->info.commtallies16; for (i = 0; i < cnt; i++, dst++, 
src16++) *dst += le16_to_cpu(*src16); } @@ -1561,7 +1561,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, */ ether_addr_copy(rec.address, inf->info.authreq.sta_addr); - rec.status = P80211ENUM_status_unspec_failure; + rec.status = cpu_to_le16(P80211ENUM_status_unspec_failure); /* * Authenticate based on the access mode. @@ -1578,7 +1578,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, for (i = 0; i < hw->authlist.cnt; i++) if (ether_addr_equal(rec.address, hw->authlist.addr[i])) { - rec.status = P80211ENUM_status_successful; + rec.status = cpu_to_le16(P80211ENUM_status_successful); break; } @@ -1590,7 +1590,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, * Allow all authentications. */ - rec.status = P80211ENUM_status_successful; + rec.status = cpu_to_le16(P80211ENUM_status_successful); break; case WLAN_ACCESS_ALLOW: @@ -1615,7 +1615,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, for (i = 0; i < cnt; i++, addr += ETH_ALEN) if (ether_addr_equal(rec.address, addr)) { - rec.status = P80211ENUM_status_successful; + rec.status = cpu_to_le16(P80211ENUM_status_successful); break; } @@ -1641,11 +1641,11 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, addr = hw->deny.addr1[0]; } - rec.status = P80211ENUM_status_successful; + rec.status = cpu_to_le16(P80211ENUM_status_successful); for (i = 0; i < cnt; i++, addr += ETH_ALEN) if (ether_addr_equal(rec.address, addr)) { - rec.status = P80211ENUM_status_unspec_failure; + rec.status = cpu_to_le16(P80211ENUM_status_unspec_failure); break; } @@ -1663,7 +1663,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, added = 0; - if (rec.status == P80211ENUM_status_successful) { + if (rec.status == cpu_to_le16(P80211ENUM_status_successful)) { for (i = 0; i < hw->authlist.cnt; i++) if (ether_addr_equal(rec.address, hw->authlist.addr[i])) @@ -1671,7 +1671,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, if (i >= hw->authlist.cnt) { if (hw->authlist.cnt >= WLAN_AUTH_MAX) { - rec.status = P80211ENUM_status_ap_full; + rec.status = cpu_to_le16(P80211ENUM_status_ap_full); } else { ether_addr_copy( hw->authlist.addr[hw->authlist.cnt], @@ -1688,7 +1688,6 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev, * it was added. 
*/ - rec.status = cpu_to_le16(rec.status); rec.algorithm = inf->info.authreq.algorithm; result = hfa384x_drvr_setconfig(hw, HFA384x_RID_AUTHENTICATESTA, diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 97f1b465d04f..7b0fa8b5c120 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -332,4 +332,16 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, ssize_t st_sensors_sysfs_scale_avail(struct device *dev, struct device_attribute *attr, char *buf); +#ifdef CONFIG_OF +void st_sensors_of_name_probe(struct device *dev, + const struct of_device_id *match, + char *name, int len); +#else +static inline void st_sensors_of_name_probe(struct device *dev, + const struct of_device_id *match, + char *name, int len) +{ +} +#endif + #endif /* ST_SENSORS_H */ diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h index 254de3c7dde8..0a2c25e06d1f 100644 --- a/include/linux/iio/common/st_sensors_i2c.h +++ b/include/linux/iio/common/st_sensors_i2c.h @@ -18,16 +18,6 @@ void st_sensors_i2c_configure(struct iio_dev *indio_dev, struct i2c_client *client, struct st_sensor_data *sdata); -#ifdef CONFIG_OF -void st_sensors_of_i2c_probe(struct i2c_client *client, - const struct of_device_id *match); -#else -static inline void st_sensors_of_i2c_probe(struct i2c_client *client, - const struct of_device_id *match) -{ -} -#endif - #ifdef CONFIG_ACPI int st_sensors_match_acpi_device(struct device *dev); #else diff --git a/include/linux/msi.h b/include/linux/msi.h index df6d59201d31..80e3b562bef6 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -66,6 +66,7 @@ struct fsl_mc_msi_desc { * @mask_pos: [PCI MSI] Mask register position * @mask_base: [PCI MSI-X] Mask register base address * @platform: [platform] Platform device specific msi descriptor data + * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data */ struct msi_desc { /* Shared device/bus type independent data */ |
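The st_sensors.h hunk just above declares the new OF helper behind CONFIG_OF with a static inline no-op fallback, so callers never need #ifdef blocks of their own. Below is a minimal sketch of that pattern for a hypothetical foo_of_probe() helper; the function name, header guard and file layout are illustrative, not part of the patch.

/* foo.h - sketch of the CONFIG_OF stub pattern used in st_sensors.h */
#ifndef FOO_H
#define FOO_H

#include <linux/device.h>
#include <linux/of.h>

#ifdef CONFIG_OF
/* Real implementation is built only when CONFIG_OF=y. */
void foo_of_probe(struct device *dev, const struct of_device_id *match,
		  char *name, int len);
#else
/* Empty static inline: the call compiles away when OF support is off. */
static inline void foo_of_probe(struct device *dev,
				const struct of_device_id *match,
				char *name, int len)
{
}
#endif

#endif /* FOO_H */

With the stub in place a driver can call foo_of_probe() unconditionally instead of wrapping each call site in #ifdef CONFIG_OF.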
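The hfa384x.h and prism2sta.c hunks annotate on-the-wire fields as __le16/__le32 and route every store and load through cpu_to_le16()/le16_to_cpu(), which lets sparse flag missed byte-order conversions. A minimal sketch of the same idea, assuming a hypothetical wire_record structure (the names are not taken from the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_record {
	__le16 status;		/* held in little-endian byte order */
	__le32 byte_count;
} __packed;

static void fill_record(struct wire_record *rec, u16 status, u32 count)
{
	/* Convert from CPU order before the record leaves the host. */
	rec->status = cpu_to_le16(status);
	rec->byte_count = cpu_to_le32(count);
}

static u32 read_count(const struct wire_record *rec)
{
	/* Convert back to CPU order when consuming the field. */
	return le32_to_cpu(rec->byte_count);
}

On little-endian machines the conversions are no-ops, so the annotations cost nothing at run time.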
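Several hunks (vt6655 card.c and mac.c, wlan-ng p80211netdev.c and prism2fw.c) replace hard-coded function names in log strings with the __func__ macro, so the messages stay accurate if a function is renamed. A small example of the idiom with a made-up do_download() helper:

#include <linux/printk.h>
#include <linux/errno.h>

static int do_download(int chunk)
{
	if (chunk < 0) {
		/* __func__ expands to "do_download" at compile time. */
		pr_err("%s: invalid chunk %d, aborting\n", __func__, chunk);
		return -EINVAL;
	}

	pr_debug("%s: writing chunk %d\n", __func__, chunk);
	return 0;
}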
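The vchiq_util.c hunk replaces kzalloc(size * sizeof(...)) with kcalloc(), which zeroes the array and guards the count * size multiplication against overflow, and the vt6656 main_usb.c hunk switches kmalloc()/kzalloc() to sizeof(*ptr) so the size tracks the pointer's type. A sketch that combines both idioms around a hypothetical context array (struct ctx and alloc_ctx_array() are illustrative):

#include <linux/slab.h>

struct ctx {
	int id;
	void *buf;
};

static struct ctx **alloc_ctx_array(unsigned int count)
{
	struct ctx **arr;
	unsigned int i;

	/* kcalloc() zeroes the array and checks count * size for overflow. */
	arr = kcalloc(count, sizeof(*arr), GFP_KERNEL);
	if (!arr)
		return NULL;

	for (i = 0; i < count; i++) {
		/* sizeof(*arr[i]) stays correct if struct ctx changes. */
		arr[i] = kzalloc(sizeof(*arr[i]), GFP_KERNEL);
		if (!arr[i])
			goto err_free;
	}

	return arr;

err_free:
	while (i--)
		kfree(arr[i]);
	kfree(arr);
	return NULL;
}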
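The vboxvideo and bcm2835-audio hunks also narrow the linkage of file-local objects: vbox_modeset and vbox_bo_driver become static, and the snd_pcm_ops tables become static const since the core only reads them. A rough sketch of that convention with an invented demo_ops table:

struct demo_ops {
	int (*open)(void);
	void (*close)(void);
};

static int demo_open(void)
{
	return 0;
}

static void demo_close(void)
{
}

/* static: not visible outside this file; const: placed in read-only data. */
static const struct demo_ops demo_ops_table = {
	.open	= demo_open,
	.close	= demo_close,
};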