From 8bea9af887de4c99a95f93f2ce400ef63e8b4e9b Mon Sep 17 00:00:00 2001
From: Lars-Peter Clausen
Date: Tue, 22 Mar 2022 12:50:27 +0200
Subject: iio: adc: ad_sigma_delta: Add sequencer support

Some sigma-delta chips support sampling of multiple channels in continuous mode. When operating with more than one channel enabled, the channel sequencer cycles through the enabled channels in sequential order, from the first channel to the last one. If a channel is disabled, it is skipped by the sequencer.

If more than one channel is used in continuous mode, instruct the device to append the status to the SPI transfer (1 extra byte) every time we receive a sample. All sigma-delta chips possessing a sampling sequencer have this ability. The status register contains the number of the converted channel. In this way, even if the CPU cannot keep up with the sampling rate, wrong channel samples are never sent to userspace.

When multiple channels are enabled in continuous mode, the device needs to perform a measurement on all slots before we can push the sample to userspace. If a channel measurement is lost during sequencing and data reading, a desync has occurred. In this case, ad_sigma_delta drops the incomplete sample and waits for the device to send the measurement on the first active slot.

Co-developed-by: Alexandru Tachici
Signed-off-by: Alexandru Tachici
Signed-off-by: Lars-Peter Clausen
Link: https://lore.kernel.org/r/20220322105029.86389-5-alexandru.tachici@analog.com
Signed-off-by: Jonathan Cameron
---
 include/linux/iio/adc/ad_sigma_delta.h | 38 ++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

(limited to 'include')

diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index c525fd51652f..7852f6c9a714 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -32,26 +32,34 @@ struct iio_dev;
 /**
  * struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
  * @set_channel: Will be called to select the current channel, may be NULL.
+ * @append_status: Will be called to enable status append at the end of the sample, may be NULL.
  * @set_mode: Will be called to select the current mode, may be NULL.
+ * @disable_all: Will be called to disable all channels, may be NULL.
  * @postprocess_sample: Is called for each sampled data word, can be used to
  *		modify or drop the sample data, may be NULL.
  * @has_registers: true if the device has writable and readable registers, false
  *		if there is just one read-only sample data shift register.
  * @addr_shift: Shift of the register address in the communications register.
  * @read_mask: Mask for the communications register having the read bit set.
+ * @status_ch_mask: Mask for the channel number stored in the status register.
  * @data_reg: Address of the data register, if 0 the default address of 0x3 will
  *		be used.
 * @irq_flags: flags for the interrupt used by the triggered buffer
+ * @num_slots: Number of sequencer slots
 */
struct ad_sigma_delta_info {
	int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+	int (*append_status)(struct ad_sigma_delta *, bool append);
	int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+	int (*disable_all)(struct ad_sigma_delta *);
	int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
	bool has_registers;
	unsigned int addr_shift;
	unsigned int read_mask;
+	unsigned int status_ch_mask;
	unsigned int data_reg;
	unsigned long irq_flags;
+	unsigned int num_slots;
};

/**
@@ -76,6 +84,13 @@ struct ad_sigma_delta {
	uint8_t comm;

	const struct ad_sigma_delta_info *info;
+	unsigned int active_slots;
+	unsigned int current_slot;
+	unsigned int num_slots;
+	bool status_appended;
+	/* map slots to channels in order to know what to expect from devices */
+	unsigned int *slots;
+	uint8_t *samples_buf;

	/*
	 * DMA (thus cache coherency maintenance) requires the
@@ -97,6 +112,29 @@ static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
	return 0;
}

+static inline int ad_sigma_delta_append_status(struct ad_sigma_delta *sd, bool append)
+{
+	int ret;
+
+	if (sd->info->append_status) {
+		ret = sd->info->append_status(sd, append);
+		if (ret < 0)
+			return ret;
+
+		sd->status_appended = append;
+	}
+
+	return 0;
+}
+
+static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
+{
+	if (sd->info->disable_all)
+		return sd->info->disable_all(sd);
+
+	return 0;
+}
+
 static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
	unsigned int mode)
{
-- cgit v1.2.3


From a8b6d6708bb682108d8c899bc0cb7873240daf8a Mon Sep 17 00:00:00 2001
From: Miquel Raynal
Date: Mon, 7 Feb 2022 15:38:28 +0100
Subject: iio: core: Enhance the kernel doc of modes and currentmode iio_dev entries

Let's provide more details about these two variables because their understanding may not be straightforward for someone not used to the IIO subsystem internal logic. The different modes will soon also be more documented for the same reason.

Signed-off-by: Miquel Raynal
Link: https://lore.kernel.org/r/20220207143840.707510-2-miquel.raynal@bootlin.com
Signed-off-by: Jonathan Cameron
---
 include/linux/iio/iio.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index faf00f2c0be6..f191b80466cd 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -488,8 +488,15 @@ struct iio_buffer_setup_ops {
 /**
  * struct iio_dev - industrial I/O device
- * @modes:		[DRIVER] operating modes supported by device
- * @currentmode:	[INTERN] current operating mode
+ * @modes:		[DRIVER] bitmask listing all the operating modes
+ *			supported by the IIO device. This list should be
+ *			initialized before registering the IIO device. It can
+ *			also be filled in by the IIO core, as a result of
+ *			enabling particular features in the driver
+ *			(see iio_triggered_event_setup()).
+ * @currentmode:	[INTERN] operating mode currently in use, may be
+ *			eventually checked by device drivers but should be
+ *			considered read-only as this is a core internal bit
 * @dev:		[DRIVER] device structure, should be assigned a parent
 *			and owner
 * @buffer:		[DRIVER] any buffer present
-- cgit v1.2.3


From 474010127e2505fc463236470908e1ff5ddb3578 Mon Sep 17 00:00:00 2001
From: Miquel Raynal
Date: Mon, 7 Feb 2022 15:38:33 +0100
Subject: iio: st_sensors: Add a local lock for protecting odr

Right now the (framework) mlock lock is (ab)used for multiple purposes:
1- protecting concurrent accesses to the odr local cache
2- avoiding sampling frequency changes whilst the buffer is running

Let's start by handling situation #1 with a local lock.

Suggested-by: Jonathan Cameron
Cc: Denis Ciocca
Signed-off-by: Miquel Raynal
Link: https://lore.kernel.org/r/20220207143840.707510-7-miquel.raynal@bootlin.com
Signed-off-by: Jonathan Cameron
---
 drivers/iio/common/st_sensors/st_sensors_core.c | 24 ++++++++++++++++++------
 include/linux/iio/common/st_sensors.h | 3 +++
 2 files changed, 21 insertions(+), 6 deletions(-)

(limited to 'include')

diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 829ffe54d8a1..0cc66c82d58a 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -71,16 +71,18 @@ st_sensors_match_odr_error:

 int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
 {
-	int err;
+	int err = 0;
	struct st_sensor_odr_avl odr_out = {0, 0};
	struct st_sensor_data *sdata = iio_priv(indio_dev);

+	mutex_lock(&sdata->odr_lock);
+
	if (!sdata->sensor_settings->odr.mask)
-		return 0;
+		goto unlock_mutex;

	err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
	if (err < 0)
-		goto st_sensors_match_odr_error;
+		goto unlock_mutex;

	if ((sdata->sensor_settings->odr.addr ==
					sdata->sensor_settings->pw.addr) &&
@@ -103,7 +105,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
	if (err >= 0)
		sdata->odr = odr_out.hz;

-st_sensors_match_odr_error:
+unlock_mutex:
+	mutex_unlock(&sdata->odr_lock);
+
	return err;
 }
 EXPORT_SYMBOL_NS(st_sensors_set_odr, IIO_ST_SENSORS);
@@ -361,6 +365,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
	struct st_sensors_platform_data *of_pdata;
	int err = 0;

+	mutex_init(&sdata->odr_lock);
+
	/* If OF/DT pdata exists, it will take precedence of anything else */
	of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
	if (IS_ERR(of_pdata))
@@ -554,18 +560,24 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
		err = -EBUSY;
		goto out;
	} else {
+		mutex_lock(&sdata->odr_lock);
		err = st_sensors_set_enable(indio_dev, true);
-		if (err < 0)
+		if (err < 0) {
+			mutex_unlock(&sdata->odr_lock);
			goto out;
+		}

		msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
		err = st_sensors_read_axis_data(indio_dev, ch, val);
-		if (err < 0)
+		if (err < 0) {
+			mutex_unlock(&sdata->odr_lock);
			goto out;
+		}

		*val = *val >> ch->scan_type.shift;

		err = st_sensors_set_enable(indio_dev, false);
+		mutex_unlock(&sdata->odr_lock);
	}
 out:
	mutex_unlock(&indio_dev->mlock);

diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 22f67845cdd3..db4a1b260348 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -237,6 +237,7 @@ struct st_sensor_settings {
 * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use. * @buffer_data: Data used by buffer part. + * @odr_lock: Local lock for preventing concurrent ODR accesses/changes */ struct st_sensor_data { struct iio_trigger *trig; @@ -261,6 +262,8 @@ struct st_sensor_data { s64 hw_timestamp; char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned; + + struct mutex odr_lock; }; #ifdef CONFIG_IIO_BUFFER -- cgit v1.2.3 From 2f53b4adfede66f1bc1c8bb7efd7ced2bad1191a Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 7 Feb 2022 15:38:36 +0100 Subject: iio: Un-inline iio_buffer_enabled() As we are going to hide the currentmode inside the opaque structure, this helper would soon need to call a non-inline function which would simply drop the benefit of having the helper defined inline in a header. One alternative is to move this helper in the core as there is no more interest in defining it inline in a header. We will pay the minor cost either way. Let's do like the iio_device_id() helper which also refers to the opaque structure and gets defined in the core. Suggested-by: Jonathan Cameron Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/r/20220207143840.707510-10-miquel.raynal@bootlin.com Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-core.c | 12 ++++++++++++ include/linux/iio/iio.h | 11 +---------- 2 files changed, 13 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 2f48e9a97274..c91930244915 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -184,6 +184,18 @@ int iio_device_id(struct iio_dev *indio_dev) } EXPORT_SYMBOL_GPL(iio_device_id); +/** + * iio_buffer_enabled() - helper function to test if the buffer is enabled + * @indio_dev: IIO device structure for device + */ +bool iio_buffer_enabled(struct iio_dev *indio_dev) +{ + return indio_dev->currentmode + & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | + INDIO_BUFFER_SOFTWARE); +} +EXPORT_SYMBOL_GPL(iio_buffer_enabled); + /** * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps * @array: array of strings diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index f191b80466cd..faabb852128a 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -550,6 +550,7 @@ struct iio_dev { }; int iio_device_id(struct iio_dev *indio_dev); +bool iio_buffer_enabled(struct iio_dev *indio_dev); const struct iio_chan_spec *iio_find_channel_from_si(struct iio_dev *indio_dev, int si); @@ -679,16 +680,6 @@ struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); __printf(2, 3) struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, const char *fmt, ...); -/** - * iio_buffer_enabled() - helper function to test if the buffer is enabled - * @indio_dev: IIO device structure for device - **/ -static inline bool iio_buffer_enabled(struct iio_dev *indio_dev) -{ - return indio_dev->currentmode - & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | - INDIO_BUFFER_SOFTWARE); -} /** * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry -- cgit v1.2.3 From 8c576f87ad7eb639b8bd4472a9bb830e0696dda5 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 7 Feb 2022 15:38:37 +0100 Subject: iio: core: Hide read accesses to iio_dev->currentmode In order to later move this variable within the opaque structure, let's create a helper for accessing it in read-only mode. 
This helper will be exposed to device drivers and kept accessible for the few that could need it. The write access to this variable however should be fully reserved to the core so in a second step we will hide this variable into the opaque structure. Cc: Eugen Hristev Cc: Nicolas Ferre Cc: Alexandre Belloni Cc: Ludovic Desroches Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/r/20220207143840.707510-11-miquel.raynal@bootlin.com Signed-off-by: Jonathan Cameron --- drivers/iio/accel/bmc150-accel-core.c | 4 ++-- drivers/iio/adc/at91-sama5d2_adc.c | 4 ++-- drivers/iio/industrialio-core.c | 11 +++++++++++ include/linux/iio/iio.h | 1 + 4 files changed, 16 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 7516d7dde1af..57e8a8350cd1 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1525,7 +1525,7 @@ static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev) struct bmc150_accel_data *data = iio_priv(indio_dev); int ret = 0; - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) + if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED) return 0; mutex_lock(&data->mutex); @@ -1557,7 +1557,7 @@ static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev) { struct bmc150_accel_data *data = iio_priv(indio_dev); - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) + if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED) return 0; mutex_lock(&data->mutex); diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 854b1f81d807..b764823ce57e 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -1117,7 +1117,7 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev) return at91_adc_configure_touch(st, true); /* if we are not in triggered mode, we cannot enable the buffer. 
*/ - if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES)) + if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES)) return -EINVAL; /* we continue with the triggered buffer */ @@ -1159,7 +1159,7 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev) return at91_adc_configure_touch(st, false); /* if we are not in triggered mode, nothing to do here */ - if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES)) + if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES)) return -EINVAL; /* diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index c91930244915..fa1e00bee787 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -2070,6 +2070,17 @@ void iio_device_release_direct_mode(struct iio_dev *indio_dev) } EXPORT_SYMBOL_GPL(iio_device_release_direct_mode); +/** + * iio_device_get_current_mode() - helper function providing read-only access to + * the @currentmode variable + * @indio_dev: IIO device structure for device + */ +int iio_device_get_current_mode(struct iio_dev *indio_dev) +{ + return indio_dev->currentmode; +} +EXPORT_SYMBOL_GPL(iio_device_get_current_mode); + subsys_initcall(iio_init); module_exit(iio_exit); diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index faabb852128a..31098ffa7dc9 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -550,6 +550,7 @@ struct iio_dev { }; int iio_device_id(struct iio_dev *indio_dev); +int iio_device_get_current_mode(struct iio_dev *indio_dev); bool iio_buffer_enabled(struct iio_dev *indio_dev); const struct iio_chan_spec -- cgit v1.2.3 From 51570c9d4b3a678f77a50ac139f67290e946ec86 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 7 Feb 2022 15:38:38 +0100 Subject: iio: core: Move the currentmode entry to the opaque structure This entry should, under no situation, be modified by device drivers. Now that we have limited its read access to device drivers really needing it and did so through a dedicated helper, we can easily move this variable to the opaque structure in order to prevent any further modification from non-authorized code (out of the core, basically). 
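As a driver-facing illustration, the only sanctioned pattern after this change is to go through the helper; a minimal sketch (the foo_* names below are hypothetical, only iio_device_get_current_mode() and INDIO_BUFFER_TRIGGERED come from this series):

	/*
	 * Hypothetical driver callback: ask the core for the mode instead of
	 * dereferencing indio_dev->currentmode, which drivers can no longer touch.
	 */
	static int foo_buffer_postenable(struct iio_dev *indio_dev)
	{
		if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
			return foo_setup_triggered_capture(indio_dev);

		return foo_setup_polled_capture(indio_dev);
	}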
Signed-off-by: Miquel Raynal Reviewed-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20220207143840.707510-12-miquel.raynal@bootlin.com Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-buffer.c | 12 ++++++------ drivers/iio/industrialio-core.c | 10 +++++++--- drivers/iio/industrialio-trigger.c | 2 +- include/linux/iio/iio-opaque.h | 4 ++++ include/linux/iio/iio.h | 4 ---- 5 files changed, 18 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 4706d0e3c954..615662d75e68 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -1065,7 +1065,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, indio_dev->active_scan_mask = config->scan_mask; indio_dev->scan_timestamp = config->scan_timestamp; indio_dev->scan_bytes = config->scan_bytes; - indio_dev->currentmode = config->mode; + iio_dev_opaque->currentmode = config->mode; iio_update_demux(indio_dev); @@ -1103,7 +1103,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, } } - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { ret = iio_trigger_attach_poll_func(indio_dev->trig, indio_dev->pollfunc); if (ret) @@ -1122,7 +1122,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev, return 0; err_detach_pollfunc: - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { iio_trigger_detach_poll_func(indio_dev->trig, indio_dev->pollfunc); } @@ -1135,7 +1135,7 @@ err_run_postdisable: if (indio_dev->setup_ops->postdisable) indio_dev->setup_ops->postdisable(indio_dev); err_undo_config: - indio_dev->currentmode = INDIO_DIRECT_MODE; + iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; indio_dev->active_scan_mask = NULL; return ret; @@ -1165,7 +1165,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) ret = ret2; } - if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { iio_trigger_detach_poll_func(indio_dev->trig, indio_dev->pollfunc); } @@ -1184,7 +1184,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev) iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask); indio_dev->active_scan_mask = NULL; - indio_dev->currentmode = INDIO_DIRECT_MODE; + iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; return ret; } diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index fa1e00bee787..a8c4e85c2bb5 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -190,7 +190,9 @@ EXPORT_SYMBOL_GPL(iio_device_id); */ bool iio_buffer_enabled(struct iio_dev *indio_dev) { - return indio_dev->currentmode + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + return iio_dev_opaque->currentmode & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE); } @@ -2072,12 +2074,14 @@ EXPORT_SYMBOL_GPL(iio_device_release_direct_mode); /** * iio_device_get_current_mode() - helper function providing read-only access to - * the @currentmode variable + * the opaque @currentmode variable * @indio_dev: IIO device structure for device */ int iio_device_get_current_mode(struct iio_dev *indio_dev) { - return indio_dev->currentmode; + struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); + + return iio_dev_opaque->currentmode; } EXPORT_SYMBOL_GPL(iio_device_get_current_mode); diff --git 
a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index f504ed351b3e..585b6cef8fcc 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -444,7 +444,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
	int ret;

	mutex_lock(&indio_dev->mlock);
-	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}

diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index 2be12b7b5dc5..6b3586b3f952 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -7,6 +7,9 @@
 * struct iio_dev_opaque - industrial I/O device opaque information
 * @indio_dev:			public industrial I/O device information
 * @id:				used to identify device internally
+ * @currentmode:		operating mode currently in use, may be eventually
+ *				checked by device drivers but should be considered
+ *				read-only as this is a core internal bit
 * @driver_module:		used to make it harder to undercut users
 * @info_exist_lock:		lock to prevent use during removal
 * @trig_readonly:		mark the current trigger immutable
@@ -36,6 +39,7 @@
 */
struct iio_dev_opaque {
	struct iio_dev			indio_dev;
+	int				currentmode;
	int				id;
	struct module			*driver_module;
	struct mutex			info_exist_lock;

diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 31098ffa7dc9..85cb924debd9 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -494,9 +494,6 @@ struct iio_buffer_setup_ops {
 *			also be filled in by the IIO core, as a result of
 *			enabling particular features in the driver
 *			(see iio_triggered_event_setup()).
- * @currentmode:	[INTERN] operating mode currently in use, may be
- *			eventually checked by device drivers but should be
- *			considered read-only as this is a core internal bit
 * @dev:		[DRIVER] device structure, should be assigned a parent
 *			and owner
 * @buffer:		[DRIVER] any buffer present
@@ -523,7 +520,6 @@ struct iio_buffer_setup_ops {
 */
struct iio_dev {
	int				modes;
-	int				currentmode;
	struct device			dev;

	struct iio_buffer		*buffer;
-- cgit v1.2.3


From f67c6c73cb07a4778425a2064640333ef7bfa42b Mon Sep 17 00:00:00 2001
From: Miquel Raynal
Date: Mon, 7 Feb 2022 15:38:39 +0100
Subject: iio: core: Simplify the registration of kfifo buffers

Among all the users of the kfifo buffers, no one uses the INDIO_BUFFER_HARDWARE mode. So let's take this as a general rule and simplify the internals a little - above all the documentation - by eliminating unused specific cases. Use the INDIO_BUFFER_SOFTWARE mode by default with kfifo buffers, which will basically mimic what all the "non direct" modes do.
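For callers of the kfifo helpers the conversion is mechanical; a before/after sketch with a hypothetical foo driver (the function signatures are the ones changed by this patch):

	/* Before: the mode flags had to be passed explicitly. */
	ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
					  INDIO_BUFFER_SOFTWARE,
					  &foo_buffer_setup_ops);

	/* After: INDIO_BUFFER_SOFTWARE is implied by the kfifo backend. */
	ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, &foo_buffer_setup_ops);
	if (ret)
		return ret;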
Cc: Benson Leung Cc: Guenter Roeck Cc: Jyoti Bhayana Cc: Jean-Baptiste Maneyrol Cc: Lorenzo Bianconi Cc: Michael Hennerich Cc: Greg Kroah-Hartman Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/r/20220207143840.707510-13-miquel.raynal@bootlin.com Signed-off-by: Jonathan Cameron --- drivers/iio/accel/adxl367.c | 1 - drivers/iio/accel/fxls8962af-core.c | 1 - drivers/iio/accel/sca3000.c | 1 - drivers/iio/accel/ssp_accel_sensor.c | 1 - drivers/iio/adc/ina2xx-adc.c | 1 - drivers/iio/adc/ti_am335x_adc.c | 4 +--- drivers/iio/buffer/kfifo_buf.c | 10 +--------- drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 3 +-- drivers/iio/common/scmi_sensors/scmi_iio.c | 1 - drivers/iio/gyro/ssp_gyro_sensor.c | 1 - drivers/iio/health/max30100.c | 1 - drivers/iio/health/max30102.c | 1 - drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c | 1 - drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c | 1 - drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 1 - drivers/iio/light/apds9960.c | 1 - drivers/staging/iio/impedance-analyzer/ad5933.c | 1 - include/linux/iio/kfifo_buf.h | 5 ++--- 18 files changed, 5 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index 62960134ea19..0289ed8cf2c6 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -1567,7 +1567,6 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops, return ret; ret = devm_iio_kfifo_buffer_setup_ext(st->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &adxl367_buffer_ops, adxl367_fifo_attributes); if (ret) diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index a9d2f10d5d45..8874d6d61725 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -1217,7 +1217,6 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq) return ret; ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &fxls8962af_buffer_ops); if (ret) return ret; diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 83c81072511e..29a68a7d34cd 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -1474,7 +1474,6 @@ static int sca3000_probe(struct spi_device *spi) indio_dev->modes = INDIO_DIRECT_MODE; ret = devm_iio_kfifo_buffer_setup(&spi->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &sca3000_ring_setup_ops); if (ret) return ret; diff --git a/drivers/iio/accel/ssp_accel_sensor.c b/drivers/iio/accel/ssp_accel_sensor.c index a1164b439f41..7ca9d0d543e0 100644 --- a/drivers/iio/accel/ssp_accel_sensor.c +++ b/drivers/iio/accel/ssp_accel_sensor.c @@ -113,7 +113,6 @@ static int ssp_accel_probe(struct platform_device *pdev) indio_dev->available_scan_masks = ssp_accel_scan_mask; ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &ssp_accel_buffer_ops); if (ret) return ret; diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 8d902a32a0fd..08a2c547f0b3 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -1027,7 +1027,6 @@ static int ina2xx_probe(struct i2c_client *client, indio_dev->name = id->name; ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &ina2xx_setup_ops); if (ret) return ret; diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index dbdc1ef48566..567d43a30955 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -376,9 +376,7 @@ static int 
tiadc_iio_buffered_hardware_setup(struct device *dev, { int ret; - ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, - setup_ops); + ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops); if (ret) return ret; diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c index 416d35a61ae2..35d8b4077376 100644 --- a/drivers/iio/buffer/kfifo_buf.c +++ b/drivers/iio/buffer/kfifo_buf.c @@ -259,8 +259,6 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev) * devm_iio_kfifo_buffer_setup_ext - Allocate a kfifo buffer & attach it to an IIO device * @dev: Device object to which to attach the life-time of this kfifo buffer * @indio_dev: The device the buffer should be attached to - * @mode_flags: The mode flags for this buffer (INDIO_BUFFER_SOFTWARE and/or - * INDIO_BUFFER_TRIGGERED). * @setup_ops: The setup_ops required to configure the HW part of the buffer (optional) * @buffer_attrs: Extra sysfs buffer attributes for this IIO buffer * @@ -271,22 +269,16 @@ static struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev) */ int devm_iio_kfifo_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, - int mode_flags, const struct iio_buffer_setup_ops *setup_ops, const struct attribute **buffer_attrs) { struct iio_buffer *buffer; - if (!mode_flags) - return -EINVAL; - buffer = devm_iio_kfifo_allocate(dev); if (!buffer) return -ENOMEM; - mode_flags &= kfifo_access_funcs.modes; - - indio_dev->modes |= mode_flags; + indio_dev->modes |= INDIO_BUFFER_SOFTWARE; indio_dev->setup_ops = setup_ops; buffer->attrs = buffer_attrs; diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index b2725c6adc7f..a4cf1d9a8a49 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -333,8 +333,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, * We can not use trigger here, as events are generated * as soon as sample_frequency is set. 
*/ - ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, NULL, + ret = devm_iio_kfifo_buffer_setup_ext(dev, indio_dev, NULL, cros_ec_sensor_fifo_attributes); if (ret) return ret; diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c index d538bf3ab1ef..793d628db55f 100644 --- a/drivers/iio/common/scmi_sensors/scmi_iio.c +++ b/drivers/iio/common/scmi_sensors/scmi_iio.c @@ -686,7 +686,6 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev) err = devm_iio_kfifo_buffer_setup(&scmi_iio_dev->dev, scmi_iio_dev, - INDIO_BUFFER_SOFTWARE, &scmi_iio_buffer_ops); if (err < 0) { dev_err(dev, diff --git a/drivers/iio/gyro/ssp_gyro_sensor.c b/drivers/iio/gyro/ssp_gyro_sensor.c index 5fd1bf9902ea..d332474bc484 100644 --- a/drivers/iio/gyro/ssp_gyro_sensor.c +++ b/drivers/iio/gyro/ssp_gyro_sensor.c @@ -113,7 +113,6 @@ static int ssp_gyro_probe(struct platform_device *pdev) indio_dev->available_scan_masks = ssp_gyro_scan_mask; ret = devm_iio_kfifo_buffer_setup(&pdev->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &ssp_gyro_buffer_ops); if (ret) return ret; diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c index 36ba7611d9ce..ad5717965223 100644 --- a/drivers/iio/health/max30100.c +++ b/drivers/iio/health/max30100.c @@ -433,7 +433,6 @@ static int max30100_probe(struct i2c_client *client, indio_dev->modes = INDIO_DIRECT_MODE; ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &max30100_buffer_setup_ops); if (ret) return ret; diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 2292876c55e2..abbcef563807 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -542,7 +542,6 @@ static int max30102_probe(struct i2c_client *client, } ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &max30102_buffer_setup_ops); if (ret) return ret; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c index 383cc3250342..c3f433ad3af6 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c @@ -731,7 +731,6 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st) indio_dev->available_scan_masks = inv_icm42600_accel_scan_masks; ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &inv_icm42600_buffer_ops); if (ret) return ERR_PTR(ret); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c index cec1dd0e0464..9d94a8518e3c 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c @@ -743,7 +743,6 @@ struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st) indio_dev->setup_ops = &inv_icm42600_buffer_ops; ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &inv_icm42600_buffer_ops); if (ret) return ERR_PTR(ret); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 16730a780964..f80c62849d30 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -746,7 +746,6 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw) continue; ret = devm_iio_kfifo_buffer_setup(hw->dev, hw->iio_devs[i], - INDIO_BUFFER_SOFTWARE, &st_lsm6dsx_buffer_ops); if (ret) return ret; diff --git a/drivers/iio/light/apds9960.c 
b/drivers/iio/light/apds9960.c index 4141c0fa7bc4..09b831f9f40b 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -1003,7 +1003,6 @@ static int apds9960_probe(struct i2c_client *client, indio_dev->modes = INDIO_DIRECT_MODE; ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &apds9960_buffer_setup_ops); if (ret) return ret; diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 793918e1c45f..f177b20f0f2d 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -749,7 +749,6 @@ static int ad5933_probe(struct i2c_client *client, indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, - INDIO_BUFFER_SOFTWARE, &ad5933_ring_setup_ops); if (ret) return ret; diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h index ccd2ceae7b25..8a83fb58232d 100644 --- a/include/linux/iio/kfifo_buf.h +++ b/include/linux/iio/kfifo_buf.h @@ -12,11 +12,10 @@ void iio_kfifo_free(struct iio_buffer *r); int devm_iio_kfifo_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, - int mode_flags, const struct iio_buffer_setup_ops *setup_ops, const struct attribute **buffer_attrs); -#define devm_iio_kfifo_buffer_setup(dev, indio_dev, mode_flags, setup_ops) \ - devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (mode_flags), (setup_ops), NULL) +#define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \ + devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL) #endif -- cgit v1.2.3 From 3abfaefb9a6dda5e0bfa6160f55abfd0e32d8b0a Mon Sep 17 00:00:00 2001 From: Liu Ying Date: Tue, 19 Apr 2022 09:08:49 +0800 Subject: phy: Add LVDS configuration options This patch allows LVDS PHYs to be configured through the generic functions and through a custom structure added to the generic union. The parameters added here are based on common LVDS PHY implementation practices. The set of parameters should cover all potential users. Cc: Kishon Vijay Abraham I Cc: Vinod Koul Cc: NXP Linux Team Signed-off-by: Liu Ying Link: https://lore.kernel.org/r/20220419010852.452169-3-victor.liu@nxp.com Signed-off-by: Vinod Koul --- include/linux/phy/phy-lvds.h | 32 ++++++++++++++++++++++++++++++++ include/linux/phy/phy.h | 4 ++++ 2 files changed, 36 insertions(+) create mode 100644 include/linux/phy/phy-lvds.h (limited to 'include') diff --git a/include/linux/phy/phy-lvds.h b/include/linux/phy/phy-lvds.h new file mode 100644 index 000000000000..09931d080a6d --- /dev/null +++ b/include/linux/phy/phy-lvds.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2020,2022 NXP + */ + +#ifndef __PHY_LVDS_H_ +#define __PHY_LVDS_H_ + +/** + * struct phy_configure_opts_lvds - LVDS configuration set + * @bits_per_lane_and_dclk_cycle: Number of bits per lane per differential + * clock cycle. + * @differential_clk_rate: Clock rate, in Hertz, of the LVDS + * differential clock. + * @lanes: Number of active, consecutive, + * data lanes, starting from lane 0, + * used for the transmissions. + * @is_slave: Boolean, true if the phy is a slave + * which works together with a master + * phy to support dual link transmission, + * otherwise a regular phy or a master phy. + * + * This structure is used to represent the configuration state of a LVDS phy. 
+ */ +struct phy_configure_opts_lvds { + unsigned int bits_per_lane_and_dclk_cycle; + unsigned long differential_clk_rate; + unsigned int lanes; + bool is_slave; +}; + +#endif /* __PHY_LVDS_H_ */ diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index f3286f4cd306..b1413757fcc3 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -17,6 +17,7 @@ #include #include +#include #include struct phy; @@ -57,10 +58,13 @@ enum phy_media { * the MIPI_DPHY phy mode. * @dp: Configuration set applicable for phys supporting * the DisplayPort protocol. + * @lvds: Configuration set applicable for phys supporting + * the LVDS phy mode. */ union phy_configure_opts { struct phy_configure_opts_mipi_dphy mipi_dphy; struct phy_configure_opts_dp dp; + struct phy_configure_opts_lvds lvds; }; /** -- cgit v1.2.3 From ea3364db9068e37bd8e12b9a99c7c92385593ed7 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Fri, 8 Apr 2022 14:48:34 -0700 Subject: dt-bindings: interconnect: qcom: Add sc8280xp binding The Qualcomm SC8280XP platform has the usual set of busses, add a binding for these interconnect providers and port definitions to allow interconnect paths to be expressed in the sc8280xp DeviceTree. Reviewed-by: Krzysztof Kozlowski Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220408214835.624494-1-bjorn.andersson@linaro.org Signed-off-by: Georgi Djakov --- .../bindings/interconnect/qcom,rpmh.yaml | 12 ++ include/dt-bindings/interconnect/qcom,sc8280xp.h | 232 +++++++++++++++++++++ 2 files changed, 244 insertions(+) create mode 100644 include/dt-bindings/interconnect/qcom,sc8280xp.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml index 5a911be0c2ea..0286f40fdc44 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml @@ -60,6 +60,18 @@ properties: - qcom,sc8180x-mc-virt - qcom,sc8180x-mmss-noc - qcom,sc8180x-system-noc + - qcom,sc8280xp-aggre1-noc + - qcom,sc8280xp-aggre2-noc + - qcom,sc8280xp-clk-virt + - qcom,sc8280xp-config-noc + - qcom,sc8280xp-dc-noc + - qcom,sc8280xp-gem-noc + - qcom,sc8280xp-lpass-ag-noc + - qcom,sc8280xp-mc-virt + - qcom,sc8280xp-mmss-noc + - qcom,sc8280xp-nspa-noc + - qcom,sc8280xp-nspb-noc + - qcom,sc8280xp-system-noc - qcom,sdm845-aggre1-noc - qcom,sdm845-aggre2-noc - qcom,sdm845-config-noc diff --git a/include/dt-bindings/interconnect/qcom,sc8280xp.h b/include/dt-bindings/interconnect/qcom,sc8280xp.h new file mode 100644 index 000000000000..a3e5fda7c127 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sc8280xp.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Linaro Ltd. 
+ */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H + +/* aggre1_noc */ +#define MASTER_QSPI_0 0 +#define MASTER_QUP_1 1 +#define MASTER_QUP_2 2 +#define MASTER_A1NOC_CFG 3 +#define MASTER_IPA 4 +#define MASTER_EMAC_1 5 +#define MASTER_SDCC_4 6 +#define MASTER_UFS_MEM 7 +#define MASTER_USB3_0 8 +#define MASTER_USB3_1 9 +#define MASTER_USB3_MP 10 +#define MASTER_USB4_0 11 +#define MASTER_USB4_1 12 +#define SLAVE_A1NOC_SNOC 13 +#define SLAVE_USB_NOC_SNOC 14 +#define SLAVE_SERVICE_A1NOC 15 + +/* aggre2_noc */ +#define MASTER_QDSS_BAM 0 +#define MASTER_QUP_0 1 +#define MASTER_A2NOC_CFG 2 +#define MASTER_CRYPTO 3 +#define MASTER_SENSORS_PROC 4 +#define MASTER_SP 5 +#define MASTER_EMAC 6 +#define MASTER_PCIE_0 7 +#define MASTER_PCIE_1 8 +#define MASTER_PCIE_2A 9 +#define MASTER_PCIE_2B 10 +#define MASTER_PCIE_3A 11 +#define MASTER_PCIE_3B 12 +#define MASTER_PCIE_4 13 +#define MASTER_QDSS_ETR 14 +#define MASTER_SDCC_2 15 +#define MASTER_UFS_CARD 16 +#define SLAVE_A2NOC_SNOC 17 +#define SLAVE_ANOC_PCIE_GEM_NOC 18 +#define SLAVE_SERVICE_A2NOC 19 + +/* clk_virt */ +#define MASTER_IPA_CORE 0 +#define MASTER_QUP_CORE_0 1 +#define MASTER_QUP_CORE_1 2 +#define MASTER_QUP_CORE_2 3 +#define SLAVE_IPA_CORE 4 +#define SLAVE_QUP_CORE_0 5 +#define SLAVE_QUP_CORE_1 6 +#define SLAVE_QUP_CORE_2 7 + +/* config_noc */ +#define MASTER_GEM_NOC_CNOC 0 +#define MASTER_GEM_NOC_PCIE_SNOC 1 +#define SLAVE_AHB2PHY_0 2 +#define SLAVE_AHB2PHY_1 3 +#define SLAVE_AHB2PHY_2 4 +#define SLAVE_AOSS 5 +#define SLAVE_APPSS 6 +#define SLAVE_CAMERA_CFG 7 +#define SLAVE_CLK_CTL 8 +#define SLAVE_CDSP_CFG 9 +#define SLAVE_CDSP1_CFG 10 +#define SLAVE_RBCPR_CX_CFG 11 +#define SLAVE_RBCPR_MMCX_CFG 12 +#define SLAVE_RBCPR_MX_CFG 13 +#define SLAVE_CPR_NSPCX 14 +#define SLAVE_CRYPTO_0_CFG 15 +#define SLAVE_CX_RDPM 16 +#define SLAVE_DCC_CFG 17 +#define SLAVE_DISPLAY_CFG 18 +#define SLAVE_DISPLAY1_CFG 19 +#define SLAVE_EMAC_CFG 20 +#define SLAVE_EMAC1_CFG 21 +#define SLAVE_GFX3D_CFG 22 +#define SLAVE_HWKM 23 +#define SLAVE_IMEM_CFG 24 +#define SLAVE_IPA_CFG 25 +#define SLAVE_IPC_ROUTER_CFG 26 +#define SLAVE_LPASS 27 +#define SLAVE_MX_RDPM 28 +#define SLAVE_MXC_RDPM 29 +#define SLAVE_PCIE_0_CFG 30 +#define SLAVE_PCIE_1_CFG 31 +#define SLAVE_PCIE_2A_CFG 32 +#define SLAVE_PCIE_2B_CFG 33 +#define SLAVE_PCIE_3A_CFG 34 +#define SLAVE_PCIE_3B_CFG 35 +#define SLAVE_PCIE_4_CFG 36 +#define SLAVE_PCIE_RSC_CFG 37 +#define SLAVE_PDM 38 +#define SLAVE_PIMEM_CFG 39 +#define SLAVE_PKA_WRAPPER_CFG 40 +#define SLAVE_PMU_WRAPPER_CFG 41 +#define SLAVE_QDSS_CFG 42 +#define SLAVE_QSPI_0 43 +#define SLAVE_QUP_0 44 +#define SLAVE_QUP_1 45 +#define SLAVE_QUP_2 46 +#define SLAVE_SDCC_2 47 +#define SLAVE_SDCC_4 48 +#define SLAVE_SECURITY 49 +#define SLAVE_SMMUV3_CFG 50 +#define SLAVE_SMSS_CFG 51 +#define SLAVE_SPSS_CFG 52 +#define SLAVE_TCSR 53 +#define SLAVE_TLMM 54 +#define SLAVE_UFS_CARD_CFG 55 +#define SLAVE_UFS_MEM_CFG 56 +#define SLAVE_USB3_0 57 +#define SLAVE_USB3_1 58 +#define SLAVE_USB3_MP 59 +#define SLAVE_USB4_0 60 +#define SLAVE_USB4_1 61 +#define SLAVE_VENUS_CFG 62 +#define SLAVE_VSENSE_CTRL_CFG 63 +#define SLAVE_VSENSE_CTRL_R_CFG 64 +#define SLAVE_A1NOC_CFG 65 +#define SLAVE_A2NOC_CFG 66 +#define SLAVE_ANOC_PCIE_BRIDGE_CFG 67 +#define SLAVE_DDRSS_CFG 68 +#define SLAVE_CNOC_MNOC_CFG 69 +#define SLAVE_SNOC_CFG 70 +#define SLAVE_SNOC_SF_BRIDGE_CFG 71 +#define SLAVE_IMEM 72 +#define SLAVE_PIMEM 73 +#define SLAVE_SERVICE_CNOC 74 +#define SLAVE_PCIE_0 75 +#define SLAVE_PCIE_1 76 +#define SLAVE_PCIE_2A 77 
+#define SLAVE_PCIE_2B 78 +#define SLAVE_PCIE_3A 79 +#define SLAVE_PCIE_3B 80 +#define SLAVE_PCIE_4 81 +#define SLAVE_QDSS_STM 82 +#define SLAVE_SMSS 83 +#define SLAVE_TCU 84 + +/* dc_noc */ +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LLCC_CFG 1 +#define SLAVE_GEM_NOC_CFG 2 + +/* gem_noc */ +#define MASTER_GPU_TCU 0 +#define MASTER_PCIE_TCU 1 +#define MASTER_SYS_TCU 2 +#define MASTER_APPSS_PROC 3 +#define MASTER_COMPUTE_NOC 4 +#define MASTER_COMPUTE_NOC_1 5 +#define MASTER_GEM_NOC_CFG 6 +#define MASTER_GFX3D 7 +#define MASTER_MNOC_HF_MEM_NOC 8 +#define MASTER_MNOC_SF_MEM_NOC 9 +#define MASTER_ANOC_PCIE_GEM_NOC 10 +#define MASTER_SNOC_GC_MEM_NOC 11 +#define MASTER_SNOC_SF_MEM_NOC 12 +#define SLAVE_GEM_NOC_CNOC 13 +#define SLAVE_LLCC 14 +#define SLAVE_GEM_NOC_PCIE_CNOC 15 +#define SLAVE_SERVICE_GEM_NOC_1 16 +#define SLAVE_SERVICE_GEM_NOC_2 17 +#define SLAVE_SERVICE_GEM_NOC 18 + +/* lpass_ag_noc */ +#define MASTER_CNOC_LPASS_AG_NOC 0 +#define MASTER_LPASS_PROC 1 +#define SLAVE_LPASS_CORE_CFG 2 +#define SLAVE_LPASS_LPI_CFG 3 +#define SLAVE_LPASS_MPU_CFG 4 +#define SLAVE_LPASS_TOP_CFG 5 +#define SLAVE_LPASS_SNOC 6 +#define SLAVE_SERVICES_LPASS_AML_NOC 7 +#define SLAVE_SERVICE_LPASS_AG_NOC 8 + +/* mc_virt */ +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +/*mmss_noc */ +#define MASTER_CAMNOC_HF 0 +#define MASTER_MDP0 1 +#define MASTER_MDP1 2 +#define MASTER_MDP_CORE1_0 3 +#define MASTER_MDP_CORE1_1 4 +#define MASTER_CNOC_MNOC_CFG 5 +#define MASTER_ROTATOR 6 +#define MASTER_ROTATOR_1 7 +#define MASTER_VIDEO_P0 8 +#define MASTER_VIDEO_P1 9 +#define MASTER_VIDEO_PROC 10 +#define MASTER_CAMNOC_ICP 11 +#define MASTER_CAMNOC_SF 12 +#define SLAVE_MNOC_HF_MEM_NOC 13 +#define SLAVE_MNOC_SF_MEM_NOC 14 +#define SLAVE_SERVICE_MNOC 15 + +/* nspa_noc */ +#define MASTER_CDSP_NOC_CFG 0 +#define MASTER_CDSP_PROC 1 +#define SLAVE_CDSP_MEM_NOC 2 +#define SLAVE_NSP_XFR 3 +#define SLAVE_SERVICE_NSP_NOC 4 + +/* nspb_noc */ +#define MASTER_CDSPB_NOC_CFG 0 +#define MASTER_CDSP_PROC_B 1 +#define SLAVE_CDSPB_MEM_NOC 2 +#define SLAVE_NSPB_XFR 3 +#define SLAVE_SERVICE_NSPB_NOC 4 + +/* system_noc */ +#define MASTER_A1NOC_SNOC 0 +#define MASTER_A2NOC_SNOC 1 +#define MASTER_USB_NOC_SNOC 2 +#define MASTER_LPASS_ANOC 3 +#define MASTER_SNOC_CFG 4 +#define MASTER_PIMEM 5 +#define MASTER_GIC 6 +#define SLAVE_SNOC_GEM_NOC_GC 7 +#define SLAVE_SNOC_GEM_NOC_SF 8 +#define SLAVE_SERVICE_SNOC 9 + +#endif -- cgit v1.2.3 From f918cfc08c1755b9e54cd6effc923fa809045cf4 Mon Sep 17 00:00:00 2001 From: Ronak Jain Date: Wed, 6 Apr 2022 03:55:23 -0700 Subject: firmware: xilinx: add support for IOCTL and QUERY ID feature check Add support to check if IOCTL ID or QUERY ID is supported in firmware or not. 
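A minimal caller-side sketch (IOCTL_FOO is a placeholder for a real IOCTL ID from xlnx-zynqmp.h; the return codes match the implementation below):

	/* Probe for an optional firmware function before relying on it. */
	ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_FOO);
	if (ret == -EOPNOTSUPP) {
		/* Firmware does not implement this IOCTL: skip the feature. */
		return 0;
	} else if (ret < 0) {
		/* e.g. -ENODATA when only feature check version 1 is available */
		return ret;
	}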
Signed-off-by: Ronak Jain Link: https://lore.kernel.org/r/1649242526-17493-2-git-send-email-ronak.jain@xilinx.com Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/xilinx/zynqmp.c | 62 +++++++++++++++++++++++++++++++++++- include/linux/firmware/xlnx-zynqmp.h | 11 +++++++ 2 files changed, 72 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index f21ece56695e..41ca41697790 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -36,8 +36,16 @@ /* BOOT_PIN_CTRL_MASK- out_val[11:8], out_en[3:0] */ #define CRL_APB_BOOTPIN_CTRL_MASK 0xF0FU +/* IOCTL/QUERY feature payload size */ +#define FEATURE_PAYLOAD_SIZE 2 + +/* Firmware feature check version mask */ +#define FIRMWARE_VERSION_MASK GENMASK(15, 0) + static bool feature_check_enabled; static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); +static u32 ioctl_features[FEATURE_PAYLOAD_SIZE]; +static u32 query_features[FEATURE_PAYLOAD_SIZE]; static struct platform_device *em_dev; @@ -168,7 +176,8 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2, } /** - * zynqmp_pm_feature() - Check weather given feature is supported or not + * zynqmp_pm_feature() - Check whether given feature is supported or not and + * store supported IOCTL/QUERY ID mask * @api_id: API ID to check * * Return: Returns status, either success or error+reason @@ -208,10 +217,61 @@ int zynqmp_pm_feature(const u32 api_id) feature_data->feature_status = ret; hash_add(pm_api_features_map, &feature_data->hentry, api_id); + if (api_id == PM_IOCTL) + /* Store supported IOCTL IDs mask */ + memcpy(ioctl_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4); + else if (api_id == PM_QUERY_DATA) + /* Store supported QUERY IDs mask */ + memcpy(query_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4); + return ret; } EXPORT_SYMBOL_GPL(zynqmp_pm_feature); +/** + * zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function + * is supported or not + * @api_id: PM_IOCTL or PM_QUERY_DATA + * @id: IOCTL or QUERY function IDs + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id) +{ + int ret; + u32 *bit_mask; + + /* Input arguments validation */ + if (id >= 64 || (api_id != PM_IOCTL && api_id != PM_QUERY_DATA)) + return -EINVAL; + + /* Check feature check API version */ + ret = zynqmp_pm_feature(PM_FEATURE_CHECK); + if (ret < 0) + return ret; + + /* Check if feature check version 2 is supported or not */ + if ((ret & FIRMWARE_VERSION_MASK) == PM_API_VERSION_2) { + /* + * Call feature check for IOCTL/QUERY API to get IOCTL ID or + * QUERY ID feature status. + */ + ret = zynqmp_pm_feature(api_id); + if (ret < 0) + return ret; + + bit_mask = (api_id == PM_IOCTL) ? 
ioctl_features : query_features; + + if ((bit_mask[(id / 32)] & BIT((id % 32))) == 0U) + return -EOPNOTSUPP; + } else { + return -ENODATA; + } + + return 0; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported); + /** * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer * caller function depending on the configuration diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 14f00a7672d1..1ec73d5352c3 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -29,6 +29,11 @@ /* SMC SIP service Call Function Identifier Prefix */ #define PM_SIP_SVC 0xC2000000 + +/* PM API versions */ +#define PM_API_VERSION_2 2 + +/* ATF only commands */ #define PM_GET_TRUSTZONE_VERSION 0xa03 #define PM_SET_SUSPEND_MODE 0xa02 #define GET_CALLBACK_DATA 0xa01 @@ -460,6 +465,7 @@ int zynqmp_pm_load_pdi(const u32 src, const u64 address); int zynqmp_pm_register_notifier(const u32 node, const u32 event, const u32 wake, const u32 enable); int zynqmp_pm_feature(const u32 api_id); +int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); #else @@ -678,6 +684,11 @@ static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id) return -ENODEV; } +static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id) +{ + return -ENODEV; +} + static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id) { return -ENODEV; -- cgit v1.2.3 From d405ac52ab19301622bddabf4bf925d199937823 Mon Sep 17 00:00:00 2001 From: Rohit Agarwal Date: Wed, 13 Apr 2022 18:23:34 +0530 Subject: dt-bindings: interconnect: Add Qualcomm SDX65 DT bindings Add interconnect IDs for Qualcomm SDX65 platform. Signed-off-by: Rohit Agarwal Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/1649854415-11174-2-git-send-email-quic_rohiagar@quicinc.com Signed-off-by: Georgi Djakov --- .../bindings/interconnect/qcom,rpmh.yaml | 3 + include/dt-bindings/interconnect/qcom,sdx65.h | 67 ++++++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 include/dt-bindings/interconnect/qcom,sdx65.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml index 5a911be0c2ea..9ee2a51d6ad7 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml @@ -72,6 +72,9 @@ properties: - qcom,sdx55-mc-virt - qcom,sdx55-mem-noc - qcom,sdx55-system-noc + - qcom,sdx65-mc-virt + - qcom,sdx65-mem-noc + - qcom,sdx65-system-noc - qcom,sm8150-aggre1-noc - qcom,sm8150-aggre2-noc - qcom,sm8150-camnoc-noc diff --git a/include/dt-bindings/interconnect/qcom,sdx65.h b/include/dt-bindings/interconnect/qcom,sdx65.h new file mode 100644 index 000000000000..b25288aa7d74 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sdx65.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_TCU_0 0
+#define MASTER_SNOC_GC_MEM_NOC 1
+#define MASTER_APPSS_PROC 2
+#define SLAVE_LLCC 3
+#define SLAVE_MEM_NOC_SNOC 4
+#define SLAVE_MEM_NOC_PCIE_SNOC 5
+
+#define MASTER_AUDIO 0
+#define MASTER_BLSP_1 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QPIC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_SPMI_FETCHER 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_IPA 7
+#define MASTER_MEM_NOC_SNOC 8
+#define MASTER_MEM_NOC_PCIE_SNOC 9
+#define MASTER_CRYPTO 10
+#define MASTER_IPA_PCIE 11
+#define MASTER_PCIE_0 12
+#define MASTER_QDSS_ETR 13
+#define MASTER_SDCC_1 14
+#define MASTER_USB3 15
+#define SLAVE_AOSS 16
+#define SLAVE_APPSS 17
+#define SLAVE_AUDIO 18
+#define SLAVE_BLSP_1 19
+#define SLAVE_CLK_CTL 20
+#define SLAVE_CRYPTO_0_CFG 21
+#define SLAVE_CNOC_DDRSS 22
+#define SLAVE_ECC_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MSS 26
+#define SLAVE_PCIE_PARF 27
+#define SLAVE_PDM 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QPIC 31
+#define SLAVE_SDCC_1 32
+#define SLAVE_SNOC_CFG 33
+#define SLAVE_SPMI_FETCHER 34
+#define SLAVE_SPMI_VGI_COEX 35
+#define SLAVE_TCSR 36
+#define SLAVE_TLMM 37
+#define SLAVE_USB3 38
+#define SLAVE_USB3_PHY_CFG 39
+#define SLAVE_ANOC_SNOC 40
+#define SLAVE_SNOC_MEM_NOC_GC 41
+#define SLAVE_IMEM 42
+#define SLAVE_SERVICE_SNOC 43
+#define SLAVE_PCIE_0 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#endif
-- cgit v1.2.3


From d434743e5cac3558381b767f343eef5af246a4dc Mon Sep 17 00:00:00 2001
From: Manivannan Sadhasivam
Date: Tue, 5 Apr 2022 19:27:37 +0530
Subject: bus: mhi: ep: Add support for registering MHI endpoint controllers

This commit adds support for registering MHI endpoint controller drivers with the MHI endpoint stack. MHI endpoint controller drivers manage the interaction with the host machines (such as x86). They are also the MHI endpoint bus master in charge of managing the physical link between the host and endpoint device.

Even though the MHI spec is bus agnostic, the current implementation is entirely based on the PCIe bus. The endpoint controller driver encloses all information about the underlying physical bus, like PCIe. The registration process involves parsing the channel configuration and allocating an MHI EP device.

Channels used in the endpoint stack follow the perspective of the MHI host stack.
i.e.,
UL - From host to endpoint
DL - From endpoint to host

Reviewed-by: Alex Elder
Signed-off-by: Manivannan Sadhasivam
Link: https://lore.kernel.org/r/20220405135754.6622-2-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman
---
 drivers/bus/mhi/Kconfig | 1 +
 drivers/bus/mhi/Makefile | 3 +
 drivers/bus/mhi/ep/Kconfig | 10 ++
 drivers/bus/mhi/ep/Makefile | 2 +
 drivers/bus/mhi/ep/internal.h | 156 ++++++++++++++++++++++++++++
 drivers/bus/mhi/ep/main.c | 236 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/mhi_ep.h | 136 ++++++++++++++++++++++++
 7 files changed, 544 insertions(+)
 create mode 100644 drivers/bus/mhi/ep/Kconfig
 create mode 100644 drivers/bus/mhi/ep/Makefile
 create mode 100644 drivers/bus/mhi/ep/internal.h
 create mode 100644 drivers/bus/mhi/ep/main.c
 create mode 100644 include/linux/mhi_ep.h

(limited to 'include')

diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index 4748df7f9cd5..b39a11e6c624 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -6,3 +6,4 @@
 #
 source "drivers/bus/mhi/host/Kconfig"
+source "drivers/bus/mhi/ep/Kconfig"

diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 5f5708a249f5..46981331b38f 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,2 +1,5 @@
 # Host MHI stack
 obj-y += host/
+
+# Endpoint MHI stack
+obj-y += ep/

diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig
new file mode 100644
index 000000000000..90ab3b040672
--- /dev/null
+++ b/drivers/bus/mhi/ep/Kconfig
@@ -0,0 +1,10 @@
+config MHI_BUS_EP
+	tristate "Modem Host Interface (MHI) bus Endpoint implementation"
+	help
+	  Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+	  communication protocol used by a host processor to control
+	  and communicate with a modem device over a high speed peripheral
+	  bus or shared memory.
+
+	  MHI_BUS_EP implements the MHI protocol for the endpoint devices,
+	  such as an SDX55 modem connected to the host machine over PCIe.

diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile
new file mode 100644
index 000000000000..64e29252b608
--- /dev/null
+++ b/drivers/bus/mhi/ep/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
+mhi_ep-y := main.o

diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
new file mode 100644
index 000000000000..0d3923186a5e
--- /dev/null
+++ b/drivers/bus/mhi/ep/internal.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ * + */ + +#ifndef _MHI_EP_INTERNAL_ +#define _MHI_EP_INTERNAL_ + +#include + +#include "../common.h" + +extern struct bus_type mhi_ep_bus_type; + +#define MHI_REG_OFFSET 0x100 +#define BHI_REG_OFFSET 0x200 + +/* MHI registers */ +#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN) +#define EP_MHIVER (MHI_REG_OFFSET + MHIVER) +#define EP_MHICFG (MHI_REG_OFFSET + MHICFG) +#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF) +#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF) +#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF) +#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF) +#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF) +#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL) +#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS) +#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER) +#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER) +#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER) +#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER) +#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER) +#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER) +#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER) +#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER) +#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER) +#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER) +#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER) +#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER) +#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER) +#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER) +#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER) +#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER) + +/* MHI BHI registers */ +#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC) +#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV) + +/* MHI Doorbell registers */ +#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n)) +#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n)) +#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n)) +#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n)) + +#define MHI_CTRL_INT_STATUS 0x4 +#define MHI_CTRL_INT_STATUS_MSK BIT(0) +#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1) +#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n)) + +#define MHI_CTRL_INT_CLEAR 0x4c +#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2) +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) + +/* + * Unlike the usual "masking" convention, writing "1" to a bit in this register + * enables the interrupt and writing "0" will disable it.. 
+ */ +#define MHI_CTRL_INT_MASK 0x94 +#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0) +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_CRDB_MASK BIT(1) + +#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0) + +#define NR_OF_CMD_RINGS 1 +#define MHI_MASK_ROWS_CH_DB 4 +#define MHI_MASK_ROWS_EV_DB 4 +#define MHI_MASK_CH_LEN 32 +#define MHI_MASK_EV_LEN 32 + +/* Generic context */ +struct mhi_generic_ctx { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +enum mhi_ep_ring_type { + RING_TYPE_CMD, + RING_TYPE_ER, + RING_TYPE_CH, +}; + +/* Ring element */ +union mhi_ep_ring_ctx { + struct mhi_cmd_ctxt cmd; + struct mhi_event_ctxt ev; + struct mhi_chan_ctxt ch; + struct mhi_generic_ctx generic; +}; + +struct mhi_ep_ring { + struct mhi_ep_cntrl *mhi_cntrl; + union mhi_ep_ring_ctx *ring_ctx; + struct mhi_ring_element *ring_cache; + enum mhi_ep_ring_type type; + u64 rbase; + size_t rd_offset; + size_t wr_offset; + size_t ring_size; + u32 db_offset_h; + u32 db_offset_l; + u32 ch_id; +}; + +struct mhi_ep_cmd { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_event { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_chan { + char *name; + struct mhi_ep_device *mhi_dev; + struct mhi_ep_ring ring; + struct mutex lock; + void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); + enum mhi_ch_state state; + enum dma_data_direction dir; + u64 tre_loc; + u32 tre_size; + u32 tre_bytes_left; + u32 chan; + bool skip_td; +}; + +#endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c new file mode 100644 index 000000000000..d932ad01761b --- /dev/null +++ b/drivers/bus/mhi/ep/main.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MHI Endpoint bus stack + * + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +static DEFINE_IDA(mhi_ep_cntrl_ida); + +static void mhi_ep_release_device(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + mhi_dev->mhi_cntrl->mhi_dev = NULL; + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created in mhi_ep_create_device() + * if the mhi_dev associated with it is NULL. + */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_device_type dev_type) +{ + struct mhi_ep_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_ep_bus_type; + dev->release = mhi_ep_release_device; + + /* Controller device is always allocated first */ + if (dev_type == MHI_DEVICE_CONTROLLER) + /* for MHI controller device, parent is the bus device (e.g. 
PCI EPF) */ + dev->parent = mhi_cntrl->cntrl_dev; + else + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_type = dev_type; + + return mhi_dev; +} + +static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + const struct mhi_ep_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + u32 chan, i; + int ret = -EINVAL; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * Allocate max_channels supported by the MHI endpoint and populate + * only the defined channels + */ + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), + GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + for (i = 0; i < config->num_channels; i++) { + struct mhi_ep_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n", + chan, mhi_cntrl->max_chan); + goto error_chan_cfg; + } + + /* Bi-directional and direction less channels are not supported */ + if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { + dev_err(dev, "Invalid direction (%u) for channel (%u)\n", + ch_cfg->dir, chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + mhi_chan->dir = ch_cfg->dir; + mutex_init(&mhi_chan->lock); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +/* + * Allocate channel and command rings here. Event rings will be allocated + * in mhi_ep_power_up() as the config comes from the host. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + struct mhi_ep_device *mhi_dev; + int ret; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev) + return -EINVAL; + + ret = mhi_ep_chan_init(mhi_cntrl, config); + if (ret) + return ret; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_ch; + } + + /* Set controller index */ + ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); + if (ret < 0) + goto err_free_cmd; + + mhi_cntrl->index = ret; + + /* Allocate the controller device */ + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); + ret = PTR_ERR(mhi_dev); + goto err_ida_free; + } + + dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + mhi_cntrl->mhi_dev = mhi_dev; + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_put_dev; + + dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); + + return 0; + +err_put_dev: + put_device(&mhi_dev->dev); +err_ida_free: + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_free_cmd: + kfree(mhi_cntrl->mhi_cmd); +err_free_ch: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_register_controller); + +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); + +static int mhi_ep_match(struct device *dev, struct 
device_driver *drv) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + return 0; +}; + +struct bus_type mhi_ep_bus_type = { + .name = "mhi_ep", + .dev_name = "mhi_ep", + .match = mhi_ep_match, +}; + +static int __init mhi_ep_init(void) +{ + return bus_register(&mhi_ep_bus_type); +} + +static void __exit mhi_ep_exit(void) +{ + bus_unregister(&mhi_ep_bus_type); +} + +postcore_initcall(mhi_ep_init); +module_exit(mhi_ep_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Bus Endpoint stack"); +MODULE_AUTHOR("Manivannan Sadhasivam "); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h new file mode 100644 index 000000000000..891b199a9703 --- /dev/null +++ b/include/linux/mhi_ep.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. + * + */ +#ifndef _MHI_EP_H_ +#define _MHI_EP_H_ + +#include +#include + +#define MHI_EP_DEFAULT_MTU 0x8000 + +/** + * struct mhi_ep_channel_config - Channel configuration structure for controller + * @name: The name of this channel + * @num: The number assigned to this channel + * @num_elements: The number of elements that can be queued to this channel + * @dir: Direction that data may flow on this channel + */ +struct mhi_ep_channel_config { + char *name; + u32 num; + u32 num_elements; + enum dma_data_direction dir; +}; + +/** + * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration + * @mhi_version: MHI spec version supported by the controller + * @max_channels: Maximum number of channels supported + * @num_channels: Number of channels defined in @ch_cfg + * @ch_cfg: Array of defined channels + */ +struct mhi_ep_cntrl_config { + u32 mhi_version; + u32 max_channels; + u32 num_channels; + const struct mhi_ep_channel_config *ch_cfg; +}; + +/** + * struct mhi_ep_db_info - MHI Endpoint doorbell info + * @mask: Mask of the doorbell interrupt + * @status: Status of the doorbell interrupt + */ +struct mhi_ep_db_info { + u32 mask; + u32 status; +}; + +/** + * struct mhi_ep_cntrl - MHI Endpoint controller structure + * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI + * Endpoint controller + * @mhi_dev: MHI Endpoint device instance for the controller + * @mmio: MMIO region containing the MHI registers + * @mhi_chan: Points to the channel configuration table + * @mhi_event: Points to the event ring configurations table + * @mhi_cmd: Points to the command ring configurations table + * @sm: MHI Endpoint state machine + * @raise_irq: CB function for raising IRQ to the host + * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it + * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context + * @read_from_host: CB function for reading from host memory from endpoint + * @write_to_host: CB function for writing to host memory from endpoint + * @mhi_state: MHI Endpoint state + * @max_chan: Maximum channels supported by the endpoint controller + * @mru: MRU (Maximum Receive Unit) value of the endpoint controller + * @index: MHI Endpoint controller index + */ +struct mhi_ep_cntrl { + struct device *cntrl_dev; + struct mhi_ep_device *mhi_dev; + void __iomem *mmio; + + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_event *mhi_event; + struct mhi_ep_cmd *mhi_cmd; + struct mhi_ep_sm *sm; + + void (*raise_irq)(struct 
mhi_ep_cntrl *mhi_cntrl, u32 vector); + int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, + void __iomem **virt, size_t size); + void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys, + void __iomem *virt, size_t size); + int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size); + int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size); + + enum mhi_state mhi_state; + + u32 max_chan; + u32 mru; + u32 index; +}; + +/** + * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds + * to channels or is associated with controllers + * @dev: Driver model device node for the MHI Endpoint device + * @mhi_cntrl: Controller the device belongs to + * @id: Pointer to MHI Endpoint device ID struct + * @name: Name of the associated MHI Endpoint device + * @ul_chan: UL channel for the device + * @dl_chan: DL channel for the device + * @dev_type: MHI device type + */ +struct mhi_ep_device { + struct device dev; + struct mhi_ep_cntrl *mhi_cntrl; + const struct mhi_device_id *id; + const char *name; + struct mhi_ep_chan *ul_chan; + struct mhi_ep_chan *dl_chan; + enum mhi_device_type dev_type; +}; + +#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev) + +/** + * mhi_ep_register_controller - Register MHI Endpoint controller + * @mhi_cntrl: MHI Endpoint controller to register + * @config: Configuration to use for the controller + * + * Return: 0 if controller registration succeeds, a negative error code otherwise. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config); + +/** + * mhi_ep_unregister_controller - Unregister MHI Endpoint controller + * @mhi_cntrl: MHI Endpoint controller to unregister + */ +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); + +#endif -- cgit v1.2.3 From ee0360b20b3fa08e2eee300eacb45e812ae44578 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:38 +0530 Subject: bus: mhi: ep: Add support for registering MHI endpoint client drivers This commit adds support for registering MHI endpoint client drivers with the MHI endpoint stack. MHI endpoint client drivers bind to one or more MHI endpoint devices in order to send and receive the upper-layer protocol packets like IP packets, modem control messages, and diagnostics messages over the MHI bus.
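To make the shape of this API concrete, a minimal client driver built on top of it could look like the sketch below. Everything here is illustrative only: the "IP_SW0" channel name and all demo_* identifiers are hypothetical, not part of the patch.

/* Hypothetical client driver sketch for the API added by this patch */
#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Called when data sent by the host (UL) arrives on our channel */
static void demo_ul_xfer_cb(struct mhi_ep_device *mhi_dev,
			    struct mhi_result *result)
{
	dev_dbg(&mhi_dev->dev, "UL: %zu bytes\n", result->bytes_xferd);
}

/* Called when a transfer queued towards the host (DL) completes */
static void demo_dl_xfer_cb(struct mhi_ep_device *mhi_dev,
			    struct mhi_result *result)
{
	dev_dbg(&mhi_dev->dev, "DL: %zu bytes\n", result->bytes_xferd);
}

static int demo_probe(struct mhi_ep_device *mhi_dev,
		      const struct mhi_device_id *id)
{
	dev_info(&mhi_dev->dev, "bound to channel pair: %s\n", mhi_dev->name);
	return 0;
}

static void demo_remove(struct mhi_ep_device *mhi_dev)
{
}

/* Matched against mhi_ep_device names by mhi_ep_match() */
static const struct mhi_device_id demo_id_table[] = {
	{ .chan = "IP_SW0" },
	{}
};
MODULE_DEVICE_TABLE(mhi, demo_id_table);

static struct mhi_ep_driver demo_driver = {
	.id_table = demo_id_table,
	.probe = demo_probe,
	.remove = demo_remove,
	/* Both callbacks are mandatory for registration to succeed */
	.ul_xfer_cb = demo_ul_xfer_cb,
	.dl_xfer_cb = demo_dl_xfer_cb,
	.driver = {
		.name = "demo_mhi_ep_client",
	},
};
module_mhi_ep_driver(demo_driver);

MODULE_LICENSE("GPL");

Note that __mhi_ep_driver_register() rejects a driver that leaves either xfer callback unset, since mhi_ep_driver_probe() wires both channels of the pair to those callbacks before calling the driver's probe.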
Reviewed-by: Hemant Kumar Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-3-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 57 +++++++++++++++++++++++++++++-- 2 files changed, 140 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index d932ad01761b..f7d5f75fc083 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -198,9 +198,88 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) } EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); +static int mhi_ep_driver_probe(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_ep_driver_remove(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Disconnect the channels associated with the driver */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to the client driver */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mhi_chan->xfer_cb = NULL; + mutex_unlock(&mhi_chan->lock); + } + + /* Remove the client driver now */ + mhi_drv->remove(mhi_dev); + + return 0; +} + +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + /* Client drivers should have callbacks defined for both channels */ + if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) + return -EINVAL; + + driver->bus = &mhi_ep_bus_type; + driver->owner = owner; + driver->probe = mhi_ep_driver_probe; + driver->remove = mhi_ep_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); + +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); + static int mhi_ep_match(struct device *dev, struct device_driver *drv) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); + const struct mhi_device_id *id; /* * If the device is a controller type then there is no client driver @@ -209,6 +288,12 @@ static int mhi_ep_match(struct device *dev, struct device_driver *drv) if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) return 0; + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + return 0; }; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 891b199a9703..e2b94f9eb846 100644 --- 
a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -101,8 +101,8 @@ struct mhi_ep_cntrl { * @mhi_cntrl: Controller the device belongs to * @id: Pointer to MHI Endpoint device ID struct * @name: Name of the associated MHI Endpoint device - * @ul_chan: UL channel for the device - * @dl_chan: DL channel for the device + * @ul_chan: UL (from host to endpoint) channel for the device + * @dl_chan: DL (from endpoint to host) channel for the device * @dev_type: MHI device type */ struct mhi_ep_device { @@ -115,7 +115,60 @@ struct mhi_ep_device { enum mhi_device_type dev_type; }; +/** + * struct mhi_ep_driver - Structure representing an MHI Endpoint client driver + * @id_table: Pointer to MHI Endpoint device ID table + * @driver: Device driver model driver + * @probe: CB function for client driver probe function + * @remove: CB function for client driver remove function + * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer + * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer + */ +struct mhi_ep_driver { + const struct mhi_device_id *id_table; + struct device_driver driver; + int (*probe)(struct mhi_ep_device *mhi_ep, + const struct mhi_device_id *id); + void (*remove)(struct mhi_ep_device *mhi_ep); + void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev, + struct mhi_result *result); + void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev, + struct mhi_result *result); +}; + #define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev) +#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver) + +/* + * module_mhi_ep_driver() - Helper macro for drivers that don't do + * anything special other than using default mhi_ep_driver_register() and + * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate. + * Each module may only use this macro once. + */ +#define module_mhi_ep_driver(mhi_drv) \ + module_driver(mhi_drv, mhi_ep_driver_register, \ + mhi_ep_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define mhi_ep_driver_register(mhi_drv) \ + __mhi_ep_driver_register(mhi_drv, THIS_MODULE) + +/** + * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus + * @mhi_drv: Driver to be associated with the device + * @owner: The module owner + * + * Return: 0 if driver registration succeeds, a negative error code otherwise. + */ +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner); + +/** + * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus + * @mhi_drv: Driver associated with the device + */ +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv); /** * mhi_ep_register_controller - Register MHI Endpoint controller -- cgit v1.2.3 From e9e4da23cd65ea76ba658346f5c182791bd1cea9 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:40 +0530 Subject: bus: mhi: ep: Add support for managing MMIO registers Add support for managing the Memory Mapped Input Output (MMIO) registers of the MHI bus. All MHI operations are carried out using the MMIO registers by both the host and the endpoint device. The MMIO registers reside inside the endpoint device memory (fixed location based on the platform) and the address is passed by the MHI EP controller driver during its registration.
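As a worked illustration of the accessors this patch introduces (see mmio.c in the diff below), a masked field update is a plain read-modify-write, with __ffs() of the mask giving the shift. This is a sketch only: the example_ function name is hypothetical, while EP_MHISTATUS and MHISTATUS_MHISTATE_MASK come from internal.h and the shared common.h respectively.

/* Sketch: set the MHISTATE field of MHISTATUS, two equivalent ways */
static void example_set_mhistate(struct mhi_ep_cntrl *mhi_cntrl, u32 state)
{
	u32 regval;

	/* Using the helper added by this patch */
	mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS,
				 MHISTATUS_MHISTATE_MASK, state);

	/* Open-coded equivalent of what the helper does internally */
	regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHISTATUS);
	regval &= ~MHISTATUS_MHISTATE_MASK;
	regval |= (state << __ffs(MHISTATUS_MHISTATE_MASK)) & MHISTATUS_MHISTATE_MASK;
	mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, regval);
}

This is the same pattern sm.c uses in a later patch in this series to report the endpoint's MHI state to the host.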
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-5-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/Makefile | 2 +- drivers/bus/mhi/ep/internal.h | 26 ++++ drivers/bus/mhi/ep/main.c | 6 +- drivers/bus/mhi/ep/mmio.c | 273 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 18 +++ 5 files changed, 323 insertions(+), 2 deletions(-) create mode 100644 drivers/bus/mhi/ep/mmio.c (limited to 'include') diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile index 64e29252b608..a1555ae287ad 100644 --- a/drivers/bus/mhi/ep/Makefile +++ b/drivers/bus/mhi/ep/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o -mhi_ep-y := main.o +mhi_ep-y := main.o mmio.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index 0d3923186a5e..475425a30d85 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -153,4 +153,30 @@ struct mhi_ep_chan { bool skip_td; }; +/* MMIO related functions */ +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset); +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val); +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask); +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl); +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring); +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value); +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset); +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); + #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 6c64745e8a06..7dcc784f10d1 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -214,7 +214,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_device *mhi_dev; int ret; - if (!mhi_cntrl || !mhi_cntrl->cntrl_dev) + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio) return -EINVAL; ret = mhi_ep_chan_init(mhi_cntrl, config); @@ -227,6 +227,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + /* Set MHI version and AMSS EE before enumeration */ + mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + /* Set controller index */ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); if (ret < 0) diff --git 
a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c new file mode 100644 index 000000000000..b5bfd22f2c8e --- /dev/null +++ b/drivers/bus/mhi/ep/mmio.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include +#include +#include + +#include "internal.h" + +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset) +{ + return readl(mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val) +{ + writel(val, mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, offset); + regval &= ~mask; + regval |= (val << __ffs(mask)) & mask; + mhi_ep_mmio_write(mhi_cntrl, offset, regval); +} + +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask) +{ + u32 regval; + + regval = mhi_ep_mmio_read(dev, offset); + regval &= mask; + regval >>= __ffs(mask); + + return regval; +} + +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL); + *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval); + *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval); +} + +static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable) +{ + u32 chid_mask, chid_shift, chdb_idx, val; + + chid_shift = ch_id % 32; + chid_mask = BIT(chid_shift); + chdb_idx = ch_id / 32; + + val = enable ? 1 : 0; + + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val); + + /* Update the local copy of the channel mask */ + mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask; + mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift; +} + +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true); +} + +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false); +} + +static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val); + mhi_cntrl->chdb[i].mask = val; + } +} + +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true); +} + +static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false); +} + +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + bool chdb = false; + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i)); + if (mhi_cntrl->chdb[i].status) + chdb = true; + } + + /* Return whether a channel doorbell interrupt occurred or not */ + return chdb; +} + +static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? 
MHI_ERDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val); +} + +static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false); +} + +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 1); +} + +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 0); +} + +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 1); +} + +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 0); +} + +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl); + mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl); + mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl); +} + +static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + MHI_CHDB_INT_CLEAR_n_CLEAR_ALL); + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i), + MHI_ERDB_INT_CLEAR_n_CLEAR_ALL); + + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, + MHI_CTRL_INT_MMIO_WR_CLEAR | + MHI_CTRL_INT_CRDB_CLEAR | + MHI_CTRL_INT_CRDB_MHICTRL_CLEAR); +} + +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER); + mhi_cntrl->ch_ctx_host_pa = regval; + mhi_cntrl->ch_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER); + mhi_cntrl->ch_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER); + mhi_cntrl->ev_ctx_host_pa = regval; + mhi_cntrl->ev_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER); + mhi_cntrl->ev_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER); + mhi_cntrl->cmd_ctx_host_pa = regval; + mhi_cntrl->cmd_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER); + mhi_cntrl->cmd_ctx_host_pa |= regval; +} + +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + u64 db_offset; + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h); + db_offset = regval; + db_offset <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l); + db_offset |= regval; + + return db_offset; +} + +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value); +} + +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0); +} + +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0); + mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0); + mhi_ep_mmio_clear_interrupts(mhi_cntrl); +} + +void mhi_ep_mmio_init(struct mhi_ep_cntrl 
*mhi_cntrl) +{ + u32 regval; + + mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF); + mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF); + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); + + mhi_ep_mmio_reset(mhi_cntrl); +} + +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); +} diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index e2b94f9eb846..5db048e258e4 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -59,6 +59,10 @@ struct mhi_ep_db_info { * @mhi_event: Points to the event ring configurations table * @mhi_cmd: Points to the command ring configurations table * @sm: MHI Endpoint state machine + * @ch_ctx_host_pa: Physical address of host channel context data structure + * @ev_ctx_host_pa: Physical address of host event context data structure + * @cmd_ctx_host_pa: Physical address of host command context data structure + * @chdb: Array of channel doorbell interrupt info * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -67,6 +71,10 @@ struct mhi_ep_db_info { * @mhi_state: MHI Endpoint state * @max_chan: Maximum channels supported by the endpoint controller * @mru: MRU (Maximum Receive Unit) value of the endpoint controller + * @event_rings: Number of event rings supported by the endpoint controller + * @hw_event_rings: Number of hardware event rings supported by the endpoint controller + * @chdb_offset: Channel doorbell offset set by the host + * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index */ struct mhi_ep_cntrl { @@ -79,6 +87,12 @@ struct mhi_ep_cntrl { struct mhi_ep_cmd *mhi_cmd; struct mhi_ep_sm *sm; + u64 ch_ctx_host_pa; + u64 ev_ctx_host_pa; + u64 cmd_ctx_host_pa; + + struct mhi_ep_db_info chdb[4]; + void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, void __iomem **virt, size_t size); @@ -91,6 +105,10 @@ struct mhi_ep_cntrl { u32 max_chan; u32 mru; + u32 event_rings; + u32 hw_event_rings; + u32 chdb_offset; + u32 erdb_offset; u32 index; }; -- cgit v1.2.3 From 961aeb6892242e0a667f5b8eb62b9b0a6041752c Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:42 +0530 Subject: bus: mhi: ep: Add support for sending events to the host Add support for sending the events to the host over MHI bus from the endpoint. Following events are supported: 1. Transfer completion event 2. Command completion event 3. State change event 4. Execution Environment (EE) change event An event is sent whenever an operation has been completed in the MHI EP device. Event is sent using the MHI event ring and additionally the host is notified using an IRQ if required. 
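For reference, an event ring element is just a pointer plus two packed dwords. A state change event built with the macros this patch adds to common.h would look like the sketch below; the bit positions follow the FIELD_PREP() masks in those macro definitions, and the snippet assumes the surrounding main.c context.

/* Sketch: composing and sending a state change event for M0 */
struct mhi_ring_element event = {};	/* event.ptr stays 0 (MHI_SC_EV_PTR) */
int ret;

event.dword[0] = MHI_SC_EV_DWORD0(MHI_STATE_M0);	/* state in bits 31:24 */
event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);	/* type in bits 23:16 */

/* Ring 0 is the primary event ring; BEI does not apply to control events */
ret = mhi_ep_send_event(mhi_cntrl, 0, &event, 0);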
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-7-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/common.h | 22 +++++++++++ drivers/bus/mhi/ep/internal.h | 4 ++ drivers/bus/mhi/ep/main.c | 89 +++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 8 ++++ 4 files changed, 123 insertions(+) (limited to 'include') diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h index b4ef9acd3ce7..f794b9c8049e 100644 --- a/drivers/bus/mhi/common.h +++ b/drivers/bus/mhi/common.h @@ -165,6 +165,22 @@ #define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1))) #define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0))) +/* State change event */ +#define MHI_SC_EV_PTR 0 +#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state)) +#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + +/* EE event */ +#define MHI_EE_EV_PTR 0 +#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee)) +#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + + +/* Command Completion event */ +#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr) +#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code)) +#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + /* Transfer descriptor macros */ #define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr) #define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len)) @@ -175,6 +191,12 @@ FIELD_PREP(BIT(9), ieot) | \ FIELD_PREP(BIT(8), ieob) | \ FIELD_PREP(BIT(0), chain)) +#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0)) +#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1)))) /* RSC transfer descriptor macros */ #define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr) diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index d16b87061ac6..e096d9cb2cb1 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -197,4 +197,8 @@ void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *s void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); +/* MHI EP core functions */ +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); + #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 7dcc784f10d1..eca1f58ba5fb 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -18,6 +18,93 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, + struct mhi_ring_element *el, bool bei) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + union mhi_ep_ring_ctx *ctx; + struct mhi_ep_ring *ring; + int ret; + + mutex_lock(&mhi_cntrl->event_lock); + ring = &mhi_cntrl->mhi_event[ring_idx].ring; + ctx = (union mhi_ep_ring_ctx 
*)&mhi_cntrl->ev_ctx_cache[ring_idx]; + if (!ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); + if (ret) { + dev_err(dev, "Error starting event ring (%u)\n", ring_idx); + goto err_unlock; + } + } + + /* Add element to the event ring */ + ret = mhi_ep_ring_add_element(ring, el); + if (ret) { + dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx); + goto err_unlock; + } + + mutex_unlock(&mhi_cntrl->event_lock); + + /* + * Raise IRQ to host only if the BEI flag is not set in TRE. Host might + * set this flag for interrupt moderation as per MHI protocol. + */ + if (!bei) + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + + return 0; + +err_unlock: + mutex_unlock(&mhi_cntrl->event_lock); + + return ret; +} + +static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) +{ + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event.dword[0] = MHI_TRE_EV_DWORD0(code, len); + event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + + return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); +} + +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_SC_EV_DWORD0(state); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_EE_EV_DWORD0(exec_env); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) +{ + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event.dword[0] = MHI_CC_EV_DWORD0(code); + event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -227,6 +314,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + mutex_init(&mhi_cntrl->event_lock); + /* Set MHI version and AMSS EE before enumeration */ mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 5db048e258e4..46236ffb528a 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -59,10 +59,14 @@ struct mhi_ep_db_info { * @mhi_event: Points to the event ring configurations table * @mhi_cmd: Points to the command ring configurations table * @sm: MHI Endpoint state machine + * @ch_ctx_cache: Cache of host channel context data structure + * @ev_ctx_cache: Cache of host event context data structure + * @cmd_ctx_cache: Cache of host command context data structure * @ch_ctx_host_pa: Physical address of host channel context data structure * @ev_ctx_host_pa: Physical address of host event context data structure * @cmd_ctx_host_pa: Physical address of host command context data structure * @chdb: Array of channel doorbell interrupt info + * 
@event_lock: Lock for protecting event rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -87,11 +91,15 @@ struct mhi_ep_cntrl { struct mhi_ep_cmd *mhi_cmd; struct mhi_ep_sm *sm; + struct mhi_chan_ctxt *ch_ctx_cache; + struct mhi_event_ctxt *ev_ctx_cache; + struct mhi_cmd_ctxt *cmd_ctx_cache; u64 ch_ctx_host_pa; u64 ev_ctx_host_pa; u64 cmd_ctx_host_pa; struct mhi_ep_db_info chdb[4]; + struct mutex event_lock; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From f9baa4f737950523ca648866dfa345ac378e4487 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:43 +0530 Subject: bus: mhi: ep: Add support for managing MHI state machine Add support for managing the MHI state machine by controlling the state transitions. Only the following MHI state transitions are supported: 1. Ready state 2. M0 state 3. M3 state 4. SYS_ERR state Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-8-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/Makefile | 2 +- drivers/bus/mhi/ep/internal.h | 11 ++++ drivers/bus/mhi/ep/main.c | 54 ++++++++++++++++- drivers/bus/mhi/ep/sm.c | 136 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 12 ++++ 5 files changed, 213 insertions(+), 2 deletions(-) create mode 100644 drivers/bus/mhi/ep/sm.c (limited to 'include') diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile index 7ba0e04801eb..aad85f180b70 100644 --- a/drivers/bus/mhi/ep/Makefile +++ b/drivers/bus/mhi/ep/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o -mhi_ep-y := main.o mmio.o ring.o +mhi_ep-y := main.o mmio.o ring.o sm.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index e096d9cb2cb1..4f2e26841702 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -146,6 +146,11 @@ struct mhi_ep_event { struct mhi_ep_ring ring; }; +struct mhi_ep_state_transition { + struct list_head node; + enum mhi_state state; +}; + struct mhi_ep_chan { char *name; struct mhi_ep_device *mhi_dev; @@ -200,5 +205,11 @@ void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); /* MHI EP core functions */ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); +bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state, + enum mhi_state mhi_state); +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state); +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index eca1f58ba5fb..c912daf6dc65 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -105,6 +105,43 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static void mhi_ep_state_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = 
container_of(work, struct mhi_ep_cntrl, state_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_state_transition *itr, *tmp; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling MHI state transition to %s\n", + mhi_state_str(itr->state)); + + switch (itr->state) { + case MHI_STATE_M0: + ret = mhi_ep_set_m0_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M0 state\n"); + break; + case MHI_STATE_M3: + ret = mhi_ep_set_m3_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M3 state\n"); + break; + default: + dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); + break; + } + kfree(itr); + } +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -314,6 +351,17 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + + mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); + if (!mhi_cntrl->wq) { + ret = -ENOMEM; + goto err_free_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + spin_lock_init(&mhi_cntrl->state_lock); + spin_lock_init(&mhi_cntrl->list_lock); mutex_init(&mhi_cntrl->event_lock); /* Set MHI version and AMSS EE before enumeration */ @@ -323,7 +371,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, /* Set controller index */ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); if (ret < 0) - goto err_free_cmd; + goto err_destroy_wq; mhi_cntrl->index = ret; @@ -351,6 +399,8 @@ err_put_dev: put_device(&mhi_dev->dev); err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->wq); err_free_cmd: kfree(mhi_cntrl->mhi_cmd); err_free_ch: @@ -364,6 +414,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) { struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + destroy_workqueue(mhi_cntrl->wq); + kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c new file mode 100644 index 000000000000..ffc02f5d0a0d --- /dev/null +++ b/drivers/bus/mhi/ep/sm.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. 
+ * Author: Manivannan Sadhasivam + */ + +#include +#include +#include "internal.h" + +bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state cur_mhi_state, + enum mhi_state mhi_state) +{ + if (mhi_state == MHI_STATE_SYS_ERR) + return true; /* Allowed in any state */ + + if (mhi_state == MHI_STATE_READY) + return cur_mhi_state == MHI_STATE_RESET; + + if (mhi_state == MHI_STATE_M0) + return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY; + + if (mhi_state == MHI_STATE_M3) + return cur_mhi_state == MHI_STATE_M0; + + return false; +} + +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) { + dev_err(dev, "MHI state change to %s from %s is not allowed!\n", + mhi_state_str(mhi_state), + mhi_state_str(mhi_cntrl->mhi_state)); + return -EACCES; + } + + /* TODO: Add support for M1 and M2 states */ + if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) { + dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state)); + return -EOPNOTSUPP; + } + + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state); + mhi_cntrl->mhi_state = mhi_state; + + if (mhi_state == MHI_STATE_READY) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1); + + if (mhi_state == MHI_STATE_SYS_ERR) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1); + + return 0; +} + +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state old_state; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + old_state = mhi_cntrl->mhi_state; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) + return ret; + + /* Signal host that the device moved to M0 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); + if (ret) { + dev_err(dev, "Failed sending M0 state change event\n"); + return ret; + } + + if (old_state == MHI_STATE_READY) { + /* Send AMSS EE event to host */ + ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS); + if (ret) { + dev_err(dev, "Failed sending AMSS EE event\n"); + return ret; + } + } + + return 0; +} + +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) + return ret; + + /* Signal host that the device moved to M3 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); + if (ret) { + dev_err(dev, "Failed sending M3 state change event\n"); + return ret; + } + + return 0; +} + +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state mhi_state; + int ret, is_ready; + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Ensure that the MHISTATUS is set to RESET by host */ + mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK); + is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK); + + if (mhi_state != MHI_STATE_RESET || is_ready) { + dev_err(dev, "READY state transition failed. 
MHI host not in RESET state\n"); + spin_unlock_bh(&mhi_cntrl->state_lock); + return -EIO; + } + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); + spin_unlock_bh(&mhi_cntrl->state_lock); + + return ret; +} diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 46236ffb528a..2880d2aa88b8 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -67,6 +67,11 @@ struct mhi_ep_db_info { * @cmd_ctx_host_pa: Physical address of host command context data structure * @chdb: Array of channel doorbell interrupt info * @event_lock: Lock for protecting event rings + * @list_lock: Lock for protecting state transition and channel doorbell lists + * @state_lock: Lock for protecting state transitions + * @st_transition_list: List of state transitions + * @wq: Dedicated workqueue for handling rings and state changes + * @state_work: State transition worker * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -100,6 +105,13 @@ struct mhi_ep_cntrl { struct mhi_ep_db_info chdb[4]; struct mutex event_lock; + spinlock_t list_lock; + spinlock_t state_lock; + + struct list_head st_transition_list; + + struct workqueue_struct *wq; + struct work_struct state_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 4799e71b082615445dc40ba0bbb86cbb76c24724 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:44 +0530 Subject: bus: mhi: ep: Add support for processing MHI endpoint interrupts Add support for processing MHI endpoint interrupts such as control interrupt, command interrupt and channel interrupt from the host. The interrupts will be generated in the endpoint device whenever host writes to the corresponding doorbell registers. The doorbell logic is handled inside the hardware internally. 
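The doorbell bookkeeping in this patch relies on a simple linear mapping worth spelling out: channel doorbell status is spread across four 32-bit registers, so register i, bit b corresponds to channel ID i * 32 + b. A sketch of the decode, using the constants from internal.h (variable names here are hypothetical):

/* Sketch: decode pending channel doorbells into channel IDs */
u32 i, b, ch_id;
unsigned long ch_int;

for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
	/* Service only bits that are both pending and unmasked */
	ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;

	for_each_set_bit(b, &ch_int, MHI_MASK_CH_LEN) {
		ch_id = i * MHI_MASK_CH_LEN + b;
		/* the host rang the doorbell for channel ch_id */
	}
}

mhi_ep_check_channel_interrupt() in the diff below implements this walk, then splices one ring item per rung channel onto ch_db_list for deferred processing.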
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-9-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 124 +++++++++++++++++++++++++++++++++++++++++++++- include/linux/mhi_ep.h | 4 ++ 2 files changed, 126 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index c912daf6dc65..4e82006bd83b 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -142,6 +143,112 @@ static void mhi_ep_state_worker(struct work_struct *work) } } +static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, + u32 ch_idx) +{ + struct mhi_ep_ring_item *item; + struct mhi_ep_ring *ring; + bool work = !!ch_int; + LIST_HEAD(head); + u32 i; + + /* First add the ring items to a local list */ + for_each_set_bit(i, &ch_int, 32) { + /* Channel index varies for each register: 0, 32, 64, 96 */ + u32 ch_id = ch_idx + i; + + ring = &mhi_cntrl->mhi_chan[ch_id].ring; + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->ring = ring; + list_add_tail(&item->node, &head); + } + + /* Now, splice the local list into ch_db_list and queue the work item */ + if (work) { + spin_lock(&mhi_cntrl->list_lock); + list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); + spin_unlock(&mhi_cntrl->list_lock); + } +} + +/* + * Channel interrupt statuses are contained in 4 registers each of 32bit length. + * For checking all interrupts, we need to loop through each registers and then + * check for bits set. + */ +static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 ch_int, ch_idx, i; + + /* Bail out if there is no channel doorbell interrupt */ + if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) + return; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + ch_idx = i * MHI_MASK_CH_LEN; + + /* Only process channel interrupt if the mask is enabled */ + ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; + if (ch_int) { + mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + mhi_cntrl->chdb[i].status); + } + } +} + +static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state) +{ + struct mhi_ep_state_transition *item; + + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->state = state; + spin_lock(&mhi_cntrl->list_lock); + list_add_tail(&item->node, &mhi_cntrl->st_transition_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); +} + +/* + * Interrupt handler that services interrupts raised by the host writing to + * MHICTRL and Command ring doorbell (CRDB) registers for state change and + * channel interrupts. 
+ */ +static irqreturn_t mhi_ep_irq(int irq, void *data) +{ + struct mhi_ep_cntrl *mhi_cntrl = data; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + u32 int_value; + + /* Acknowledge the ctrl interrupt */ + int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); + + /* Check for ctrl interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { + dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); + } + + /* Check for command doorbell interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) + dev_dbg(dev, "Processing command doorbell interrupt\n"); + + /* Check for channel interrupts */ + mhi_ep_check_channel_interrupt(mhi_cntrl); + + return IRQ_HANDLED; +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -338,7 +445,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_device *mhi_dev; int ret; - if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio) + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) return -EINVAL; ret = mhi_ep_chan_init(mhi_cntrl, config); @@ -360,6 +467,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, } INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); spin_lock_init(&mhi_cntrl->state_lock); spin_lock_init(&mhi_cntrl->list_lock); mutex_init(&mhi_cntrl->event_lock); @@ -375,12 +483,20 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, mhi_cntrl->index = ret; + irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); + ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, + "doorbell_irq", mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); + goto err_ida_free; + } + /* Allocate the controller device */ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); if (IS_ERR(mhi_dev)) { dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); ret = PTR_ERR(mhi_dev); - goto err_ida_free; + goto err_free_irq; } dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); @@ -397,6 +513,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, err_put_dev: put_device(&mhi_dev->dev); +err_free_irq: + free_irq(mhi_cntrl->irq, mhi_cntrl); err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); err_destroy_wq: @@ -416,6 +534,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) destroy_workqueue(mhi_cntrl->wq); + free_irq(mhi_cntrl->irq, mhi_cntrl); + kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 2880d2aa88b8..137bd3ee2e43 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -70,6 +70,7 @@ struct mhi_ep_db_info { * @list_lock: Lock for protecting state transition and channel doorbell lists * @state_lock: Lock for protecting state transitions * @st_transition_list: List of state transitions + * @ch_db_list: List of queued channel doorbells * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker * @raise_irq: CB function for raising IRQ to the host @@ -85,6 +86,7 @@ struct mhi_ep_db_info { * @chdb_offset: Channel doorbell offset set by the host * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index + * @irq: IRQ used by the endpoint controller */ 
struct mhi_ep_cntrl { struct device *cntrl_dev; @@ -109,6 +111,7 @@ struct mhi_ep_cntrl { spinlock_t state_lock; struct list_head st_transition_list; + struct list_head ch_db_list; struct workqueue_struct *wq; struct work_struct state_work; @@ -130,6 +133,7 @@ struct mhi_ep_cntrl { u32 chdb_offset; u32 erdb_offset; u32 index; + int irq; }; /** -- cgit v1.2.3 From fb3a26b7e8aff11e44d582604f61c38f63bd507c Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:45 +0530 Subject: bus: mhi: ep: Add support for powering up the MHI endpoint stack Add support for MHI endpoint power_up that includes initializing the MMIO and rings, caching the host MHI registers, and setting the MHI state to M0. After registering the MHI EP controller, the stack has to be powered up for usage. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-10-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 205 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 16 ++++ 2 files changed, 221 insertions(+) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 4e82006bd83b..20d579733486 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -17,6 +17,9 @@ #include #include "internal.h" +#define M0_WAIT_DELAY_MS 100 +#define M0_WAIT_COUNT 100 + static DEFINE_IDA(mhi_ep_cntrl_ida); static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, @@ -106,6 +109,154 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Update the number of event rings (NER) programmed by the host */ + mhi_ep_mmio_update_ner(mhi_cntrl); + + dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n", + mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + /* Get the channel context base pointer from host */ + mhi_ep_mmio_get_chc_base(mhi_cntrl); + + /* Allocate and map memory for caching host channel context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, + &mhi_cntrl->ch_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ch_ctx_cache, + ch_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ch_ctx_cache\n"); + return ret; + } + + /* Get the event context base pointer from host */ + mhi_ep_mmio_get_erc_base(mhi_cntrl); + + /* Allocate and map memory for caching host event context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, + &mhi_cntrl->ev_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ev_ctx_cache, + ev_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ev_ctx_cache\n"); + goto err_ch_ctx; + } + + /* Get the command context base pointer from host */ + mhi_ep_mmio_get_crc_base(mhi_cntrl); + + /* Allocate and map memory for caching host command context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, + &mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->cmd_ctx_cache, + cmd_ctx_host_size); + if (ret) { + 
dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n"); + goto err_ev_ctx; + } + + /* Initialize command ring */ + ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, + (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); + if (ret) { + dev_err(dev, "Failed to start the command ring\n"); + goto err_cmd_ctx; + } + + return ret; + +err_cmd_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + +err_ev_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + +err_ch_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); + + return ret; +} + +static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); +} + +static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) +{ + /* + * Doorbell interrupts are enabled when the corresponding channel gets started. + * Enabling all interrupts here triggers spurious irqs as some of the interrupts + * associated with hw channels always get triggered. + */ + mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); +} + +static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + bool mhi_reset; + u32 count = 0; + int ret; + + /* Wait for Host to set the M0 state */ + do { + msleep(M0_WAIT_DELAY_MS); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + /* Clear the MHI reset if host is in reset state */ + mhi_ep_mmio_clear_reset(mhi_cntrl); + dev_info(dev, "Detected Host reset while waiting for M0\n"); + } + count++; + } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT); + + if (state != MHI_STATE_M0) { + dev_err(dev, "Host failed to enter M0\n"); + return -ETIMEDOUT; + } + + ret = mhi_ep_cache_host_cfg(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to cache host config\n"); + return ret; + } + + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Enable all interrupts now */ + mhi_ep_enable_int(mhi_cntrl); + + return 0; +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -249,6 +400,60 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) return IRQ_HANDLED; } +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + /* + * Mask all interrupts until the state machine is ready. Interrupts will + * be enabled later with mhi_ep_enable(). 
+ */ + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + mhi_ep_mmio_init(mhi_cntrl); + + mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Initialize command, channel and event rings */ + mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); + for (i = 0; i < mhi_cntrl->max_chan; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); + for (i = 0; i < mhi_cntrl->event_rings; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); + + mhi_cntrl->mhi_state = MHI_STATE_RESET; + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + goto err_free_event; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint\n"); + goto err_free_event; + } + + enable_irq(mhi_cntrl->irq); + mhi_cntrl->enabled = true; + + return 0; + +err_free_event: + kfree(mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_power_up); + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 137bd3ee2e43..3b065f82fbeb 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -65,6 +65,9 @@ struct mhi_ep_db_info { * @ch_ctx_host_pa: Physical address of host channel context data structure * @ev_ctx_host_pa: Physical address of host event context data structure * @cmd_ctx_host_pa: Physical address of host command context data structure + * @ch_ctx_cache_phys: Physical address of the host channel context cache + * @ev_ctx_cache_phys: Physical address of the host event context cache + * @cmd_ctx_cache_phys: Physical address of the host command context cache * @chdb: Array of channel doorbell interrupt info * @event_lock: Lock for protecting event rings * @list_lock: Lock for protecting state transition and channel doorbell lists @@ -87,6 +90,7 @@ struct mhi_ep_db_info { * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index * @irq: IRQ used by the endpoint controller + * @enabled: Check if the endpoint controller is enabled or not */ struct mhi_ep_cntrl { struct device *cntrl_dev; @@ -104,6 +108,9 @@ struct mhi_ep_cntrl { u64 ch_ctx_host_pa; u64 ev_ctx_host_pa; u64 cmd_ctx_host_pa; + phys_addr_t ch_ctx_cache_phys; + phys_addr_t ev_ctx_cache_phys; + phys_addr_t cmd_ctx_cache_phys; struct mhi_ep_db_info chdb[4]; struct mutex event_lock; @@ -134,6 +141,7 @@ struct mhi_ep_cntrl { u32 erdb_offset; u32 index; int irq; + bool enabled; }; /** @@ -228,4 +236,12 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, */ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_power_up - Power up the MHI endpoint stack + * @mhi_cntrl: MHI Endpoint controller + * + * Return: 0 if power up succeeds, a negative error code otherwise. 
+ */ +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); + #endif -- cgit v1.2.3 From 5d507ee04894e166f8c5a29f05c6b06ce91d5833 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:46 +0530 Subject: bus: mhi: ep: Add support for powering down the MHI endpoint stack Add support for MHI endpoint power_down that includes stopping all available channels, destroying the channels, resetting the event and transfer rings and freeing the host cache. The stack will be powered down whenever the physical bus link goes down. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-11-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 6 ++++ 2 files changed, 84 insertions(+) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 20d579733486..968025e4d3ac 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -22,6 +22,8 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_destroy_device(struct device *dev, void *data); + static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, struct mhi_ring_element *el, bool bei) { @@ -400,6 +402,68 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) return IRQ_HANDLED; } +static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_ring *ch_ring, *ev_ring; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int i; + + /* Stop all the channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to client drivers */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mutex_unlock(&mhi_chan->lock); + } + + flush_workqueue(mhi_cntrl->wq); + + /* Destroy devices associated with all channels */ + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); + + /* Stop and reset the transfer rings */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + ch_ring = &mhi_cntrl->mhi_chan[i].ring; + mutex_lock(&mhi_chan->lock); + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + mutex_unlock(&mhi_chan->lock); + } + + /* Stop and reset the event rings */ + for (i = 0; i < mhi_cntrl->event_rings; i++) { + ev_ring = &mhi_cntrl->mhi_event[i].ring; + if (!ev_ring->started) + continue; + + mutex_lock(&mhi_cntrl->event_lock); + mhi_ep_ring_reset(mhi_cntrl, ev_ring); + mutex_unlock(&mhi_cntrl->event_lock); + } + + /* Stop and reset the command ring */ + mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); + + mhi_ep_free_host_cfg(mhi_cntrl); + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + + mhi_cntrl->enabled = false; +} + int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -454,6 +518,16 @@ err_free_event: } EXPORT_SYMBOL_GPL(mhi_ep_power_up); +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) +{ + if (mhi_cntrl->enabled) + mhi_ep_abort_transfer(mhi_cntrl); + + kfree(mhi_cntrl->mhi_event); + disable_irq(mhi_cntrl->irq); +} +EXPORT_SYMBOL_GPL(mhi_ep_power_down); + static void mhi_ep_release_device(struct device *dev) { struct 
mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -733,6 +807,10 @@ err_free_ch: } EXPORT_SYMBOL_GPL(mhi_ep_register_controller); +/* + * It is expected that the controller drivers will power down the MHI EP stack + * using "mhi_ep_power_down()" before calling this function to unregister themselves. + */ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) { struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 3b065f82fbeb..9da683e8302c 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -244,4 +244,10 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); */ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_power_down - Power down the MHI endpoint stack + * @mhi_cntrl: MHI controller + */ +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); + #endif -- cgit v1.2.3 From 7a97b6b47353c60dd1b53ada6180741437e377f2 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:47 +0530 Subject: bus: mhi: ep: Add support for handling MHI_RESET Add support for handling MHI_RESET in MHI endpoint stack. MHI_RESET will be issued by the host during shutdown and during error scenario so that it can recover the endpoint device without restarting the whole device. MHI_RESET handling involves resetting the internal MHI registers, data structures, state machines, resetting all channels/rings and setting MHICTRL.RESET bit to 0. Additionally the device will also move to READY state if the reset was due to SYS_ERR. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-12-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 2 ++ 2 files changed, 55 insertions(+) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 968025e4d3ac..d36708d43eb6 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -381,6 +381,7 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) struct device *dev = &mhi_cntrl->mhi_dev->dev; enum mhi_state state; u32 int_value; + bool mhi_reset; /* Acknowledge the ctrl interrupt */ int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); @@ -389,6 +390,14 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) /* Check for ctrl interrupt */ if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + dev_info(dev, "Host triggered MHI reset!\n"); + disable_irq_nosync(mhi_cntrl->irq); + schedule_work(&mhi_cntrl->reset_work); + return IRQ_HANDLED; + } + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); } @@ -464,6 +473,49 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) mhi_cntrl->enabled = false; } +static void mhi_ep_reset_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state cur_state; + int ret; + + mhi_ep_abort_transfer(mhi_cntrl); + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ + mhi_ep_mmio_reset(mhi_cntrl); + cur_state = mhi_cntrl->mhi_state; + spin_unlock_bh(&mhi_cntrl->state_lock); + + /* + * Only proceed further if the reset is due to SYS_ERR. 
The host will + * issue reset during shutdown also and we don't need to do re-init in + * that case. + */ + if (cur_state == MHI_STATE_SYS_ERR) { + mhi_ep_mmio_init(mhi_cntrl); + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + return; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); + return; + } + + enable_irq(mhi_cntrl->irq); + } +} + int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -738,6 +790,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, } INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 9da683e8302c..2f31a54c205f 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -76,6 +76,7 @@ struct mhi_ep_db_info { * @ch_db_list: List of queued channel doorbells * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker + * @reset_work: Worker for MHI Endpoint reset * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -122,6 +123,7 @@ struct mhi_ep_cntrl { struct workqueue_struct *wq; struct work_struct state_work; + struct work_struct reset_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From e827569062a804c67b51930ce83a4cb886113cb7 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:49 +0530 Subject: bus: mhi: ep: Add support for processing command rings Add support for processing the command rings. The command ring is used by the host to issue channel-specific commands to the ep device. The following commands are supported: 1. Start channel 2. Stop channel 3. Reset channel Once the device receives the command doorbell interrupt from the host, it executes the command and generates a command completion event to the host in the primary event ring.
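To make the command flow concrete, the device-side loop can be sketched as a small stand-alone C model. This is only an illustration of the consume/dispatch/complete shape described above, not the driver code itself: the element layout and the CMD_*/CC_* values are simplified stand-ins for struct mhi_ring_element, MHI_PKT_TYPE_* and MHI_EV_CC_*.

#include <stdio.h>

enum cmd_type { CMD_START_CHAN = 16, CMD_STOP_CHAN = 17, CMD_RESET_CHAN = 18 };
enum completion_code { CC_SUCCESS = 1, CC_UNDEFINED_ERR = 16 };

struct cmd_element {
        enum cmd_type type;
        unsigned int ch_id;
};

/* Execute one command and return the completion code reported to the host */
static enum completion_code process_cmd(const struct cmd_element *el)
{
        switch (el->type) {
        case CMD_START_CHAN:
                printf("ch %u: ring started, state RUNNING, doorbell enabled\n", el->ch_id);
                return CC_SUCCESS;
        case CMD_STOP_CHAN:
                printf("ch %u: doorbell disabled, state STOP\n", el->ch_id);
                return CC_SUCCESS;
        case CMD_RESET_CHAN:
                printf("ch %u: ring reset, state DISABLED\n", el->ch_id);
                return CC_SUCCESS;
        default:
                return CC_UNDEFINED_ERR;
        }
}

int main(void)
{
        /* The host queued three commands and rang the command doorbell */
        struct cmd_element ring[] = {
                { CMD_START_CHAN, 0 }, { CMD_STOP_CHAN, 0 }, { CMD_RESET_CHAN, 0 },
        };
        unsigned int rd_offset = 0, wr_offset = 3;

        /* Consume elements up to the write offset, completing each one */
        for (; rd_offset != wr_offset; rd_offset++)
                printf("completion event on primary ring: code %d\n",
                       process_cmd(&ring[rd_offset]));

        return 0;
}

The real mhi_ep_process_cmd_ring() in the diff below additionally takes the per-channel mutex and updates the cached channel context (chcfg) before sending the completion event, but the overall shape is the same.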
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-14-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 190 +++++++++++++++++++++++++++++++++++++++++++++- include/linux/mhi_ep.h | 2 + 2 files changed, 191 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 706473ea4918..32ac567e0f67 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -22,6 +22,7 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); static int mhi_ep_destroy_device(struct device *dev, void *data); static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, @@ -111,6 +112,156 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_ring *ch_ring; + u32 tmp, ch_id; + int ret; + + ch_id = MHI_TRE_GET_CMD_CHID(el); + mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; + + switch (MHI_TRE_GET_CMD_TYPE(el)) { + case MHI_PKT_TYPE_START_CHAN_CMD: + dev_dbg(dev, "Received START command for channel (%u)\n", ch_id); + + mutex_lock(&mhi_chan->lock); + /* Initialize and configure the corresponding channel ring */ + if (!ch_ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, + (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); + if (ret) { + dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id); + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, + MHI_EV_CC_UNDEFINED_ERR); + if (ret) + dev_err(dev, "Error sending completion event: %d\n", ret); + + goto err_unlock; + } + } + + /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + + /* + * Create MHI device only during UL channel start. Since the MHI + * channels operate in a pair, we'll associate both UL and DL + * channels to the same device. + * + * We also need to check for mhi_dev != NULL because, the host + * will issue START_CHAN command during resume and we don't + * destroy the device during suspend. 
+ */ + if (!(ch_id % 2) && !mhi_chan->mhi_dev) { + ret = mhi_ep_create_device(mhi_cntrl, ch_id); + if (ret) { + dev_err(dev, "Error creating device for channel (%u)\n", ch_id); + mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + } + + /* Finally, enable DB for the channel */ + mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); + + break; + case MHI_PKT_TYPE_STOP_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Disable DB for the channel */ + mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); + + /* Send channel disconnect status to client drivers */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to STOP */ + mhi_chan->state = MHI_CH_STATE_STOP; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + case MHI_PKT_TYPE_RESET_CHAN_CMD: + dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Stop and reset the transfer ring */ + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + + /* Send channel disconnect status to client driver */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to DISABLED */ + mhi_chan->state = MHI_CH_STATE_DISABLED; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + default: + dev_err(dev, "Invalid command received: %lu for channel (%u)\n", + MHI_TRE_GET_CMD_TYPE(el), ch_id); + return -EINVAL; + } + + return 0; + +err_unlock: + mutex_unlock(&mhi_chan->lock); + + return ret; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; @@ -259,6 +410,40 @@ static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) return 0; } +static void mhi_ep_cmd_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring_element *el; + int ret; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + return; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) + return; + + /* + * Process command ring element till write offset. In case of an error, just try to + * process next element.
+ */ + while (ring->rd_offset != ring->wr_offset) { + el = &ring->ring_cache[ring->rd_offset]; + + ret = mhi_ep_process_cmd_ring(ring, el); + if (ret) + dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); + + mhi_ep_ring_inc_index(ring); + } +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -402,8 +587,10 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) } /* Check for command doorbell interrupt */ - if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { dev_dbg(dev, "Processing command doorbell interrupt\n"); + queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); + } /* Check for channel interrupts */ mhi_ep_check_channel_interrupt(mhi_cntrl); @@ -811,6 +998,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); + INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 2f31a54c205f..8c6406d9c51f 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -77,6 +77,7 @@ struct mhi_ep_db_info { * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker * @reset_work: Worker for MHI Endpoint reset + * @cmd_ring_work: Worker for processing command rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -124,6 +125,7 @@ struct mhi_ep_cntrl { struct workqueue_struct *wq; struct work_struct state_work; struct work_struct reset_work; + struct work_struct cmd_ring_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 530125889977365cb6db32d7d0bd84c9f54c8aab Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:50 +0530 Subject: bus: mhi: ep: Add support for reading from the host Data transfer between the host and the ep device happens over the transfer ring associated with each bi-directional channel pair. The host defines the transfer ring by allocating memory for it. The read and write pointer addresses of the transfer ring are stored in the channel context. Once the host places elements in the transfer ring, it increments the write pointer and rings the channel doorbell. The device will receive the doorbell interrupt and process the transfer ring elements. This commit adds support for reading the transfer ring elements up to the write pointer, incrementing the read pointer and finally sending the completion event to the host through the corresponding event ring.
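The offset arithmetic behind this can be shown with a stand-alone C sketch. The tre_loc/tre_size/tre_bytes_left bookkeeping mirrors the fields used by mhi_ep_read_channel() in the diff that follows; the TRE contents here are invented for the example, and the host-side DMA read is reduced to a memcpy():

#include <stdio.h>
#include <string.h>

struct tre {
        const char *data;       /* stands in for the host-side buffer pointer */
        size_t len;
};

int main(void)
{
        /* The host split one 11-byte packet across two chained TREs */
        struct tre ring[] = { { "hello ", 6 }, { "world", 5 } };
        size_t rd_offset = 0, wr_offset = 2;

        char buf[32];
        size_t len = sizeof(buf) - 1, buf_left = len;
        const char *tre_loc = NULL;
        size_t tre_size = 0, tre_bytes_left = 0, tr_len;

        while (rd_offset != wr_offset && buf_left) {
                /* Resume a partially consumed TRE, or load the next one */
                if (!tre_bytes_left) {
                        tre_loc = ring[rd_offset].data;
                        tre_size = ring[rd_offset].len;
                        tre_bytes_left = tre_size;
                }

                tr_len = buf_left < tre_bytes_left ? buf_left : tre_bytes_left;

                /* read_offset/write_offset arithmetic as in mhi_ep_read_channel() */
                memcpy(buf + (len - buf_left),
                       tre_loc + (tre_size - tre_bytes_left), tr_len);

                buf_left -= tr_len;
                tre_bytes_left -= tr_len;

                /* TRE fully consumed: send EOB/EOT as appropriate, bump rd_offset */
                if (!tre_bytes_left)
                        rd_offset++;
        }

        buf[len - buf_left] = '\0';
        printf("read %zu bytes: \"%s\"\n", len - buf_left, buf);
        return 0;
}

Running this prints read 11 bytes: "hello world". In the driver, the completion events (EOB for chained TREs with the IEOB flag set, EOT for the last TRE of a TD) are what allow the host to reclaim the consumed ring elements.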
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-15-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 121 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 9 ++++ 2 files changed, 130 insertions(+) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 32ac567e0f67..1e24eae4b446 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -262,6 +262,127 @@ err_unlock: return ret; } +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : + mhi_dev->ul_chan; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + return !!(ring->rd_offset == ring->wr_offset); +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); + +static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, + struct mhi_result *result, + u32 len) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t tr_len, read_offset, write_offset; + struct mhi_ring_element *el; + bool tr_done = false; + void *write_addr; + u64 read_addr; + u32 buf_left; + int ret; + + buf_left = len; + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + return -ENODEV; + } + + el = &ring->ring_cache[ring->rd_offset]; + + /* Check if there is data pending to be read from previous read operation */ + if (mhi_chan->tre_bytes_left) { + dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); + tr_len = min(buf_left, mhi_chan->tre_bytes_left); + } else { + mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); + mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); + mhi_chan->tre_bytes_left = mhi_chan->tre_size; + + tr_len = min(buf_left, mhi_chan->tre_size); + } + + read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; + write_offset = len - buf_left; + read_addr = mhi_chan->tre_loc + read_offset; + write_addr = result->buf_addr + write_offset; + + dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); + return ret; + } + + buf_left -= tr_len; + mhi_chan->tre_bytes_left -= tr_len; + + /* + * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been + * read completely: + * + * 1. Send completion event to the host based on the flags set in TRE. + * 2. Increment the local read offset of the transfer ring. + */ + if (!mhi_chan->tre_bytes_left) { + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. + */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. 
event\n"); + return ret; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + return ret; + } + } + + tr_done = true; + } + + mhi_ep_ring_inc_index(ring); + } + + result->bytes_xferd += tr_len; + } while (buf_left && !tr_done); + + return 0; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 8c6406d9c51f..fc7d197413eb 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -254,4 +254,13 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); */ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * + * Return: true if the queue is empty, false otherwise. + */ +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir); + #endif -- cgit v1.2.3 From 03c0bb8ec983f993a704417d73cc0a3511453d3e Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:51 +0530 Subject: bus: mhi: ep: Add support for processing channel rings Add support for processing the channel rings from host. For the channel ring associated with DL channel, the xfer callback will simply invoked. For the case of UL channel, the ring elements will be read in a buffer till the write pointer and later passed to the client driver using the xfer callback. The client drivers should provide the callbacks for both UL and DL channels during registration. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-16-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 2 + 2 files changed, 110 insertions(+) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 1e24eae4b446..e2ed10b4a9d2 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -383,6 +383,57 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, return 0; } +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct mhi_result result = {}; + u32 len = MHI_EP_DEFAULT_MTU; + struct mhi_ep_chan *mhi_chan; + int ret; + + mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + /* + * Bail out if transfer callback is not registered for the channel. + * This is most likely due to the client driver not loaded at this point. 
+ */ + if (!mhi_chan->xfer_cb) { + dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); + return -ENODEV; + } + + if (ring->ch_id % 2) { + /* DL channel */ + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } else { + /* UL channel */ + result.buf_addr = kzalloc(len, GFP_KERNEL); + if (!result.buf_addr) + return -ENOMEM; + + do { + ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + kfree(result.buf_addr); + return ret; + } + + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + result.bytes_xferd = 0; + memset(result.buf_addr, 0, len); + + /* Read until the ring becomes empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + + kfree(result.buf_addr); + } + + return 0; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; @@ -565,6 +616,60 @@ static void mhi_ep_cmd_ring_worker(struct work_struct *work) } } +static void mhi_ep_ch_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_item *itr, *tmp; + struct mhi_ring_element *el; + struct mhi_ep_ring *ring; + struct mhi_ep_chan *chan; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + /* Process each queued channel ring. In case of an error, just process next element. */ + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + ring = itr->ring; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + kfree(itr); + continue; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) { + kfree(itr); + continue; + } + + el = &ring->ring_cache[ring->rd_offset]; + chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + mutex_lock(&chan->lock); + dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); + ret = mhi_ep_process_ch_ring(ring, el); + if (ret) { + dev_err(dev, "Error processing ring for channel (%u): %d\n", + ring->ch_id, ret); + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + mutex_unlock(&chan->lock); + kfree(itr); + } +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -630,6 +735,8 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon spin_lock(&mhi_cntrl->list_lock); list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); } } @@ -1120,6 +1227,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); + INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index fc7d197413eb..eecc8f35d630 100644 --- a/include/linux/mhi_ep.h +++ 
b/include/linux/mhi_ep.h @@ -78,6 +78,7 @@ struct mhi_ep_db_info { * @state_work: State transition worker * @reset_work: Worker for MHI Endpoint reset * @cmd_ring_work: Worker for processing command rings + * @ch_ring_work: Worker for processing channel rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -126,6 +127,7 @@ struct mhi_ep_cntrl { struct work_struct state_work; struct work_struct reset_work; struct work_struct cmd_ring_work; + struct work_struct ch_ring_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 2d945a394d9c1c59d88397cb383b11216d018a6b Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:52 +0530 Subject: bus: mhi: ep: Add support for queueing SKBs to the host Add support for queueing SKBs to the host over the transfer ring of the relevant channel. The mhi_ep_queue_skb() API will be used by the client networking drivers to queue the SKBs to the host over MHI bus. The host will add ring elements to the transfer ring periodically for the device and the device will write SKBs to the ring elements. If a single SKB doesn't fit in a ring element (TRE), it will be placed in multiple ring elements and the overflow event will be sent for all ring elements except the last one. For the last ring element, the EOT event will be sent indicating the packet boundary. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-17-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 9 ++++++ 2 files changed, 91 insertions(+) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index e2ed10b4a9d2..660d1e9791d3 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -434,6 +434,88 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem return 0; } +/* TODO: Handle partially formed TDs */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) +{ + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ring_element *el; + u32 buf_left, read_offset; + struct mhi_ep_ring *ring; + enum mhi_ev_ccs code; + void *read_addr; + u64 write_addr; + size_t tr_len; + u32 tre_len; + int ret; + + buf_left = skb->len; + ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + mutex_lock(&mhi_chan->lock); + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + ret = -ENODEV; + goto err_exit; + } + + if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) { + dev_err(dev, "TRE not available!\n"); + ret = -ENOSPC; + goto err_exit; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = MHI_TRE_DATA_GET_LEN(el); + + tr_len = min(buf_left, tre_len); + read_offset = skb->len - buf_left; + read_addr = skb->data + read_offset; + write_addr = MHI_TRE_DATA_GET_PTR(el); + + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = 
mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); + goto err_exit; + } + + buf_left -= tr_len; + /* + * For all TREs queued by the host for DL channel, only the EOT flag will be set. + * If the packet doesn't fit into a single TRE, send the OVERFLOW event to + * the host so that the host can adjust the packet boundary to next TREs. Else send + * the EOT event to the host indicating the packet boundary. + */ + if (buf_left) + code = MHI_EV_CC_OVERFLOW; + else + code = MHI_EV_CC_EOT; + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + goto err_exit; + } + + mhi_ep_ring_inc_index(ring); + } while (buf_left); + + mutex_unlock(&mhi_chan->lock); + + return 0; + +err_exit: + mutex_unlock(&mhi_chan->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index eecc8f35d630..478aece17046 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -265,4 +265,13 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); */ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir); +/** + * mhi_ep_queue_skb - Send SKBs to host over MHI Endpoint + * @mhi_dev: Device associated with the DL channel + * @skb: SKB to be queued + * + * Return: 0 if the SKB has been sent successfully, a negative error code otherwise. + */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb); + #endif -- cgit v1.2.3 From c268c0a8a33047cd957fecc1349d09a68eb6ad9e Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:54 +0530 Subject: bus: mhi: ep: Add uevent support for module autoloading Add uevent support to the MHI endpoint bus so that the client drivers can be autoloaded by udev when the MHI endpoint devices get created. The client drivers are expected to provide MODULE_DEVICE_TABLE with the MHI id_table struct so that the alias can be exported. The MHI endpoint bus reuses the mhi_device_id structure of the MHI bus.
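The alias plumbing can be followed end to end with a small stand-alone sketch. MHI_EP_DEVICE_MODALIAS_FMT below is the format this patch defines, while the channel name is just an example; file2alias stamps the formatted string into the module's alias table, and the new uevent callback emits the identical MODALIAS variable that udev matches against to modprobe the right client driver:

#include <stdio.h>

#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s"

int main(void)
{
        const char *chan = "SAHARA";    /* example channel name */
        char alias[64];

        /* What do_mhi_ep_entry() emits for a MODULE_DEVICE_TABLE entry */
        snprintf(alias, sizeof(alias), MHI_EP_DEVICE_MODALIAS_FMT, chan);
        printf("module alias: %s\n", alias);

        /* What mhi_ep_uevent() reports when the device is created */
        printf("uevent:       MODALIAS=%s\n", alias);

        return 0;
}

Autoloading works only when the two strings are identical, which is why both sides share the same format define.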
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-19-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 9 +++++++++ include/linux/mod_devicetable.h | 2 ++ scripts/mod/file2alias.c | 10 ++++++++++ 3 files changed, 21 insertions(+) (limited to 'include') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index bae5f40ec15e..40109a79017a 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -1536,6 +1536,14 @@ void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) } EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); +static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + static int mhi_ep_match(struct device *dev, struct device_driver *drv) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -1562,6 +1570,7 @@ struct bus_type mhi_ep_bus_type = { .name = "mhi_ep", .dev_name = "mhi_ep", .match = mhi_ep_match, + .uevent = mhi_ep_uevent, }; static int __init mhi_ep_init(void) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 5da5d990ff58..549590e9c644 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -835,6 +835,8 @@ struct wmi_device_id { #define MHI_DEVICE_MODALIAS_FMT "mhi:%s" #define MHI_NAME_SIZE 32 +#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s" + /** * struct mhi_device_id - MHI device identification * @chan: MHI channel name diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 5258247d78ac..d9d6a31446ea 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1391,6 +1391,15 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias) return 1; } +/* Looks like: mhi_ep:S */ +static int do_mhi_ep_entry(const char *filename, void *symval, char *alias) +{ + DEF_FIELD_ADDR(symval, mhi_device_id, chan); + sprintf(alias, MHI_EP_DEVICE_MODALIAS_FMT, *chan); + + return 1; +} + /* Looks like: ishtp:{guid} */ static int do_ishtp_entry(const char *filename, void *symval, char *alias) { @@ -1519,6 +1528,7 @@ static const struct devtable devtable[] = { {"tee", SIZE_tee_client_device_id, do_tee_entry}, {"wmi", SIZE_wmi_device_id, do_wmi_entry}, {"mhi", SIZE_mhi_device_id, do_mhi_entry}, + {"mhi_ep", SIZE_mhi_device_id, do_mhi_ep_entry}, {"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry}, {"ssam", SIZE_ssam_device_id, do_ssam_entry}, {"dfl", SIZE_dfl_device_id, do_dfl_entry}, -- cgit v1.2.3 From ac3e62f51b3fbda78a44fff62ece8f11d8f08a25 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 7 Feb 2022 15:38:40 +0100 Subject: iio: core: Clarify the modes As part of a previous discussion with Jonathan Cameron [1], it appeared necessary to clarify the meaning of each mode so that new developers could better understand what they should or should not use, and when. The idea of renaming these modes has been set aside because naming is a big deal and requires a lot of thinking. So for now let's focus on correctly explaining what each mode implies.
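As a minimal illustration of the relationship these docs describe (the INDIO_* values below are the ones defined in this header; the checking logic is invented for the example), a driver declares every operating mode it can support, and the mode currently in use must always be one of them:

#include <stdio.h>

#define INDIO_DIRECT_MODE       0x01
#define INDIO_BUFFER_TRIGGERED  0x02
#define INDIO_BUFFER_SOFTWARE   0x04

int main(void)
{
        /* A driver supporting sysfs reads and a triggered buffer declares: */
        unsigned int modes = INDIO_DIRECT_MODE | INDIO_BUFFER_TRIGGERED;

        /* The core switches the current mode when the buffer is enabled */
        unsigned int currentmode = INDIO_BUFFER_TRIGGERED;

        if (currentmode & ~modes)
                printf("bug: current mode was never declared by the driver\n");
        else
                printf("currentmode 0x%02x is within the declared modes 0x%02x\n",
                       currentmode, modes);

        return 0;
}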
[1] https://lore.kernel.org/linux-iio/20210930165510.2295e6c4@jic23-huawei/ Suggested-by: Jonathan Cameron Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/r/20220207143840.707510-14-miquel.raynal@bootlin.com Signed-off-by: Jonathan Cameron --- include/linux/iio/iio.h | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 85cb924debd9..233d2e6b7721 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -315,7 +315,54 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan, s64 iio_get_time_ns(const struct iio_dev *indio_dev); unsigned int iio_get_time_res(const struct iio_dev *indio_dev); -/* Device operating modes */ +/* + * Device operating modes + * @INDIO_DIRECT_MODE: There is an access to either: + * a) The last single value available for devices that do not provide + * on-demand reads. + * b) A new value after performing an on-demand read otherwise. + * On most devices, this is a single-shot read. On some devices with data + * streams without an 'on-demand' function, this might also be the 'last value' + * feature. Above all, this mode internally means that we are not in any of the + * other modes, and sysfs reads should work. + * Device drivers should inform the core if they support this mode. + * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers. + * It indicates that an explicit trigger is required. This requests the core to + * attach a poll function when enabling the buffer, which is indicated by the + * _TRIGGERED suffix. + * The core will ensure this mode is set when registering a triggered buffer + * with iio_triggered_buffer_setup(). + * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered. + * No poll function can be attached because there is no triggered infrastructure + * we can use to cause capture. There is a kfifo that the driver will fill, but + * not "only one scan at a time". Typically, hardware will have a buffer that + * can hold multiple scans. Software may read one or more scans at a single time + * and push the available data to a Kfifo. This means the core will not attach + * any poll function when enabling the buffer. + * The core will ensure this mode is set when registering a simple kfifo buffer + * with devm_iio_kfifo_buffer_setup(). + * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode. + * Same as above but this time the buffer is not a kfifo where we have direct + * access to the data. Instead, the consumer driver must access the data through + * non software visible channels (or DMA when there is no demux possible in + * software) + * The core will ensure this mode is set when registering a dmaengine buffer + * with devm_iio_dmaengine_buffer_setup(). + * @INDIO_EVENT_TRIGGERED: Very unusual mode. + * Triggers usually refer to an external event which will start data capture. + * Here it is kind of the opposite as, a particular state of the data might + * produce an event which can be considered as an event. We don't necessarily + * have access to the data itself, but to the event produced. For example, this + * can be a threshold detector. The internal path of this mode is very close to + * the INDIO_BUFFER_TRIGGERED mode. + * The core will ensure this mode is set when registering a triggered event. + * @INDIO_HARDWARE_TRIGGERED: Very unusual mode. 
+ * Here, triggers can result in data capture and can be routed to multiple + * hardware components, which make them close to regular triggers in the way + * they must be managed by the core, but without the entire interrupts/poll + * functions burden. Interrupts are irrelevant as the data flow is hardware + * mediated and distributed. + */ #define INDIO_DIRECT_MODE 0x01 #define INDIO_BUFFER_TRIGGERED 0x02 #define INDIO_BUFFER_SOFTWARE 0x04 -- cgit v1.2.3 From b1c5f3085149e9643b125eb10aae0e74644d7dcc Mon Sep 17 00:00:00 2001 From: Ricky WU Date: Mon, 21 Mar 2022 11:18:30 +0000 Subject: misc: rtsx: add rts5261 efuse function Move rts5261_fetch_vendor_settings() into rts5261_init_from_hw() to make sure it is also called when resuming from S3 or D3, add more register settings when the efuse is set, and read the efuse settings into registers during the init flow. Signed-off-by: Ricky Wu Link: https://lore.kernel.org/r/18101ecb0f0749ccb9f564eda171ba40@realtek.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rts5261.c | 115 ++++++++++++++++++++------------ include/linux/rtsx_pci.h | 3 + 2 files changed, 65 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index a77585ab0f30..749cc5a46d13 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -57,40 +57,6 @@ static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage) 0xFF, driving[drive_sel][2]); } -static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr) -{ - struct pci_dev *pdev = pcr->pci; - u32 reg; - - /* 0x814~0x817 */ - pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg); - pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); - - if (!rts5261_vendor_setting_valid(reg)) { - /* Not support MMC default */ - pcr->extra_caps |= EXTRA_CAPS_NO_MMC; - pcr_dbg(pcr, "skip fetch vendor setting\n"); - return; - } - - if (!rts5261_reg_check_mmc_support(reg)) - pcr->extra_caps |= EXTRA_CAPS_NO_MMC; - - /* TO do: need to add rtd3 function */ - pcr->rtd3_en = rts5261_reg_to_rtd3(reg); - - if (rts5261_reg_check_reverse_socket(reg)) - pcr->flags |= PCR_REVERSE_SOCKET; - - /* 0x724~0x727 */ - pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg); - pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg); - - pcr->aspm_en = rts5261_reg_to_aspm(reg); - pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg); - pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg); -} - static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) { /* Set relink_time to 0 */ @@ -391,11 +357,11 @@ static void rts5261_process_ocp(struct rtsx_pcr *pcr) } -static int rts5261_init_from_hw(struct rtsx_pcr *pcr) +static void rts5261_init_from_hw(struct rtsx_pcr *pcr) { struct pci_dev *pdev = pcr->pci; - int retval; - u32 lval, i; + u32 lval1, lval2, i; + u16 setting_reg1, setting_reg2; u8 valid, efuse_valid, tmp; rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, @@ -418,26 +384,70 @@ static int rts5261_init_from_hw(struct rtsx_pcr *pcr) efuse_valid = ((tmp & 0x0C) >> 2); pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid); - if (efuse_valid == 0) { - retval = pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval); - if (retval != 0) - pcr_dbg(pcr, "read 0x814 DW fail\n"); - pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval); - /* 0x816 */ - valid = (u8)((lval >> 16) & 0x03); - pcr_dbg(pcr, "0x816: %d\n", valid); - } + pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2); + /*
0x816 */ + valid = (u8)((lval2 >> 16) & 0x03); + rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, REG_EFUSE_POR, 0); pcr_dbg(pcr, "Disable efuse por!\n"); - pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval); - lval = lval & 0x00FFFFFF; - retval = pci_write_config_dword(pdev, PCR_SETTING_REG2, lval); - if (retval != 0) - pcr_dbg(pcr, "write config fail\n"); + if (efuse_valid == 2 || efuse_valid == 3) { + if (valid == 3) { + /* Bypass efuse */ + setting_reg1 = PCR_SETTING_REG1; + setting_reg2 = PCR_SETTING_REG2; + } else { + /* Use efuse data */ + setting_reg1 = PCR_SETTING_REG4; + setting_reg2 = PCR_SETTING_REG5; + } + } else if (efuse_valid == 0) { + // default + setting_reg1 = PCR_SETTING_REG1; + setting_reg2 = PCR_SETTING_REG2; + } + + pci_read_config_dword(pdev, setting_reg2, &lval2); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2); + + if (!rts5261_vendor_setting_valid(lval2)) { + /* Not support MMC default */ + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + + if (!rts5261_reg_check_mmc_support(lval2)) + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; - return retval; + pcr->rtd3_en = rts5261_reg_to_rtd3(lval2); + + if (rts5261_reg_check_reverse_socket(lval2)) + pcr->flags |= PCR_REVERSE_SOCKET; + + pci_read_config_dword(pdev, setting_reg1, &lval1); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1); + + pcr->aspm_en = rts5261_reg_to_aspm(lval1); + pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(lval1); + pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(lval1); + + if (setting_reg1 == PCR_SETTING_REG1) { + /* store setting */ + rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF)); + + pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1); + lval2 = lval2 & 0x00FFFFFF; + pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2); + } } static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) @@ -636,7 +646,6 @@ static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) } static const struct pcr_ops rts5261_pcr_ops = { - .fetch_vendor_settings = rtsx5261_fetch_vendor_settings, .turn_on_led = rts5261_turn_on_led, .turn_off_led = rts5261_turn_off_led, .extra_init_hw = rts5261_extra_init_hw, diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 3d780b44e678..534038d962e4 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1067,6 +1067,9 @@ #define PCR_SETTING_REG1 0x724 #define PCR_SETTING_REG2 0x814 #define PCR_SETTING_REG3 0x747 +#define PCR_SETTING_REG4 0x818 +#define PCR_SETTING_REG5 0x81C + #define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0) -- cgit v1.2.3 From bd32889e841c12533d09a1bd02bba932baa9ed8f Mon Sep 17 00:00:00 2001 From: Carlos Llamas Date: Fri, 29 Apr 2022 23:56:41 +0000 Subject: binder: add BINDER_GET_EXTENDED_ERROR ioctl Provide a userspace mechanism to pull precise error information upon failed operations. Extending the current error codes returned by the interfaces allows userspace to better determine the course of action. 
This could be for instance, retrying a failed transaction at a later point and thus offloading the error handling from the driver. Acked-by: Christian Brauner (Microsoft) Acked-by: Todd Kjos Signed-off-by: Carlos Llamas Link: https://lore.kernel.org/r/20220429235644.697372-3-cmllamas@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 60 +++++++++++++++++++++++++++++++++++++ drivers/android/binder_internal.h | 3 ++ include/uapi/linux/android/binder.h | 16 ++++++++++ 3 files changed, 79 insertions(+) (limited to 'include') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index f0690d46caa1..4c2caf38e056 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -147,6 +147,13 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error, binder_stop_on_user_error = 2; \ } while (0) +#define binder_set_extended_error(ee, _id, _command, _param) \ + do { \ + (ee)->id = _id; \ + (ee)->command = _command; \ + (ee)->param = _param; \ + } while (0) + #define to_flat_binder_object(hdr) \ container_of(hdr, struct flat_binder_object, hdr) @@ -2708,6 +2715,24 @@ static struct binder_node *binder_get_node_refs_for_txn( return target_node; } +static void binder_set_txn_from_error(struct binder_transaction *t, int id, + uint32_t command, int32_t param) +{ + struct binder_thread *from = binder_get_txn_from_and_acq_inner(t); + + if (!from) { + /* annotation for sparse */ + __release(&from->proc->inner_lock); + return; + } + + /* don't override existing errors */ + if (from->ee.command == BR_OK) + binder_set_extended_error(&from->ee, id, command, param); + binder_inner_proc_unlock(from->proc); + binder_thread_dec_tmpref(from); +} + static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, @@ -2753,6 +2778,10 @@ static void binder_transaction(struct binder_proc *proc, e->offsets_size = tr->offsets_size; strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); + binder_inner_proc_lock(proc); + binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); + binder_inner_proc_unlock(proc); + if (reply) { binder_inner_proc_lock(proc); in_reply_to = thread->transaction_stack; @@ -3498,10 +3527,16 @@ err_invalid_target_handle: BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { + binder_set_txn_from_error(in_reply_to, t_debug_id, + return_error, return_error_param); thread->return_error.cmd = BR_TRANSACTION_COMPLETE; binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); } else { + binder_inner_proc_lock(proc); + binder_set_extended_error(&thread->ee, t_debug_id, + return_error, return_error_param); + binder_inner_proc_unlock(proc); thread->return_error.cmd = return_error; binder_enqueue_thread_work(thread, &thread->return_error.work); } @@ -4628,6 +4663,7 @@ static struct binder_thread *binder_get_thread_ilocked( thread->return_error.cmd = BR_OK; thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; thread->reply_error.cmd = BR_OK; + thread->ee.command = BR_OK; INIT_LIST_HEAD(&new_thread->waiting_thread_node); return thread; } @@ -5066,6 +5102,25 @@ static int binder_ioctl_get_freezer_info( return 0; } +static int binder_ioctl_get_extended_error(struct binder_thread *thread, + void __user *ubuf) +{ + struct binder_extended_error *ee = &thread->ee; + + binder_inner_proc_lock(thread->proc); + if (copy_to_user(ubuf, ee, sizeof(*ee))) { + binder_inner_proc_unlock(thread->proc); + return -EFAULT; + 
}
+
+	ee->id = 0;
+	ee->command = BR_OK;
+	ee->param = 0;
+	binder_inner_proc_unlock(thread->proc);
+
+	return 0;
+}
+
 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int ret;
@@ -5274,6 +5329,11 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		binder_inner_proc_unlock(proc);
 		break;
 	}
+	case BINDER_GET_EXTENDED_ERROR:
+		ret = binder_ioctl_get_extended_error(thread, ubuf);
+		if (ret < 0)
+			goto err;
+		break;
 	default:
 		ret = -EINVAL;
 		goto err;
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index cf70a104594d..8dc0bccf8513 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -480,6 +480,8 @@ struct binder_proc {
  *                    (only accessed by this thread)
  * @reply_error:      transaction errors reported by target thread
  *                    (protected by @proc->inner_lock)
+ * @ee:               extended error information from this thread
+ *                    (protected by @proc->inner_lock)
  * @wait:             wait queue for thread work
  * @stats:            per-thread statistics
  *                    (atomics, no lock needed)
@@ -504,6 +506,7 @@ struct binder_thread {
 	bool process_todo;
 	struct binder_error return_error;
 	struct binder_error reply_error;
+	struct binder_extended_error ee;
 	wait_queue_head_t wait;
 	struct binder_stats stats;
 	atomic_t tmp_ref;
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 11157fae8a8e..e6ee8cae303b 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -236,6 +236,21 @@ struct binder_frozen_status_info {
 	__u32            async_recv;
 };
 
+/* struct binder_extended_error - extended error information
+ * @id:      identifier for the failed operation
+ * @command: command as defined by binder_driver_return_protocol
+ * @param:   parameter holding a negative errno value
+ *
+ * Used with BINDER_GET_EXTENDED_ERROR. This extends the error information
+ * returned by the driver upon a failed operation. Userspace can pull this
+ * data to properly handle specific error scenarios.
+ */
+struct binder_extended_error {
+	__u32	id;
+	__u32	command;
+	__s32	param;
+};
+
 #define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
 #define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
 #define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
@@ -249,6 +264,7 @@ struct binder_frozen_status_info {
 #define BINDER_FREEZE			_IOW('b', 14, struct binder_freeze_info)
 #define BINDER_GET_FROZEN_INFO		_IOWR('b', 15, struct binder_frozen_status_info)
 #define BINDER_ENABLE_ONEWAY_SPAM_DETECTION	_IOW('b', 16, __u32)
+#define BINDER_GET_EXTENDED_ERROR	_IOWR('b', 17, struct binder_extended_error)
 
 /*
  * NOTE: Two special error codes you should check for when calling
-- 
cgit v1.2.3


From dbc2f62061c6bfba0aee93161ee3194dcee84bd0 Mon Sep 17 00:00:00 2001
From: Rafał Miłecki
Date: Fri, 29 Apr 2022 17:26:46 +0100
Subject: nvmem: core: support passing DT node in cell info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Some hardware may have NVMEM cells described in Device Tree using
individual nodes. Let drivers pass such nodes to the NVMEM subsystem so
they can later be used by NVMEM consumers.
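As an illustration of the new field, a provider driver might describe a
cell like this (a minimal sketch; the cell name, geometry and the
cell_np lookup are hypothetical, not taken from this patch):

	struct device_node *cell_np;	/* hypothetical: node found by the driver */
	struct nvmem_cell_info info = {
		.name	= "mac-address",	/* illustrative cell */
		.offset	= 0x0,
		.bytes	= 6,
		.np	= cell_np,	/* new: lets consumers resolve the cell by node */
	};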
Signed-off-by: Rafał Miłecki Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20220429162701.2222-2-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/core.c | 1 + include/linux/nvmem-consumer.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index f58d9bc7aa08..1e3c754efd0d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -467,6 +467,7 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, cell->bit_offset = info->bit_offset; cell->nbits = info->nbits; + cell->np = info->np; if (cell->nbits) cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index c0c0cefc3b92..980f9c9ac0bc 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -25,6 +25,7 @@ struct nvmem_cell_info { unsigned int bytes; unsigned int bit_offset; unsigned int nbits; + struct device_node *np; }; /** -- cgit v1.2.3 From 57ce2e406fe1fa005e8fdbf20936c791252ac7bc Mon Sep 17 00:00:00 2001 From: Nava kishore Manne Date: Sat, 23 Apr 2022 22:32:32 +0530 Subject: fpga: fix for coding style issues fixes the below checks reported by checkpatch.pl: - Lines should not end with a '(' - Alignment should match open parenthesis Signed-off-by: Nava kishore Manne Link: https://lore.kernel.org/r/20220423170235.2115479-3-nava.manne@xilinx.com Signed-off-by: Xu Yilun --- drivers/fpga/fpga-mgr.c | 9 +++++---- drivers/fpga/fpga-region.c | 6 +++--- drivers/fpga/of-fpga-region.c | 6 +++--- include/linux/fpga/fpga-region.h | 6 +++--- 4 files changed, 14 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index d49a9ce34568..24dee27c7897 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -148,11 +148,12 @@ static int fpga_mgr_write_init_buf(struct fpga_manager *mgr, int ret; mgr->state = FPGA_MGR_STATE_WRITE_INIT; - if (!mgr->mops->initial_header_size) + if (!mgr->mops->initial_header_size) { ret = fpga_mgr_write_init(mgr, info, NULL, 0); - else - ret = fpga_mgr_write_init( - mgr, info, buf, min(mgr->mops->initial_header_size, count)); + } else { + count = min(mgr->mops->initial_header_size, count); + ret = fpga_mgr_write_init(mgr, info, buf, count); + } if (ret) { dev_err(&mgr->dev, "Error preparing FPGA for writing\n"); diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index b0ac18de4885..485948e3c0db 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -18,9 +18,9 @@ static DEFINE_IDA(fpga_region_ida); static struct class *fpga_region_class; -struct fpga_region *fpga_region_class_find( - struct device *start, const void *data, - int (*match)(struct device *, const void *)) +struct fpga_region * +fpga_region_class_find(struct device *start, const void *data, + int (*match)(struct device *, const void *)) { struct device *dev; diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index 50b83057c048..a598d03626af 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -189,9 +189,9 @@ static int child_regions_with_firmware(struct device_node *overlay) * fpga_image_info struct if there is an image to program. * error code for invalid overlay. 
*/
-static struct fpga_image_info *of_fpga_region_parse_ov(
-						struct fpga_region *region,
-						struct device_node *overlay)
+static struct fpga_image_info *
+of_fpga_region_parse_ov(struct fpga_region *region,
+			struct device_node *overlay)
 {
 	struct device *dev = &region->dev;
 	struct fpga_image_info *info;
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 3b87f232425c..9d4d32909340 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -52,9 +52,9 @@ struct fpga_region {
 
 #define to_fpga_region(d) container_of(d, struct fpga_region, dev)
 
-struct fpga_region *fpga_region_class_find(
-	struct device *start, const void *data,
-	int (*match)(struct device *, const void *));
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+		       int (*match)(struct device *, const void *));
 
 int fpga_region_program_fpga(struct fpga_region *region);
-- 
cgit v1.2.3


From 58e4a2d27d3255e4e8c507fdc13734dccc9fc4c7 Mon Sep 17 00:00:00 2001
From: Dan Carpenter
Date: Fri, 17 Dec 2021 09:28:46 +0300
Subject: extcon: Fix extcon_get_extcon_dev() error handling

The extcon_get_extcon_dev() function returns error pointers on error,
NULL when it's a -EPROBE_DEFER situation, and ERR_PTR(-ENODEV) when the
CONFIG_EXTCON option is disabled. This is very complicated for the
callers to handle and a number of them had bugs that would lead to an
Oops.

In real life, there are two things which prevented crashes. First,
error pointers would only be returned if there was a bug in the caller
where they passed a NULL "extcon_name" and none of them do that.
Second, only two out of the eight drivers will build when CONFIG_EXTCON
is disabled.

The normal way to write this would be to return -EPROBE_DEFER directly
when appropriate and return NULL when CONFIG_EXTCON is disabled. Then
the error handling is simple and just looks like:

	dev->edev = extcon_get_extcon_dev(acpi_dev_name(adev));
	if (IS_ERR(dev->edev))
		return PTR_ERR(dev->edev);

For the two drivers which can build with CONFIG_EXTCON disabled,
extcon_get_extcon_dev() will now return NULL which is not treated as an
error and the probe will continue successfully. Those two drivers are
"typec_fusb302" and "max8997-battery".

In the original code, the typec_fusb302 driver had an 800ms hang in
tcpm_get_current_limit() but now that function is a no-op. For the
max8997-battery driver everything should continue working as is.
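Spelled out as a fuller probe-time fragment, the new contract looks
roughly like this (a sketch only; the extcon name and the fallback
policy for optional cables are illustrative):

	edev = extcon_get_extcon_dev("some-extcon");	/* name is illustrative */
	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* typically -EPROBE_DEFER */
	if (!edev)
		/* CONFIG_EXTCON is disabled: not an error for optional users */
		dev_dbg(dev, "no extcon support, continuing\n");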
Signed-off-by: Dan Carpenter Reviewed-by: Hans de Goede Reviewed-by: Heikki Krogerus Reviewed-by: Guenter Roeck Acked-by: Sebastian Reichel Signed-off-by: Chanwoo Choi --- drivers/extcon/extcon-axp288.c | 4 ++-- drivers/extcon/extcon.c | 4 +++- drivers/power/supply/axp288_charger.c | 17 ++++++++++------- drivers/power/supply/charger-manager.c | 7 ++----- drivers/power/supply/max8997_charger.c | 8 ++++---- drivers/usb/dwc3/drd.c | 9 ++------- drivers/usb/phy/phy-omap-otg.c | 4 ++-- drivers/usb/typec/tcpm/fusb302.c | 4 ++-- include/linux/extcon.h | 2 +- 9 files changed, 28 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index 7c6d5857ff25..180be768c215 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c @@ -394,8 +394,8 @@ static int axp288_extcon_probe(struct platform_device *pdev) if (adev) { info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev)); put_device(&adev->dev); - if (!info->id_extcon) - return -EPROBE_DEFER; + if (IS_ERR(info->id_extcon)) + return PTR_ERR(info->id_extcon); dev_info(dev, "controlling USB role\n"); } else { diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index a09e704fd0fa..adb957470c65 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -851,6 +851,8 @@ EXPORT_SYMBOL_GPL(extcon_set_property_capability); * @extcon_name: the extcon name provided with extcon_dev_register() * * Return the pointer of extcon device if success or ERR_PTR(err) if fail. + * NOTE: This function returns -EPROBE_DEFER so it may only be called from + * probe() functions. */ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { @@ -864,7 +866,7 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) if (!strcmp(sd->name, extcon_name)) goto out; } - sd = NULL; + sd = ERR_PTR(-EPROBE_DEFER); out: mutex_unlock(&extcon_dev_list_lock); return sd; diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index 19746e658a6a..15219ed43ce9 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -865,17 +865,20 @@ static int axp288_charger_probe(struct platform_device *pdev) info->regmap_irqc = axp20x->regmap_irqc; info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME); - if (info->cable.edev == NULL) { - dev_dbg(dev, "%s is not ready, probe deferred\n", - AXP288_EXTCON_DEV_NAME); - return -EPROBE_DEFER; + if (IS_ERR(info->cable.edev)) { + dev_err_probe(dev, PTR_ERR(info->cable.edev), + "extcon_get_extcon_dev(%s) failed\n", + AXP288_EXTCON_DEV_NAME); + return PTR_ERR(info->cable.edev); } if (acpi_dev_present(USB_HOST_EXTCON_HID, NULL, -1)) { info->otg.cable = extcon_get_extcon_dev(USB_HOST_EXTCON_NAME); - if (info->otg.cable == NULL) { - dev_dbg(dev, "EXTCON_USB_HOST is not ready, probe deferred\n"); - return -EPROBE_DEFER; + if (IS_ERR(info->otg.cable)) { + dev_err_probe(dev, PTR_ERR(info->otg.cable), + "extcon_get_extcon_dev(%s) failed\n", + USB_HOST_EXTCON_NAME); + return PTR_ERR(info->otg.cable); } dev_info(dev, "Using " USB_HOST_EXTCON_HID " extcon for usb-id\n"); } diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index d67edb760c94..92db79400a6a 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -985,13 +985,10 @@ static int charger_extcon_init(struct charger_manager *cm, cable->nb.notifier_call = charger_extcon_notifier; cable->extcon_dev = 
extcon_get_extcon_dev(cable->extcon_name); - if (IS_ERR_OR_NULL(cable->extcon_dev)) { + if (IS_ERR(cable->extcon_dev)) { pr_err("Cannot find extcon_dev for %s (cable: %s)\n", cable->extcon_name, cable->name); - if (cable->extcon_dev == NULL) - return -EPROBE_DEFER; - else - return PTR_ERR(cable->extcon_dev); + return PTR_ERR(cable->extcon_dev); } for (i = 0; i < ARRAY_SIZE(extcon_mapping); i++) { diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c index 127c73b0b3bd..1ec3535a257d 100644 --- a/drivers/power/supply/max8997_charger.c +++ b/drivers/power/supply/max8997_charger.c @@ -242,10 +242,10 @@ static int max8997_battery_probe(struct platform_device *pdev) dev_info(&pdev->dev, "couldn't get charger regulator\n"); } charger->edev = extcon_get_extcon_dev("max8997-muic"); - if (IS_ERR_OR_NULL(charger->edev)) { - if (!charger->edev) - return -EPROBE_DEFER; - dev_info(charger->dev, "couldn't get extcon device\n"); + if (IS_ERR(charger->edev)) { + dev_err_probe(charger->dev, PTR_ERR(charger->edev), + "couldn't get extcon device: max8997-muic\n"); + return PTR_ERR(charger->edev); } if (!IS_ERR(charger->reg) && !IS_ERR_OR_NULL(charger->edev)) { diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index 8cad9e7d3368..4982edd13047 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -455,13 +455,8 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) * This device property is for kernel internal use only and * is expected to be set by the glue code. */ - if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) { - edev = extcon_get_extcon_dev(name); - if (!edev) - return ERR_PTR(-EPROBE_DEFER); - - return edev; - } + if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) + return extcon_get_extcon_dev(name); /* * Try to get an extcon device from the USB PHY controller's "port" diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index ee0863c6553e..6e6ef8c0bc7e 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c @@ -95,8 +95,8 @@ static int omap_otg_probe(struct platform_device *pdev) return -ENODEV; extcon = extcon_get_extcon_dev(config->extcon); - if (!extcon) - return -EPROBE_DEFER; + if (IS_ERR(extcon)) + return PTR_ERR(extcon); otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); if (!otg_dev) diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c index 72f9001b0792..96c55eaf3f80 100644 --- a/drivers/usb/typec/tcpm/fusb302.c +++ b/drivers/usb/typec/tcpm/fusb302.c @@ -1708,8 +1708,8 @@ static int fusb302_probe(struct i2c_client *client, */ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) { chip->extcon = extcon_get_extcon_dev(name); - if (!chip->extcon) - return -EPROBE_DEFER; + if (IS_ERR(chip->extcon)) + return PTR_ERR(chip->extcon); } chip->vbus = devm_regulator_get(chip->dev, "vbus"); diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 0c19010da77f..685401d94d39 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -296,7 +296,7 @@ static inline void devm_extcon_unregister_notifier_all(struct device *dev, static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) { - return ERR_PTR(-ENODEV); + return NULL; } static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node) -- cgit v1.2.3 From 42c4e3f670b97df0c9e23ca2f7080ff7cd591837 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Tue, 3 May 2022 14:19:24 
-0700 Subject: interconnect: qcom: sc8180x: Fix QUP0 nodes The QUP0 BCM relates to some internal property of the QUPs, and should be configured independently of the path to the QUP. In line with other platforms expose QUP_CORE endpoints in order allow this configuration. Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20220503211925.1022169-4-bjorn.andersson@linaro.org Signed-off-by: Georgi Djakov --- drivers/interconnect/qcom/sc8180x.c | 30 ++++++++++++++++++++++--- drivers/interconnect/qcom/sc8180x.h | 7 ++++++ include/dt-bindings/interconnect/qcom,sc8180x.h | 7 ++++++ 3 files changed, 41 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c index 467083661559..86500d05caa3 100644 --- a/drivers/interconnect/qcom/sc8180x.c +++ b/drivers/interconnect/qcom/sc8180x.c @@ -76,6 +76,9 @@ DEFINE_QNODE(mas_qnm_aggre2_noc, SC8180X_A2NOC_SNOC_MAS, 1, 16, SC8180X_SLAVE_SN DEFINE_QNODE(mas_qnm_gemnoc, SC8180X_MASTER_GEM_NOC_SNOC, 1, 8, SC8180X_SLAVE_PIMEM, SC8180X_SLAVE_OCIMEM, SC8180X_SLAVE_APPSS, SC8180X_SNOC_CNOC_SLV, SC8180X_SLAVE_TCU, SC8180X_SLAVE_QDSS_STM); DEFINE_QNODE(mas_qxm_pimem, SC8180X_MASTER_PIMEM, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM); DEFINE_QNODE(mas_xm_gic, SC8180X_MASTER_GIC, 1, 8, SC8180X_SLAVE_SNOC_GEM_NOC_GC, SC8180X_SLAVE_OCIMEM); +DEFINE_QNODE(mas_qup_core_0, SC8180X_MASTER_QUP_CORE_0, 1, 4, SC8180X_SLAVE_QUP_CORE_0); +DEFINE_QNODE(mas_qup_core_1, SC8180X_MASTER_QUP_CORE_1, 1, 4, SC8180X_SLAVE_QUP_CORE_1); +DEFINE_QNODE(mas_qup_core_2, SC8180X_MASTER_QUP_CORE_2, 1, 4, SC8180X_SLAVE_QUP_CORE_2); DEFINE_QNODE(slv_qns_a1noc_snoc, SC8180X_A1NOC_SNOC_SLV, 1, 32, SC8180X_A1NOC_SNOC_MAS); DEFINE_QNODE(slv_srvc_aggre1_noc, SC8180X_SLAVE_SERVICE_A1NOC, 1, 4); DEFINE_QNODE(slv_qns_a2noc_snoc, SC8180X_A2NOC_SNOC_SLV, 1, 16, SC8180X_A2NOC_SNOC_MAS); @@ -165,6 +168,9 @@ DEFINE_QNODE(slv_xs_pcie_2, SC8180X_SLAVE_PCIE_2, 1, 8); DEFINE_QNODE(slv_xs_pcie_3, SC8180X_SLAVE_PCIE_3, 1, 8); DEFINE_QNODE(slv_xs_qdss_stm, SC8180X_SLAVE_QDSS_STM, 1, 4); DEFINE_QNODE(slv_xs_sys_tcu_cfg, SC8180X_SLAVE_TCU, 1, 8); +DEFINE_QNODE(slv_qup_core_0, SC8180X_SLAVE_QUP_CORE_0, 1, 4); +DEFINE_QNODE(slv_qup_core_1, SC8180X_SLAVE_QUP_CORE_1, 1, 4); +DEFINE_QNODE(slv_qup_core_2, SC8180X_SLAVE_QUP_CORE_2, 1, 4); DEFINE_QBCM(bcm_acv, "ACV", false, &slv_ebi); DEFINE_QBCM(bcm_mc0, "MC0", false, &slv_ebi); @@ -174,7 +180,7 @@ DEFINE_QBCM(bcm_co0, "CO0", false, &slv_qns_cdsp_mem_noc); DEFINE_QBCM(bcm_ce0, "CE0", false, &mas_qxm_crypto); DEFINE_QBCM(bcm_cn0, "CN0", false, &mas_qnm_snoc, &slv_qhs_a1_noc_cfg, &slv_qhs_a2_noc_cfg, &slv_qhs_ahb2phy_refgen_center, &slv_qhs_ahb2phy_refgen_east, &slv_qhs_ahb2phy_refgen_west, &slv_qhs_ahb2phy_south, &slv_qhs_aop, &slv_qhs_aoss, &slv_qhs_camera_cfg, &slv_qhs_clk_ctl, &slv_qhs_compute_dsp, &slv_qhs_cpr_cx, &slv_qhs_cpr_mmcx, &slv_qhs_cpr_mx, &slv_qhs_crypto0_cfg, &slv_qhs_ddrss_cfg, &slv_qhs_display_cfg, &slv_qhs_emac_cfg, &slv_qhs_glm, &slv_qhs_gpuss_cfg, &slv_qhs_imem_cfg, &slv_qhs_ipa, &slv_qhs_mnoc_cfg, &slv_qhs_npu_cfg, &slv_qhs_pcie0_cfg, &slv_qhs_pcie1_cfg, &slv_qhs_pcie2_cfg, &slv_qhs_pcie3_cfg, &slv_qhs_pdm, &slv_qhs_pimem_cfg, &slv_qhs_prng, &slv_qhs_qdss_cfg, &slv_qhs_qspi_0, &slv_qhs_qspi_1, &slv_qhs_qupv3_east0, &slv_qhs_qupv3_east1, &slv_qhs_qupv3_west, &slv_qhs_sdc2, &slv_qhs_sdc4, &slv_qhs_security, &slv_qhs_snoc_cfg, &slv_qhs_spss_cfg, &slv_qhs_tcsr, &slv_qhs_tlmm_east, &slv_qhs_tlmm_south, &slv_qhs_tlmm_west, &slv_qhs_tsif, 
&slv_qhs_ufs_card_cfg, &slv_qhs_ufs_mem0_cfg, &slv_qhs_ufs_mem1_cfg, &slv_qhs_usb3_0, &slv_qhs_usb3_1, &slv_qhs_usb3_2, &slv_qhs_venus_cfg, &slv_qhs_vsense_ctrl_cfg, &slv_srvc_cnoc); DEFINE_QBCM(bcm_mm1, "MM1", false, &mas_qxm_camnoc_hf0_uncomp, &mas_qxm_camnoc_hf1_uncomp, &mas_qxm_camnoc_sf_uncomp, &mas_qxm_camnoc_hf0, &mas_qxm_camnoc_hf1, &mas_qxm_mdp0, &mas_qxm_mdp1); -DEFINE_QBCM(bcm_qup0, "QUP0", false, &mas_qhm_qup0, &mas_qhm_qup1, &mas_qhm_qup2); +DEFINE_QBCM(bcm_qup0, "QUP0", false, &mas_qup_core_0, &mas_qup_core_1, &mas_qup_core_2); DEFINE_QBCM(bcm_sh2, "SH2", false, &slv_qns_gem_noc_snoc); DEFINE_QBCM(bcm_mm2, "MM2", false, &mas_qxm_camnoc_sf, &mas_qxm_rot, &mas_qxm_venus0, &mas_qxm_venus1, &mas_qxm_venus_arm9, &slv_qns2_mem_noc); DEFINE_QBCM(bcm_sh3, "SH3", false, &mas_acm_apps); @@ -194,13 +200,11 @@ DEFINE_QBCM(bcm_sn15, "SN15", false, &mas_qnm_gemnoc); static struct qcom_icc_bcm * const aggre1_noc_bcms[] = { &bcm_sn3, &bcm_ce0, - &bcm_qup0, }; static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { &bcm_sn14, &bcm_ce0, - &bcm_qup0, }; static struct qcom_icc_bcm * const camnoc_virt_bcms[] = { @@ -503,6 +507,25 @@ static const struct qcom_icc_desc sc8180x_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; +static struct qcom_icc_bcm * const qup_virt_bcms[] = { + &bcm_qup0, +}; + +static struct qcom_icc_node *qup_virt_nodes[] = { + [MASTER_QUP_CORE_0] = &mas_qup_core_0, + [MASTER_QUP_CORE_1] = &mas_qup_core_1, + [MASTER_QUP_CORE_2] = &mas_qup_core_2, + [SLAVE_QUP_CORE_0] = &slv_qup_core_0, + [SLAVE_QUP_CORE_1] = &slv_qup_core_1, + [SLAVE_QUP_CORE_2] = &slv_qup_core_2, +}; + +static const struct qcom_icc_desc sc8180x_qup_virt = { + .nodes = qup_virt_nodes, + .num_nodes = ARRAY_SIZE(qup_virt_nodes), + .bcms = qup_virt_bcms, + .num_bcms = ARRAY_SIZE(qup_virt_bcms), +}; static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sc8180x-aggre1-noc", .data = &sc8180x_aggre1_noc }, @@ -515,6 +538,7 @@ static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sc8180x-ipa-virt", .data = &sc8180x_ipa_virt }, { .compatible = "qcom,sc8180x-mc-virt", .data = &sc8180x_mc_virt }, { .compatible = "qcom,sc8180x-mmss-noc", .data = &sc8180x_mmss_noc }, + { .compatible = "qcom,sc8180x-qup-virt", .data = &sc8180x_qup_virt }, { .compatible = "qcom,sc8180x-system-noc", .data = &sc8180x_system_noc }, { } }; diff --git a/drivers/interconnect/qcom/sc8180x.h b/drivers/interconnect/qcom/sc8180x.h index e70cf7032f80..2eafd35543c7 100644 --- a/drivers/interconnect/qcom/sc8180x.h +++ b/drivers/interconnect/qcom/sc8180x.h @@ -171,4 +171,11 @@ #define SC8180X_MASTER_OSM_L3_APPS 161 #define SC8180X_SLAVE_OSM_L3 162 +#define SC8180X_MASTER_QUP_CORE_0 163 +#define SC8180X_MASTER_QUP_CORE_1 164 +#define SC8180X_MASTER_QUP_CORE_2 165 +#define SC8180X_SLAVE_QUP_CORE_0 166 +#define SC8180X_SLAVE_QUP_CORE_1 167 +#define SC8180X_SLAVE_QUP_CORE_2 168 + #endif diff --git a/include/dt-bindings/interconnect/qcom,sc8180x.h b/include/dt-bindings/interconnect/qcom,sc8180x.h index 235b525d2803..e84cfec5afdd 100644 --- a/include/dt-bindings/interconnect/qcom,sc8180x.h +++ b/include/dt-bindings/interconnect/qcom,sc8180x.h @@ -182,4 +182,11 @@ #define SLAVE_MNOC_SF_MEM_NOC_DISPLAY 3 #define SLAVE_MNOC_HF_MEM_NOC_DISPLAY 4 +#define MASTER_QUP_CORE_0 0 +#define MASTER_QUP_CORE_1 1 +#define MASTER_QUP_CORE_2 2 +#define SLAVE_QUP_CORE_0 3 +#define SLAVE_QUP_CORE_1 4 +#define SLAVE_QUP_CORE_2 5 + #endif -- cgit v1.2.3 From e6d3c99adf54fc1dcd07729990dc32acd3be874b Mon Sep 17 00:00:00 2001 From: 
Abhyuday Godhasara
Date: Wed, 27 Apr 2022 00:48:03 -0700
Subject: driver: soc: xilinx: Update function prototype for xlnx_unregister_event

In the current implementation only a single callback is saved per
event, and the driver throws an error on an attempt to register
multiple callbacks for the same event. Until now, unregistering an
event therefore only required the event details and the callback
handler as parameters of xlnx_unregister_event().

As part of adding support for multiple callbacks per event, the
prototype of xlnx_unregister_event() also needs to change: to
unregister an event, the caller must now pass the event details, the
callback handler and the agent's private data.

Also modify the usage of xlnx_unregister_event() in the
xilinx/zynqmp_power.c driver as per the new implementation.

Signed-off-by: Abhyuday Godhasara
Link: https://lore.kernel.org/r/20220427074803.19009-3-abhyuday.godhasara@xilinx.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/soc/xilinx/xlnx_event_manager.c     | 58 +++++++++++++++++++----------
 drivers/soc/xilinx/zynqmp_power.c           |  7 ++--
 include/linux/firmware/xlnx-event-manager.h |  4 +-
 3 files changed, 45 insertions(+), 24 deletions(-)

(limited to 'include')

diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index f89000dc33a3..5dcb7665fe22 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -41,6 +41,8 @@ static int event_manager_availability = -EACCES;
 static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
 static int sgi_num = XLNX_EVENT_SGI_NUM;
 
+static bool is_need_to_unregister;
+
 /**
  * struct agent_cb - Registered callback function and private data.
  * @agent_data:	Data passed back to handler function.
@@ -189,6 +191,8 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun) struct agent_cb *cb_pos; struct agent_cb *cb_next; + is_need_to_unregister = false; + /* Check for existing entry in hash table for given cb_type */ hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) { if (eve_data->cb_type == PM_INIT_SUSPEND_CB) { @@ -203,6 +207,7 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun) /* remove an object from a hashtable */ hash_del(&eve_data->hentry); kfree(eve_data); + is_need_to_unregister = true; } } if (!is_callback_found) { @@ -214,7 +219,7 @@ static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun) } static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event, - event_cb_func_t cb_fun) + event_cb_func_t cb_fun, void *data) { bool is_callback_found = false; struct registered_event_data *eve_data; @@ -222,20 +227,28 @@ static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event, struct agent_cb *cb_pos; struct agent_cb *cb_next; + is_need_to_unregister = false; + /* Check for existing entry in hash table for given key id */ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { if (eve_data->key == key) { /* Delete the list of callback */ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { - if (cb_pos->eve_cb == cb_fun) { + if (cb_pos->eve_cb == cb_fun && + cb_pos->agent_data == data) { is_callback_found = true; list_del_init(&cb_pos->list); kfree(cb_pos); } } - /* remove an object from a HASH table */ - hash_del(&eve_data->hentry); - kfree(eve_data); + + /* Remove HASH table if callback list is empty */ + if (list_empty(&eve_data->cb_list_head)) { + /* remove an object from a HASH table */ + hash_del(&eve_data->hentry); + kfree(eve_data); + is_need_to_unregister = true; + } } } if (!is_callback_found) { @@ -307,7 +320,7 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons eve = event & (1 << pos); if (!eve) continue; - xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); } } } @@ -329,10 +342,10 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, cons eve = event & (1 << pos); if (!eve) continue; - xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); } } else { - xlnx_remove_cb_for_notify_event(node_id, event, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data); } return ret; } @@ -350,15 +363,18 @@ EXPORT_SYMBOL_GPL(xlnx_register_event); * @node_id: Node-Id related to event. * @event: Event Mask for the Error Event. * @cb_fun: Function pointer of callback function. + * @data: Pointer of agent's private data. * * Return: Returns 0 on successful unregistration else error code. 
*/ int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event, - event_cb_func_t cb_fun) + event_cb_func_t cb_fun, void *data) { - int ret; + int ret = 0; u32 eve, pos; + is_need_to_unregister = false; + if (event_manager_availability) return event_manager_availability; @@ -375,23 +391,26 @@ int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, co } else { /* Remove Node-Id/Event from hash table */ if (!xlnx_is_error_event(node_id)) { - xlnx_remove_cb_for_notify_event(node_id, event, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data); } else { for (pos = 0; pos < MAX_BITS; pos++) { eve = event & (1 << pos); if (!eve) continue; - xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun); + xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); } } - /* Un-register for Node-Id/Event combination */ - ret = zynqmp_pm_register_notifier(node_id, event, false, false); - if (ret) { - pr_err("%s() failed for 0x%x and 0x%x: %d\n", - __func__, node_id, event, ret); - return ret; + /* Un-register if list is empty */ + if (is_need_to_unregister) { + /* Un-register for Node-Id/Event combination */ + ret = zynqmp_pm_register_notifier(node_id, event, false, false); + if (ret) { + pr_err("%s() failed for 0x%x and 0x%x: %d\n", + __func__, node_id, event, ret); + return ret; + } } } @@ -447,7 +466,8 @@ static void xlnx_call_notify_cb_handler(const u32 *payload) list) { /* Remove already registered event from hash table */ xlnx_remove_cb_for_notify_event(payload[1], payload[2], - cb_pos->eve_cb); + cb_pos->eve_cb, + cb_pos->agent_data); } } } diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index 859dd31b6eff..78a8a7545d1e 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -208,7 +208,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev) GFP_KERNEL); if (!zynqmp_pm_init_suspend_work) { xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, - suspend_event_callback); + suspend_event_callback, NULL); return -ENOMEM; } event_registered = true; @@ -263,7 +263,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev) ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); if (ret) { if (event_registered) { - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback); + xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, + NULL); event_registered = false; } dev_err(&pdev->dev, "unable to create sysfs interface\n"); @@ -277,7 +278,7 @@ static int zynqmp_pm_remove(struct platform_device *pdev) { sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); if (event_registered) - xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback); + xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL); if (!rx_chan) mbox_free_channel(rx_chan); diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h index 3f87c4929d21..82e8254b0f80 100644 --- a/include/linux/firmware/xlnx-event-manager.h +++ b/include/linux/firmware/xlnx-event-manager.h @@ -17,7 +17,7 @@ int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, event_cb_func_t cb_fun, void *data); int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, - const u32 event, event_cb_func_t cb_fun); + const u32 event, event_cb_func_t cb_fun, void *data); #else static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, 
					const u32 event, const bool wake,
@@ -27,7 +27,7 @@ static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32
 }
 
 static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
-					const u32 event, event_cb_func_t cb_fun)
+					const u32 event, event_cb_func_t cb_fun, void *data)
 {
 	return -ENODEV;
 }
-- 
cgit v1.2.3


From 1359fcbe0f4aa2cd6ea684727a5a111eebeeed3a Mon Sep 17 00:00:00 2001
From: Ohad Sharabi
Date: Wed, 23 Feb 2022 11:47:05 +0200
Subject: habanalabs: add DRAM default page size to HW info

When using the device memory allocation API, the user ought to know the
default allocation page size.

Signed-off-by: Ohad Sharabi
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/habanalabs/common/habanalabs_ioctl.c | 1 +
 include/uapi/misc/habanalabs.h                    | 3 +++
 2 files changed, 4 insertions(+)

(limited to 'include')

diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index c13a3c2a7013..14c58579b9cd 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -76,6 +76,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
 	if (hw_ip.dram_size > PAGE_SIZE)
 		hw_ip.dram_enabled = 1;
 	hw_ip.dram_page_size = prop->dram_page_size;
+	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
 	hw_ip.num_of_events = prop->num_of_events;
 
 	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 1d6b4f0c4159..ae2441521467 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -409,6 +409,7 @@ enum hl_server_type {
 * @dram_page_size: The DRAM physical page size.
 * @number_of_user_interrupts: The number of interrupts that are available to the userspace
 *                             application to use. Relevant for Gaudi2 and later.
+ * @device_mem_alloc_default_page_size: default page size used in device memory allocation.
 */
 struct hl_info_hw_ip_info {
	__u64 sram_base_address;
@@ -436,6 +437,8 @@ struct hl_info_hw_ip_info {
	__u32 reserved3;
	__u16 number_of_user_interrupts;
	__u16 pad2;
+	__u64 reserved4;
+	__u64 device_mem_alloc_default_page_size;
 };
 
 struct hl_info_dram_usage {
-- 
cgit v1.2.3


From 050a6f349a09d3cefb14f4114bfa047b2c5b2a65 Mon Sep 17 00:00:00 2001
From: Ohad Sharabi
Date: Wed, 23 Feb 2022 13:37:08 +0200
Subject: habanalabs: add user API to get valid DRAM page sizes

Future devices will support multiple device memory page sizes. In
addition, an API was added to let the user control the device memory
allocation page size. This patch complements that API by informing the
user of the page sizes supported by the device.
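As a usage sketch for the opcode added below (assuming the existing
HL_IOCTL_INFO entry point and an already-open device fd; the error
handling is illustrative):

	struct hl_info_dev_memalloc_page_sizes sizes = {0};
	struct hl_info_args args = {
		.return_pointer = (__u64)(uintptr_t)&sizes,
		.return_size = sizeof(sizes),
		.op = HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES,
	};

	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return -errno;
	/* bit n set means pages of size (1ULL << n) are supported,
	 * e.g. bits 20 and 25 (0x2100000) mean 1MB and 32MB pages.
	 */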
Signed-off-by: Ohad Sharabi
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/habanalabs/common/habanalabs.h       |  2 +
 drivers/misc/habanalabs/common/habanalabs_ioctl.c | 24 +++++++++
 drivers/misc/habanalabs/gaudi/gaudi.c             |  7 +++
 drivers/misc/habanalabs/goya/goya.c               |  7 +++
 include/uapi/misc/habanalabs.h                    | 63 +++++++++++++----------
 5 files changed, 77 insertions(+), 26 deletions(-)

(limited to 'include')

diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 6eb35e4124c2..564797766f42 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -1304,6 +1304,7 @@ struct fw_load_mgr {
 * @get_stream_master_qid_arr: get pointer to stream masters QID array
 * @is_valid_dram_page_size: return true if page size is supported in device
 *                           memory allocation, otherwise false.
+ * @get_valid_dram_page_orders: get valid device memory allocation page orders
 */
 struct hl_asic_funcs {
	int (*early_init)(struct hl_device *hdev);
@@ -1432,6 +1433,7 @@ struct hl_asic_funcs {
	bool (*is_valid_dram_page_size)(u32 page_size);
	int (*mmu_get_real_page_size)(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
					u32 page_size, u32 *real_page_size, bool is_dram_addr);
+	void (*get_valid_dram_page_orders)(struct hl_info_dev_memalloc_page_sizes *info);
 };
 
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 14c58579b9cd..c6fe35ae1238 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -591,6 +591,27 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
 	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
 }
 
+static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+	struct hl_info_dev_memalloc_page_sizes info = {0};
+	struct hl_device *hdev = hpriv->hdev;
+	u32 max_size = args->return_size;
+
+	if ((!max_size) || (!out))
+		return -EINVAL;
+
+	/*
+	 * Future ASICs that will support multiple DRAM page sizes will support only "powers of 2"
+	 * pages (unlike some of the previous ASICs that support multiple page sizes).
+	 * For this reason, for all ASICs that do not support multiple page sizes, the function
+	 * will return an empty bitmask indicating that multiple page sizes are not supported.
+	 */
+	hdev->asic_funcs->get_valid_dram_page_orders(&info);
+
+	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ?
-EFAULT : 0; +} + static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { @@ -641,6 +662,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_RAZWI_EVENT: return razwi_info(hpriv, args); + case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES: + return dev_mem_alloc_page_sizes_info(hpriv, args); + default: break; } diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 5979434d1905..64cb195bc26e 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -9378,6 +9378,12 @@ static u32 *gaudi_get_stream_master_qid_arr(void) return gaudi_stream_master; } +static void gaudi_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info) +{ + /* set 0 since multiple pages are not supported */ + info->page_order_bitmask = 0; +} + static ssize_t infineon_ver_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hl_device *hdev = dev_get_drvdata(dev); @@ -9489,6 +9495,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr, .is_valid_dram_page_size = NULL, .mmu_get_real_page_size = hl_mmu_get_real_page_size, + .get_valid_dram_page_orders = gaudi_get_valid_dram_page_orders, }; /** diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index ec347bd3bb69..b18288070754 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5679,6 +5679,12 @@ static u32 *goya_get_stream_master_qid_arr(void) return NULL; } +static void goya_get_valid_dram_page_orders(struct hl_info_dev_memalloc_page_sizes *info) +{ + /* set 0 since multiple pages are not supported */ + info->page_order_bitmask = 0; +} + static const struct hl_asic_funcs goya_funcs = { .early_init = goya_early_init, .early_fini = goya_early_fini, @@ -5767,6 +5773,7 @@ static const struct hl_asic_funcs goya_funcs = { .get_stream_master_qid_arr = goya_get_stream_master_qid_arr, .is_valid_dram_page_size = NULL, .mmu_get_real_page_size = hl_mmu_get_real_page_size, + .get_valid_dram_page_orders = goya_get_valid_dram_page_orders, }; /* diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index ae2441521467..f474e7fb018d 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -348,33 +348,35 @@ enum hl_server_type { * The address which accessing it caused the razwi. * Razwi initiator. * Razwi cause, was it a page fault or MMU access error. 
+ * HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
 */
-#define HL_INFO_HW_IP_INFO		0
-#define HL_INFO_HW_EVENTS		1
-#define HL_INFO_DRAM_USAGE		2
-#define HL_INFO_HW_IDLE			3
-#define HL_INFO_DEVICE_STATUS		4
-#define HL_INFO_DEVICE_UTILIZATION	6
-#define HL_INFO_HW_EVENTS_AGGREGATE	7
-#define HL_INFO_CLK_RATE		8
-#define HL_INFO_RESET_COUNT		9
-#define HL_INFO_TIME_SYNC		10
-#define HL_INFO_CS_COUNTERS		11
-#define HL_INFO_PCI_COUNTERS		12
-#define HL_INFO_CLK_THROTTLE_REASON	13
-#define HL_INFO_SYNC_MANAGER		14
-#define HL_INFO_TOTAL_ENERGY		15
-#define HL_INFO_PLL_FREQUENCY		16
-#define HL_INFO_POWER			17
-#define HL_INFO_OPEN_STATS		18
-#define HL_INFO_DRAM_REPLACED_ROWS	21
-#define HL_INFO_DRAM_PENDING_ROWS	22
-#define HL_INFO_LAST_ERR_OPEN_DEV_TIME	23
-#define HL_INFO_CS_TIMEOUT_EVENT	24
-#define HL_INFO_RAZWI_EVENT		25
-
-#define HL_INFO_VERSION_MAX_LEN		128
-#define HL_INFO_CARD_NAME_MAX_LEN	16
+#define HL_INFO_HW_IP_INFO			0
+#define HL_INFO_HW_EVENTS			1
+#define HL_INFO_DRAM_USAGE			2
+#define HL_INFO_HW_IDLE				3
+#define HL_INFO_DEVICE_STATUS			4
+#define HL_INFO_DEVICE_UTILIZATION		6
+#define HL_INFO_HW_EVENTS_AGGREGATE		7
+#define HL_INFO_CLK_RATE			8
+#define HL_INFO_RESET_COUNT			9
+#define HL_INFO_TIME_SYNC			10
+#define HL_INFO_CS_COUNTERS			11
+#define HL_INFO_PCI_COUNTERS			12
+#define HL_INFO_CLK_THROTTLE_REASON		13
+#define HL_INFO_SYNC_MANAGER			14
+#define HL_INFO_TOTAL_ENERGY			15
+#define HL_INFO_PLL_FREQUENCY			16
+#define HL_INFO_POWER				17
+#define HL_INFO_OPEN_STATS			18
+#define HL_INFO_DRAM_REPLACED_ROWS		21
+#define HL_INFO_DRAM_PENDING_ROWS		22
+#define HL_INFO_LAST_ERR_OPEN_DEV_TIME		23
+#define HL_INFO_CS_TIMEOUT_EVENT		24
+#define HL_INFO_RAZWI_EVENT			25
+#define HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES	26
+
+#define HL_INFO_VERSION_MAX_LEN			128
+#define HL_INFO_CARD_NAME_MAX_LEN		16
 
 /**
  * struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC
@@ -643,6 +645,15 @@ struct hl_info_razwi_event {
 	__u8 pad[2];
 };
 
+/**
+ * struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information.
+ * @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size
+ *                      (e.g. 0x2100000 means that 1MB and 32MB pages are supported).
+ */
+struct hl_info_dev_memalloc_page_sizes {
+	__u64 page_order_bitmask;
+};
+
 enum gaudi_dcores {
 	HL_GAUDI_WS_DCORE,
 	HL_GAUDI_WN_DCORE,
-- 
cgit v1.2.3


From fdec56c1a416c6947b1db22617da15cb89f46c6c Mon Sep 17 00:00:00 2001
From: Ofir Bitton
Date: Thu, 3 Mar 2022 09:43:10 +0200
Subject: habanalabs: expose compute ctx status through info ioctl

In order for the user to know whether it can try to open the device, we
expose the compute ctx state. The user can now tell if the context is
used by another process, or whether the device is still going through
cleanup or reset and will be available soon.
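A monitoring tool could consume the new fields roughly as follows (a
sketch; HL_INFO_OPEN_STATS is the existing opcode this patch extends,
and the retry policy is illustrative):

	struct hl_open_stats_info stats = {0};
	struct hl_info_args args = {
		.return_pointer = (__u64)(uintptr_t)&stats,
		.return_size = sizeof(stats),
		.op = HL_INFO_OPEN_STATS,
	};

	if (!ioctl(fd, HL_IOCTL_INFO, &args) &&
	    (stats.is_compute_ctx_active || stats.compute_ctx_in_release))
		sleep(1);	/* device busy or still releasing: retry the open later */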
Signed-off-by: Ofir Bitton
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/habanalabs/common/habanalabs_ioctl.c | 2 ++
 include/uapi/misc/habanalabs.h                    | 5 +++++
 2 files changed, 7 insertions(+)

(limited to 'include')

diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index c6fe35ae1238..bfb5cfe68110 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -498,6 +498,8 @@ static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
 	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
 		hdev->last_open_session_duration_jif);
 	open_stats_info.open_counter = hdev->open_counter;
+	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
+	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;
 
 	return copy_to_user(out, &open_stats_info,
 			min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index ae2441521467..ca2af5f98056 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -543,10 +543,15 @@ struct hl_pll_frequency_info {
 * struct hl_open_stats_info - device open statistics information
 * @open_counter: ever growing counter, increased on each successful dev open
 * @last_open_period_ms: duration (ms) device was open last time
+ * @is_compute_ctx_active: Whether there is an active compute context executing
+ * @compute_ctx_in_release: true if the current compute context is being released
 */
 struct hl_open_stats_info {
	__u64 open_counter;
	__u64 last_open_period_ms;
+	__u8 is_compute_ctx_active;
+	__u8 compute_ctx_in_release;
+	__u8 pad[6];
 };
 
 /**
-- 
cgit v1.2.3


From 5d1a0de2c778f369970dd50f6713e95068926a8b Mon Sep 17 00:00:00 2001
From: Ohad Sharabi
Date: Sun, 10 Apr 2022 11:19:42 +0300
Subject: habanalabs: add prefetch flag to the MAP operation

This patch lets the user decide whether the translations done in the
page tables will be fetched directly into the STLB right after the map.

We want to let the user control whether to perform a prefetch upon a
map operation. To do so, a memory flag called HL_MEM_PREFETCH was added
for use in the MAP ioctl; if it is set, the mappings are fetched
directly into the STLB after the map operation.
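From userspace the new flag would be used roughly like this (a sketch,
assuming the existing union hl_mem_args layout and a handle obtained
from a prior HL_MEM_OP_ALLOC):

	union hl_mem_args mem = {0};

	mem.in.op = HL_MEM_OP_MAP;
	mem.in.flags = HL_MEM_PREFETCH;		/* new: prefetch translations into the STLB */
	mem.in.map_device.handle = handle;	/* from a prior HL_MEM_OP_ALLOC */

	rc = ioctl(fd, HL_IOCTL_MEMORY, &mem);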
Signed-off-by: Ohad Sharabi
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/habanalabs/common/memory.c | 11 ++++++-----
 include/uapi/misc/habanalabs.h          |  1 +
 2 files changed, 7 insertions(+), 5 deletions(-)

(limited to 'include')

diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 326c2179628f..6face45c57e3 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -1250,11 +1250,12 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 	if (rc)
 		goto map_err;
 
-	/* already prefetch the relevant translations to the cache */
-	rc = hl_mmu_prefetch_cache_range(hdev, *vm_type, ctx->asid, ret_vaddr,
-					phys_pg_pack->total_size);
-	if (rc)
-		goto map_err;
+	if (args->flags & HL_MEM_PREFETCH) {
+		rc = hl_mmu_prefetch_cache_range(hdev, *vm_type, ctx->asid, ret_vaddr,
+						phys_pg_pack->total_size);
+		if (rc)
+			goto map_err;
+	}
 
 	mutex_unlock(&ctx->mmu_lock);
 
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index ca2af5f98056..3576bf2b4841 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -1134,6 +1134,7 @@ union hl_wait_cs_args {
 #define HL_MEM_SHARED		0x2
 #define HL_MEM_USERPTR		0x4
 #define HL_MEM_FORCE_HINT	0x8
+#define HL_MEM_PREFETCH		0x40
 
 /**
  * structure hl_mem_in - structure that handle input args for memory IOCTL
-- 
cgit v1.2.3


From 422ef171038d4855ffe938137039a8f3b3e84293 Mon Sep 17 00:00:00 2001
From: Tal Cohen
Date: Thu, 28 Apr 2022 13:45:18 +0300
Subject: habanalabs: add support for notification via eventfd

The driver will be able to send notification events to a user process
through the user's registered event file descriptor. The driver uses
this notification mechanism to inform the user that an event has
occurred.

A user thread can wait until a notification is received from the
driver. The driver stores the occurred events until the user reads
them, using HL_INFO_GET_EVENTS, a new opcode of the INFO ioctl.

The Gaudi-specific implementation includes sending a notification on a
TPC assertion event received from the f/w.
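End to end, a user process might consume these notifications as follows
(a sketch; error handling is omitted, the blocking read is illustrative,
and handle_tpc_assert() is a hypothetical handler):

	int efd = eventfd(0, 0);
	__u64 cnt, mask = 0;
	struct hl_info_args args = { .op = HL_INFO_REGISTER_EVENTFD, .eventfd = efd };

	ioctl(fd, HL_IOCTL_INFO, &args);		/* register the eventfd */

	read(efd, &cnt, sizeof(cnt));			/* block until the driver signals */

	args.op = HL_INFO_GET_EVENTS;			/* pull and clear the stored mask */
	args.return_pointer = (__u64)(uintptr_t)&mask;
	args.return_size = sizeof(mask);
	ioctl(fd, HL_IOCTL_INFO, &args);

	if (mask & HL_NOTIFIER_EVENT_TPC_ASSERT)
		handle_tpc_assert();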
Signed-off-by: Tal Cohen
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/habanalabs/common/device.c           | 52 ++++++++++++++++++
 drivers/misc/habanalabs/common/habanalabs.h       | 40 +++++++++-----
 drivers/misc/habanalabs/common/habanalabs_drv.c   |  9 ++++
 drivers/misc/habanalabs/common/habanalabs_ioctl.c | 65 +++++++++++++++++++++++
 drivers/misc/habanalabs/gaudi/gaudi.c             | 14 ++++-
 include/uapi/misc/habanalabs.h                    | 15 ++++++
 6 files changed, 182 insertions(+), 13 deletions(-)

(limited to 'include')

diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 15df89b31e1b..315510aaca35 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -285,6 +285,14 @@ static void hpriv_release(struct kref *ref)
 
 	hdev->compute_ctx_in_release = 0;
 
+	/* release the eventfd */
+	if (hpriv->notifier_event.eventfd) {
+		eventfd_ctx_put(hpriv->notifier_event.eventfd);
+		hpriv->notifier_event.eventfd = 0;
+	}
+
+	mutex_destroy(&hpriv->notifier_event.lock);
+
 	kfree(hpriv);
 }
 
@@ -355,6 +363,13 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
 	list_del(&hpriv->dev_node);
 	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
 out:
+	/* release the eventfd */
+	if (hpriv->notifier_event.eventfd) {
+		eventfd_ctx_put(hpriv->notifier_event.eventfd);
+		hpriv->notifier_event.eventfd = 0;
+	}
+
+	mutex_destroy(&hpriv->notifier_event.lock);
 	put_pid(hpriv->taskpid);
 
 	kfree(hpriv);
@@ -1506,6 +1521,43 @@ out_err:
 	return rc;
 }
 
+static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event)
+{
+	mutex_lock(&notifier_event->lock);
+	notifier_event->events_mask |= event;
+	if (notifier_event->eventfd)
+		eventfd_signal(notifier_event->eventfd, 1);
+
+	mutex_unlock(&notifier_event->lock);
+}
+
+/*
+ * hl_notifier_event_send_all - notify all user processes via eventfd
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @event: the occurred event
+ * Returns 0 for success or an error on failure.
+ */
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
+{
+	struct hl_fpriv *hpriv;
+
+	mutex_lock(&hdev->fpriv_list_lock);
+
+	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
+		hl_notifier_event_send(&hpriv->notifier_event, event);
+
+	mutex_unlock(&hdev->fpriv_list_lock);
+
+	/* control device */
+	mutex_lock(&hdev->fpriv_ctrl_list_lock);
+
+	list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
+		hl_notifier_event_send(&hpriv->notifier_event, event);
+
+	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
+}
+
 /*
  * hl_device_init - main initialization function for habanalabs device
  *
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 918e8a04acab..8977ec67dba7 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include <linux/eventfd.h>
 #include
 #include
 #include
@@ -1932,6 +1933,18 @@ struct hl_debug_params {
 	bool enable;
 };
 
+/**
+ * struct hl_notifier_event - holds the notifier data structure
+ * @eventfd: the event file descriptor to raise the notifications
+ * @lock: mutex lock to protect the notifier data flows
+ * @events_mask: indicates the bitmap events
+ */
+struct hl_notifier_event {
+	struct eventfd_ctx	*eventfd;
+	struct mutex		lock;
+	u64			events_mask;
+};
+
 /*
  * FILE PRIVATE STRUCTURE
  */
@@ -1943,24 +1956,25 @@ struct hl_debug_params {
 * @taskpid: current process ID.
* @ctx: current executing context. TODO: remove for multiple ctx per process * @ctx_mgr: context manager to handle multiple context for this FD. - * @cb_mgr: command buffer manager to handle multiple buffers for this FD. * @mem_mgr: manager descriptor for memory exportable via mmap + * @notifier_event: notifier eventfd towards user process * @debugfs_list: list of relevant ASIC debugfs. * @dev_node: node in the device list of file private data * @refcount: number of related contexts. * @restore_phase_mutex: lock for context switch and restore phase. */ struct hl_fpriv { - struct hl_device *hdev; - struct file *filp; - struct pid *taskpid; - struct hl_ctx *ctx; - struct hl_ctx_mgr ctx_mgr; - struct hl_mem_mgr mem_mgr; - struct list_head debugfs_list; - struct list_head dev_node; - struct kref refcount; - struct mutex restore_phase_mutex; + struct hl_device *hdev; + struct file *filp; + struct pid *taskpid; + struct hl_ctx *ctx; + struct hl_ctx_mgr ctx_mgr; + struct hl_mem_mgr mem_mgr; + struct hl_notifier_event notifier_event; + struct list_head debugfs_list; + struct list_head dev_node; + struct kref refcount; + struct mutex restore_phase_mutex; }; @@ -2676,8 +2690,8 @@ struct hl_reset_info { * @state_dump_specs: constants and dictionaries needed to dump system state. * @multi_cs_completion: array of multi-CS completion. * @clk_throttling: holds information about current/previous clock throttling events - * @reset_info: holds current device reset information. * @last_error: holds information about last session in which CS timeout or razwi error occurred. + * @reset_info: holds current device reset information. * @stream_master_qid_arr: pointer to array with QIDs of master streams. * @fw_major_version: major version of current loaded preboot * @dram_used_mem: current DRAM memory consumption. 
@@ -3071,6 +3085,8 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization); int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sensors_arr); +void hl_notifier_event_send_all(struct hl_device *hdev, u64 event); + int hl_sysfs_init(struct hl_device *hdev); void hl_sysfs_fini(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index 1210de39d661..c97173e9507d 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -134,6 +134,10 @@ int hl_device_open(struct inode *inode, struct file *filp) hpriv->hdev = hdev; filp->private_data = hpriv; hpriv->filp = filp; + hpriv->notifier_event.events_mask = 0; + hpriv->notifier_event.eventfd = 0; + + mutex_init(&hpriv->notifier_event.lock); mutex_init(&hpriv->restore_phase_mutex); kref_init(&hpriv->refcount); nonseekable_open(inode, filp); @@ -208,6 +212,7 @@ out_err: hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); filp->private_data = NULL; mutex_destroy(&hpriv->restore_phase_mutex); + mutex_destroy(&hpriv->notifier_event.lock); put_pid(hpriv->taskpid); kfree(hpriv); @@ -241,6 +246,10 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp) hpriv->hdev = hdev; filp->private_data = hpriv; hpriv->filp = filp; + hpriv->notifier_event.events_mask = 0; + hpriv->notifier_event.eventfd = 0; + + mutex_init(&hpriv->notifier_event.lock); nonseekable_open(inode, filp); hpriv->taskpid = get_task_pid(current, PIDTYPE_PID); diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index bfb5cfe68110..d1ef56a8d3ac 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -116,6 +116,25 @@ static int hw_events_info(struct hl_device *hdev, bool aggregate, return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; } +static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + int rc; + u32 max_size = args->return_size; + u64 events_mask; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((max_size < sizeof(u64)) || (!out)) + return -EINVAL; + + mutex_lock(&hpriv->notifier_event.lock); + events_mask = hpriv->notifier_event.events_mask; + hpriv->notifier_event.events_mask = 0; + mutex_unlock(&hpriv->notifier_event.lock); + + rc = copy_to_user(out, &events_mask, sizeof(u64)); + return rc; +} + static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { struct hl_device *hdev = hpriv->hdev; @@ -614,6 +633,43 @@ static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? 
 									-EFAULT : 0;
 }
 
+static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+	int rc;
+
+	/* Check if an eventfd is already registered for this process */
+	mutex_lock(&hpriv->notifier_event.lock);
+	if (hpriv->notifier_event.eventfd) {
+		mutex_unlock(&hpriv->notifier_event.lock);
+		return -EINVAL;
+	}
+
+	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
+	if (IS_ERR(hpriv->notifier_event.eventfd)) {
+		rc = PTR_ERR(hpriv->notifier_event.eventfd);
+		hpriv->notifier_event.eventfd = NULL;
+		mutex_unlock(&hpriv->notifier_event.lock);
+		return rc;
+	}
+
+	mutex_unlock(&hpriv->notifier_event.lock);
+	return 0;
+}
+
+static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+	mutex_lock(&hpriv->notifier_event.lock);
+	if (!hpriv->notifier_event.eventfd) {
+		mutex_unlock(&hpriv->notifier_event.lock);
+		return -EINVAL;
+	}
+
+	eventfd_ctx_put(hpriv->notifier_event.eventfd);
+	hpriv->notifier_event.eventfd = NULL;
+	mutex_unlock(&hpriv->notifier_event.lock);
+	return 0;
+}
+
 static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
 				struct device *dev)
 {
@@ -667,6 +723,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
 	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
 		return dev_mem_alloc_page_sizes_info(hpriv, args);
 
+	case HL_INFO_GET_EVENTS:
+		return events_info(hpriv, args);
+
 	default:
 		break;
 	}
@@ -717,6 +776,12 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
 	case HL_INFO_DRAM_PENDING_ROWS:
 		return dram_pending_rows_info(hpriv, args);
 
+	case HL_INFO_REGISTER_EVENTFD:
+		return eventfd_register(hpriv, args);
+
+	case HL_INFO_UNREGISTER_EVENTFD:
+		return eventfd_unregister(hpriv, args);
+
 	default:
 		dev_err(dev, "Invalid request %d\n", args->op);
 		rc = -EINVAL;
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 08cd60300b4f..1c388537de33 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -7879,7 +7879,6 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
 	case GAUDI_EVENT_MMU_PAGE_FAULT:
 	case GAUDI_EVENT_MMU_WR_PERM:
 	case GAUDI_EVENT_RAZWI_OR_ADC:
-	case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
 	case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
 	case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
 		fallthrough;
@@ -7899,6 +7898,19 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
 		hl_fw_unmask_irq(hdev, event_type);
 		break;
 
+	case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+		gaudi_print_irq_info(hdev, event_type, true);
+		gaudi_handle_qman_err(hdev, event_type);
+		hl_fw_unmask_irq(hdev, event_type);
+
+		/* On a TPC QM event, notify userspace of a TPC assertion.
+		 * There is no dedicated FW event for assertions yet, so the
+		 * FW raises a QM event instead; the SW upper layer inspects
+		 * an internal mapped area to tell the two cases apart.
+		 */
+		hl_notifier_event_send_all(hdev, HL_NOTIFIER_EVENT_TPC_ASSERT);
+		break;
+
 	case GAUDI_EVENT_RAZWI_OR_ADC_SW:
 		gaudi_print_irq_info(hdev, event_type, true);
 		goto reset_device;
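hl_notifier_event_send_all(), declared in habanalabs.h above and called from gaudi_handle_eqe(), is implemented outside the hunks shown here. A plausible sketch, assuming the driver keeps its open file-private structures on a per-device list (fpriv_list and fpriv_list_lock are assumed names) linked through the dev_node member of struct hl_fpriv:

/* Sketch only: fpriv_list/fpriv_list_lock are assumed per-device bookkeeping;
 * the in-tree implementation may differ in naming and locking.
 */
void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
{
	struct hl_fpriv *hpriv;

	mutex_lock(&hdev->fpriv_list_lock);

	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
		mutex_lock(&hpriv->notifier_event.lock);

		/* Record the event for a later HL_INFO_GET_EVENTS query */
		hpriv->notifier_event.events_mask |= event;

		/* Poke the process only if it registered an eventfd */
		if (hpriv->notifier_event.eventfd)
			eventfd_signal(hpriv->notifier_event.eventfd, 1);

		mutex_unlock(&hpriv->notifier_event.lock);
	}

	mutex_unlock(&hdev->fpriv_list_lock);
}

Accumulating into events_mask while signaling the eventfd is what lets a slow reader coalesce a burst of events into a single wakeup without losing track of which events fired.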
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 3576bf2b4841..52540d5b4fc9 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -349,6 +349,9 @@ enum hl_server_type {
 *                Razwi initiator.
 *                Razwi cause, was it a page fault or MMU access error.
 * HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
+ * HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications.
+ * HL_INFO_UNREGISTER_EVENTFD - Unregister the previously registered eventfd.
+ * HL_INFO_GET_EVENTS - Retrieve the events that have occurred since the last query.
 */
 #define HL_INFO_HW_IP_INFO		0
 #define HL_INFO_HW_EVENTS		1
@@ -374,6 +377,9 @@ enum hl_server_type {
 #define HL_INFO_CS_TIMEOUT_EVENT	24
 #define HL_INFO_RAZWI_EVENT		25
 #define HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES 26
+#define HL_INFO_REGISTER_EVENTFD	28
+#define HL_INFO_UNREGISTER_EVENTFD	29
+#define HL_INFO_GET_EVENTS		30
 
 #define HL_INFO_VERSION_MAX_LEN	128
 #define HL_INFO_CARD_NAME_MAX_LEN	16
@@ -679,6 +685,7 @@ enum gaudi_dcores {
 * @period_ms: Period value, in milliseconds, for utilization rate in range 100ms - 1000ms in 100 ms
 *             resolution. Currently not in use.
 * @pll_index: Index as defined in hl_<asic>_pll_index enumeration.
+ * @eventfd: event file descriptor for event notifications.
 * @pad: Padding to 64 bit.
 */
 struct hl_info_args {
@@ -691,6 +698,7 @@ struct hl_info_args {
 		__u32 ctx_id;
 		__u32 period_ms;
 		__u32 pll_index;
+		__u32 eventfd;
 	};
 
 	__u32 pad;
@@ -1390,6 +1398,13 @@ struct hl_debug_args {
 	__u32 ctx_id;
 };
 
+/*
+ * Notifier event values - for the notification mechanism and the HL_INFO_GET_EVENTS command
+ *
+ * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates a TPC assert event
+ */
+#define HL_NOTIFIER_EVENT_TPC_ASSERT	(1 << 0)
+
 /*
  * Various information operations such as:
  * - H/W IP information
-- cgit v1.2.3
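Taken together, the uAPI above suggests the following end-to-end usage pattern from a user process. This is a hypothetical sketch only: the /dev/hl0 node path and the installed header location are assumptions, error handling is omitted for brevity, and only HL_IOCTL_INFO and the HL_INFO_*/HL_NOTIFIER_EVENT_* values come from the patch itself.

/* Hypothetical userspace consumer of the eventfd notification uAPI. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>	/* struct hl_info_args, HL_IOCTL_INFO, HL_INFO_*, ... */

int main(void)
{
	struct hl_info_args args;
	uint64_t events_mask, efd_counter;
	int fd, efd;

	fd = open("/dev/hl0", O_RDWR);	/* assumed device node */
	efd = eventfd(0, 0);

	/* Register the eventfd; at most one registration per open FD. */
	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_REGISTER_EVENTFD;
	args.eventfd = efd;
	ioctl(fd, HL_IOCTL_INFO, &args);

	/* Block until the driver signals at least one event. */
	read(efd, &efd_counter, sizeof(efd_counter));

	/* Fetch and clear the accumulated events mask. */
	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_GET_EVENTS;
	args.return_pointer = (uint64_t)(uintptr_t)&events_mask;
	args.return_size = sizeof(events_mask);
	ioctl(fd, HL_IOCTL_INFO, &args);

	if (events_mask & HL_NOTIFIER_EVENT_TPC_ASSERT)
		printf("TPC assert event received\n");

	/* Unregister before closing. */
	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_UNREGISTER_EVENTFD;
	ioctl(fd, HL_IOCTL_INFO, &args);

	close(efd);
	close(fd);
	return 0;
}

Note that the read() consumes the eventfd counter, so a waiter wakes once per burst of signals; the driver-side events_mask is what records which events actually fired in between queries.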