author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2025-10-02 14:44:15 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2025-10-02 14:44:15 +0300
commit     2844b98acb6112cf1eb7a16bbdc7fe9b319c17d4 (patch)
tree       dca5ec4aeb4edeb0cbe0e04a4b24f7cc91f7a2e1 /drivers
parent     56fb05093756ed55ba1cdf5d432a68004da67860 (diff)
parent     72b82d56b82137a92a20e5584029d35408d810c2 (diff)
Merge v6.12.50 (linux-rolling-lts)
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
42 files changed, 313 insertions, 160 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index bd55c2356303..9600a96f9117 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2973,6 +2973,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) goto err_null_driver; } + /* + * Mark support for the scheduler's frequency invariance engine for + * drivers that implement target(), target_index() or fast_switch(). + */ + if (!cpufreq_driver->setpolicy) { + static_branch_enable_cpuslocked(&cpufreq_freq_invariance); + pr_debug("cpufreq: supports frequency invariance\n"); + } + ret = subsys_interface_register(&cpufreq_interface); if (ret) goto err_boost_unreg; @@ -2994,21 +3003,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) hp_online = ret; ret = 0; - /* - * Mark support for the scheduler's frequency invariance engine for - * drivers that implement target(), target_index() or fast_switch(). - */ - if (!cpufreq_driver->setpolicy) { - static_branch_enable_cpuslocked(&cpufreq_freq_invariance); - pr_debug("supports frequency invariance"); - } - pr_debug("driver %s up and running\n", driver_data->name); goto out; err_if_unreg: subsys_interface_unregister(&cpufreq_interface); err_boost_unreg: + if (!cpufreq_driver->setpolicy) + static_branch_disable_cpuslocked(&cpufreq_freq_invariance); remove_boost_sysfs_file(); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index b360dca2c69e..cc9731c3616c 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -41,7 +41,7 @@ /* * ABI version history is documented in linux/firewire-cdev.h. */ -#define FW_CDEV_KERNEL_VERSION 5 +#define FW_CDEV_KERNEL_VERSION 6 #define FW_CDEV_VERSION_EVENT_REQUEST2 4 #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 209871c219d6..e5d0d2b0d798 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -4317,6 +4317,23 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode, return desc; } +static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode, + struct device *consumer, + const char *con_id, + unsigned int idx, + enum gpiod_flags *flags, + unsigned long *lookupflags) +{ + struct gpio_desc *desc; + + desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags); + if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode)) + desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id, + idx, flags, lookupflags); + + return desc; +} + struct gpio_desc *gpiod_find_and_request(struct device *consumer, struct fwnode_handle *fwnode, const char *con_id, @@ -4335,8 +4352,8 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer, int ret = 0; scoped_guard(srcu, &gpio_devices_srcu) { - desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, - &flags, &lookupflags); + desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx, + &flags, &lookupflags); if (gpiod_not_found(desc) && platform_lookup_allowed) { /* * Either we are not using DT or ACPI, or their lookup diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 5dadc895e7f2..5e87ca2320a4 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -79,7 +79,7 @@ static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, si * 3. The Delays are often longer a lot when system resume from S3/S4. 
*/ if (j) - mdelay(j + 1); + msleep(j + 1); /* Wait for EDID offset to show up in mirror register */ vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7); diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index ed8626c73541..f0ae675581d9 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -726,8 +726,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev) if (hdmi_dev) { pdev = hdmi_dev->dev; - pci_set_drvdata(pdev, NULL); oaktrail_hdmi_i2c_exit(pdev); + pci_set_drvdata(pdev, NULL); iounmap(hdmi_dev->regs); kfree(hdmi_dev); pci_dev_put(pdev); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 9e05745d797d..4d93781f2dd7 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -40,8 +40,9 @@ static u32 scale(u32 source_val, { u64 target_val; - WARN_ON(source_min > source_max); - WARN_ON(target_min > target_max); + if (WARN_ON(source_min >= source_max) || + WARN_ON(target_min > target_max)) + return target_min; /* defensive */ source_val = clamp(source_val, source_min, source_max); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 20135a9bc026..0bc5b69ec636 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -865,8 +865,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue * if (IS_ERR_OR_NULL(queue)) return; - if (queue->entity.fence_context) - drm_sched_entity_destroy(&queue->entity); + drm_sched_entity_destroy(&queue->entity); if (queue->scheduler.ops) drm_sched_fini(&queue->scheduler); @@ -3458,11 +3457,6 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle) if (!group) return -EINVAL; - for (u32 i = 0; i < group->queue_count; i++) { - if (group->queues[i]) - drm_sched_entity_destroy(&group->queues[i]->entity); - } - mutex_lock(&sched->reset.lock); mutex_lock(&sched->lock); group->destroyed = true; diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c index 3438d392920f..8dae9a776685 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c @@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type) struct amdtp_hid_data *hid_data = hid->driver_data; struct amdtp_cl_data *cli_data = hid_data->cli_data; struct request_list *req_list = &cli_data->req_list; + struct amd_input_data *in_data = cli_data->in_data; + struct amd_mp2_dev *mp2; int i; + mp2 = container_of(in_data, struct amd_mp2_dev, in_data); + guard(mutex)(&mp2->lock); for (i = 0; i < cli_data->num_hid_devices; i++) { if (cli_data->hid_sensor_hubs[i] == hid) { struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL); @@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work) u8 report_id, node_type; u8 report_size = 0; + mp2 = container_of(in_data, struct amd_mp2_dev, in_data); + guard(mutex)(&mp2->lock); req_node = list_last_entry(&req_list->list, struct request_list, list); list_del(&req_node->list); current_index = req_node->current_index; @@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work) node_type = req_node->report_type; kfree(req_node); - mp2 = container_of(in_data, struct amd_mp2_dev, in_data); mp2_ops = mp2->mp2_ops; if (node_type == HID_FEATURE_REPORT) { report_size = mp2_ops->get_feat_rep(sensor_index, report_id, @@ 
-107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work) cli_data->cur_hid_dev = current_index; cli_data->sensor_requested_cnt[current_index] = 0; amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]); + if (!list_empty(&req_list->list)) + schedule_delayed_work(&cli_data->work, 0); } void amd_sfh_work_buffer(struct work_struct *work) @@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work) u8 report_size; int i; + mp2 = container_of(in_data, struct amd_mp2_dev, in_data); + guard(mutex)(&mp2->lock); for (i = 0; i < cli_data->num_hid_devices; i++) { if (cli_data->sensor_sts[i] == SENSOR_ENABLED) { - mp2 = container_of(in_data, struct amd_mp2_dev, in_data); report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i], cli_data->report_id[i], in_data); hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT, diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h index e5620d7db569..00308d8998d4 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h +++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h @@ -10,6 +10,7 @@ #ifndef AMD_SFH_COMMON_H #define AMD_SFH_COMMON_H +#include <linux/mutex.h> #include <linux/pci.h> #include "amd_sfh_hid.h" @@ -57,6 +58,8 @@ struct amd_mp2_dev { u32 mp2_acs; struct sfh_dev_status dev_en; struct work_struct work; + /* mp2 to protect data */ + struct mutex lock; u8 init_done; u8 rver; }; diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c index 0c28ca349bcd..9739f66e925c 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c @@ -405,6 +405,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i if (!privdata->cl_data) return -ENOMEM; + rc = devm_mutex_init(&pdev->dev, &privdata->lock); + if (rc) + return rc; + privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data; if (privdata->sfh1_1_ops) { if (boot_cpu_data.x86 >= 0x1A) diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 6b90d2c03e88..dd989b519f86 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -971,7 +971,10 @@ static int asus_input_mapping(struct hid_device *hdev, case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP); break; case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN); break; case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE); break; + case 0x4e: asus_map_key_clear(KEY_FN_ESC); break; + case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER); break; + case 0x8b: asus_map_key_clear(KEY_PROG1); break; /* ProArt Creator Hub key */ case 0x6b: asus_map_key_clear(KEY_F21); break; /* ASUS touchpad toggle */ case 0x38: asus_map_key_clear(KEY_PROG1); break; /* ROG key */ case 0xba: asus_map_key_clear(KEY_PROG2); break; /* Fn+C ASUS Splendid */ diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index a3e86930bf41..ef9bed2f2dcc 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -101,7 +101,7 @@ static int bt1_i2c_request_regs(struct dw_i2c_dev *dev) } #endif -static int txgbe_i2c_request_regs(struct dw_i2c_dev *dev) +static int dw_i2c_get_parent_regmap(struct dw_i2c_dev *dev) { dev->map = dev_get_regmap(dev->dev->parent, NULL); if (!dev->map) @@ -123,12 +123,15 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev) struct platform_device *pdev = to_platform_device(dev->dev); int ret; + if (device_is_compatible(dev->dev, "intel,xe-i2c")) + return dw_i2c_get_parent_regmap(dev); + 
switch (dev->flags & MODEL_MASK) { case MODEL_BAIKAL_BT1: ret = bt1_i2c_request_regs(dev); break; case MODEL_WANGXUN_SP: - ret = txgbe_i2c_request_regs(dev); + ret = dw_i2c_get_parent_regmap(dev); break; default: dev->base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index f49f78b69ab9..e74273bc078b 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -191,6 +191,7 @@ static u16 get_legacy_obj_type(u16 opcode) { switch (opcode) { case MLX5_CMD_OP_CREATE_RQ: + case MLX5_CMD_OP_CREATE_RMP: return MLX5_EVENT_QUEUE_TYPE_RQ; case MLX5_CMD_OP_CREATE_QP: return MLX5_EVENT_QUEUE_TYPE_QP; diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c index 1b0812f8bf84..af39b2379d53 100644 --- a/drivers/iommu/iommufd/fault.c +++ b/drivers/iommu/iommufd/fault.c @@ -415,7 +415,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd) fdno = get_unused_fd_flags(O_CLOEXEC); if (fdno < 0) { rc = fdno; - goto out_fput; + goto out_abort; } cmd->out_fault_id = fault->obj.id; @@ -431,8 +431,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd) return 0; out_put_fdno: put_unused_fd(fdno); -out_fput: - fput(filep); out_abort: iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj); diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index 649fe79d0f0c..6add737a6708 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -23,6 +23,7 @@ #include "iommufd_test.h" struct iommufd_object_ops { + size_t file_offset; void (*destroy)(struct iommufd_object *obj); void (*abort)(struct iommufd_object *obj); }; @@ -97,10 +98,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj) void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx, struct iommufd_object *obj) { - if (iommufd_object_ops[obj->type].abort) - iommufd_object_ops[obj->type].abort(obj); + const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type]; + + if (ops->file_offset) { + struct file **filep = ((void *)obj) + ops->file_offset; + + /* + * A file should hold a users refcount while the file is open + * and put it back in its release. The file should hold a + * pointer to obj in their private data. Normal fput() is + * deferred to a workqueue and can get out of order with the + * following kfree(obj). Using the sync version ensures the + * release happens immediately. During abort we require the file + * refcount is one at this point - meaning the object alloc + * function cannot do anything to allow another thread to take a + * refcount prior to a guaranteed success. 
+ */ + if (*filep) + __fput_sync(*filep); + } + + if (ops->abort) + ops->abort(obj); else - iommufd_object_ops[obj->type].destroy(obj); + ops->destroy(obj); iommufd_object_abort(ictx, obj); } @@ -498,6 +519,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx) } EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD); +#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj) \ + .file_offset = (offsetof(_struct, _filep) + \ + BUILD_BUG_ON_ZERO(!__same_type( \ + struct file *, ((_struct *)NULL)->_filep)) + \ + BUILD_BUG_ON_ZERO(offsetof(_struct, _obj))) + static const struct iommufd_object_ops iommufd_object_ops[] = { [IOMMUFD_OBJ_ACCESS] = { .destroy = iommufd_access_destroy_object, @@ -518,6 +545,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = { }, [IOMMUFD_OBJ_FAULT] = { .destroy = iommufd_fault_destroy, + IOMMUFD_FILE_OFFSET(struct iommufd_fault, filep, obj), }, #ifdef CONFIG_IOMMUFD_TEST [IOMMUFD_OBJ_SELFTEST] = { diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index be1505e8c536..7759531ccca7 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -433,6 +433,13 @@ static const struct sdhci_cdns_drv_data sdhci_elba_drv_data = { }, }; +static const struct sdhci_cdns_drv_data sdhci_eyeq_drv_data = { + .pltfm_data = { + .ops = &sdhci_cdns_ops, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }, +}; + static const struct sdhci_cdns_drv_data sdhci_cdns_drv_data = { .pltfm_data = { .ops = &sdhci_cdns_ops, @@ -595,6 +602,10 @@ static const struct of_device_id sdhci_cdns_match[] = { .compatible = "amd,pensando-elba-sd4hc", .data = &sdhci_elba_drv_data, }, + { + .compatible = "mobileye,eyeq-sd4hc", + .data = &sdhci_eyeq_drv_data, + }, { .compatible = "cdns,sd4hc" }, { /* sentinel */ } }; diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 2b7dd359f27b..8569178b66df 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -861,7 +861,6 @@ static int __maybe_unused rcar_can_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct rcar_can_priv *priv = netdev_priv(ndev); - u16 ctlr; int err; if (!netif_running(ndev)) @@ -873,12 +872,7 @@ static int __maybe_unused rcar_can_resume(struct device *dev) return err; } - ctlr = readw(&priv->regs->ctlr); - ctlr &= ~RCAR_CAN_CTLR_SLPM; - writew(ctlr, &priv->regs->ctlr); - ctlr &= ~RCAR_CAN_CTLR_CANM; - writew(ctlr, &priv->regs->ctlr); - priv->can.state = CAN_STATE_ERROR_ACTIVE; + rcar_can_start(ndev); netif_device_attach(ndev); netif_start_queue(ndev); diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 1b9501ee10de..ff39afc77d7d 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -813,6 +813,7 @@ static const struct net_device_ops hi3110_netdev_ops = { .ndo_open = hi3110_open, .ndo_stop = hi3110_stop, .ndo_start_xmit = hi3110_hard_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops hi3110_ethtool_ops = { diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 4311c1f0eafd..30b30fdbcae9 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -768,6 +768,7 @@ static const struct net_device_ops sun4ican_netdev_ops = { .ndo_open = sun4ican_open, .ndo_stop = sun4ican_close, .ndo_start_xmit = sun4ican_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops sun4ican_ethtool_ops = { diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c 
b/drivers/net/can/usb/etas_es58x/es58x_core.c index 71f24dc0a927..4fc9bed0d2e1 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -7,7 +7,7 @@ * * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. * Copyright (c) 2020 ETAS K.K.. All rights reserved. - * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr> + * Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org> */ #include <linux/unaligned.h> @@ -1977,6 +1977,7 @@ static const struct net_device_ops es58x_netdev_ops = { .ndo_stop = es58x_stop, .ndo_start_xmit = es58x_start_xmit, .ndo_eth_ioctl = can_eth_ioctl_hwts, + .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops es58x_ethtool_ops = { diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 41c0a1c399bf..1f9b915094e6 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -761,6 +761,7 @@ static const struct net_device_ops mcba_netdev_ops = { .ndo_open = mcba_usb_open, .ndo_stop = mcba_usb_close, .ndo_start_xmit = mcba_usb_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops mcba_ethtool_ops = { diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 59f7cd8ceb39..69c44f675ff1 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -111,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1; if (time_ref->ts_dev_2 < time_ref->ts_dev_1) - delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1; + delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1; time_ref->ts_total += delta_ts; } diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index fcd4505f4925..b1f18a840da7 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -685,18 +685,27 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) return 0; } -static int gswip_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static int gswip_port_setup(struct dsa_switch *ds, int port) { struct gswip_priv *priv = ds->priv; int err; if (!dsa_is_cpu_port(ds, port)) { - u32 mdio_phy = 0; - err = gswip_add_single_port_br(priv, port, true); if (err) return err; + } + + return 0; +} + +static int gswip_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct gswip_priv *priv = ds->priv; + + if (!dsa_is_cpu_port(ds, port)) { + u32 mdio_phy = 0; if (phydev) mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; @@ -1359,8 +1368,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port, int i; int err; + /* Operation not supported on the CPU port, don't throw errors */ if (!bridge) - return -EINVAL; + return 0; for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { if (priv->vlans[i].bridge == bridge) { @@ -1829,6 +1839,7 @@ static const struct phylink_mac_ops gswip_phylink_mac_ops = { static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .get_tag_protocol = gswip_get_tag_protocol, .setup = gswip_setup, + .port_setup = gswip_port_setup, .port_enable = gswip_port_enable, .port_disable = gswip_port_disable, .port_bridge_join = gswip_port_bridge_join, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index d2ca90407cce..8057350236c5 
100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions, offset < offset_of_ip6_daddr + 16) { actions->nat.src_xlate = false; idx = (offset - offset_of_ip6_daddr) / 4; - actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val); + actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val); } else { netdev_err(bp->dev, "%s: IPv6_hdr: Invalid pedit field\n", diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 0bd814251d56..d144494f97e9 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -131,7 +131,7 @@ static const struct fec_devinfo fec_mvf600_info = { FEC_QUIRK_HAS_MDIO_C45, }; -static const struct fec_devinfo fec_imx6x_info = { +static const struct fec_devinfo fec_imx6sx_info = { .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | @@ -196,7 +196,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, }, { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, }, { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, }, - { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, }, { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, }, { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, }, { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, }, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b22bb0ae9b9d..b8de97343ad3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1277,7 +1277,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); -int i40e_count_filters(struct i40e_vsi *vsi); +int i40e_count_all_filters(struct i40e_vsi *vsi); +int i40e_count_active_filters(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); static inline bool i40e_is_sw_dcb(struct i40e_pf *pf) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 037c1a0cbd6a..eae5923104f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1241,12 +1241,30 @@ void i40e_update_stats(struct i40e_vsi *vsi) } /** - * i40e_count_filters - counts VSI mac filters + * i40e_count_all_filters - counts VSI MAC filters * @vsi: the VSI to be searched * - * Returns count of mac filters - **/ -int i40e_count_filters(struct i40e_vsi *vsi) + * Return: count of MAC filters in any state. + */ +int i40e_count_all_filters(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + struct hlist_node *h; + int bkt, cnt = 0; + + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) + cnt++; + + return cnt; +} + +/** + * i40e_count_active_filters - counts VSI MAC filters + * @vsi: the VSI to be searched + * + * Return: count of active MAC filters. 
+ */ +int i40e_count_active_filters(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; struct hlist_node *h; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 97f32a0c68d0..646e394f5190 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | - (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); + FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx); wr32(hw, reg_idx, reg); } @@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, /* only set the required fields */ tx_ctx.base = info->dma_ring_addr / 128; + + /* ring_len has to be multiple of 8 */ + if (!IS_ALIGNED(info->ring_len, 8) || + info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) { + ret = -EINVAL; + goto error_context; + } tx_ctx.qlen = info->ring_len; tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); tx_ctx.rdylist_act = 0; @@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, /* only set the required fields */ rx_ctx.base = info->dma_ring_addr / 128; + + /* ring_len has to be multiple of 32 */ + if (!IS_ALIGNED(info->ring_len, 32) || + info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) { + ret = -EINVAL; + goto error_param; + } rx_ctx.qlen = info->ring_len; if (info->splithdr_enabled) { @@ -1453,6 +1467,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) * functions that may still be running at this point. */ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); + clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states); /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. 
@@ -2119,7 +2134,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) size_t len = 0; int ret; - if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) { + i40e_sync_vf_state(vf, I40E_VF_STATE_INIT); + + if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) || + test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) { aq_ret = -EINVAL; goto err; } @@ -2222,6 +2240,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vf->default_lan_addr.addr); } set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); + set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states); err: /* send the response back to the VF */ @@ -2384,7 +2403,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) } if (vf->adq_enabled) { - if (idx >= ARRAY_SIZE(vf->ch)) { + if (idx >= vf->num_tc) { aq_ret = -ENODEV; goto error_param; } @@ -2405,7 +2424,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) * to its appropriate VSIs based on TC mapping */ if (vf->adq_enabled) { - if (idx >= ARRAY_SIZE(vf->ch)) { + if (idx >= vf->num_tc) { aq_ret = -ENODEV; goto error_param; } @@ -2455,8 +2474,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, queue_id; for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) { - if (vf->adq_enabled) { - vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id; + u16 idx = vsi_queue_id / I40E_MAX_VF_VSI; + + if (vf->adq_enabled && idx < vf->num_tc) { + vsi_id = vf->ch[idx].vsi_id; queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF); } else { queue_id = vsi_queue_id; @@ -2844,24 +2865,6 @@ error_param: (u8 *)&stats, sizeof(stats)); } -/** - * i40e_can_vf_change_mac - * @vf: pointer to the VF info - * - * Return true if the VF is allowed to change its MAC filters, false otherwise - */ -static bool i40e_can_vf_change_mac(struct i40e_vf *vf) -{ - /* If the VF MAC address has been set administratively (via the - * ndo_set_vf_mac command), then deny permission to the VF to - * add/delete unicast MAC addresses, unless the VF is trusted - */ - if (vf->pf_set_mac && !vf->trusted) - return false; - - return true; -} - #define I40E_MAX_MACVLAN_PER_HW 3072 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ (num_ports)) @@ -2900,8 +2903,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; struct i40e_hw *hw = &pf->hw; - int mac2add_cnt = 0; - int i; + int i, mac_add_max, mac_add_cnt = 0; + bool vf_trusted; + + vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; @@ -2921,9 +2926,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. 
*/ - if (!i40e_can_vf_change_mac(vf) && - !is_multicast_ether_addr(addr) && - !ether_addr_equal(addr, vf->default_lan_addr.addr)) { + if (!vf_trusted && !is_multicast_ether_addr(addr) && + vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); return -EPERM; @@ -2932,29 +2936,33 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, /*count filters that really will be added*/ f = i40e_find_mac(vsi, addr); if (!f) - ++mac2add_cnt; + ++mac_add_cnt; } /* If this VF is not privileged, then we can't add more than a limited - * number of addresses. Check to make sure that the additions do not - * push us over the limit. - */ - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { - if ((i40e_count_filters(vsi) + mac2add_cnt) > - I40E_VC_MAX_MAC_ADDR_PER_VF) { - dev_err(&pf->pdev->dev, - "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); - return -EPERM; - } - /* If this VF is trusted, it can use more resources than untrusted. + * number of addresses. + * + * If this VF is trusted, it can use more resources than untrusted. * However to ensure that every trusted VF has appropriate number of * resources, divide whole pool of resources per port and then across * all VFs. */ - } else { - if ((i40e_count_filters(vsi) + mac2add_cnt) > - I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, - hw->num_ports)) { + if (!vf_trusted) + mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF; + else + mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports); + + /* VF can replace all its filters in one step, in this case mac_add_max + * will be added as active and another mac_add_max will be in + * a to-be-removed state. Account for that. 
+ */ + if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max || + (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) { + if (!vf_trusted) { + dev_err(&pf->pdev->dev, + "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); + return -EPERM; + } else { dev_err(&pf->pdev->dev, "Cannot add more MAC addresses, trusted VF exhausted it's resources\n"); return -EPERM; @@ -3589,7 +3597,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf, /* action_meta is TC number here to which the filter is applied */ if (!tc_filter->action_meta || - tc_filter->action_meta > vf->num_tc) { + tc_filter->action_meta >= vf->num_tc) { dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", vf->vf_id, tc_filter->action_meta); goto err; @@ -3887,6 +3895,8 @@ err: aq_ret); } +#define I40E_MAX_VF_CLOUD_FILTER 0xFF00 + /** * i40e_vc_add_cloud_filter * @vf: pointer to the VF info @@ -3926,6 +3936,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) goto err_out; } + if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) { + dev_warn(&pf->pdev->dev, + "VF %d: Max number of filters reached, can't apply cloud filter\n", + vf->vf_id); + aq_ret = -ENOSPC; + goto err_out; + } + cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); if (!cfilter) { aq_ret = -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 66f95e2f3146..e0e797fea138 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -41,7 +41,8 @@ enum i40e_vf_states { I40E_VF_STATE_MC_PROMISC, I40E_VF_STATE_UC_PROMISC, I40E_VF_STATE_PRE_ENABLE, - I40E_VF_STATE_RESETTING + I40E_VF_STATE_RESETTING, + I40E_VF_STATE_RESOURCES_LOADED, }; /* VF capabilities */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 971993586fb4..ca74bf684336 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -21,8 +21,7 @@ #include "rvu.h" #include "lmac_common.h" -#define DRV_NAME "Marvell-CGX/RPM" -#define DRV_STRING "Marvell CGX/RPM Driver" +#define DRV_NAME "Marvell-CGX-RPM" #define CGX_RX_STAT_GLOBAL_INDEX 9 diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index e63cc1eb6d89..ed041c3f714f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -1319,7 +1319,6 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, free_leaf: otx2_tc_del_from_flow_list(flow_cfg, new_node); - kfree_rcu(new_node, rcu); if (new_node->is_act_police) { mutex_lock(&nic->mbox.lock); @@ -1339,6 +1338,7 @@ free_leaf: mutex_unlock(&nic->mbox.lock); } + kfree_rcu(new_node, rcu); return rc; } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 7b33993f7001..f1827a1bd7a5 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -360,6 +360,11 @@ static void sfp_fixup_ignore_tx_fault(struct sfp *sfp) sfp->state_ignore_mask |= SFP_F_TX_FAULT; } +static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask) +{ + sfp->state_hw_mask &= ~mask; +} + static void sfp_fixup_nokia(struct sfp *sfp) { sfp_fixup_long_startup(sfp); @@ -408,7 +413,19 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp) * these are possibly used for other purposes on this * module, e.g. a serial port. 
*/ - sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS); + sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS); +} + +static void sfp_fixup_potron(struct sfp *sfp) +{ + /* + * The TX_FAULT and LOS pins on this device are used for serial + * communication, so ignore them. Additionally, provide extra + * time for this device to fully start up. + */ + + sfp_fixup_long_startup(sfp); + sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS); } static void sfp_fixup_rollball_cc(struct sfp *sfp) @@ -474,6 +491,9 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex, sfp_fixup_nokia), + // FLYPRO SFP-10GT-CS-30M uses Rollball protocol to talk to the PHY. + SFP_QUIRK_F("FLYPRO", "SFP-10GT-CS-30M", sfp_fixup_rollball), + // Fiberstore SFP-10G-T doesn't identify as copper, uses the Rollball // protocol to talk to the PHY and needs 4 sec wait before probing the // PHY. @@ -511,6 +531,8 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt), SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt), + SFP_QUIRK_F("YV", "SFP+ONU-XGSPON", sfp_fixup_potron), + // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault), diff --git a/drivers/net/tun.c b/drivers/net/tun.c index fae1a0ab36bd..fb9d425eff8c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1932,6 +1932,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, local_bh_enable(); goto unlock_frags; } + + if (frags && skb != tfile->napi.skb) + tfile->napi.skb = skb; } rcu_read_unlock(); local_bh_enable(); diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c index 4ee374080466..a77a27c36bdb 100644 --- a/drivers/net/wireless/virtual/virt_wifi.c +++ b/drivers/net/wireless/virtual/virt_wifi.c @@ -277,7 +277,9 @@ static void virt_wifi_connect_complete(struct work_struct *work) priv->is_connected = true; /* Schedules an event that acquires the rtnl lock. */ - cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0, + cfg80211_connect_result(priv->upperdev, + priv->is_connected ? 
fake_router_bssid : NULL, + NULL, 0, NULL, 0, status, GFP_KERNEL); netif_carrier_on(priv->upperdev); } diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c index 4b57102c7f62..6af6cf477c5b 100644 --- a/drivers/platform/x86/lg-laptop.c +++ b/drivers/platform/x86/lg-laptop.c @@ -8,6 +8,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/acpi.h> +#include <linux/bitfield.h> #include <linux/bits.h> #include <linux/device.h> #include <linux/dev_printk.h> @@ -75,6 +76,9 @@ MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages"); #define WMBB_USB_CHARGE 0x10B #define WMBB_BATT_LIMIT 0x10C +#define FAN_MODE_LOWER GENMASK(1, 0) +#define FAN_MODE_UPPER GENMASK(5, 4) + #define PLATFORM_NAME "lg-laptop" MODULE_ALIAS("wmi:" WMI_EVENT_GUID0); @@ -274,29 +278,19 @@ static ssize_t fan_mode_store(struct device *dev, struct device_attribute *attr, const char *buffer, size_t count) { - bool value; + unsigned long value; union acpi_object *r; - u32 m; int ret; - ret = kstrtobool(buffer, &value); + ret = kstrtoul(buffer, 10, &value); if (ret) return ret; + if (value >= 3) + return -EINVAL; - r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0); - if (!r) - return -EIO; - - if (r->type != ACPI_TYPE_INTEGER) { - kfree(r); - return -EIO; - } - - m = r->integer.value; - kfree(r); - r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4)); - kfree(r); - r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value); + r = lg_wmab(dev, WM_FAN_MODE, WM_SET, + FIELD_PREP(FAN_MODE_LOWER, value) | + FIELD_PREP(FAN_MODE_UPPER, value)); kfree(r); return count; @@ -305,7 +299,7 @@ static ssize_t fan_mode_store(struct device *dev, static ssize_t fan_mode_show(struct device *dev, struct device_attribute *attr, char *buffer) { - unsigned int status; + unsigned int mode; union acpi_object *r; r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0); @@ -317,10 +311,10 @@ static ssize_t fan_mode_show(struct device *dev, return -EIO; } - status = r->integer.value & 0x01; + mode = FIELD_GET(FAN_MODE_LOWER, r->integer.value); kfree(r); - return sysfs_emit(buffer, "%d\n", status); + return sysfs_emit(buffer, "%d\n", mode); } static ssize_t usb_charge_store(struct device *dev, diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 420e943bb73a..5e6197a6af5e 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -243,7 +243,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size, &hwq->sqe_dma_addr, GFP_KERNEL); - if (!hwq->sqe_dma_addr) { + if (!hwq->sqe_base_addr) { dev_err(hba->dev, "SQE allocation failed\n"); return -ENOMEM; } @@ -252,7 +252,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size, &hwq->cqe_dma_addr, GFP_KERNEL); - if (!hwq->cqe_dma_addr) { + if (!hwq->cqe_base_addr) { dev_err(hba->dev, "CQE allocation failed\n"); return -ENOMEM; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index bfd97cad8aa4..c0fd8ab3fe8f 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -734,7 +734,7 @@ void usb_detect_quirks(struct usb_device *udev) udev->quirks ^= usb_detect_dynamic_quirks(udev); if (udev->quirks) - dev_dbg(&udev->dev, "USB quirks for this device: %x\n", + dev_dbg(&udev->dev, "USB quirks for this device: 0x%x\n", udev->quirks); #ifdef CONFIG_USB_DEFAULT_PERSIST diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 1fcc9348dd43..123506681ef0 100644 
--- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -458,7 +458,7 @@ static void xhci_dbc_ring_init(struct xhci_ring *ring) trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma); trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK)); } - xhci_initialize_ring_info(ring); + xhci_initialize_ring_info(ring, 1); } static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index c9694526b157..f0ed38da6a0c 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -27,12 +27,14 @@ * "All components of all Command and Transfer TRBs shall be initialized to '0'" */ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, + unsigned int cycle_state, unsigned int max_packet, unsigned int num, gfp_t flags) { struct xhci_segment *seg; dma_addr_t dma; + int i; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev)); @@ -54,6 +56,11 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, return NULL; } } + /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ + if (cycle_state == 0) { + for (i = 0; i < TRBS_PER_SEGMENT; i++) + seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE); + } seg->num = num; seg->dma = dma; seg->next = NULL; @@ -131,14 +138,6 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, chain_links = xhci_link_chain_quirk(xhci, ring->type); - /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ - if (ring->cycle_state == 0) { - xhci_for_each_ring_seg(ring->first_seg, seg) { - for (int i = 0; i < TRBS_PER_SEGMENT; i++) - seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); - } - } - next = ring->enq_seg->next; xhci_link_segments(ring->enq_seg, first, ring->type, chain_links); xhci_link_segments(last, next, ring->type, chain_links); @@ -288,7 +287,8 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) kfree(ring); } -void xhci_initialize_ring_info(struct xhci_ring *ring) +void xhci_initialize_ring_info(struct xhci_ring *ring, + unsigned int cycle_state) { /* The ring is empty, so the enqueue pointer == dequeue pointer */ ring->enqueue = ring->first_seg->trbs; @@ -302,7 +302,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring) * New rings are initialized with cycle state equal to 1; if we are * handling ring expansion, set the cycle state equal to the old ring. 
*/ - ring->cycle_state = 1; + ring->cycle_state = cycle_state; /* * Each segment has a link TRB, and leave an extra TRB for SW @@ -317,6 +317,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_segment **first, struct xhci_segment **last, unsigned int num_segs, + unsigned int cycle_state, enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) @@ -327,7 +328,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, chain_links = xhci_link_chain_quirk(xhci, type); - prev = xhci_segment_alloc(xhci, max_packet, num, flags); + prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags); if (!prev) return -ENOMEM; num++; @@ -336,7 +337,8 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, while (num < num_segs) { struct xhci_segment *next; - next = xhci_segment_alloc(xhci, max_packet, num, flags); + next = xhci_segment_alloc(xhci, cycle_state, max_packet, num, + flags); if (!next) goto free_segments; @@ -361,8 +363,9 @@ free_segments: * Set the end flag and the cycle toggle bit on the last segment. * See section 4.9.1 and figures 15 and 16. */ -struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, - enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) +struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, + unsigned int num_segs, unsigned int cycle_state, + enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) { struct xhci_ring *ring; int ret; @@ -380,7 +383,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, return ring; ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs, - type, max_packet, flags); + cycle_state, type, max_packet, flags); if (ret) goto fail; @@ -390,7 +393,7 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); } - xhci_initialize_ring_info(ring); + xhci_initialize_ring_info(ring, cycle_state); trace_xhci_ring_alloc(ring); return ring; @@ -418,8 +421,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *last; int ret; - ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type, - ring->bounce_buf_len, flags); + ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state, + ring->type, ring->bounce_buf_len, flags); if (ret) return -ENOMEM; @@ -629,7 +632,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { stream_info->stream_rings[cur_stream] = - xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags); + xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet, + mem_flags); cur_ring = stream_info->stream_rings[cur_stream]; if (!cur_ring) goto cleanup_rings; @@ -970,7 +974,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, } /* Allocate endpoint 0 ring */ - dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags); + dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags); if (!dev->eps[0].ring) goto fail; @@ -1453,7 +1457,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Set up the endpoint ring */ virt_dev->eps[ep_index].new_ring = - xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags); + xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags); if (!virt_dev->eps[ep_index].new_ring) return -ENOMEM; @@ -2262,7 +2266,7 @@ xhci_alloc_interrupter(struct 
xhci_hcd *xhci, unsigned int segs, gfp_t flags) if (!ir) return NULL; - ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags); + ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags); if (!ir->event_ring) { xhci_warn(xhci, "Failed to allocate interrupter event ring\n"); kfree(ir); @@ -2468,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) goto fail; /* Set up the command ring to have one segments for now. */ - xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags); + xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags); if (!xhci->cmd_ring) goto fail; xhci_dbg_trace(xhci, trace_xhci_dbg_init, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 3970ec831b8c..abbf89e82d01 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -769,7 +769,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE); } - xhci_initialize_ring_info(ring); + xhci_initialize_ring_info(ring, 1); /* * Reset the hardware dequeue pointer. * Yes, this will need to be re-written after resume, but we're paranoid diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index b2aeb444daaf..b4fa8e7e4376 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1803,12 +1803,14 @@ void xhci_slot_copy(struct xhci_hcd *xhci, int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep, gfp_t mem_flags); -struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, +struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, + unsigned int num_segs, unsigned int cycle_state, enum xhci_ring_type type, unsigned int max_packet, gfp_t flags); void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_trbs, gfp_t flags); -void xhci_initialize_ring_info(struct xhci_ring *ring); +void xhci_initialize_ring_info(struct xhci_ring *ring, + unsigned int cycle_state); void xhci_free_endpoint_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 893fd66b5269..469f81f880f0 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2492,7 +2492,7 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font, unsigned charcount = font->charcount; int w = font->width; int h = font->height; - int size; + int size, alloc_size; int i, csum; u8 *new_data, *data = font->data; int pitch = PITCH(font->width); @@ -2519,9 +2519,16 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font, if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; - size = CALC_FONTSZ(h, pitch, charcount); + /* Check for integer overflow in font size calculation */ + if (check_mul_overflow(h, pitch, &size) || + check_mul_overflow(size, charcount, &size)) + return -EINVAL; + + /* Check for overflow in allocation size calculation */ + if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &alloc_size)) + return -EINVAL; - new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); + new_data = kmalloc(alloc_size, GFP_USER); if (!new_data) return -ENOMEM; |
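The amd-sfh-hid changes above add a per-device mp2->lock and take it with guard(mutex)() in amd_sfh_get_report(), amd_sfh_work() and amd_sfh_work_buffer(). guard(mutex)() is the scope-based locking helper from <linux/cleanup.h>/<linux/mutex.h>: the mutex is acquired where the guard is declared and released automatically when the enclosing scope ends, on every return path. A minimal sketch of the idiom, assuming a made-up sensor_dev structure rather than the driver's real types:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/errno.h>

struct sensor_dev {
	struct mutex lock;	/* serializes request handling */
	int pending;
};

static int sensor_submit(struct sensor_dev *dev, int req)
{
	/* Lock is held from here until the function returns, on any path. */
	guard(mutex)(&dev->lock);

	if (dev->pending)
		return -EBUSY;		/* mutex released automatically */

	dev->pending = req;
	return 0;			/* mutex released automatically */
}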
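The iommufd rework above records the offset of each object type's struct file * member in its iommufd_object_ops entry, so iommufd_object_abort_and_destroy() can locate and __fput_sync() the file without knowing the concrete type; the IOMMUFD_FILE_OFFSET() macro uses BUILD_BUG_ON_ZERO() and __same_type() to verify the layout at compile time. A stripped-down sketch of the same offset-in-ops-table pattern, with invented obj/obj_ops names standing in for the iommufd types:

#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/stddef.h>
#include <linux/fs.h>

struct obj {
	unsigned int type;
};

struct file_backed_obj {
	struct obj common;		/* must sit at offset 0 */
	struct file *filep;
};

struct obj_ops {
	size_t file_offset;		/* 0: this type owns no file */
};

/* Fails the build if _filep is not a struct file * or _common is not first. */
#define OBJ_FILE_OFFSET(_struct, _filep, _common)				\
	(offsetof(_struct, _filep) +						\
	 BUILD_BUG_ON_ZERO(!__same_type(struct file *,				\
					((_struct *)NULL)->_filep)) +		\
	 BUILD_BUG_ON_ZERO(offsetof(_struct, _common)))

static const struct obj_ops obj_ops[] = {
	[1] = { .file_offset = OBJ_FILE_OFFSET(struct file_backed_obj, filep, common) },
};

static struct file *obj_file(struct obj *obj)
{
	const struct obj_ops *ops = &obj_ops[obj->type];

	if (!ops->file_offset)
		return NULL;
	return *(struct file **)((void *)obj + ops->file_offset);
}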
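The lg-laptop fan_mode rework replaces the open-coded shift-and-mask arithmetic with GENMASK(), FIELD_PREP() and FIELD_GET() from <linux/bitfield.h>, using the FAN_MODE_LOWER/FAN_MODE_UPPER masks it defines. A small sketch of that idiom; only the two mask definitions mirror the patch, the helper names below are illustrative:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define FAN_MODE_LOWER	GENMASK(1, 0)	/* mode, bits 1:0 */
#define FAN_MODE_UPPER	GENMASK(5, 4)	/* mode mirrored at bits 5:4 */

/* Build the value to write: the same 2-bit mode packed into both fields. */
static u32 fan_mode_encode(u32 mode)
{
	return FIELD_PREP(FAN_MODE_LOWER, mode) |
	       FIELD_PREP(FAN_MODE_UPPER, mode);
}

/* Pull the mode back out of a value read from the firmware. */
static u32 fan_mode_decode(u32 val)
{
	return FIELD_GET(FAN_MODE_LOWER, val);
}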
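The fbcon_set_font() change above drops the unchecked CALC_FONTSZ() product in favour of check_mul_overflow() and check_add_overflow() from <linux/overflow.h>, which return true instead of silently wrapping when a size calculation overflows. A minimal sketch of the pattern, assuming a hypothetical font_buf_alloc() helper rather than fbcon's actual code:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/err.h>

/*
 * Hypothetical helper mirroring the fbcon pattern: every multiply/add that
 * feeds an allocation size is checked, and an overflow becomes -EINVAL
 * rather than a short allocation.
 */
static void *font_buf_alloc(int height, int pitch, int charcount, size_t extra)
{
	int size;
	size_t alloc_size;

	if (check_mul_overflow(height, pitch, &size) ||
	    check_mul_overflow(size, charcount, &size))
		return ERR_PTR(-EINVAL);

	if (check_add_overflow(extra, (size_t)size, &alloc_size))
		return ERR_PTR(-EINVAL);

	return kmalloc(alloc_size, GFP_KERNEL);
}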