207 files changed, 1626 insertions, 762 deletions
@@ -104,6 +104,7 @@ Christoph Hellwig <hch@lst.de> Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com> Corey Minyard <minyard@acm.org> Damian Hobson-Garcia <dhobsong@igel.co.jp> +Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com> Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com> Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net> Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch> @@ -353,7 +354,8 @@ Peter Oruba <peter@oruba.de> Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com> Praveen BP <praveenbp@ti.com> Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com> -Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com> +Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com> +Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com> Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com> Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com> Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl> diff --git a/Documentation/admin-guide/device-mapper/verity.rst b/Documentation/admin-guide/device-mapper/verity.rst index 1a6b91368e59..a65c1602cb23 100644 --- a/Documentation/admin-guide/device-mapper/verity.rst +++ b/Documentation/admin-guide/device-mapper/verity.rst @@ -141,6 +141,10 @@ root_hash_sig_key_desc <key_description> also gain new certificates at run time if they are signed by a certificate already in the secondary trusted keyring. +try_verify_in_tasklet + If the verity hashes are in the cache, verify data blocks in a kernel tasklet + instead of a workqueue. This option can reduce I/O latency. + Theory of operation =================== diff --git a/Documentation/block/ublk.rst b/Documentation/block/ublk.rst index 2122d1a4a541..ba45c46cc0da 100644 --- a/Documentation/block/ublk.rst +++ b/Documentation/block/ublk.rst @@ -144,6 +144,42 @@ managing and controlling ublk devices with help of several control commands: For retrieving device info via ``ublksrv_ctrl_dev_info``. It is the server's responsibility to save IO target specific info in userspace. +- ``UBLK_CMD_START_USER_RECOVERY`` + + This command is valid if the ``UBLK_F_USER_RECOVERY`` feature is enabled. This + command is accepted after the old process has exited, the ublk device is quiesced + and ``/dev/ublkc*`` is released. The user should send this command before starting + a new process which re-opens ``/dev/ublkc*``. When this command returns, the + ublk device is ready for the new process. + +- ``UBLK_CMD_END_USER_RECOVERY`` + + This command is valid if the ``UBLK_F_USER_RECOVERY`` feature is enabled. This + command is accepted after the ublk device is quiesced and a new process has + opened ``/dev/ublkc*`` and made all ublk queues ready. When this command + returns, the ublk device is unquiesced and new I/O requests are passed to the + new process. + +- user recovery feature description + + Two new features are added for user recovery: ``UBLK_F_USER_RECOVERY`` and + ``UBLK_F_USER_RECOVERY_REISSUE``. + + With ``UBLK_F_USER_RECOVERY`` set, after one ubq_daemon (the ublk server's I/O + handler) dies, ublk does not delete ``/dev/ublkb*`` during the whole + recovery stage and the ublk device ID is kept. It is the ublk server's + responsibility to recover the device context using its own knowledge. + Requests which have not been issued to userspace are requeued. Requests + which have been issued to userspace are aborted. 
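Putting the two recovery control commands together, a restart cycle might be driven as sketched below. This is illustrative only: ``ublk_ctrl_cmd()`` and ``ublk_queue_setup()`` are hypothetical helpers standing in for the ``IORING_OP_URING_CMD`` plumbing against ``/dev/ublk-control`` that a real server (e.g. ublksrv) implements, and error handling is elided.

/* 1. The old ublk server has died; /dev/ublkc0 is now released. */
ublk_ctrl_cmd(ctrl_fd, UBLK_CMD_START_USER_RECOVERY, dev_id);

/* 2. In the new server process: re-open the char device and make every
 *    queue ready again, e.g. mmap() the descriptor buffers and submit one
 *    UBLK_IO_FETCH_REQ command per tag, just as for a fresh device.
 */
char_fd = open("/dev/ublkc0", O_RDWR);
for (q = 0; q < nr_hw_queues; q++)
        ublk_queue_setup(char_fd, q);   /* hypothetical helper */

/* 3. Unquiesce: requeued requests and new I/O now reach the new process. */
ublk_ctrl_cmd(ctrl_fd, UBLK_CMD_END_USER_RECOVERY, dev_id);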
+ + With ``UBLK_F_USER_RECOVERY_REISSUE`` set, after one ubq_daemon (the ublk + server's I/O handler) dies, contrary to ``UBLK_F_USER_RECOVERY``, + requests which have been issued to userspace are requeued and will be + re-issued to the new process after handling ``UBLK_CMD_END_USER_RECOVERY``. + ``UBLK_F_USER_RECOVERY_REISSUE`` is designed for backends that tolerate + double writes, since the driver may issue the same I/O request twice. It + might be useful for a read-only FS or a VM backend. + Data plane ---------- diff --git a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml index 64995cbb0f97..41c9760227cd 100644 --- a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml +++ b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml @@ -8,7 +8,6 @@ title: Samsung S3FWRN5 NCI NFC Controller maintainers: - Krzysztof Kozlowski <krzk@kernel.org> - - Krzysztof Opasiak <k.opasiak@samsung.com> properties: compatible: diff --git a/MAINTAINERS b/MAINTAINERS index cf0f18502372..bdd43abd2683 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4459,13 +4459,15 @@ M: Josef Bacik <josef@toxicpanda.com> M: David Sterba <dsterba@suse.com> L: linux-btrfs@vger.kernel.org S: Maintained -W: http://btrfs.wiki.kernel.org/ -Q: http://patchwork.kernel.org/project/linux-btrfs/list/ +W: https://btrfs.readthedocs.io +W: https://btrfs.wiki.kernel.org/ +Q: https://patchwork.kernel.org/project/linux-btrfs/list/ C: irc://irc.libera.chat/btrfs T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git F: Documentation/filesystems/btrfs.rst F: fs/btrfs/ F: include/linux/btrfs* +F: include/trace/events/btrfs.h F: include/uapi/linux/btrfs* BTTV VIDEO4LINUX DRIVER @@ -5266,6 +5268,7 @@ F: tools/testing/selftests/cgroup/ CONTROL GROUP - BLOCK IO CONTROLLER (BLKIO) M: Tejun Heo <tj@kernel.org> +M: Josef Bacik <josef@toxicpanda.com> M: Jens Axboe <axboe@kernel.dk> L: cgroups@vger.kernel.org L: linux-block@vger.kernel.org @@ -5273,6 +5276,7 @@ T: git git://git.kernel.dk/linux-block F: Documentation/admin-guide/cgroup-v1/blkio-controller.rst F: block/bfq-cgroup.c F: block/blk-cgroup.c +F: block/blk-iocost.c F: block/blk-iolatency.c F: block/blk-throttle.c F: include/linux/blk-cgroup.h @@ -14709,6 +14713,12 @@ F: drivers/nvme/target/auth.c F: drivers/nvme/target/fabrics-cmd-auth.c F: include/linux/nvme-auth.h +NVM EXPRESS HARDWARE MONITORING SUPPORT +M: Guenter Roeck <linux@roeck-us.net> +L: linux-nvme@lists.infradead.org +S: Supported +F: drivers/nvme/host/hwmon.c + NVM EXPRESS FC TRANSPORT DRIVERS M: James Smart <james.smart@broadcom.com> L: linux-nvme@lists.infradead.org @@ -18211,7 +18221,6 @@ F: include/media/drv-intf/s3c_camif.h SAMSUNG S3FWRN5 NFC DRIVER M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org> -M: Krzysztof Opasiak <k.opasiak@samsung.com> L: linux-nfc@lists.01.org (subscribers-only) S: Maintained F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml @@ -21293,7 +21302,7 @@ L: linux-usb@vger.kernel.org L: netdev@vger.kernel.org S: Maintained W: https://github.com/petkan/pegasus -T: git git://github.com/petkan/pegasus.git +T: git https://github.com/petkan/pegasus.git F: drivers/net/usb/pegasus.* USB PHY LAYER @@ -21330,7 +21339,7 @@ L: linux-usb@vger.kernel.org L: netdev@vger.kernel.org S: Maintained W: https://github.com/petkan/rtl8150 -T: git git://github.com/petkan/rtl8150.git +T: git https://github.com/petkan/rtl8150.git F: drivers/net/usb/rtl8150.c USB SERIAL SUBSYSTEM @@ -22121,6 +22130,7 @@ F: 
Documentation/watchdog/ F: drivers/watchdog/ F: include/linux/watchdog.h F: include/uapi/linux/watchdog.h +F: include/trace/events/watchdog.h WHISKEYCOVE PMIC GPIO DRIVER M: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com> diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 0bef44d30a27..2fd52b65deac 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -25,8 +25,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { u64 start = rmrr->base_address; u64 end = rmrr->end_address + 1; + int entry_type; - if (e820__mapped_all(start, end, E820_TYPE_RESERVED)) + entry_type = e820__get_entry_type(start, end); + if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS) return 0; pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n", diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 64ee618064ba..71f721670ab6 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -369,12 +369,8 @@ struct bfq_queue { unsigned long split_time; /* time of last split */ unsigned long first_IO_time; /* time of first I/O for this queue */ - unsigned long creation_time; /* when this queue is created */ - /* max service rate measured so far */ - u32 max_service_rate; - /* * Pointer to the waker queue for this queue, i.e., to the * queue Q such that this queue happens to get new I/O right diff --git a/block/bio.c b/block/bio.c index 633a902468ec..57c2f327225b 100644 --- a/block/bio.c +++ b/block/bio.c @@ -741,7 +741,7 @@ void bio_put(struct bio *bio) return; } - if (bio->bi_opf & REQ_ALLOC_CACHE) { + if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) { struct bio_alloc_cache *cache; bio_uninit(bio); diff --git a/block/blk-mq.c b/block/blk-mq.c index 8070b6c10e8d..33292c01875d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3112,8 +3112,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, struct page *page; unsigned long flags; - /* There is no need to clear a driver tags own mapping */ - if (drv_tags == tags) + /* + * There is no need to clear mapping if driver tags is not initialized + * or the mapping belongs to the driver tags. 
+ */ + if (!drv_tags || drv_tags == tags) return; list_for_each_entry(page, &tags->page_list, lru) { diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index ca2aed86b540..8059baf4ef27 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1142,7 +1142,8 @@ static void iort_iommu_msi_get_resv_regions(struct device *dev, struct iommu_resv_region *region; region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K, - prot, IOMMU_RESV_MSI); + prot, IOMMU_RESV_MSI, + GFP_KERNEL); if (region) list_add_tail(®ion->list, head); } diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index da7ee8bec165..7add8e79912b 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -257,7 +257,7 @@ enum { PCS_7 = 0x94, /* 7+ port PCS (Denverton) */ /* em constants */ - EM_MAX_SLOTS = 8, + EM_MAX_SLOTS = SATA_PMP_MAX_PORTS, EM_MAX_RETRY = 5, /* em_ctl bits */ diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index f61795c546cf..6f216eb25610 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -448,7 +448,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (!of_id) return -ENODEV; - priv->version = (enum brcm_ahci_version)of_id->data; + priv->version = (unsigned long)of_id->data; priv->dev = dev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl"); diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index b734e069034d..a950767f7948 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -1067,7 +1067,7 @@ static int imx_ahci_probe(struct platform_device *pdev) imxpriv->ahci_pdev = pdev; imxpriv->no_device = false; imxpriv->first_time = true; - imxpriv->type = (enum ahci_imx_type)of_id->data; + imxpriv->type = (unsigned long)of_id->data; imxpriv->sata_clk = devm_clk_get(dev, "sata"); if (IS_ERR(imxpriv->sata_clk)) { @@ -1235,4 +1235,4 @@ module_platform_driver(imx_ahci_driver); MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver"); MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("ahci:imx"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 6cd61842ad48..9cf9bf36a874 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -280,7 +280,7 @@ static int ahci_qoriq_probe(struct platform_device *pdev) return -ENOMEM; if (of_id) - qoriq_priv->type = (enum ahci_qoriq_type)of_id->data; + qoriq_priv->type = (unsigned long)of_id->data; else qoriq_priv->type = (enum ahci_qoriq_type)acpi_id->driver_data; diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c index 5a2cac60a29a..8607b68eee53 100644 --- a/drivers/ata/ahci_st.c +++ b/drivers/ata/ahci_st.c @@ -236,7 +236,7 @@ static struct platform_driver st_ahci_driver = { .driver = { .name = DRV_NAME, .pm = &st_ahci_pm_ops, - .of_match_table = of_match_ptr(st_ahci_match), + .of_match_table = st_ahci_match, }, .probe = st_ahci_probe, .remove = ata_platform_remove_one, diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index 7bb5db17f864..1e08704d5117 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -785,7 +785,7 @@ static int xgene_ahci_probe(struct platform_device *pdev) of_devid = of_match_device(xgene_ahci_of_match, dev); if (of_devid) { if (of_devid->data) - version = (enum xgene_ahci_version) of_devid->data; + version = (unsigned long) of_devid->data; } #ifdef CONFIG_ACPI else { diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 590ebea99601..0195eb29f6c2 100644 --- 
a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -875,7 +875,7 @@ static int sata_rcar_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->type = (enum sata_rcar_type)of_device_get_match_data(dev); + priv->type = (unsigned long)of_device_get_match_data(dev); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 8f7f144e54f3..7f9bcc82fc9c 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -30,11 +30,6 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio return NULL; memset(req, 0, sizeof(*req)); - req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src, - GFP_NOIO, &drbd_io_bio_set); - req->private_bio->bi_private = req; - req->private_bio->bi_end_io = drbd_request_endio; - req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0) | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0) | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0); @@ -1219,9 +1214,12 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio) /* Update disk stats */ req->start_jif = bio_start_io_acct(req->master_bio); - if (!get_ldev(device)) { - bio_put(req->private_bio); - req->private_bio = NULL; + if (get_ldev(device)) { + req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, + bio, GFP_NOIO, + &drbd_io_bio_set); + req->private_bio->bi_private = req; + req->private_bio->bi_end_io = drbd_request_endio; } /* process discards always from our submitter thread */ diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 2651bf41dde3..5afce6ffaadf 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -124,7 +124,7 @@ struct ublk_queue { bool force_abort; unsigned short nr_io_ready; /* how many ios setup */ struct ublk_device *dev; - struct ublk_io ios[0]; + struct ublk_io ios[]; }; #define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ) diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index e7dd457e9b22..e98fcac578d6 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { if (!wait) return 0; - cpu_relax(); + hwrng_msleep(rng, 1000); } num_words = rng_readl(priv, RNG_STATUS) >> 24; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ae9371b172e3..8639a4f9c6e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -274,9 +274,6 @@ extern int amdgpu_vcnfw_log; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) -#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0) -#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1) - /* max cursor sizes (in pixels) */ #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 @@ -1065,7 +1062,6 @@ struct amdgpu_device { struct work_struct reset_work; - uint32_t amdgpu_reset_level_mask; bool job_hang; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 03bbfaa51cbc..0561812aa0a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -134,7 +134,6 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - 
clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 0b0a72ca5695..7e80caa05060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -111,7 +111,7 @@ static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id) lock_srbm(adev, mec, pipe, 0, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, regCPC_INT_CNTL), + WREG32_SOC15(GC, 0, regCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6066aebf491c..de61a85c4b02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1954,8 +1954,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) return PTR_ERR(ent); } - debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask); - /* Register debugfs entries for amdgpu_ttm */ amdgpu_ttm_debugfs_init(adev); amdgpu_debugfs_pm_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ab8f970b2849..e0445e8cc342 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2928,6 +2928,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + /* + * Per PMFW team's suggestion, driver needs to handle gfxoff + * and df cstate features disablement for gpu reset(e.g. Mode1Reset) + * scenario. Add the missing df cstate disablement here. + */ + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + dev_warn(adev->dev, "Failed to disallow df cstate"); + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; @@ -5210,7 +5218,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, reset_context->job = job; reset_context->hive = hive; - /* * Build list of devices to reset. * In case we are in XGMI hive mode, resort the device list @@ -5337,11 +5344,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. 
*/ amdgpu_ras_resume(adev); } else { r = amdgpu_do_asic_reset(device_list_handle, reset_context); - if (r && r == -EAGAIN) { - set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags); - adev->asic_reset_res = 0; + if (r && r == -EAGAIN) goto retry; - } if (!r && gpu_reset_for_dev_remove) goto recover_end; @@ -5777,7 +5781,6 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) reset_context.reset_req_dev = adev; set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); - set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); adev->no_hw_access = true; r = amdgpu_device_pre_asic_reset(adev, &reset_context); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 46c99331d7f1..cd968e781077 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -72,7 +72,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 2dad7aa9a03b..a4b47e1bd111 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1950,7 +1950,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); } @@ -2268,6 +2267,25 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) { + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 2): + return true; + default: + return false; + } + } + + if (adev->asic_type == CHIP_IP_DISCOVERY) { + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 10): + return true; + default: + return false; + } + } + return adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS || @@ -2311,11 +2329,6 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) !amdgpu_ras_asic_supported(adev)) return; - /* If driver run on sriov guest side, only enable ras for aldebaran */ - if (amdgpu_sriov_vf(adev) && - adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) - return; - if (!adev->gmc.xgmi.connected_to_cpu) { if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { dev_info(adev->dev, "MEM ECC is active.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 9da5ead50c90..f778466bb9db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -37,8 +37,6 @@ int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; - adev->amdgpu_reset_level_mask = 0x1; - switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 2): ret = aldebaran_reset_init(adev); @@ -76,12 +74,6 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev, { struct amdgpu_reset_handler *reset_handler = NULL; - if (!(adev->amdgpu_reset_level_mask & 
AMDGPU_RESET_LEVEL_MODE2)) - return -ENOSYS; - - if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags)) - return -ENOSYS; - if (adev->reset_cntl && adev->reset_cntl->get_reset_handler) reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); @@ -98,12 +90,6 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev, int ret; struct amdgpu_reset_handler *reset_handler = NULL; - if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2)) - return -ENOSYS; - - if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags)) - return -ENOSYS; - if (adev->reset_cntl) reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index f5318fedf2f0..f4a501ff87d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -30,8 +30,7 @@ enum AMDGPU_RESET_FLAGS { AMDGPU_NEED_FULL_RESET = 0, AMDGPU_SKIP_HW_RESET = 1, - AMDGPU_SKIP_MODE2_RESET = 2, - AMDGPU_RESET_FOR_DEVICE_REMOVE = 3, + AMDGPU_RESET_FOR_DEVICE_REMOVE = 2, }; struct amdgpu_reset_context { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3e316b013fd9..d3558c34d406 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -405,9 +405,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { ktime_t deadline = ktime_add_us(ktime_get(), 10000); - if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY)) - return false; - if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dc262d2c2925..57277b1cf183 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -439,6 +439,9 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, while (cursor.remaining) { amdgpu_res_next(&cursor, cursor.size); + if (!cursor.remaining) + break; + /* ttm_resource_ioremap only supports contiguous memory */ if (end != cursor.start) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e4af40b9a8aa..9c765b04aae3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -726,6 +726,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } + if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) + /* VF MMIO access (except mailbox range) from CPU + * will be blocked during sriov runtime + */ + adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; + /* we have the ability to check now */ if (amdgpu_sriov_vf(adev)) { switch (adev->asic_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index d94c31e68a14..49c4347d154c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -31,6 +31,7 @@ #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ +#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */ /* flags for indirect register access 
path supported by rlcg for sriov */ #define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28) @@ -297,6 +298,9 @@ struct amdgpu_video_codec_info; #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) +#define amdgpu_sriov_vf_mmio_access_protection(adev) \ +((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 83b0c5d86e48..2291aa14d888 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2338,7 +2338,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) */ #ifdef CONFIG_X86_64 if (amdgpu_vm_update_mode == -1) { - if (amdgpu_gmc_vram_full_visible(&adev->gmc)) + /* For asic with VF MMIO access protection + * avoid using CPU for VM table updates + */ + if (amdgpu_gmc_vram_full_visible(&adev->gmc) && + !amdgpu_sriov_vf_mmio_access_protection(adev)) adev->vm_manager.vm_update_mode = AMDGPU_VM_USE_CPU_FOR_COMPUTE; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 2b0669c464f6..69e105fa41f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, DMA_RESV_USAGE_BOOKKEEP); } - if (fence && !p->immediate) + if (fence && !p->immediate) { + /* + * Most hw generations now have a separate queue for page table + * updates, but when the queue is shared with userspace we need + * the extra CPU round trip to correctly flush the TLB. + */ + set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags); swap(*fence, f); + } dma_fence_put(f); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 251109723ab6..671ca5a0f208 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1571,7 +1571,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); /* Enable trap for each kfd vmid. */ - data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL)); + data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); } soc21_grbm_select(adev, 0, 0, 0, 0); @@ -5076,6 +5076,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle, case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): gfx_v11_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE); break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 846ccb6cf07d..66dfb574cc7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -186,6 +186,10 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; + unsigned char hub_ip = 0; + + hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? 
+ GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); /* @@ -199,8 +203,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (i = 0; i < adev->usec_timeout; i++) { /* a read return value of 1 means semaphore acuqire */ - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, hub_ip); if (tmp & 0x1) break; udelay(1); @@ -210,12 +214,12 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip); /* Wait for ACK with a delay.*/ for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + - hub->eng_distance * eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack + + hub->eng_distance * eng, hub_ip); tmp &= 1 << vmid; if (tmp) break; @@ -229,8 +233,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - WREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, 0); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0, hub_ip); /* Issue additional private vm invalidation to MMHUB */ if ((vmhub != AMDGPU_GFXHUB_0) && diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 5cec6b259b7f..fef7d020bc5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -1156,6 +1156,42 @@ static int mes_v11_0_sw_fini(void *handle) return 0; } +static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev) +{ + uint32_t data; + int i; + + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0); + + /* disable the queue if it's active */ + if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { + WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + } + data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 0); + data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 1); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data); + + WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0); + + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0); + + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + adev->mes.ring.sched.ready = false; +} + static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring) { uint32_t tmp; @@ -1207,6 +1243,9 @@ failure: static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) { + if (adev->mes.ring.sched.ready) + mes_v11_0_kiq_dequeue_sched(adev); + mes_v11_0_enable(adev, false); return 0; } @@ -1262,9 +1301,6 @@ failure: static int mes_v11_0_hw_fini(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->mes.ring.sched.ready = false; return 0; } @@ -1296,7 +1332,8 @@ static int mes_v11_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_in_reset(adev)) + if (!amdgpu_in_reset(adev) && + 
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))) amdgpu_mes_self_test(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index a2f04b249132..12906ba74462 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -290,7 +290,6 @@ flr_done: reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index a977f0027928..e07757eea7ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -317,7 +317,6 @@ flr_done: reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index fd14fa9b9cd7..288c414babdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -529,7 +529,6 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 298fa11702e7..1122bd4eae98 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1417,11 +1417,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_CNTL, temp); if (!amdgpu_sriov_vf(adev)) { - ring = &adev->sdma.instance[i].ring; - adev->nbio.funcs->sdma_doorbell_range(adev, i, - ring->use_doorbell, ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); - /* unhalt engine */ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL); temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c index 7aa570c1ce4a..81a6d5b94987 100644 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c +++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c @@ -31,12 +31,23 @@ #include "amdgpu_psp.h" #include "amdgpu_xgmi.h" +static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_ctl) +{ +#if 0 + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) && + adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev)) + return true; +#endif + return false; +} + static struct amdgpu_reset_handler * sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl, struct amdgpu_reset_context *reset_context) { struct amdgpu_reset_handler *handler; - struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; if (reset_context->method != AMD_RESET_METHOD_NONE) { list_for_each_entry(handler, &reset_ctl->reset_handlers, @@ -44,15 +55,13 @@ sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl, if (handler->reset_method == reset_context->method) return handler; } - } else { - 
list_for_each_entry(handler, &reset_ctl->reset_handlers, + } + + if (sienna_cichlid_is_mode2_default(reset_ctl)) { + list_for_each_entry (handler, &reset_ctl->reset_handlers, handler_list) { - if (handler->reset_method == AMD_RESET_METHOD_MODE2 && - adev->pm.fw_version >= 0x3a5500 && - !amdgpu_sriov_vf(adev)) { - reset_context->method = AMD_RESET_METHOD_MODE2; + if (handler->reset_method == AMD_RESET_METHOD_MODE2) return handler; - } } } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 183024d7c184..e3b2b6b4f1a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1211,6 +1211,20 @@ static int soc15_common_sw_fini(void *handle) return 0; } +static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev) +{ + int i; + + /* sdma doorbell range is programed by hypervisor */ + if (!amdgpu_sriov_vf(adev)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + adev->nbio.funcs->sdma_doorbell_range(adev, i, + true, adev->doorbell_index.sdma_engine[i] << 1, + adev->doorbell_index.sdma_doorbell_range); + } + } +} + static int soc15_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1230,6 +1244,13 @@ static int soc15_common_hw_init(void *handle) /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); + /* HW doorbell routing policy: doorbell writing not + * in SDMA/IH/MM/ACV range will be routed to CP. So + * we need to init SDMA doorbell range prior + * to CP ip block init and ring test. IH already + * happens before CP. + */ + soc15_sdma_doorbell_range_init(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 795706b3b092..e08044008186 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -423,6 +423,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev) case IP_VERSION(11, 0, 0): return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): return false; default: return true; @@ -636,7 +637,11 @@ static int soc21_common_early_init(void *handle) break; case IP_VERSION(11, 0, 3): adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG | - AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_REPEATER_FGCG | + AMD_CG_SUPPORT_GFX_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index d70838edba80..ca7d24000621 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -77,7 +77,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h 
b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index e85364dff4e0..5cb3e8634739 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -262,8 +262,9 @@ struct kfd2kgd_calls { uint32_t queue_id); int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd, - uint32_t reset_type, unsigned int timeout, - uint32_t pipe_id, uint32_t queue_id); + enum kfd_preempt_type reset_type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id); bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 948cc75376f8..236657eece47 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3362,11 +3362,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) if (adev->pm.sysfs_initialized) return 0; + INIT_LIST_HEAD(&adev->pm.pm_attr_list); + if (adev->pm.dpm_enabled == 0) return 0; - INIT_LIST_HEAD(&adev->pm.pm_attr_list); - adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, DRIVER_NAME, adev, hwmon_groups); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index 190af79f3236..dad3e3741a4e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t *speed) { - struct amdgpu_device *adev = hwmgr->adev; - uint32_t duty100, duty; - uint64_t tmp64; + uint32_t current_rpm; + uint32_t percent = 0; - duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), - CG_FDO_CTRL1, FMAX_DUTY100); - duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS), - CG_THERMAL_STATUS, FDO_PWM_DUTY); + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; - if (!duty100) - return -EINVAL; + if (vega10_get_current_rpm(hwmgr, ¤t_rpm)) + return -1; + + if (hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM != 0) + percent = current_rpm * 255 / + hwmgr->thermal_controller. 
+ advanceFanControlParameters.usMaxFanRPM; - tmp64 = (uint64_t)duty * 255; - do_div(tmp64, duty100); - *speed = MIN((uint32_t)tmp64, 255); + *speed = MIN(percent, 255); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 13c5c7f1ecb9..4fe75dd2b329 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1314,8 +1314,8 @@ static int smu_smc_hw_setup(struct smu_context *smu) ret = smu_enable_thermal_alert(smu); if (ret) { - dev_err(adev->dev, "Failed to enable thermal alert!\n"); - return ret; + dev_err(adev->dev, "Failed to enable thermal alert!\n"); + return ret; } ret = smu_notify_display_change(smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h index ae2d337158f3..f77401709d83 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define PMFW_DRIVER_IF_VERSION 5 +#define PMFW_DRIVER_IF_VERSION 7 typedef struct { int32_t value; @@ -163,8 +163,8 @@ typedef struct { uint16_t DclkFrequency; //[MHz] uint16_t MemclkFrequency; //[MHz] uint16_t spare; //[centi] - uint16_t UvdActivity; //[centi] uint16_t GfxActivity; //[centi] + uint16_t UvdActivity; //[centi] uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC @@ -199,6 +199,19 @@ typedef struct { uint16_t DeviceState; uint16_t CurTemp; //[centi-Celsius] uint16_t spare2; + + uint16_t AverageGfxclkFrequency; + uint16_t AverageFclkFrequency; + uint16_t AverageGfxActivity; + uint16_t AverageSocclkFrequency; + uint16_t AverageVclkFrequency; + uint16_t AverageVcnActivity; + uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads + uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes + uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower + uint16_t AverageCorePower; //Filtered of [sum of CorePower[8]]) + uint16_t AverageCoreC0Residency[8]; //Filtered of [average C0 residency % per core] + uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing. 
} SmuMetrics_t; typedef struct { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 9d62ea2af132..8f72202aea8e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -28,7 +28,7 @@ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05 +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 445005571f76..9cd005131f56 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2242,9 +2242,17 @@ static void arcturus_get_unique_id(struct smu_context *smu) static int arcturus_set_df_cstate(struct smu_context *smu, enum pp_df_cstate state) { + struct amdgpu_device *adev = smu->adev; uint32_t smu_version; int ret; + /* + * Arcturus does not need the cstate disablement + * prerequisite for gpu reset. + */ + if (amdgpu_in_reset(adev) || adev->in_suspend) + return 0; + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); if (ret) { dev_err(smu->adev->dev, "Failed to get smu version!\n"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 619aee51b123..d30ec3005ea1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1640,6 +1640,15 @@ static bool aldebaran_is_baco_supported(struct smu_context *smu) static int aldebaran_set_df_cstate(struct smu_context *smu, enum pp_df_cstate state) { + struct amdgpu_device *adev = smu->adev; + + /* + * Aldebaran does not need the cstate disablement + * prerequisite for gpu reset. 
+ */ + if (amdgpu_in_reset(adev) || adev->in_suspend) + return 0; + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 93fffdbab4f0..c4552ade8d44 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -211,7 +211,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu) return 0; if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) || - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))) + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) || + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))) return 0; /* override pptable_id from driver parameter */ @@ -454,9 +455,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu) dev_info(adev->dev, "override pptable id %d\n", pptable_id); } else { pptable_id = smu->smu_table.boot_values.pp_table_id; - - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) - pptable_id = 6666; } /* force using vbios pptable in sriov mode */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 1d454485e0d9..29529328152d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -119,6 +119,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), }; static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { @@ -1753,6 +1754,15 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, return ret; } +static int smu_v13_0_0_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -1822,6 +1832,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_0_set_mp1_state, + .set_df_cstate = smu_v13_0_0_set_df_cstate, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c422bf8a09b1..c4102cfb734c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -121,6 +121,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { @@ -1587,6 +1588,16 @@ static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) return true; } + +static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + 
return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, @@ -1649,6 +1660,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_7_set_mp1_state, + .set_df_cstate = smu_v13_0_7_set_df_cstate, }; void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index e3142c8142b3..61c29ce74b03 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -435,7 +435,7 @@ int drmm_connector_init(struct drm_device *dev, if (drm_WARN_ON(dev, funcs && funcs->destroy)) return -EINVAL; - ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL); + ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 5fe209107246..20fe53815b20 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -176,6 +176,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) .src = &src, .dst = &dst, .pgmap_owner = drm->dev, + .fault_page = vmf->page, .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE, }; diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c index 89056a1aac7d..6bd0634e2d58 100644 --- a/drivers/gpu/drm/panfrost/panfrost_dump.c +++ b/drivers/gpu/drm/panfrost/panfrost_dump.c @@ -63,13 +63,13 @@ static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter, { struct panfrost_dump_object_header *hdr = iter->hdr; - hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC); - hdr->type = cpu_to_le32(type); - hdr->file_offset = cpu_to_le32(iter->data - iter->start); - hdr->file_size = cpu_to_le32(data_end - iter->data); + hdr->magic = PANFROSTDUMP_MAGIC; + hdr->type = type; + hdr->file_offset = iter->data - iter->start; + hdr->file_size = data_end - iter->data; iter->hdr++; - iter->data += le32_to_cpu(hdr->file_size); + iter->data += hdr->file_size; } static void @@ -93,8 +93,8 @@ panfrost_core_dump_registers(struct panfrost_dump_iterator *iter, reg = panfrost_dump_registers[i] + js_as_offset; - dumpreg->reg = cpu_to_le32(reg); - dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg)); + dumpreg->reg = reg; + dumpreg->value = gpu_read(pfdev, reg); } panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg); @@ -106,7 +106,7 @@ void panfrost_core_dump(struct panfrost_job *job) struct panfrost_dump_iterator iter; struct drm_gem_object *dbo; unsigned int n_obj, n_bomap_pages; - __le64 *bomap, *bomap_start; + u64 *bomap, *bomap_start; size_t file_size; u32 as_nr; int slot; @@ -177,11 +177,11 @@ void panfrost_core_dump(struct panfrost_job *job) * For now, we write the job identifier in the register dump header, * so that we can decode the entire dump later with pandecode */ - iter.hdr->reghdr.jc = cpu_to_le64(job->jc); - iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR); - iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR); - iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id); - iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count); + iter.hdr->reghdr.jc = job->jc; + iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR; + 
iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR; + iter.hdr->reghdr.gpu_id = pfdev->features.id; + iter.hdr->reghdr.nbos = job->bo_count; panfrost_core_dump_registers(&iter, pfdev, as_nr, slot); @@ -218,27 +218,27 @@ void panfrost_core_dump(struct panfrost_job *job) WARN_ON(!mapping->active); - iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start)); + iter.hdr->bomap.data[0] = bomap - bomap_start; for_each_sgtable_page(bo->base.sgt, &page_iter, 0) { struct page *page = sg_page_iter_page(&page_iter); if (!IS_ERR(page)) { - *bomap++ = cpu_to_le64(page_to_phys(page)); + *bomap++ = page_to_phys(page); } else { dev_err(pfdev->dev, "Panfrost Dump: wrong page\n"); - *bomap++ = ~cpu_to_le64(0); + *bomap++ = 0; } } - iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT); + iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT; vaddr = map.vaddr; memcpy(iter.data, vaddr, bo->base.base.size); drm_gem_shmem_vunmap(&bo->base, &map); - iter.hdr->bomap.valid = cpu_to_le32(1); + iter.hdr->bomap.valid = 1; dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data + bo->base.base.size); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 6b25b2f4f5a3..6137537aaea4 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -385,7 +385,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) } s_fence = to_drm_sched_fence(fence); - if (s_fence && s_fence->sched == sched) { + if (s_fence && s_fence->sched == sched && + !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) { /* * Fence is from the same scheduler, only need to wait for diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 8d86c250c2ec..2191e57f2297 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -438,7 +438,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) iosys_map_set_vaddr(&src, xrgb8888); drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); - buf = le32buf_to_cpu(test, buf, TEST_BUF_SIZE); + buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32)); KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index ffbbb454c9e8..2027063fdc30 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -490,6 +490,7 @@ module_init(vc4_drm_register); module_exit(vc4_drm_unregister); MODULE_ALIAS("platform:vc4-drm"); +MODULE_SOFTDEP("pre: snd-soc-hdmi-codec"); MODULE_DESCRIPTION("Broadcom VC4 DRM Driver"); MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 64f9feabf43e..596e311d6e58 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -3318,12 +3318,37 @@ static int vc4_hdmi_runtime_resume(struct device *dev) struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); unsigned long __maybe_unused flags; u32 __maybe_unused value; + unsigned long rate; int ret; + /* + * The HSM clock is in the HDMI power domain, so we need to set + * its frequency while the power domain is active so that it + * keeps its rate. 
+ */ + ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ); + if (ret) + return ret; + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); if (ret) return ret; + /* + * Whenever the RaspberryPi boots without an HDMI monitor + * plugged in, the firmware won't have initialized the HSM clock + * rate and it will be reported as 0. + * + * If we try to access a register of the controller in such a + * case, it will lead to a silent CPU stall. Let's make sure we + * prevent such a case. + */ + rate = clk_get_rate(vc4_hdmi->hsm_clock); + if (!rate) { + ret = -EINVAL; + goto err_disable_clk; + } + if (vc4_hdmi->variant->reset) vc4_hdmi->variant->reset(vc4_hdmi); @@ -3345,6 +3370,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev) #endif return 0; + +err_disable_clk: + clk_disable_unprepare(vc4_hdmi->hsm_clock); + return ret; } static void vc4_hdmi_put_ddc_device(void *ptr) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index da86565f04d4..dad953f66996 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -867,6 +867,7 @@ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 #define USB_DEVICE_ID_MADCATZ_RAT5 0x1705 #define USB_DEVICE_ID_MADCATZ_RAT9 0x1709 +#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713 #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 @@ -1142,6 +1143,7 @@ #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0 #define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6 +#define USB_DEVICE_ID_SONY_PS5_CONTROLLER_2 0x0df2 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 9dabd6323234..44763c0da444 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -985,7 +985,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev, struct device *dev = led_cdev->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); - u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED }; + static const u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED }; int led_nr = 0; int ret = 0; diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 664a624a363d..c9c968d4b36a 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -480,7 +480,7 @@ static int magicmouse_raw_event(struct hid_device *hdev, magicmouse_raw_event(hdev, report, data + 2, data[1]); magicmouse_raw_event(hdev, report, data + 2 + data[1], size - 2 - data[1]); - break; + return 0; default: return 0; } diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c index 40050eb85c0a..0b58763bfd30 100644 --- a/drivers/hid/hid-playstation.c +++ b/drivers/hid/hid-playstation.c @@ -46,6 +46,7 @@ struct ps_device { uint32_t fw_version; int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size); + void (*remove)(struct ps_device *dev); }; /* Calibration data for playstation motion sensors. */ @@ -107,6 +108,9 @@ struct ps_led_info { #define DS_STATUS_CHARGING GENMASK(7, 4) #define DS_STATUS_CHARGING_SHIFT 4 +/* Feature version from DualSense Firmware Info report. */ +#define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff)) + /* * Status of a DualSense touch point contact. 
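A condensed sketch of the vc4_hdmi runtime-resume flow above: pin the clock's floor while the power domain is active, enable it, and bail out before touching registers if the firmware never programmed a rate. The 120 MHz floor below is an assumed stand-in for the driver's HSM_MIN_CLOCK_FREQ:

```c
#include <linux/clk.h>
#include <linux/errno.h>

static int hdmi_runtime_resume_sketch(struct clk *hsm_clock)
{
	unsigned long rate;
	int ret;

	/* Raise the floor while the power domain is up so the rate sticks. */
	ret = clk_set_min_rate(hsm_clock, 120 * 1000 * 1000);
	if (ret)
		return ret;

	ret = clk_prepare_enable(hsm_clock);
	if (ret)
		return ret;

	/*
	 * A zero rate means the firmware never initialized the clock
	 * (no monitor at boot); register access would stall the CPU.
	 */
	rate = clk_get_rate(hsm_clock);
	if (!rate) {
		clk_disable_unprepare(hsm_clock);
		return -EINVAL;
	}

	return 0;
}
```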
* Contact IDs, with highest bit set are 'inactive' @@ -125,6 +129,7 @@ struct ps_led_info { #define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3) #define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4) #define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1) +#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2) #define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4) #define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1) @@ -142,6 +147,9 @@ struct dualsense { struct input_dev *sensors; struct input_dev *touchpad; + /* Update version is used as a feature/capability version. */ + uint16_t update_version; + /* Calibration data for accelerometer and gyroscope. */ struct ps_calibration_data accel_calib_data[3]; struct ps_calibration_data gyro_calib_data[3]; @@ -152,6 +160,7 @@ struct dualsense { uint32_t sensor_timestamp_us; /* Compatible rumble state */ + bool use_vibration_v2; bool update_rumble; uint8_t motor_left; uint8_t motor_right; @@ -174,6 +183,7 @@ struct dualsense { struct led_classdev player_leds[5]; struct work_struct output_worker; + bool output_worker_initialized; void *output_report_dmabuf; uint8_t output_seq; /* Sequence number for output report. */ }; @@ -299,6 +309,7 @@ static const struct {int x; int y; } ps_gamepad_hat_mapping[] = { {0, 0}, }; +static inline void dualsense_schedule_work(struct dualsense *ds); static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue); /* @@ -789,6 +800,7 @@ err_free: return ret; } + static int dualsense_get_firmware_info(struct dualsense *ds) { uint8_t *buf; @@ -808,6 +820,15 @@ static int dualsense_get_firmware_info(struct dualsense *ds) ds->base.hw_version = get_unaligned_le32(&buf[24]); ds->base.fw_version = get_unaligned_le32(&buf[28]); + /* Update version is some kind of feature version. It is distinct from + * the firmware version as there can be many different variations of a + * controller over time with the same physical shell, but with different + * PCBs and other internal changes. The update version (internal name) is + * used as a means to detect what features are available and change behavior. + * Note: the version is different between DualSense and DualSense Edge. + */ + ds->update_version = get_unaligned_le16(&buf[44]); + err_free: kfree(buf); return ret; @@ -878,7 +899,7 @@ static int dualsense_player_led_set_brightness(struct led_classdev *led, enum le ds->update_player_leds = true; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); return 0; } @@ -922,6 +943,16 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_ } } +static inline void dualsense_schedule_work(struct dualsense *ds) +{ + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); + if (ds->output_worker_initialized) + schedule_work(&ds->output_worker); + spin_unlock_irqrestore(&ds->base.lock, flags); +} + /* * Helper function to send DualSense output reports. Applies a CRC at the end of a report * for Bluetooth reports. @@ -960,7 +991,10 @@ static void dualsense_output_worker(struct work_struct *work) if (ds->update_rumble) { /* Select classic rumble style haptics and enable it. 
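The DS_FEATURE_VERSION() macro added above packs major.minor into one 16-bit value so firmware capability levels compare as plain integers; the update_version field parsed out of the firmware-info report feeds it, and the 2.21 comparison appears further down in dualsense_create(). A standalone sketch of the same packing:

```c
#include <linux/types.h>

/* Major in the high byte, minor in the low byte: 2.21 packs to 0x0215. */
#define FEATURE_VERSION(major, minor) \
	((((major) & 0xff) << 8) | ((minor) & 0xff))

static bool supports_vibration_v2(u16 update_version)
{
	/* Firmware at or above 2.21 exposes the v2 vibration mode. */
	return update_version >= FEATURE_VERSION(2, 21);
}
```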
*/ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT; - common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; + if (ds->use_vibration_v2) + common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2; + else + common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; common->motor_left = ds->motor_left; common->motor_right = ds->motor_right; ds->update_rumble = false; @@ -1082,7 +1116,7 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r spin_unlock_irqrestore(&ps_dev->lock, flags); /* Schedule updating of microphone state at hardware level. */ - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } ds->last_btn_mic_state = btn_mic_state; @@ -1197,10 +1231,22 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef ds->motor_right = effect->u.rumble.weak_magnitude / 256; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); return 0; } +static void dualsense_remove(struct ps_device *ps_dev) +{ + struct dualsense *ds = container_of(ps_dev, struct dualsense, base); + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); + ds->output_worker_initialized = false; + spin_unlock_irqrestore(&ds->base.lock, flags); + + cancel_work_sync(&ds->output_worker); +} + static int dualsense_reset_leds(struct dualsense *ds) { struct dualsense_output_report report; @@ -1237,7 +1283,7 @@ static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t gr ds->lightbar_blue = blue; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } static void dualsense_set_player_leds(struct dualsense *ds) @@ -1260,7 +1306,7 @@ static void dualsense_set_player_leds(struct dualsense *ds) ds->update_player_leds = true; ds->player_leds_state = player_ids[player_id]; - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } static struct ps_device *dualsense_create(struct hid_device *hdev) @@ -1299,7 +1345,9 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) ps_dev->battery_capacity = 100; /* initial value until parse_report. */ ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; ps_dev->parse_report = dualsense_parse_report; + ps_dev->remove = dualsense_remove; INIT_WORK(&ds->output_worker, dualsense_output_worker); + ds->output_worker_initialized = true; hid_set_drvdata(hdev, ds); max_output_report_size = sizeof(struct dualsense_output_report_bt); @@ -1320,6 +1368,21 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) return ERR_PTR(ret); } + /* Original DualSense firmware simulated classic controller rumble through + * its new haptics hardware. It felt different from classic rumble users + * were used to. Since then new firmwares were introduced to change behavior + * and make this new 'v2' behavior default on PlayStation and other platforms. + * The original DualSense requires a new enough firmware as bundled with PS5 + * software released in 2021. DualSense edge supports it out of the box. + * Both devices also support the old mode, but it is not really used. + */ + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + /* Feature version 2.21 introduced new vibration method. 
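dualsense_schedule_work() (earlier in this file) and dualsense_remove() above together form a shutdown-safe work pattern: a lock-protected flag stops new schedule_work() calls, after which cancel_work_sync() can wait for the last run with no re-arm race. Reduced to its essentials:

```c
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct dev_state {
	spinlock_t lock;
	bool worker_initialized;
	struct work_struct output_worker;
};

static void dev_schedule_work(struct dev_state *ds)
{
	unsigned long flags;

	spin_lock_irqsave(&ds->lock, flags);
	if (ds->worker_initialized)
		schedule_work(&ds->output_worker);
	spin_unlock_irqrestore(&ds->lock, flags);
}

static void dev_remove(struct dev_state *ds)
{
	unsigned long flags;

	spin_lock_irqsave(&ds->lock, flags);
	ds->worker_initialized = false;	/* no new scheduling after this */
	spin_unlock_irqrestore(&ds->lock, flags);

	/* Safe: nothing can re-queue the work behind our back now. */
	cancel_work_sync(&ds->output_worker);
}
```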
*/ + ds->use_vibration_v2 = ds->update_version >= DS_FEATURE_VERSION(2, 21); + } else if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { + ds->use_vibration_v2 = true; + } + ret = ps_devices_list_add(ps_dev); if (ret) return ERR_PTR(ret); @@ -1436,7 +1499,8 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id) goto err_stop; } - if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER || + hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { dev = dualsense_create(hdev); if (IS_ERR(dev)) { hid_err(hdev, "Failed to create dualsense.\n"); @@ -1461,6 +1525,9 @@ static void ps_remove(struct hid_device *hdev) ps_devices_list_remove(dev); ps_device_release_player_id(dev); + if (dev->remove) + dev->remove(dev); + hid_hw_close(hdev); hid_hw_stop(hdev); } @@ -1468,6 +1535,8 @@ static void ps_remove(struct hid_device *hdev) static const struct hid_device_id ps_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) }, { } }; MODULE_DEVICE_TABLE(hid, ps_devices); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 70f602c64fd1..50e1c717fc0a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -620,6 +620,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) }, #endif #if IS_ENABLED(CONFIG_HID_SAMSUNG) { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index c7bf14c01960..b84e975977c4 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = { .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7), .driver_data = SAITEK_RELEASE_MODE_MMO7 }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7), + .driver_data = SAITEK_RELEASE_MODE_MMO7 }, { } }; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 65856e401949..d3b39d0416fa 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2330,7 +2330,8 @@ static void amd_iommu_get_resv_regions(struct device *dev, type = IOMMU_RESV_RESERVED; region = iommu_alloc_resv_region(entry->address_start, - length, prot, type); + length, prot, type, + GFP_KERNEL); if (!region) { dev_err(dev, "Out of memory allocating dm-regions\n"); return; @@ -2340,14 +2341,14 @@ static void amd_iommu_get_resv_regions(struct device *dev, region = iommu_alloc_resv_region(MSI_RANGE_START, MSI_RANGE_END - MSI_RANGE_START + 1, - 0, IOMMU_RESV_MSI); + 0, IOMMU_RESV_MSI, GFP_KERNEL); if (!region) return; list_add_tail(®ion->list, head); region = iommu_alloc_resv_region(HT_RANGE_START, HT_RANGE_END - HT_RANGE_START + 1, - 0, IOMMU_RESV_RESERVED); + 0, IOMMU_RESV_RESERVED, GFP_KERNEL); if (!region) return; list_add_tail(®ion->list, head); diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 
4526575b999e..4f4a323be0d0 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -758,7 +758,7 @@ static void apple_dart_get_resv_regions(struct device *dev, region = iommu_alloc_resv_region(DOORBELL_ADDR, PAGE_SIZE, prot, - IOMMU_RESV_MSI); + IOMMU_RESV_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index ba47c73f5b8c..6d5df91c5c46 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2757,7 +2757,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 6c1114a4d6cc..30dab1418e3f 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1534,7 +1534,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index a8b36c3fddf1..48cdcd0a5cf3 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2410,6 +2410,7 @@ static int __init si_domain_init(int hw) if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { domain_exit(si_domain); + si_domain = NULL; return -EFAULT; } @@ -3052,6 +3053,10 @@ free_iommu: disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } + if (si_domain) { + domain_exit(si_domain); + si_domain = NULL; + } return ret; } @@ -4534,7 +4539,7 @@ static void intel_iommu_get_resv_regions(struct device *device, struct device *i_dev; int i; - down_read(&dmar_global_lock); + rcu_read_lock(); for_each_rmrr_units(rmrr) { for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, i_dev) { @@ -4552,14 +4557,15 @@ static void intel_iommu_get_resv_regions(struct device *device, IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT; resv = iommu_alloc_resv_region(rmrr->base_address, - length, prot, type); + length, prot, type, + GFP_ATOMIC); if (!resv) break; list_add_tail(&resv->list, head); } } - up_read(&dmar_global_lock); + rcu_read_unlock(); #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA if (dev_is_pci(device)) { @@ -4567,7 +4573,8 @@ static void intel_iommu_get_resv_regions(struct device *device, if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { reg = iommu_alloc_resv_region(0, 1UL << 24, prot, - IOMMU_RESV_DIRECT_RELAXABLE); + IOMMU_RESV_DIRECT_RELAXABLE, + GFP_KERNEL); if (reg) list_add_tail(®->list, head); } @@ -4576,7 +4583,7 @@ static void intel_iommu_get_resv_regions(struct device *device, reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, - 0, IOMMU_RESV_MSI); + 0, IOMMU_RESV_MSI, GFP_KERNEL); if (!reg) return; list_add_tail(®->list, head); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 4893c2429ca5..65a3b3d886dc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -504,7 +504,7 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new, LIST_HEAD(stack); nr = iommu_alloc_resv_region(new->start, new->length, - new->prot, new->type); + new->prot, new->type, GFP_KERNEL); if (!nr) return 
-ENOMEM; @@ -2579,11 +2579,12 @@ EXPORT_SYMBOL(iommu_put_resv_regions); struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, - enum iommu_resv_type type) + enum iommu_resv_type type, + gfp_t gfp) { struct iommu_resv_region *region; - region = kzalloc(sizeof(*region), GFP_KERNEL); + region = kzalloc(sizeof(*region), gfp); if (!region) return NULL; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 5a4e00e4bbbc..2ab2ecfe01f8 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -917,7 +917,8 @@ static void mtk_iommu_get_resv_regions(struct device *dev, continue; region = iommu_alloc_resv_region(resv->iova_base, resv->size, - prot, IOMMU_RESV_RESERVED); + prot, IOMMU_RESV_RESERVED, + GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index b7c22802f57c..8b1b5c270e50 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -490,11 +490,13 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, fallthrough; case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: region = iommu_alloc_resv_region(start, size, 0, - IOMMU_RESV_RESERVED); + IOMMU_RESV_RESERVED, + GFP_KERNEL); break; case VIRTIO_IOMMU_RESV_MEM_T_MSI: region = iommu_alloc_resv_region(start, size, prot, - IOMMU_RESV_MSI); + IOMMU_RESV_MSI, + GFP_KERNEL); break; } if (!region) @@ -909,7 +911,8 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head) */ if (!msi) { msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, + GFP_KERNEL); if (!msi) return; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 09c7ed2650ca..9c5ef818ca36 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b) { BUG_ON(b->hold_count); - if (!b->state) /* fast case */ + /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */ + if (!smp_load_acquire(&b->state)) /* fast case */ return; wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); @@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) BUG_ON(test_bit(B_DIRTY, &b->state)); if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && - unlikely(test_bit(B_READING, &b->state))) + unlikely(test_bit_acquire(B_READING, &b->state))) continue; if (!b->hold_count) { @@ -1058,7 +1059,7 @@ found_buffer: * If the user called both dm_bufio_prefetch and dm_bufio_get on * the same buffer, it would deadlock if we waited. */ - if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) + if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) return NULL; b->hold_count++; @@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b) * invalid buffer. 
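The iommu_alloc_resv_region() signature change above exists because one caller now allocates inside an RCU read-side critical section: the Intel RMRR walk switched from dmar_global_lock to rcu_read_lock(), where a sleeping GFP_KERNEL allocation is illegal, so that path passes GFP_ATOMIC while every other call site keeps GFP_KERNEL. A minimal illustration of the rule:

```c
#include <linux/gfp.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void *alloc_in_rcu_section(size_t size)
{
	void *p;

	rcu_read_lock();
	/* GFP_KERNEL here could sleep inside the RCU section -> bug. */
	p = kzalloc(size, GFP_ATOMIC);
	rcu_read_unlock();

	return p;
}
```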
*/ if ((b->read_error || b->write_error) && - !test_bit(B_READING, &b->state) && + !test_bit_acquire(B_READING, &b->state) && !test_bit(B_WRITING, &b->state) && !test_bit(B_DIRTY, &b->state)) { __unlink_buffer(b); @@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move); static void forget_buffer_locked(struct dm_buffer *b) { - if (likely(!b->hold_count) && likely(!b->state)) { + if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) { __unlink_buffer(b); __free_buffer_wake(b); } @@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) { if (!(gfp & __GFP_FS) || (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { - if (test_bit(B_READING, &b->state) || + if (test_bit_acquire(B_READING, &b->state) || test_bit(B_WRITING, &b->state) || test_bit(B_DIRTY, &b->state)) return false; diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index c05fc3436cef..06eb31af626f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -166,7 +166,7 @@ struct dm_cache_policy_type { struct dm_cache_policy_type *real; /* - * Policies may store a hint for each each cache block. + * Policies may store a hint for each cache block. * Currently the size of this hint must be 0 or 4 bytes but we * expect to relax this in future. */ diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 811b0a5379d0..2f1cc66d2641 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone) reason = "max discard sectors smaller than a region"; if (reason) { - DMWARN("Destination device (%pd) %s: Disabling discard passdown.", + DMWARN("Destination device (%pg) %s: Disabling discard passdown.", dest_dev, reason); clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags); } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 98976aaa9db9..6b3f867d0b70 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, hc = __get_name_cell(new); if (hc) { - DMWARN("Unable to change %s on mapped device %s to one that " - "already exists: %s", - change_uuid ? "uuid" : "name", - param->name, new); + DMERR("Unable to change %s on mapped device %s to one that " + "already exists: %s", + change_uuid ? "uuid" : "name", + param->name, new); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); @@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, */ hc = __get_name_cell(param->name); if (!hc) { - DMWARN("Unable to rename non-existent device, %s to %s%s", - param->name, change_uuid ? "uuid " : "", new); + DMERR("Unable to rename non-existent device, %s to %s%s", + param->name, change_uuid ? "uuid " : "", new); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-ENXIO); @@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, * Does this device already have a uuid? 
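The dm-bufio hunks above pair an acquire on the reader side with the completion path's smp_mb__before_atomic(): seeing B_READING clear must also guarantee the buffer contents written by the I/O are visible. A stripped-down sketch of the pairing, with hypothetical names:

```c
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <asm/barrier.h>

static unsigned long state;
#define B_BUSY 0

static void completion_side(void)
{
	/* ... buffer contents written by the completed I/O ... */
	smp_mb__before_atomic();	/* order data writes before the clear */
	clear_bit(B_BUSY, &state);
}

static bool fast_path_idle(void)
{
	/* Pairs with smp_mb__before_atomic() above. */
	return !smp_load_acquire(&state);
}

static bool still_busy(void)
{
	/* Bit test with the same acquire guarantee. */
	return test_bit_acquire(B_BUSY, &state);
}
```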
*/ if (change_uuid && hc->uuid) { - DMWARN("Unable to change uuid of mapped device %s to %s " - "because uuid is already set to %s", - param->name, new, hc->uuid); + DMERR("Unable to change uuid of mapped device %s to %s " + "because uuid is already set to %s", + param->name, new, hc->uuid); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); @@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t static int check_name(const char *name) { if (strchr(name, '/')) { - DMWARN("invalid device name"); + DMERR("invalid device name"); return -EINVAL; } @@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src down_read(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { - DMWARN("device has been removed from the dev hash table."); + DMERR("device has been removed from the dev hash table."); goto out; } @@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si if (new_data < param->data || invalid_str(new_data, (void *) param + param_size) || !*new_data || strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { - DMWARN("Invalid new mapped device name or uuid string supplied."); + DMERR("Invalid new mapped device name or uuid string supplied."); return -EINVAL; } @@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa if (geostr < param->data || invalid_str(geostr, (void *) param + param_size)) { - DMWARN("Invalid geometry supplied."); + DMERR("Invalid geometry supplied."); goto out; } @@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa indata + 1, indata + 2, indata + 3, &dummy); if (x != 4) { - DMWARN("Unable to interpret geometry settings."); + DMERR("Unable to interpret geometry settings."); goto out; } if (indata[0] > 65535 || indata[1] > 255 || indata[2] > 255 || indata[3] > ULONG_MAX) { - DMWARN("Geometry exceeds range limits."); + DMERR("Geometry exceeds range limits."); goto out; } @@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table, char *target_params; if (!param->target_count) { - DMWARN("populate_table: no targets specified"); + DMERR("populate_table: no targets specified"); return -EINVAL; } @@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table, r = next_target(spec, next, end, &spec, &target_params); if (r) { - DMWARN("unable to find target"); + DMERR("unable to find target"); return r; } @@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table, (sector_t) spec->length, target_params); if (r) { - DMWARN("error adding target to table"); + DMERR("error adding target to table"); return r; } @@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si if (immutable_target_type && (immutable_target_type != dm_table_get_immutable_target_type(t)) && !dm_table_get_wildcard_target(t)) { - DMWARN("can't replace immutable target type %s", - immutable_target_type->name); + DMERR("can't replace immutable target type %s", + immutable_target_type->name); r = -EINVAL; goto err_unlock_md_type; } @@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { - DMWARN("unable to set up device queue for new table."); + DMERR("unable to set up device queue for new table."); goto err_unlock_md_type; } } else if 
(!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) { - DMWARN("can't change device type (old=%u vs new=%u) after initial table load.", - dm_get_md_type(md), dm_table_get_type(t)); + DMERR("can't change device type (old=%u vs new=%u) after initial table load.", + dm_get_md_type(md), dm_table_get_type(t)); r = -EINVAL; goto err_unlock_md_type; } @@ -1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si down_write(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { - DMWARN("device has been removed from the dev hash table."); + DMERR("device has been removed from the dev hash table."); up_write(&_hash_lock); r = -ENXIO; goto err_destroy_table; @@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para if (tmsg < (struct dm_target_msg *) param->data || invalid_str(tmsg->message, (void *) param + param_size)) { - DMWARN("Invalid target message parameters."); + DMERR("Invalid target message parameters."); r = -EINVAL; goto out; } r = dm_split_args(&argc, &argv, tmsg->message); if (r) { - DMWARN("Failed to split target message parameters"); + DMERR("Failed to split target message parameters"); goto out; } if (!argc) { - DMWARN("Empty message received."); + DMERR("Empty message received."); r = -EINVAL; goto out_argv; } @@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para ti = dm_table_find_target(table, tmsg->sector); if (!ti) { - DMWARN("Target message sector outside device."); + DMERR("Target message sector outside device."); r = -EINVAL; } else if (ti->type->message) r = ti->type->message(ti, argc, argv, result, maxlen); else { - DMWARN("Target type does not support messages"); + DMERR("Target type does not support messages"); r = -EINVAL; } @@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) if ((DM_VERSION_MAJOR != version[0]) || (DM_VERSION_MINOR < version[1])) { - DMWARN("ioctl interface mismatch: " - "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", - DM_VERSION_MAJOR, DM_VERSION_MINOR, - DM_VERSION_PATCHLEVEL, - version[0], version[1], version[2], cmd); + DMERR("ioctl interface mismatch: " + "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", + DM_VERSION_MAJOR, DM_VERSION_MINOR, + DM_VERSION_PATCHLEVEL, + version[0], version[1], version[2], cmd); r = -EINVAL; } @@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param) if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) { - DMWARN("name not supplied when creating device"); + DMERR("name not supplied when creating device"); return -EINVAL; } } else if (*param->uuid && *param->name) { - DMWARN("only supply one of name or uuid, cmd(%u)", cmd); + DMERR("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL; } @@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us fn = lookup_ioctl(cmd, &ioctl_flags); if (!fn) { - DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); + DMERR("dm_ctl_ioctl: unknown command 0x%x", command); return -ENOTTY; } @@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, (sector_t) spec_array[i]->length, target_params_array[i]); if (r) { - DMWARN("error adding target to table"); + DMERR("error adding target to table"); goto err_destroy_table; } } @@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { - DMWARN("unable to 
set up device queue for new table."); + DMERR("unable to set up device queue for new table."); goto err_destroy_table; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index c640be453313..54263679a7b1 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) * of the "sync" directive. * * With reshaping capability added, we must ensure that - * that the "sync" directive is disallowed during the reshape. + * the "sync" directive is disallowed during the reshape. */ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) continue; @@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) /* * Adjust data_offset and new_data_offset on all disk members of @rs - * for out of place reshaping if requested by contructor + * for out of place reshaping if requested by constructor * * We need free space at the beginning of each raid disk for forward * and at the end for backward reshapes which userspace has to provide diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 3001b10a3fbf..a41209a43506 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) dm_requeue_original_request(tio, true); break; default: - DMWARN("unimplemented target endio return value: %d", r); + DMCRIT("unimplemented target endio return value: %d", r); BUG(); } } @@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio) dm_kill_unmapped_request(rq, BLK_STS_IOERR); break; default: - DMWARN("unimplemented target map return value: %d", r); + DMCRIT("unimplemented target map return value: %d", r); BUG(); } diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 8326f9fe0e91..f105a71915ab 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, return 2; /* this wasn't a stats message */ if (r == -EINVAL) - DMWARN("Invalid parameters for message %s", argv[0]); + DMCRIT("Invalid parameters for message %s", argv[0]); return r; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index d8034ff0cb24..078da18bb86d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, return 0; if ((start >= dev_size) || (start + len > dev_size)) { - DMWARN("%s: %pg too small for target: " - "start=%llu, len=%llu, dev_size=%llu", - dm_device_name(ti->table->md), bdev, - (unsigned long long)start, - (unsigned long long)len, - (unsigned long long)dev_size); + DMERR("%s: %pg too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdev, + (unsigned long long)start, + (unsigned long long)len, + (unsigned long long)dev_size); return 1; } @@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, unsigned int zone_sectors = bdev_zone_sectors(bdev); if (start & (zone_sectors - 1)) { - DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)start, - zone_sectors, bdev); + DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)start, + zone_sectors, bdev); return 1; } @@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, * the sector range. 
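Context for the DMWARN → DMERR/DMCRIT churn through dm-ioctl, dm-rq and dm-stats: the device-mapper log macros are thin wrappers around the pr_*() levels, so these hunks are pure severity corrections, with DMCRIT reserved for messages printed immediately before a BUG(). Roughly, paraphrasing include/linux/device-mapper.h rather than quoting it:

```c
#include <linux/printk.h>

#define DM_MSG_PREFIX "example"	/* each dm module defines its own */
#define DM_FMT(fmt) "device-mapper: " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR(fmt, ...)  pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
```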
*/ if (len & (zone_sectors - 1)) { - DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)len, - zone_sectors, bdev); + DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)len, + zone_sectors, bdev); return 1; } } @@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, return 0; if (start & (logical_block_size_sectors - 1)) { - DMWARN("%s: start=%llu not aligned to h/w " - "logical block size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)start, - limits->logical_block_size, bdev); + DMERR("%s: start=%llu not aligned to h/w " + "logical block size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)start, + limits->logical_block_size, bdev); return 1; } if (len & (logical_block_size_sectors - 1)) { - DMWARN("%s: len=%llu not aligned to h/w " - "logical block size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)len, - limits->logical_block_size, bdev); + DMERR("%s: len=%llu not aligned to h/w " + "logical block size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)len, + limits->logical_block_size, bdev); return 1; } @@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d) } } if (!found) { - DMWARN("%s: device %s not in table devices list", - dm_device_name(ti->table->md), d->name); + DMERR("%s: device %s not in table devices list", + dm_device_name(ti->table->md), d->name); return; } if (refcount_dec_and_test(&dd->count)) { @@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t, } if (remaining) { - DMWARN("%s: table line %u (start sect %llu len %llu) " - "not aligned to h/w logical block size %u", - dm_device_name(t->md), i, - (unsigned long long) ti->begin, - (unsigned long long) ti->len, - limits->logical_block_size); + DMERR("%s: table line %u (start sect %llu len %llu) " + "not aligned to h/w logical block size %u", + dm_device_name(t->md), i, + (unsigned long long) ti->begin, + (unsigned long long) ti->len, + limits->logical_block_size); return -EINVAL; } @@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * struct dm_md_mempools *pools; if (unlikely(type == DM_TYPE_NONE)) { - DMWARN("no table type is set, can't allocate mempools"); + DMERR("no table type is set, can't allocate mempools"); return -EINVAL; } @@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk) * Get a disk whose integrity profile reflects the table's profile. * Returns NULL if integrity support was inconsistent or unavailable. */ -static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) +static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t) { struct list_head *devices = dm_table_get_devices(t); struct dm_dev_internal *dd = NULL; @@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t) * profile the new profile should not conflict. 
*/ if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { - DMWARN("%s: conflict with existing integrity profile: " - "%s profile mismatch", - dm_device_name(t->md), - template_disk->disk_name); + DMERR("%s: conflict with existing integrity profile: " + "%s profile mismatch", + dm_device_name(t->md), + template_disk->disk_name); return 1; } @@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t) if (t->md->queue && !blk_crypto_has_capabilities(profile, t->md->queue->crypto_profile)) { - DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); + DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); dm_destroy_crypto_profile(profile); return -EINVAL; } diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 8a00cc42e498..ccf5b852fbf7 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) /* WQ_UNBOUND greatly improves performance when running on ramdisk */ wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND; - if (v->use_tasklet) { - /* - * Allow verify_wq to preempt softirq since verification in - * tasklet will fall-back to using it for error handling - * (or if the bufio cache doesn't have required hashes). - */ - wq_flags |= WQ_HIGHPRI; - } + /* + * Using WQ_HIGHPRI improves throughput and completion latency by + * reducing wait times when reading from a dm-verity device. + * + * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI + * allows verify_wq to preempt softirq since verification in tasklet + * will fall-back to using it for error handling (or if the bufio cache + * doesn't have required hashes). 
+ */ + wq_flags |= WQ_HIGHPRI; v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus()); if (!v->verify_wq) { ti->error = "Cannot allocate workqueue"; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 60549b65c799..95a1ee3d314e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; if (geo->start > sz) { - DMWARN("Start sector is beyond the geometry limits."); + DMERR("Start sector is beyond the geometry limits."); return -EINVAL; } @@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio) /* The target will handle the io */ return; default: - DMWARN("unimplemented target endio return value: %d", r); + DMCRIT("unimplemented target endio return value: %d", r); BUG(); } } @@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone) dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); break; default: - DMWARN("unimplemented target map return value: %d", r); + DMCRIT("unimplemented target map return value: %d", r); BUG(); } } @@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor) md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { - DMWARN("unable to allocate device, out of memory."); + DMERR("unable to allocate device, out of memory."); return NULL; } @@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor) md->disk->minors = 1; md->disk->flags |= GENHD_FL_NO_PART; md->disk->fops = &dm_blk_dops; - md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index 5669c92c93f7..c5c3b4e92f28 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -137,27 +137,42 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) struct qca8k_mgmt_eth_data *mgmt_eth_data; struct qca8k_priv *priv = ds->priv; struct qca_mgmt_ethhdr *mgmt_ethhdr; + u32 command; u8 len, cmd; + int i; mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); mgmt_eth_data = &priv->mgmt_eth_data; - cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); - len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); + command = get_unaligned_le32(&mgmt_ethhdr->command); + cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command); + len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command); /* Make sure the seq match the requested packet */ - if (mgmt_ethhdr->seq == mgmt_eth_data->seq) + if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq) mgmt_eth_data->ack = true; if (cmd == MDIO_READ) { - mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; + u32 *val = mgmt_eth_data->data; + + *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data); /* Get the rest of the 12 byte of data. * The read/write function will extract the requested data. 
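The dm-verity hunk above promotes WQ_HIGHPRI from a "try_verify_in_tasklet"-only flag to the default, on the grounds that it lowers read-completion latency generally. The resulting allocation, isolated as a sketch with the flag rationale inline:

```c
#include <linux/cpumask.h>
#include <linux/workqueue.h>

static struct workqueue_struct *alloc_verify_wq(void)
{
	unsigned int flags = WQ_MEM_RECLAIM |	/* I/O path: may run under reclaim */
			     WQ_UNBOUND |	/* big win on ramdisk-backed devices */
			     WQ_HIGHPRI;	/* cut wait times; also lets the
						 * tasklet fallback preempt softirq */

	return alloc_workqueue("kverityd", flags, num_online_cpus());
}
```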
*/ - if (len > QCA_HDR_MGMT_DATA1_LEN) - memcpy(mgmt_eth_data->data + 1, skb->data, - QCA_HDR_MGMT_DATA2_LEN); + if (len > QCA_HDR_MGMT_DATA1_LEN) { + __le32 *data2 = (__le32 *)skb->data; + int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN, + len - QCA_HDR_MGMT_DATA1_LEN); + + val++; + + for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) { + *val = get_unaligned_le32(data2); + val++; + data2++; + } + } } complete(&mgmt_eth_data->rw_done); @@ -169,8 +184,10 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * struct qca_mgmt_ethhdr *mgmt_ethhdr; unsigned int real_len; struct sk_buff *skb; - u32 *data2; + __le32 *data2; + u32 command; u16 hdr; + int i; skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); if (!skb) @@ -199,20 +216,32 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); - mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, + command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); + command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); + command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); + command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, QCA_HDR_MGMT_CHECK_CODE_VAL); + put_unaligned_le32(command, &mgmt_ethhdr->command); + if (cmd == MDIO_WRITE) - mgmt_ethhdr->mdio_data = *val; + put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data); mgmt_ethhdr->hdr = htons(hdr); data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); - if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) - memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); + if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) { + int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN, + len - QCA_HDR_MGMT_DATA1_LEN); + + val++; + + for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) { + put_unaligned_le32(*val, data2); + data2++; + val++; + } + } return skb; } @@ -220,9 +249,11 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) { struct qca_mgmt_ethhdr *mgmt_ethhdr; + u32 seq; + seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; - mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); + put_unaligned_le32(seq, &mgmt_ethhdr->seq); } static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) @@ -1487,9 +1518,9 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk struct qca8k_priv *priv = ds->priv; const struct qca8k_mib_desc *mib; struct mib_ethhdr *mib_ethhdr; - int i, mib_len, offset = 0; - u64 *data; + __le32 *data2; u8 port; + int i; mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); mib_eth_data = &priv->mib_eth_data; @@ -1501,28 +1532,24 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk if (port != mib_eth_data->req_port) goto exit; - data = mib_eth_data->data; + data2 = (__le32 *)skb->data; for (i = 0; i < priv->info->mib_count; i++) { mib = &ar8327_mib[i]; /* First 3 mib are present in the skb head */ if (i < 3) { - data[i] = mib_ethhdr->data[i]; + mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i); continue; } - mib_len = sizeof(uint32_t); - /* Some mib are 64 bit wide */ if (mib->size == 
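The qca8k hunks above replace direct field assignment with get_unaligned_le32()/put_unaligned_le32(): header fields inside an skb carry a fixed wire byte order and no alignment guarantee, so both the access width and the byte order have to go through the unaligned helpers. A sketch with a hypothetical header, not the driver's real one:

```c
#include <asm/unaligned.h>
#include <linux/types.h>

struct hypothetical_hdr {
	__le32 command;
	__le32 seq;
};

static u32 read_command(const void *pkt)
{
	const struct hypothetical_hdr *hdr = pkt;

	/* Safe at any alignment; byteswaps on big-endian hosts. */
	return get_unaligned_le32(&hdr->command);
}

static void write_seq(void *pkt, u32 seq)
{
	struct hypothetical_hdr *hdr = pkt;

	put_unaligned_le32(seq, &hdr->seq);
}
```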
2) - mib_len = sizeof(uint64_t); - - /* Copy the mib value from packet to the */ - memcpy(data + i, skb->data + offset, mib_len); + mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2); + else + mib_eth_data->data[i] = get_unaligned_le32(data2); - /* Set the offset for the next mib */ - offset += mib_len; + data2 += mib->size; } exit: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index a36803e79e92..8a6f788f6294 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -613,6 +613,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack) { + bool rc = false; u32 datalen; u16 index; u8 *buf; @@ -632,20 +633,20 @@ static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack) if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) { NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error"); - goto err; + goto done; } if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) { NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error"); - goto err; + goto done; } - return true; + rc = true; -err: +done: kfree(buf); - return false; + return rc; } static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id, diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 00fafc0f8512..430eccea8e5e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner) hdev->cls_dev.release = hnae_release; (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id); ret = device_register(&hdev->cls_dev); - if (ret) + if (ret) { + put_device(&hdev->cls_dev); return ret; + } __module_get(THIS_MODULE); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7e75706f76db..87f36d1ce800 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2183,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev, err = i40e_setup_rx_descriptors(&rx_rings[i]); if (err) goto rx_unwind; - err = i40e_alloc_rx_bi(&rx_rings[i]); - if (err) - goto rx_unwind; /* now allocate the Rx buffers to make sure the OS * has enough memory, any failure here means abort diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2c07fa8ecfc8..b5dcd15ced36 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3566,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) if (ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - kfree(ring->rx_bi); ring->xsk_pool = i40e_xsk_pool(ring); if (ring->xsk_pool) { - ret = i40e_alloc_rx_bi_zc(ring); - if (ret) - return ret; ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); /* For AF_XDP ZC, we disallow packets to span on @@ -3589,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->queue_index); } else { - ret = i40e_alloc_rx_bi(ring); - if (ret) - return ret; ring->rx_buf_len = vsi->rx_buf_len; if (ring->vsi->type == I40E_VSI_MAIN) { ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -13296,6 +13289,14 @@ static int 
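The one-line hnae fix above follows the documented device_register() contract: the call initializes a kobject reference even when it fails, so the error path must drop that reference with put_device(), which runs the release callback, rather than freeing the structure directly. The pattern in isolation:

```c
#include <linux/device.h>

static int register_example(struct device *dev)
{
	int ret;

	ret = device_register(dev);
	if (ret) {
		/* Mandatory even on failure: drops the kobject reference. */
		put_device(dev);
		return ret;
	}

	return 0;
}
```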
i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, i40e_reset_and_rebuild(pf, true, true); } + if (!i40e_enabled_xdp_vsi(vsi) && prog) { + if (i40e_realloc_rx_bi_zc(vsi, true)) + return -ENOMEM; + } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { + if (i40e_realloc_rx_bi_zc(vsi, false)) + return -ENOMEM; + } + for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); @@ -13528,6 +13529,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) i40e_queue_pair_disable_irq(vsi, queue_pair); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 69e67eb6aea7..b97c95f89fa0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1457,14 +1457,6 @@ err: return -ENOMEM; } -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; - - rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi ? 0 : -ENOMEM; -} - static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); @@ -1593,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; + rx_ring->rx_bi = + kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); + if (!rx_ring->rx_bi) + return -ENOMEM; + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 41f86e9535a0..768290dc6f48 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -469,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 6d4009e0cbd6..cd7b52fb6b46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -10,14 +10,6 @@ #include "i40e_txrx_common.h" #include "i40e_xsk.h" -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count; - - rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi_zc ? 0 : -ENOMEM; -} - void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi_zc, 0, @@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) } /** + * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer + * @rx_ring: Current rx ring + * @pool_present: is pool for XSK present + * + * Try allocating memory and return ENOMEM, if failed to allocate. + * If allocation was successful, substitute buffer with allocated one. + * Returns 0 on success, negative on failure + */ +static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present) +{ + size_t elem_size = pool_present ? 
sizeof(*rx_ring->rx_bi_zc) : + sizeof(*rx_ring->rx_bi); + void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); + + if (!sw_ring) + return -ENOMEM; + + if (pool_present) { + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + rx_ring->rx_bi_zc = sw_ring; + } else { + kfree(rx_ring->rx_bi_zc); + rx_ring->rx_bi_zc = NULL; + rx_ring->rx_bi = sw_ring; + } + return 0; +} + +/** + * i40e_realloc_rx_bi_zc - reallocate rx SW rings + * @vsi: Current VSI + * @zc: is zero copy set + * + * Reallocate buffer for rx_rings that might be used by XSK. + * XDP requires more memory, than rx_buf provides. + * Returns 0 on success, negative on failure + */ +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc) +{ + struct i40e_ring *rx_ring; + unsigned long q; + + for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) { + rx_ring = vsi->rx_rings[q]; + if (i40e_realloc_rx_xdp_bi(rx_ring, zc)) + return -ENOMEM; + } + return 0; +} + +/** * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a * certain ring/qid * @vsi: Current VSI @@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi, if (err) return err; + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); + if (err) + return err; + err = i40e_queue_pair_enable(vsi, qid); if (err) return err; @@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR); if (if_running) { + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false); + if (err) + return err; err = i40e_queue_pair_enable(vsi, qid); if (err) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index bb962987f300..821df248f8be 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring); int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring); +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc); void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring); #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 9809f551fc2e..9ec5f38d38a8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -815,6 +815,7 @@ free_flowid: cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, txsc->hw_flow_id, false); fail: + kfree(txsc); return ERR_PTR(ret); } @@ -870,6 +871,7 @@ free_flowid: cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, rxsc->hw_flow_id, false); fail: + kfree(rxsc); return ERR_PTR(ret); } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 4fba7cb0144b..7cd381530aa4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -4060,19 +4060,23 @@ static int mtk_probe(struct platform_device *pdev) eth->irq[i] = platform_get_irq(pdev, i); if (eth->irq[i] < 0) { dev_err(&pdev->dev, "no IRQ%d resource found\n", i); - return -ENXIO; + err = -ENXIO; + goto err_wed_exit; } } for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { eth->clks[i] = devm_clk_get(eth->dev, mtk_clks_source_name[i]); if (IS_ERR(eth->clks[i])) { - if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) - return 
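i40e_realloc_rx_xdp_bi() above is an allocate-then-swap: the replacement array is allocated before anything is freed, so an allocation failure leaves the ring's existing state untouched, and only the variant no longer in use is released. The core of it, with a simplified ring struct standing in for struct i40e_ring:

```c
#include <linux/errno.h>
#include <linux/slab.h>

struct ring {
	int count;
	void *bi;	/* normal buffer-info array */
	void *bi_zc;	/* XSK zero-copy variant */
};

static int realloc_sw_ring(struct ring *r, size_t elem_size, bool zero_copy)
{
	void *sw_ring = kcalloc(r->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;	/* old ring left intact on failure */

	if (zero_copy) {
		kfree(r->bi);
		r->bi = NULL;
		r->bi_zc = sw_ring;
	} else {
		kfree(r->bi_zc);
		r->bi_zc = NULL;
		r->bi = sw_ring;
	}
	return 0;
}
```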
-EPROBE_DEFER; + if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_wed_exit; + } if (eth->soc->required_clks & BIT(i)) { dev_err(&pdev->dev, "clock %s not found\n", mtk_clks_source_name[i]); - return -EINVAL; + err = -EINVAL; + goto err_wed_exit; } eth->clks[i] = NULL; } @@ -4083,7 +4087,7 @@ static int mtk_probe(struct platform_device *pdev) err = mtk_hw_init(eth); if (err) - return err; + goto err_wed_exit; eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); @@ -4179,6 +4183,8 @@ err_free_dev: mtk_free_dev(eth); err_deinit_hw: mtk_hw_deinit(eth); +err_wed_exit: + mtk_wed_exit(); return err; } @@ -4198,6 +4204,7 @@ static int mtk_remove(struct platform_device *pdev) phylink_disconnect_phy(mac->phylink); } + mtk_wed_exit(); mtk_hw_deinit(eth); netif_napi_del(ð->tx_napi); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index ae00e572390d..2d8ca99f2467 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -397,12 +397,6 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, return 0; } -static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) -{ - return !(entry->ib1 & MTK_FOE_IB1_STATIC) && - FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; -} - static bool mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry, struct mtk_foe_entry *data) diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 099b6e0df619..65e01bf4b4d2 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -1072,16 +1072,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, pdev = of_find_device_by_node(np); if (!pdev) - return; + goto err_of_node_put; get_device(&pdev->dev); irq = platform_get_irq(pdev, 0); if (irq < 0) - return; + goto err_put_device; regs = syscon_regmap_lookup_by_phandle(np, NULL); if (IS_ERR(regs)) - return; + goto err_put_device; rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); @@ -1124,8 +1124,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, hw_list[index] = hw; + mutex_unlock(&hw_lock); + + return; + unlock: mutex_unlock(&hw_lock); +err_put_device: + put_device(&pdev->dev); +err_of_node_put: + of_node_put(np); } void mtk_wed_exit(void) @@ -1146,6 +1154,7 @@ void mtk_wed_exit(void) hw_list[i] = NULL; debugfs_remove(hw->debugfs_dir); put_device(hw->dev); + of_node_put(hw->node); kfree(hw); } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 5d58fd99be3c..19d4848df17d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2817,11 +2817,15 @@ err_out: * than the full array, but leave the qcq shells in place */ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { - lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->txqcqs[i]); + if (lif->txqcqs && lif->txqcqs[i]) { + lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->txqcqs[i]); + } - lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->rxqcqs[i]); + if (lif->rxqcqs && lif->rxqcqs[i]) { + lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->rxqcqs[i]); + } } if (err) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index d1e1aa19a68e..7022fb2005a2 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ 
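The mtk_wed fix above converts early returns into a reverse-order unwind so every reference taken on the way in (the OF node, the platform device) is dropped on every failure path. A skeleton of that shape; the real function is void and, on success, intentionally keeps its references until mtk_wed_exit():

```c
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int attach_wed_sketch(struct device_node *np)
{
	struct platform_device *pdev;
	int irq;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	get_device(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	return 0;	/* references kept deliberately until teardown */

err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
	return -ENODEV;
}
```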
b/drivers/net/ethernet/sfc/ef10.c @@ -3277,6 +3277,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) bool was_enabled = efx->port_enabled; int rc; +#ifdef CONFIG_SFC_SRIOV + /* If this function is a VF and we have access to the parent PF, + * then use the PF control path to attempt to change the VF MAC address. + */ + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { + struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac[ETH_ALEN]; + + /* net_dev->dev_addr can be zeroed by efx_net_stop in + * efx_ef10_sriov_set_vf_mac, so pass in a copy. + */ + ether_addr_copy(mac, efx->net_dev->dev_addr); + + rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac); + if (!rc) + return 0; + + netif_dbg(efx, drv, efx->net_dev, + "Updating VF mac via PF failed (%d), setting directly\n", + rc); + } +#endif + efx_device_detach_sync(efx); efx_net_stop(efx->net_dev); @@ -3297,40 +3321,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_net_open(efx->net_dev); efx_device_attach_if_not_resetting(efx); -#ifdef CONFIG_SFC_SRIOV - if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; - - if (rc == -EPERM) { - struct efx_nic *efx_pf; - - /* Switch to PF and change MAC address on vport */ - efx_pf = pci_get_drvdata(pci_dev_pf); - - rc = efx_ef10_sriov_set_vf_mac(efx_pf, - nic_data->vf_index, - efx->net_dev->dev_addr); - } else if (!rc) { - struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); - struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; - unsigned int i; - - /* MAC address successfully changed by VF (with MAC - * spoofing) so update the parent PF if possible. 
- */ - for (i = 0; i < efx_pf->vf_count; ++i) { - struct ef10_vf *vf = nic_data->vf + i; - - if (vf->efx == efx) { - ether_addr_copy(vf->mac, - efx->net_dev->dev_addr); - return 0; - } - } - } - } else -#endif if (rc == -EPERM) { netif_err(efx, drv, efx->net_dev, "Cannot change MAC address; use sfboot to enable" diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index be72e71da027..5f201a547e5b 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -162,9 +162,9 @@ struct efx_filter_spec { u32 priority:2; u32 flags:6; u32 dmaq_id:12; - u32 vport_id; u32 rss_context; - __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + u32 vport_id; + __be16 outer_vid; __be16 inner_vid; u8 loc_mac[ETH_ALEN]; u8 rem_mac[ETH_ALEN]; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 4826e6a7e4ce..9220afeddee8 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -660,17 +660,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left, (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) return false; - return memcmp(&left->outer_vid, &right->outer_vid, + return memcmp(&left->vport_id, &right->vport_id, sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; + offsetof(struct efx_filter_spec, vport_id)) == 0; } u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) { - BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, + BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3); + return jhash2((const u32 *)&spec->vport_id, (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, + offsetof(struct efx_filter_spec, vport_id)) / 4, 0); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 65c96773c6d2..8273e6a175c8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1214,6 +1214,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) if (priv->plat->tx_queues_to_use > 1) priv->phylink_config.mac_capabilities &= ~(MAC_10HD | MAC_100HD | MAC_1000HD); + priv->phylink_config.mac_managed_pm = true; phylink = phylink_create(&priv->phylink_config, fwnode, mode, &stmmac_phylink_mac_ops); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 91f10f746dff..1c16548415cd 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1328,7 +1328,7 @@ static int happy_meal_init(struct happy_meal *hp) void __iomem *erxregs = hp->erxregs; void __iomem *bregs = hp->bigmacregs; void __iomem *tregs = hp->tcvregs; - const char *bursts; + const char *bursts = "64"; u32 regtmp, rxcfg; /* If auto-negotiation timer is running, kill it. 
*/ diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 11f767a20444..eea777ec2541 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -20,6 +20,7 @@ #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include <linux/ucs2_string.h> +#include <linux/string.h> #include "hyperv_net.h" #include "netvsc_trace.h" @@ -335,9 +336,10 @@ static void rndis_filter_receive_response(struct net_device *ndev, if (resp->msg_len <= sizeof(struct rndis_message) + RNDIS_EXT_LEN) { memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id)); - memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id), + unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id), data + RNDIS_HEADER_SIZE + sizeof(*req_id), - resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id)); + resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id), + "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request"); if (request->request_msg.ndis_msg_type == RNDIS_MSG_QUERY && request->request_msg.msg. query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 8f8f73099de8..c5cfe8555199 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -361,7 +361,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, } spin_unlock(&port->bc_queue.lock); - schedule_work(&port->bc_work); + queue_work(system_unbound_wq, &port->bc_work); if (err) goto free_nskb; diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 8549e0e356c9..b60db8b6f477 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -254,8 +254,7 @@ static int dp83822_config_intr(struct phy_device *phydev) DP83822_EEE_ERROR_CHANGE_INT_EN); if (!dp83822->fx_enabled) - misr_status |= DP83822_MDI_XOVER_INT_EN | - DP83822_ANEG_ERR_INT_EN | + misr_status |= DP83822_ANEG_ERR_INT_EN | DP83822_WOL_PKT_INT_EN; err = phy_write(phydev, MII_DP83822_MISR2, misr_status); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 6939563d3b7c..417527f8bbf5 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -853,6 +853,14 @@ static int dp83867_config_init(struct phy_device *phydev) else val &= ~DP83867_SGMII_TYPE; phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val); + + /* This is a SW workaround for link instability if RX_CTRL is + * not strapped to mode 3 or 4 in HW. This is required for SGMII + * in addition to clearing bit 7, handled above. 
+ */ + if (dp83867->rxctrl_strap_quirk) + phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + BIT(8)); } val = phy_read(phydev, DP83867_CFG3); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 75464df191ef..6547b6cc6cbe 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1661,6 +1661,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, if (phy_interrupt_is_valid(phy)) phy_request_interrupt(phy); + if (pl->config->mac_managed_pm) + phy->mac_managed_pm = true; + return 0; } diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c index ff09a8cedf93..2397a903d8f5 100644 --- a/drivers/net/wwan/wwan_hwsim.c +++ b/drivers/net/wwan/wwan_hwsim.c @@ -311,7 +311,7 @@ err_unreg_dev: return ERR_PTR(err); err_free_dev: - kfree(dev); + put_device(&dev->dev); return ERR_PTR(err); } diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 5fc5ea196b40..ff8b083dc5c6 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -1039,6 +1039,8 @@ static void apple_nvme_reset_work(struct work_struct *work) dma_max_mapping_size(anv->dev) >> 9); anv->ctrl.max_segments = NVME_MAX_SEGS; + dma_set_max_seg_size(anv->dev, 0xffffffff); + /* * Enable NVMMU and linear submission queues. * While we could keep those disabled and pretend this is slightly diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 059737c1a2c1..dc4220600585 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3262,8 +3262,12 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl) return ret; if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { + /* + * Do not return errors unless we are in a controller reset; + * the controller works perfectly fine without hwmon. 
+ */ ret = nvme_hwmon_init(ctrl); - if (ret < 0) + if (ret == -EINTR) return ret; } @@ -4846,7 +4850,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, return 0; out_cleanup_admin_q: - blk_mq_destroy_queue(ctrl->fabrics_q); + blk_mq_destroy_queue(ctrl->admin_q); out_free_tagset: blk_mq_free_tag_set(ctrl->admin_tagset); return ret; diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 0a586d712920..9e6e56c20ec9 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -12,7 +12,7 @@ struct nvme_hwmon_data { struct nvme_ctrl *ctrl; - struct nvme_smart_log log; + struct nvme_smart_log *log; struct mutex read_lock; }; @@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) { return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, - NVME_CSI_NVM, &data->log, sizeof(data->log), 0); + NVME_CSI_NVM, data->log, sizeof(*data->log), 0); } static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct nvme_hwmon_data *data = dev_get_drvdata(dev); - struct nvme_smart_log *log = &data->log; + struct nvme_smart_log *log = data->log; int temp; int err; @@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || - (channel && data->log.temp_sensor[channel - 1])) { + (channel && data->log->temp_sensor[channel - 1])) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; @@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, break; case hwmon_temp_input: case hwmon_temp_label: - if (!channel || data->log.temp_sensor[channel - 1]) + if (!channel || data->log->temp_sensor[channel - 1]) return 0444; break; default: @@ -230,7 +230,13 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) - return 0; + return -ENOMEM; + + data->log = kzalloc(sizeof(*data->log), GFP_KERNEL); + if (!data->log) { + err = -ENOMEM; + goto err_free_data; + } data->ctrl = ctrl; mutex_init(&data->read_lock); @@ -238,8 +244,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) err = nvme_hwmon_get_smart_log(data); if (err) { dev_warn(dev, "Failed to read smart log (error %d)\n", err); - kfree(data); - return err; + goto err_free_log; } hwmon = hwmon_device_register_with_info(dev, "nvme", @@ -247,11 +252,17 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) NULL); if (IS_ERR(hwmon)) { dev_warn(dev, "Failed to instantiate hwmon device\n"); - kfree(data); - return PTR_ERR(hwmon); + err = PTR_ERR(hwmon); + goto err_free_log; } ctrl->hwmon_device = hwmon; return 0; + +err_free_log: + kfree(data->log); +err_free_data: + kfree(data); + return err; } void nvme_hwmon_exit(struct nvme_ctrl *ctrl) @@ -262,6 +273,7 @@ void nvme_hwmon_exit(struct nvme_ctrl *ctrl) hwmon_device_unregister(ctrl->hwmon_device); ctrl->hwmon_device = NULL; + kfree(data->log); kfree(data); } } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bcbef6bc5672..31e577b01257 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3511,6 +3511,16 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ + 
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index e34a2896fedb..9443ee1d4ae3 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1290,12 +1290,8 @@ static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item, static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item, const char *page, size_t cnt) { - struct nvmet_port *port = to_nvmet_port(item); u16 qid_max; - if (nvmet_is_port_enabled(port, __func__)) - return -EACCES; - if (sscanf(page, "%hu\n", &qid_max) != 1) return -EINVAL; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 14677145bbba..aecb5853f8da 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1176,7 +1176,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) * reset the keep alive timer when the controller is enabled. */ if (ctrl->kato) - mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); + mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); } static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b49c39569386..b535f1fd3010 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4812,7 +4812,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) rc = lpfc_vmid_res_alloc(phba, vport); if (rc) - goto out; + goto out_put_shost; /* Initialize all internally managed lists. 
*/ INIT_LIST_HEAD(&vport->fc_nodes); @@ -4830,16 +4830,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) - goto out_put_shost; + goto out_free_vmid; spin_lock_irq(&phba->port_list_lock); list_add_tail(&vport->listentry, &phba->port_list); spin_unlock_irq(&phba->port_list_lock); return vport; -out_put_shost: +out_free_vmid: kfree(vport->vmid); bitmap_free(vport->vmid_priority_range); +out_put_shost: scsi_host_put(shost); out: return NULL; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index c95177ca6ed2..cac7c902cf70 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -828,6 +828,14 @@ store_state_field(struct device *dev, struct device_attribute *attr, } mutex_lock(&sdev->state_mutex); + switch (sdev->sdev_state) { + case SDEV_RUNNING: + case SDEV_OFFLINE: + break; + default: + mutex_unlock(&sdev->state_mutex); + return -EINVAL; + } if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { ret = 0; } else { diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 3fe8a7edc252..c777a612d932 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -38,6 +38,9 @@ #include "watchdog_core.h" /* For watchdog_dev_register/... */ +#define CREATE_TRACE_POINTS +#include <trace/events/watchdog.h> + static DEFINE_IDA(watchdog_ida); static int stop_on_reboot = -1; @@ -163,6 +166,7 @@ static int watchdog_reboot_notifier(struct notifier_block *nb, int ret; ret = wdd->ops->stop(wdd); + trace_watchdog_stop(wdd, ret); if (ret) return NOTIFY_BAD; } diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 744b2ab75288..55574ed42504 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -47,6 +47,8 @@ #include "watchdog_core.h" #include "watchdog_pretimeout.h" +#include <trace/events/watchdog.h> + /* the dev_t structure to store the dynamically allocated watchdog devices */ static dev_t watchdog_devt; /* Reference to watchdog device behind /dev/watchdog */ @@ -157,10 +159,13 @@ static int __watchdog_ping(struct watchdog_device *wdd) wd_data->last_hw_keepalive = now; - if (wdd->ops->ping) + if (wdd->ops->ping) { err = wdd->ops->ping(wdd); /* ping the watchdog */ - else + trace_watchdog_ping(wdd, err); + } else { err = wdd->ops->start(wdd); /* restart watchdog */ + trace_watchdog_start(wdd, err); + } if (err == 0) watchdog_hrtimer_pretimeout_start(wdd); @@ -259,6 +264,7 @@ static int watchdog_start(struct watchdog_device *wdd) } } else { err = wdd->ops->start(wdd); + trace_watchdog_start(wdd, err); if (err == 0) { set_bit(WDOG_ACTIVE, &wdd->status); wd_data->last_keepalive = started_at; @@ -297,6 +303,7 @@ static int watchdog_stop(struct watchdog_device *wdd) if (wdd->ops->stop) { clear_bit(WDOG_HW_RUNNING, &wdd->status); err = wdd->ops->stop(wdd); + trace_watchdog_stop(wdd, err); } else { set_bit(WDOG_HW_RUNNING, &wdd->status); } @@ -369,6 +376,7 @@ static int watchdog_set_timeout(struct watchdog_device *wdd, if (wdd->ops->set_timeout) { err = wdd->ops->set_timeout(wdd, timeout); + trace_watchdog_set_timeout(wdd, timeout, err); } else { wdd->timeout = timeout; /* Disable pretimeout if it doesn't fit the new timeout */ diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c index 860f37c93af4..daa525df7bdc 100644 --- a/drivers/xen/grant-dma-ops.c +++ b/drivers/xen/grant-dma-ops.c @@ -31,12 +31,12 @@ static 
DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ); static inline dma_addr_t grant_to_dma(grant_ref_t grant) { - return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT); + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT); } static inline grant_ref_t dma_to_grant(dma_addr_t dma) { - return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT); + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT); } static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) @@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(size); + unsigned int i, n_pages = XEN_PFN_UP(size); unsigned long pfn; grant_ref_t grant; void *ret; @@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size, if (unlikely(data->broken)) return NULL; - ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp); + ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp); if (!ret) return NULL; pfn = virt_to_pfn(ret); if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) { - free_pages_exact(ret, n_pages * PAGE_SIZE); + free_pages_exact(ret, n_pages * XEN_PAGE_SIZE); return NULL; } @@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(size); + unsigned int i, n_pages = XEN_PFN_UP(size); grant_ref_t grant; data = find_xen_grant_dma_data(dev); @@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, gnttab_free_grant_reference_seq(grant, n_pages); - free_pages_exact(vaddr, n_pages * PAGE_SIZE); + free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE); } static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size, @@ -168,7 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(offset + size); + unsigned long dma_offset = xen_offset_in_page(offset), + pfn_offset = XEN_PFN_DOWN(offset); + unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size); grant_ref_t grant; dma_addr_t dma_handle; @@ -187,10 +189,11 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, for (i = 0; i < n_pages; i++) { gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, - xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE); + pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset), + dir == DMA_TO_DEVICE); } - dma_handle = grant_to_dma(grant) + offset; + dma_handle = grant_to_dma(grant) + dma_offset; return dma_handle; } @@ -200,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned long offset = dma_handle & (PAGE_SIZE - 1); - unsigned int i, n_pages = PFN_UP(offset + size); + unsigned long dma_offset = xen_offset_in_page(dma_handle); + unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size); grant_ref_t grant; if (WARN_ON(dir == DMA_NONE)) diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index dce3a16996b9..4ec18ceb2f21 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -138,6 +138,7 @@ struct share_check { u64 root_objectid; u64 inum; int share_count; + bool have_delayed_delete_refs; }; static inline int extent_is_shared(struct share_check *sc) @@ -820,16 +821,11 @@ 
static int add_delayed_refs(const struct btrfs_fs_info *fs_info, struct preftrees *preftrees, struct share_check *sc) { struct btrfs_delayed_ref_node *node; - struct btrfs_delayed_extent_op *extent_op = head->extent_op; struct btrfs_key key; - struct btrfs_key tmp_op_key; struct rb_node *n; int count; int ret = 0; - if (extent_op && extent_op->update_key) - btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key); spin_lock(&head->lock); for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { node = rb_entry(n, struct btrfs_delayed_ref_node, @@ -855,10 +851,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, case BTRFS_TREE_BLOCK_REF_KEY: { /* NORMAL INDIRECT METADATA backref */ struct btrfs_delayed_tree_ref *ref; + struct btrfs_key *key_ptr = NULL; + + if (head->extent_op && head->extent_op->update_key) { + btrfs_disk_key_to_cpu(&key, &head->extent_op->key); + key_ptr = &key; + } ref = btrfs_delayed_node_to_tree_ref(node); ret = add_indirect_ref(fs_info, preftrees, ref->root, - &tmp_op_key, ref->level + 1, + key_ptr, ref->level + 1, node->bytenr, count, sc, GFP_ATOMIC); break; @@ -884,13 +886,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, key.offset = ref->offset; /* - * Found a inum that doesn't match our known inum, we - * know it's shared. + * If we have a share check context and a reference for + * another inode, we can't exit immediately. This is + * because even if this is a BTRFS_ADD_DELAYED_REF + * reference, we may next find a BTRFS_DROP_DELAYED_REF + * which cancels out this ADD reference. + * + * If this is a DROP reference and there was no previous + * ADD reference, then we need to signal that when we + * process references from the extent tree (through + * add_inline_refs() and add_keyed_refs()), we should + * not exit early if we find a reference for another + * inode, because one of the delayed DROP references + * may cancel that reference in the extent tree. 
*/ - if (sc && sc->inum && ref->objectid != sc->inum) { - ret = BACKREF_FOUND_SHARED; - goto out; - } + if (sc && count < 0) + sc->have_delayed_delete_refs = true; ret = add_indirect_ref(fs_info, preftrees, ref->root, &key, 0, node->bytenr, count, sc, @@ -920,7 +931,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, } if (!ret) ret = extent_is_shared(sc); -out: + spin_unlock(&head->lock); return ret; } @@ -1023,7 +1034,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); - if (sc && sc->inum && key.objectid != sc->inum) { + if (sc && sc->inum && key.objectid != sc->inum && + !sc->have_delayed_delete_refs) { ret = BACKREF_FOUND_SHARED; break; } @@ -1033,6 +1045,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, ret = add_indirect_ref(fs_info, preftrees, root, &key, 0, bytenr, count, sc, GFP_NOFS); + break; } default: @@ -1122,7 +1135,8 @@ static int add_keyed_refs(struct btrfs_root *extent_root, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); - if (sc && sc->inum && key.objectid != sc->inum) { + if (sc && sc->inum && key.objectid != sc->inum && + !sc->have_delayed_delete_refs) { ret = BACKREF_FOUND_SHARED; break; } @@ -1522,6 +1536,9 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache { struct btrfs_backref_shared_cache_entry *entry; + if (!cache->use_cache) + return false; + if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL)) return false; @@ -1557,6 +1574,19 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache return false; *is_shared = entry->is_shared; + /* + * If the node at this level is shared, then all nodes below are also + * shared. Currently some of the nodes below may be marked as not shared + * because we have just switched from one leaf to another, and also + * switched other nodes above the leaf and below the current level, so mark + * them as shared. + */ + if (*is_shared) { + for (int i = 0; i < level; i++) { + cache->entries[i].is_shared = true; + cache->entries[i].gen = entry->gen; + } + } return true; } @@ -1573,6 +1603,9 @@ static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache, struct btrfs_backref_shared_cache_entry *entry; u64 gen; + if (!cache->use_cache) + return; + if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL)) return; @@ -1648,6 +1681,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, .root_objectid = root->root_key.objectid, .inum = inum, .share_count = 0, + .have_delayed_delete_refs = false, }; int level; @@ -1669,6 +1703,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, /* -1 means we are in the bytenr of the data extent. */ level = -1; ULIST_ITER_INIT(&uiter); + cache->use_cache = true; while (1) { bool is_shared; bool cached; @@ -1698,6 +1733,24 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, extent_gen > btrfs_root_last_snapshot(&root->root_item)) break; + /* + * If our data extent was not directly shared (without multiple + * reference items), then it might have a single reference item + * with a count > 1 for the same offset, which means there are 2 + * (or more) file extent items that point to the data extent - + * this happens when a file extent item needs to be split and + * then one item gets moved to another leaf due to a b+tree leaf + * split when inserting some item. 
In this case the file extent + * items may be located in different leaves and therefore some + * of the leaves may be referenced through shared subtrees while + * others are not. Since our extent buffer cache only works for + * a single path (by far the most common case and simpler to + * deal with), we can not use it if we have multiple leaves + * (which implies multiple paths). + */ + if (level == -1 && tmp->nnodes > 1) + cache->use_cache = false; + if (level >= 0) store_backref_shared_cache(cache, root, bytenr, level, false); @@ -1713,6 +1766,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, break; } shared.share_count = 0; + shared.have_delayed_delete_refs = false; cond_resched(); } diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 52ae6957b414..8e69584d538d 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -29,6 +29,7 @@ struct btrfs_backref_shared_cache { * a given data extent should never exceed the maximum b+tree height. */ struct btrfs_backref_shared_cache_entry entries[BTRFS_MAX_LEVEL]; + bool use_cache; }; typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root, diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 32c415cfbdfe..deebc8ddbd93 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -774,10 +774,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); out: - /* REVIEW */ if (wait && caching_ctl) ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); - /* wait_event(caching_ctl->wait, space_cache_v1_done(cache)); */ if (caching_ctl) btrfs_put_caching_control(caching_ctl); diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c index 618275af19c4..83cb0378096f 100644 --- a/fs/btrfs/extent-io-tree.c +++ b/fs/btrfs/extent-io-tree.c @@ -1641,16 +1641,17 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, int err; u64 failed_start; - while (1) { + err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, + cached_state, NULL, GFP_NOFS); + while (err == -EEXIST) { + if (failed_start != start) + clear_extent_bit(tree, start, failed_start - 1, + EXTENT_LOCKED, cached_state); + + wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, cached_state, NULL, GFP_NOFS); - if (err == -EEXIST) { - wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); - start = failed_start; - } else - break; - WARN_ON(start > end); } return err; } diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 4ef4167072b8..ec6e1752af2c 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -348,6 +348,7 @@ static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd) switch (sctx->proto) { case 1: return cmd <= BTRFS_SEND_C_MAX_V1; case 2: return cmd <= BTRFS_SEND_C_MAX_V2; + case 3: return cmd <= BTRFS_SEND_C_MAX_V3; default: return false; } } @@ -6469,7 +6470,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) if (ret < 0) goto out; } - if (sctx->cur_inode_needs_verity) { + + if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY) + && sctx->cur_inode_needs_verity) { ret = process_verity(sctx); if (ret < 0) goto out; diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h index 0a4537775e0c..f7585cfa7e52 100644 --- a/fs/btrfs/send.h +++ b/fs/btrfs/send.h @@ -10,7 +10,12 @@ #include <linux/types.h> #define BTRFS_SEND_STREAM_MAGIC "btrfs-stream" +/* Conditional support for the upcoming protocol version. 
*/ +#ifdef CONFIG_BTRFS_DEBUG +#define BTRFS_SEND_STREAM_VERSION 3 +#else #define BTRFS_SEND_STREAM_VERSION 2 +#endif /* * In send stream v1, no command is larger than 64K. In send stream v2, no limit diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c index fe88b67c863f..60399081046a 100644 --- a/fs/cifs/cached_dir.c +++ b/fs/cifs/cached_dir.c @@ -253,8 +253,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, dentry = dget(cifs_sb->root); else { dentry = path_to_dentry(cifs_sb, path); - if (IS_ERR(dentry)) + if (IS_ERR(dentry)) { + rc = -ENOENT; goto oshr_free; + } } cfid->dentry = dentry; cfid->tcon = tcon; @@ -338,6 +340,27 @@ smb2_close_cached_fid(struct kref *ref) free_cached_dir(cfid); } +void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon, + const char *name, struct cifs_sb_info *cifs_sb) +{ + struct cached_fid *cfid = NULL; + int rc; + + rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid); + if (rc) { + cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name); + return; + } + spin_lock(&cfid->cfids->cfid_list_lock); + if (cfid->has_lease) { + cfid->has_lease = false; + kref_put(&cfid->refcount, smb2_close_cached_fid); + } + spin_unlock(&cfid->cfids->cfid_list_lock); + close_cached_dir(cfid); +} + + void close_cached_dir(struct cached_fid *cfid) { kref_put(&cfid->refcount, smb2_close_cached_fid); @@ -378,22 +401,20 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon) { struct cached_fids *cfids = tcon->cfids; struct cached_fid *cfid, *q; - struct list_head entry; + LIST_HEAD(entry); - INIT_LIST_HEAD(&entry); spin_lock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { - list_del(&cfid->entry); - list_add(&cfid->entry, &entry); + list_move(&cfid->entry, &entry); cfids->num_entries--; cfid->is_open = false; + cfid->on_list = false; /* To prevent race with smb2_cached_lease_break() */ kref_get(&cfid->refcount); } spin_unlock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &entry, entry) { - cfid->on_list = false; list_del(&cfid->entry); cancel_work_sync(&cfid->lease_break); if (cfid->has_lease) { @@ -518,15 +539,13 @@ struct cached_fids *init_cached_dirs(void) void free_cached_dirs(struct cached_fids *cfids) { struct cached_fid *cfid, *q; - struct list_head entry; + LIST_HEAD(entry); - INIT_LIST_HEAD(&entry); spin_lock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { cfid->on_list = false; cfid->is_open = false; - list_del(&cfid->entry); - list_add(&cfid->entry, &entry); + list_move(&cfid->entry, &entry); } spin_unlock(&cfids->cfid_list_lock); diff --git a/fs/cifs/cached_dir.h b/fs/cifs/cached_dir.h index e536304ca2ce..2f4e764c9ca9 100644 --- a/fs/cifs/cached_dir.h +++ b/fs/cifs/cached_dir.h @@ -69,6 +69,10 @@ extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon, struct dentry *dentry, struct cached_fid **cfid); extern void close_cached_dir(struct cached_fid *cfid); +extern void drop_cached_dir_by_name(const unsigned int xid, + struct cifs_tcon *tcon, + const char *name, + struct cifs_sb_info *cifs_sb); extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb); extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon); extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index c6ac19223ddc..d0b9fec111aa 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -1302,8 +1302,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, ssize_t 
rc; struct cifsFileInfo *cfile = dst_file->private_data; - if (cfile->swapfile) - return -EOPNOTSUPP; + if (cfile->swapfile) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff, len, flags); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 5b4a7a32bdc5..388b745a978e 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ /* when changing internal version - update following two lines at same time */ -#define SMB3_PRODUCT_BUILD 39 -#define CIFS_VERSION "2.39" +#define SMB3_PRODUCT_BUILD 40 +#define CIFS_VERSION "2.40" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index a5c73c2af3a2..8b1c37158556 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -543,8 +543,10 @@ int cifs_create(struct user_namespace *mnt_userns, struct inode *inode, cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n", inode, direntry, direntry); - if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) - return -EIO; + if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) { + rc = -EIO; + goto out_free_xid; + } tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb)); rc = PTR_ERR(tlink); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index f6ffee514c34..5b3b308e115c 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1885,11 +1885,13 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) struct cifsFileInfo *cfile; __u32 type; - rc = -EACCES; xid = get_xid(); - if (!(fl->fl_flags & FL_FLOCK)) - return -ENOLCK; + if (!(fl->fl_flags & FL_FLOCK)) { + rc = -ENOLCK; + free_xid(xid); + return rc; + } cfile = (struct cifsFileInfo *)file->private_data; tcon = tlink_tcon(cfile->tlink); @@ -1908,8 +1910,9 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) * if no lock or unlock then nothing to do since we do not * know what it is */ + rc = -EOPNOTSUPP; free_xid(xid); - return -EOPNOTSUPP; + return rc; } rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock, diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 7cf96e581d24..9bde08d44617 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -368,8 +368,10 @@ cifs_get_file_info_unix(struct file *filp) if (cfile->symlink_target) { fattr.cf_symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); - if (!fattr.cf_symlink_target) - return -ENOMEM; + if (!fattr.cf_symlink_target) { + rc = -ENOMEM; + goto cifs_gfiunix_out; + } } rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->fid.netfid, &find_data); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 0435d1dfa9e1..92e4278ec35d 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -496,6 +496,7 @@ out: cifs_put_tcp_session(chan->server, 0); } + free_xid(xid); return rc; } diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index a6640e6ea58b..68e08c85fbb8 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -655,6 +655,7 @@ int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { + drop_cached_dir_by_name(xid, tcon, name, cifs_sb); return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, CREATE_NOT_FILE, ACL_NO_MODE, NULL, SMB2_OP_RMDIR, NULL, NULL, NULL); @@ -698,6 +699,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon, { struct cifsFileInfo *cfile; + drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb); cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile); 
return smb2_set_path_attr(xid, tcon, from_name, to_name, diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 17b25153cb68..4f53fa012936 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -530,6 +530,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, p = buf; spin_lock(&ses->iface_lock); + ses->iface_count = 0; /* * Go through iface_list and do kref_put to remove * any unused ifaces. ifaces in use will be removed @@ -651,9 +652,9 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, kref_put(&iface->refcount, release_iface); } else list_add_tail(&info->iface_head, &ses->iface_list); - spin_unlock(&ses->iface_lock); ses->iface_count++; + spin_unlock(&ses->iface_lock); ses->iface_last_update = jiffies; next_iface: nb_iface++; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index a2384509ea84..a5695748a89b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1341,14 +1341,13 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) static void SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data) { - int i; + struct kvec *iov = sess_data->iov; - /* zero the session data before freeing, as it might contain sensitive info (keys, etc) */ - for (i = 0; i < 2; i++) - if (sess_data->iov[i].iov_base) - memzero_explicit(sess_data->iov[i].iov_base, sess_data->iov[i].iov_len); + /* iov[1] is already freed by caller */ + if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base) + memzero_explicit(iov[0].iov_base, iov[0].iov_len); - free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base); + free_rsp_buf(sess_data->buf0_type, iov[0].iov_base); sess_data->buf0_type = CIFS_NO_BUFFER; } @@ -1531,7 +1530,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) &blob_length, ses, server, sess_data->nls_cp); if (rc) - goto out_err; + goto out; if (use_spnego) { /* BB eventually need to add this */ @@ -1578,7 +1577,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) } out: - memzero_explicit(ntlmssp_blob, blob_length); + kfree_sensitive(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); if (!rc) { sess_data->result = 0; @@ -1662,7 +1661,7 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) } #endif out: - memzero_explicit(ntlmssp_blob, blob_length); + kfree_sensitive(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); kfree_sensitive(ses->ntlmssp); ses->ntlmssp = NULL; diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 998cd26a1b3b..fe05bc51f9f2 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -590,14 +590,17 @@ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb, struct super_block *psb = erofs_pseudo_mnt->mnt_sb; mutex_lock(&erofs_domain_cookies_lock); + spin_lock(&psb->s_inode_list_lock); list_for_each_entry(inode, &psb->s_inodes, i_sb_list) { ctx = inode->i_private; if (!ctx || ctx->domain != domain || strcmp(ctx->name, name)) continue; igrab(inode); + spin_unlock(&psb->s_inode_list_lock); mutex_unlock(&erofs_domain_cookies_lock); return ctx; } + spin_unlock(&psb->s_inode_list_lock); ctx = erofs_fscache_domain_init_cookie(sb, name, need_inode); mutex_unlock(&erofs_domain_cookies_lock); return ctx; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 559380a535af..c7f24fc7efd5 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -813,15 +813,14 @@ retry: ++spiltted; if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) fe->pcl->multibases = true; - - if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && - !(map->m_flags & 
EROFS_MAP_PARTIAL_REF) && - fe->pcl->length == map->m_llen) - fe->pcl->partial = false; if (fe->pcl->length < offset + end - map->m_la) { fe->pcl->length = offset + end - map->m_la; fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK; } + if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && + !(map->m_flags & EROFS_MAP_PARTIAL_REF) && + fe->pcl->length == map->m_llen) + fe->pcl->partial = false; next_part: /* shorten the remaining extent to update progress */ map->m_llen = offset + cur - map->m_la; @@ -888,15 +887,13 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) { unsigned int pgnr; - struct page *oldpage; pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; DBG_BUGON(pgnr >= be->nr_pages); - oldpage = be->decompressed_pages[pgnr]; - be->decompressed_pages[pgnr] = bvec->page; - - if (!oldpage) + if (!be->decompressed_pages[pgnr]) { + be->decompressed_pages[pgnr] = bvec->page; return; + } } /* (cold path) one pcluster is requested multiple times */ diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index e7f04c4fbb81..d98c95212985 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -126,10 +126,10 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) } /* - * bit 31: I/O error occurred on this page - * bit 0 - 30: remaining parts to complete this page + * bit 30: I/O error occurred on this page + * bit 0 - 29: remaining parts to complete this page */ -#define Z_EROFS_PAGE_EIO (1 << 31) +#define Z_EROFS_PAGE_EIO (1 << 30) static inline void z_erofs_onlinepage_init(struct page *page) { diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 44c27ef39c43..0bb66927e3d0 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -57,8 +57,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize + vi->xattr_isize, 8); - kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), - EROFS_KMAP_ATOMIC); + kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP); if (IS_ERR(kaddr)) { err = PTR_ERR(kaddr); goto out_unlock; @@ -73,7 +72,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER; vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63); vi->z_tailextent_headlcn = 0; - goto unmap_done; + goto done; } vi->z_advise = le16_to_cpu(h->h_advise); vi->z_algorithmtype[0] = h->h_algorithmtype & 15; @@ -85,7 +84,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel", headnr + 1, vi->z_algorithmtype[headnr], vi->nid); err = -EOPNOTSUPP; - goto unmap_done; + goto out_put_metabuf; } vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); @@ -95,7 +94,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu", vi->nid); err = -EFSCORRUPTED; - goto unmap_done; + goto out_put_metabuf; } if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION && !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^ @@ -103,12 +102,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu", vi->nid); err = -EFSCORRUPTED; - goto unmap_done; + goto out_put_metabuf; } -unmap_done: - erofs_put_metabuf(&buf); - if (err) - goto out_unlock; if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) { struct erofs_map_blocks map = { @@ -127,7 +122,7 @@ 
unmap_done: err = -EFSCORRUPTED; } if (err < 0) - goto out_unlock; + goto out_put_metabuf; } if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER && @@ -141,11 +136,14 @@ unmap_done: EROFS_GET_BLOCKS_FINDTAIL); erofs_put_metabuf(&map.buf); if (err < 0) - goto out_unlock; + goto out_put_metabuf; } +done: /* paired with smp_mb() at the beginning of the function */ smp_mb(); set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); +out_put_metabuf: + erofs_put_metabuf(&buf); out_unlock: clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags); return err; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 6a29bcfc9390..dc74a947a440 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1458,12 +1458,14 @@ static __net_init int nfsd_init_net(struct net *net) goto out_drc_error; retval = nfsd_reply_cache_init(nn); if (retval) - goto out_drc_error; + goto out_cache_error; get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key)); seqlock_init(&nn->writeverf_lock); return 0; +out_cache_error: + nfsd4_leases_net_shutdown(nn); out_drc_error: nfsd_idmap_shutdown(net); out_idmap_error: diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index d73434200df9..8c52b6c9d31a 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -392,8 +392,8 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access) skip_pseudoflavor_check: /* Finally, check access permissions. */ error = nfsd_permission(rqstp, exp, dentry, access); - trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error); out: + trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error); if (error == nfserr_stale) nfsd_stats_fh_stale_inc(exp); return error; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 961d1cf54388..05f32989bad6 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -232,6 +232,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns, handle_t *handle = NULL; struct ocfs2_super *osb; struct ocfs2_dinode *dirfe; + struct ocfs2_dinode *fe = NULL; struct buffer_head *new_fe_bh = NULL; struct inode *inode = NULL; struct ocfs2_alloc_context *inode_ac = NULL; @@ -382,6 +383,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns, goto leave; } + fe = (struct ocfs2_dinode *) new_fe_bh->b_data; if (S_ISDIR(mode)) { status = ocfs2_fill_new_dir(osb, handle, dir, inode, new_fe_bh, data_ac, meta_ac); @@ -454,8 +456,11 @@ roll_back: leave: if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && fe) + ocfs2_set_links_count(fe, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) @@ -632,18 +637,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, return status; } - status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, + return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, parent_fe_bh, handle, inode_ac, fe_blkno, suballoc_loc, suballoc_bit); - if (status < 0) { - u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit); - int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode, - inode_ac->ac_bh, suballoc_bit, bg_blkno, 1); - if (tmp) - mlog_errno(tmp); - } - - return status; } static int ocfs2_mkdir(struct user_namespace *mnt_userns, @@ -2028,8 +2024,11 @@ bail: ocfs2_clusters_to_bytes(osb->sb, 1)); if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && fe) + ocfs2_set_links_count(fe, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 
8b4f3073f8f5..8a74cdcc9af0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -902,7 +902,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v) goto out_put_mm; hold_task_mempolicy(priv); - vma = mas_find(&mas, 0); + vma = mas_find(&mas, ULONG_MAX); if (unlikely(!vma)) goto empty_set; diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 599855c6a672..2ae4fd62e01c 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -32,6 +32,15 @@ #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) +/** + * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining + * + * Setting this flag on a scheduler fence prevents pipelining of jobs depending + * on this fence. In other words, we always insert a full CPU round trip before + * dependent jobs are pushed to the hw queue. + */ +#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS + struct drm_gem_object; struct drm_gpu_scheduler; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f2a9f2274c3b..528bd44b59e2 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -106,6 +106,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup *cgroup_get_from_path(const char *path); struct cgroup *cgroup_get_from_fd(int fd); +struct cgroup *cgroup_v1v2_get_from_fd(int fd); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h index 50be7cbd93a5..b1b5720d89a5 100644 --- a/include/linux/dsa/tag_qca.h +++ b/include/linux/dsa/tag_qca.h @@ -61,9 +61,9 @@ struct sk_buff; /* Special struct emulating a Ethernet header */ struct qca_mgmt_ethhdr { - u32 command; /* command bit 31:0 */ - u32 seq; /* seq 63:32 */ - u32 mdio_data; /* first 4byte mdio */ + __le32 command; /* command bit 31:0 */ + __le32 seq; /* seq 63:32 */ + __le32 mdio_data; /* first 4byte mdio */ __be16 hdr; /* qca hdr */ } __packed; @@ -73,7 +73,7 @@ enum mdio_cmd { }; struct mib_ethhdr { - u32 data[3]; /* first 3 mib counter */ + __le32 data[3]; /* first 3 mib counter */ __be16 hdr; /* qca hdr */ } __packed; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a325532aeab5..3c9da1f8979e 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -455,7 +455,7 @@ extern void iommu_set_default_translated(bool cmd_line); extern bool iommu_default_passthrough(void); extern struct iommu_resv_region * iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, - enum iommu_resv_type type); + enum iommu_resv_type type, gfp_t gfp); extern int iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a36edb0ec199..eddf8ee270e7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3663,8 +3663,9 @@ static inline bool netif_attr_test_online(unsigned long j, static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, unsigned int nr_bits) { - /* n is a prior cpu */ - cpu_max_bits_warn(n + 1, nr_bits); + /* -1 is a legal arg here. 
*/ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); if (srcp) return find_next_bit(srcp, nr_bits, n + 1); @@ -3685,8 +3686,9 @@ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, const unsigned long *src2p, unsigned int nr_bits) { - /* n is a prior cpu */ - cpu_max_bits_warn(n + 1, nr_bits); + /* -1 is a legal arg here. */ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); if (src1p && src2p) return find_next_and_bit(src1p, src2p, nr_bits, n + 1); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 664dd409feb9..3f01ac8017e0 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -122,6 +122,7 @@ enum phylink_op_type { * (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls") * @poll_fixed_state: if true, starts link_poll, * if MAC link is at %MLO_AN_FIXED mode. + * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM. * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. @@ -134,6 +135,7 @@ struct phylink_config { enum phylink_op_type type; bool legacy_pre_march2020; bool poll_fixed_state; + bool mac_managed_pm; bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 8f780170e2f8..3d08e67b3cfc 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -37,6 +37,7 @@ struct genl_info; * do additional, common, filtering and return an error * @post_doit: called after an operation's doit callback, it may * undo operations done by pre_doit, for example release locks + * @module: pointer to the owning module (set to THIS_MODULE) * @mcgrps: multicast groups used by this family * @n_mcgrps: number of multicast groups * @resv_start_op: first operation for which reserved fields of the header @@ -173,9 +174,9 @@ struct genl_ops { }; /** - * struct genl_info - info that is available during dumpit op call + * struct genl_dumpit_info - info that is available during dumpit op call * @family: generic netlink family - for internal genl code usage - * @ops: generic netlink ops - for internal genl code usage + * @op: generic netlink ops - for internal genl code usage * @attrs: netlink attributes */ struct genl_dumpit_info { @@ -354,6 +355,7 @@ int genlmsg_multicast_allns(const struct genl_family *family, /** * genlmsg_unicast - unicast a netlink message + * @net: network namespace to look up @portid in * @skb: netlink message as socket buffer * @portid: netlink portid of the destination socket */ @@ -373,7 +375,7 @@ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info) } /** - * gennlmsg_data - head of message payload + * genlmsg_data - head of message payload * @gnlh: genetlink message header */ static inline void *genlmsg_data(const struct genlmsghdr *gnlh) diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 473b0b0fa4ab..efc9085c6892 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -43,21 +43,20 @@ struct sock *reuseport_migrate_sock(struct sock *sk, extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); extern int reuseport_detach_prog(struct sock *sk); -static inline bool reuseport_has_conns(struct sock *sk, bool set) +static inline bool reuseport_has_conns(struct sock *sk) { struct sock_reuseport *reuse; bool ret = false; rcu_read_lock(); reuse = 
rcu_dereference(sk->sk_reuseport_cb); - if (reuse) { - if (set) - reuse->has_conns = 1; - ret = reuse->has_conns; - } + if (reuse && reuse->has_conns) + ret = true; rcu_read_unlock(); return ret; } +void reuseport_has_conns_set(struct sock *sk); + #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/trace/events/watchdog.h b/include/trace/events/watchdog.h new file mode 100644 index 000000000000..beb9bb3424c8 --- /dev/null +++ b/include/trace/events/watchdog.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM watchdog + +#if !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_WATCHDOG_H + +#include <linux/watchdog.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(watchdog_template, + + TP_PROTO(struct watchdog_device *wdd, int err), + + TP_ARGS(wdd, err), + + TP_STRUCT__entry( + __field(int, id) + __field(int, err) + ), + + TP_fast_assign( + __entry->id = wdd->id; + __entry->err = err; + ), + + TP_printk("watchdog%d err=%d", __entry->id, __entry->err) +); + +DEFINE_EVENT(watchdog_template, watchdog_start, + TP_PROTO(struct watchdog_device *wdd, int err), + TP_ARGS(wdd, err)); + +DEFINE_EVENT(watchdog_template, watchdog_ping, + TP_PROTO(struct watchdog_device *wdd, int err), + TP_ARGS(wdd, err)); + +DEFINE_EVENT(watchdog_template, watchdog_stop, + TP_PROTO(struct watchdog_device *wdd, int err), + TP_ARGS(wdd, err)); + +TRACE_EVENT(watchdog_set_timeout, + + TP_PROTO(struct watchdog_device *wdd, unsigned int timeout, int err), + + TP_ARGS(wdd, timeout, err), + + TP_STRUCT__entry( + __field(int, id) + __field(unsigned int, timeout) + __field(int, err) + ), + + TP_fast_assign( + __entry->id = wdd->id; + __entry->timeout = timeout; + __entry->err = err; + ), + + TP_printk("watchdog%d timeout=%u err=%d", __entry->id, __entry->timeout, __entry->err) +); + +#endif /* !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h index eac87310b348..6f93c915cc88 100644 --- a/include/uapi/drm/panfrost_drm.h +++ b/include/uapi/drm/panfrost_drm.h @@ -235,25 +235,29 @@ struct drm_panfrost_madvise { #define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1) #define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1) +/* + * This structure is in the native endianness of the dumping machine; tools can + * detect the endianness by looking at the value in 'magic'. 
+ */ struct panfrost_dump_object_header { - __le32 magic; - __le32 type; - __le32 file_size; - __le32 file_offset; + __u32 magic; + __u32 type; + __u32 file_size; + __u32 file_offset; union { - struct pan_reg_hdr { - __le64 jc; - __le32 gpu_id; - __le32 major; - __le32 minor; - __le64 nbos; + struct { + __u64 jc; + __u32 gpu_id; + __u32 major; + __u32 minor; + __u64 nbos; } reghdr; struct pan_bomap_hdr { - __le32 valid; - __le64 iova; - __le32 data[2]; + __u32 valid; + __u64 iova; + __u32 data[2]; } bomap; /* @@ -261,14 +265,14 @@ struct panfrost_dump_object_header { * with new fields and also keep it 512-byte aligned */ - __le32 sizer[496]; + __u32 sizer[496]; }; }; /* Registers object, an array of these */ struct panfrost_dump_registers { - __le32 reg; - __le32 value; + __u32 reg; + __u32 value; }; #if defined(__cplusplus) diff --git a/init/Kconfig b/init/Kconfig index 694f7c160c9c..abf65098f1b6 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -66,7 +66,7 @@ config RUST_IS_AVAILABLE This shows whether a suitable Rust toolchain is available (found). Please see Documentation/rust/quick-start.rst for instructions on how - to satify the build requirements of Rust support. + to satisfy the build requirements of Rust support. In particular, the Makefile target 'rustavailable' is useful to check why the Rust toolchain is not being detected. diff --git a/io_uring/filetable.h b/io_uring/filetable.h index ff3a712e11bf..351111ff8882 100644 --- a/io_uring/filetable.h +++ b/io_uring/filetable.h @@ -5,22 +5,9 @@ #include <linux/file.h> #include <linux/io_uring_types.h> -/* - * FFS_SCM is only available on 64-bit archs, for 32-bit we just define it as 0 - * and define IO_URING_SCM_ALL. For this case, we use SCM for all files as we - * can't safely always dereference the file when the task has exited and ring - * cleanup is done. If a file is tracked and part of SCM, then unix gc on - * process exit may reap it before __io_sqe_files_unregister() is run. 
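With FFS_SCM removed, the two remaining FFS_* bits defined below ride in the low bits of the stored struct file pointer, and FFS_MASK recovers the pointer itself. A reduced sketch of this pointer-tagging scheme; the pack/unpack helpers are illustrative, io_uring open-codes the equivalent:

struct file;	/* opaque here; kernel slab pointers are at least 8-byte aligned */

#define FFS_NOWAIT	0x1UL
#define FFS_ISREG	0x2UL
#define FFS_MASK	~(FFS_NOWAIT|FFS_ISREG)

/* Stash per-file flag bits in the alignment bits of the pointer. */
static unsigned long ffs_pack(struct file *file, unsigned long flags)
{
	return (unsigned long)file | (flags & ~FFS_MASK);
}

/* Mask the tag bits back off to recover the pointer. */
static struct file *ffs_unpack(unsigned long file_ptr)
{
	return (struct file *)(file_ptr & FFS_MASK);
}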
- */ #define FFS_NOWAIT 0x1UL #define FFS_ISREG 0x2UL -#if defined(CONFIG_64BIT) -#define FFS_SCM 0x4UL -#else -#define IO_URING_SCM_ALL -#define FFS_SCM 0x0UL -#endif -#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG|FFS_SCM) +#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG) bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files); void io_free_file_tables(struct io_file_table *table); @@ -38,6 +25,7 @@ unsigned int io_file_get_flags(struct file *file); static inline void io_file_bitmap_clear(struct io_file_table *table, int bit) { + WARN_ON_ONCE(!test_bit(bit, table->bitmap)); __clear_bit(bit, table->bitmap); table->alloc_hint = bit; } diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index c6536d4b2da0..6f1d0e5df23a 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -1164,10 +1164,10 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node); if (!wqe) goto err; + wq->wqes[node] = wqe; if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL)) goto err; cpumask_copy(wqe->cpu_mask, cpumask_of_node(node)); - wq->wqes[node] = wqe; wqe->node = alloc_node; wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index de08d9902b30..6cc16e39b27f 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1587,8 +1587,6 @@ unsigned int io_file_get_flags(struct file *file) res |= FFS_ISREG; if (__io_file_supports_nowait(file, mode)) res |= FFS_NOWAIT; - if (io_file_need_scm(file)) - res |= FFS_SCM; return res; } @@ -1860,7 +1858,6 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd, /* mask in overlapping REQ_F and FFS bits */ req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT); io_req_set_rsrc_node(req, ctx, 0); - WARN_ON_ONCE(file && !test_bit(fd, ctx->file_table.bitmap)); out: io_ring_submit_unlock(ctx, issue_flags); return file; @@ -2563,18 +2560,14 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx) static void io_req_caches_free(struct io_ring_ctx *ctx) { - struct io_submit_state *state = &ctx->submit_state; int nr = 0; mutex_lock(&ctx->uring_lock); - io_flush_cached_locked_reqs(ctx, state); + io_flush_cached_locked_reqs(ctx, &ctx->submit_state); while (!io_req_cache_empty(ctx)) { - struct io_wq_work_node *node; - struct io_kiocb *req; + struct io_kiocb *req = io_alloc_req(ctx); - node = wq_stack_extract(&state->free_list); - req = container_of(node, struct io_kiocb, comp_list); kmem_cache_free(req_cachep, req); nr++; } @@ -2811,15 +2804,12 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) io_poll_remove_all(ctx, NULL, true); mutex_unlock(&ctx->uring_lock); - /* failed during ring init, it couldn't have issued any requests */ - if (ctx->rings) { + /* + * If we failed setting up the ctx, we might not have any rings + * and therefore did not submit any requests + */ + if (ctx->rings) io_kill_timeouts(ctx, NULL, true); - /* if we failed setting up the ctx, we might not have any rings */ - io_iopoll_try_reap_events(ctx); - /* drop cached put refs after potentially doing completions */ - if (current->io_uring) - io_uring_drop_tctx_refs(current); - } INIT_WORK(&ctx->exit_work, io_ring_exit_work); /* diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 4a7e5d030c78..90d2fc6fd80e 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -95,6 +95,9 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) msg->src_fd = 
array_index_nospec(msg->src_fd, ctx->nr_user_files); file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr; + if (!file_ptr) + goto out_unlock; + src_file = (struct file *) (file_ptr & FFS_MASK); get_file(src_file); diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index 012fdb04ec23..55d4ab96fb92 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -757,20 +757,17 @@ int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void __io_sqe_files_unregister(struct io_ring_ctx *ctx) { -#if !defined(IO_URING_SCM_ALL) int i; for (i = 0; i < ctx->nr_user_files; i++) { struct file *file = io_file_from_index(&ctx->file_table, i); - if (!file) - continue; - if (io_fixed_file_slot(&ctx->file_table, i)->file_ptr & FFS_SCM) + /* skip scm accounted files, they'll be freed by ->ring_sock */ + if (!file || io_file_need_scm(file)) continue; io_file_bitmap_clear(&ctx->file_table, i); fput(file); } -#endif #if defined(CONFIG_UNIX) if (ctx->ring_sock) { diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h index 9bce15665444..81445a477622 100644 --- a/io_uring/rsrc.h +++ b/io_uring/rsrc.h @@ -82,11 +82,7 @@ int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file); #if defined(CONFIG_UNIX) static inline bool io_file_need_scm(struct file *filp) { -#if defined(IO_URING_SCM_ALL) - return true; -#else return !!unix_get_socket(filp); -#endif } #else static inline bool io_file_need_scm(struct file *filp) diff --git a/io_uring/rw.c b/io_uring/rw.c index 100de2626e47..bb47cc4da713 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -242,8 +242,6 @@ static void io_req_io_end(struct io_kiocb *req) { struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); - WARN_ON(!in_task()); - if (rw->kiocb.ki_flags & IOCB_WRITE) { kiocb_end_write(req); fsnotify_modify(req->file); diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c index 0d200a993489..9fcf09f2ef00 100644 --- a/kernel/bpf/cgroup_iter.c +++ b/kernel/bpf/cgroup_iter.c @@ -196,7 +196,7 @@ static int bpf_iter_attach_cgroup(struct bpf_prog *prog, return -EINVAL; if (fd) - cgrp = cgroup_get_from_fd(fd); + cgrp = cgroup_v1v2_get_from_fd(fd); else if (id) cgrp = cgroup_get_from_id(id); else /* walk the entire hierarchy by default. */ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 7f486677ab1f..2319946715e0 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1392,6 +1392,9 @@ static void cgroup_destroy_root(struct cgroup_root *root) cgroup_free_root(root); } +/* + * Returned cgroup is without refcount but it's valid as long as cset pins it. + */ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, struct cgroup_root *root) { @@ -1403,6 +1406,7 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, res_cgroup = cset->dfl_cgrp; } else { struct cgrp_cset_link *link; + lockdep_assert_held(&css_set_lock); list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { struct cgroup *c = link->cgrp; @@ -1414,6 +1418,7 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, } } + BUG_ON(!res_cgroup); return res_cgroup; } @@ -1436,23 +1441,36 @@ current_cgns_cgroup_from_root(struct cgroup_root *root) rcu_read_unlock(); - BUG_ON(!res); return res; } +/* + * Look up cgroup associated with current task's cgroup namespace on the default + * hierarchy. + * + * Unlike current_cgns_cgroup_from_root(), this doesn't need locks: + * - Internal rcu_read_lock is unnecessary because we don't dereference any rcu + * pointers. 
+ * - css_set_lock is not needed because we just read cset->dfl_cgrp. + * - As a bonus returned cgrp is pinned with the current because it cannot + * switch cgroup_ns asynchronously. + */ +static struct cgroup *current_cgns_cgroup_dfl(void) +{ + struct css_set *cset; + + cset = current->nsproxy->cgroup_ns->root_cset; + return __cset_cgroup_from_root(cset, &cgrp_dfl_root); +} + /* look up cgroup associated with given css_set on the specified hierarchy */ static struct cgroup *cset_cgroup_from_root(struct css_set *cset, struct cgroup_root *root) { - struct cgroup *res = NULL; - lockdep_assert_held(&cgroup_mutex); lockdep_assert_held(&css_set_lock); - res = __cset_cgroup_from_root(cset, root); - - BUG_ON(!res); - return res; + return __cset_cgroup_from_root(cset, root); } /* @@ -6191,9 +6209,7 @@ struct cgroup *cgroup_get_from_id(u64 id) if (!cgrp) return ERR_PTR(-ENOENT); - spin_lock_irq(&css_set_lock); - root_cgrp = current_cgns_cgroup_from_root(&cgrp_dfl_root); - spin_unlock_irq(&css_set_lock); + root_cgrp = current_cgns_cgroup_dfl(); if (!cgroup_is_descendant(cgrp, root_cgrp)) { cgroup_put(cgrp); return ERR_PTR(-ENOENT); @@ -6294,16 +6310,42 @@ void cgroup_fork(struct task_struct *child) INIT_LIST_HEAD(&child->cg_list); } -static struct cgroup *cgroup_get_from_file(struct file *f) +/** + * cgroup_v1v2_get_from_file - get a cgroup pointer from a file pointer + * @f: file corresponding to cgroup_dir + * + * Find the cgroup from a file pointer associated with a cgroup directory. + * Returns a pointer to the cgroup on success. ERR_PTR is returned if the + * cgroup cannot be found. + */ +static struct cgroup *cgroup_v1v2_get_from_file(struct file *f) { struct cgroup_subsys_state *css; - struct cgroup *cgrp; css = css_tryget_online_from_dir(f->f_path.dentry, NULL); if (IS_ERR(css)) return ERR_CAST(css); - cgrp = css->cgroup; + return css->cgroup; +} + +/** + * cgroup_get_from_file - same as cgroup_v1v2_get_from_file, but only supports + * cgroup2. + * @f: file corresponding to cgroup2_dir + */ +static struct cgroup *cgroup_get_from_file(struct file *f) +{ + struct cgroup *cgrp = cgroup_v1v2_get_from_file(f); + + if (IS_ERR(cgrp)) + return ERR_CAST(cgrp); + + if (!cgroup_on_dfl(cgrp)) { + cgroup_put(cgrp); + return ERR_PTR(-EBADF); + } + return cgrp; } @@ -6772,10 +6814,8 @@ struct cgroup *cgroup_get_from_path(const char *path) struct cgroup *cgrp = ERR_PTR(-ENOENT); struct cgroup *root_cgrp; - spin_lock_irq(&css_set_lock); - root_cgrp = current_cgns_cgroup_from_root(&cgrp_dfl_root); + root_cgrp = current_cgns_cgroup_dfl(); kn = kernfs_walk_and_get(root_cgrp->kn, path); - spin_unlock_irq(&css_set_lock); if (!kn) goto out; @@ -6800,15 +6840,15 @@ out: EXPORT_SYMBOL_GPL(cgroup_get_from_path); /** - * cgroup_get_from_fd - get a cgroup pointer from a fd - * @fd: fd obtained by open(cgroup2_dir) + * cgroup_v1v2_get_from_fd - get a cgroup pointer from a fd + * @fd: fd obtained by open(cgroup_dir) * * Find the cgroup from a fd which should be obtained * by opening a cgroup directory. Returns a pointer to the * cgroup on success. ERR_PTR is returned if the cgroup * cannot be found. */ -struct cgroup *cgroup_get_from_fd(int fd) +struct cgroup *cgroup_v1v2_get_from_fd(int fd) { struct cgroup *cgrp; struct file *f; @@ -6817,10 +6857,29 @@ struct cgroup *cgroup_get_from_fd(int fd) if (!f) return ERR_PTR(-EBADF); - cgrp = cgroup_get_from_file(f); + cgrp = cgroup_v1v2_get_from_file(f); fput(f); return cgrp; } + +/** + * cgroup_get_from_fd - same as cgroup_v1v2_get_from_fd, but only supports + * cgroup2. 
+ * @fd: fd obtained by open(cgroup2_dir) + */ +struct cgroup *cgroup_get_from_fd(int fd) +{ + struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd); + + if (IS_ERR(cgrp)) + return ERR_CAST(cgrp); + + if (!cgroup_on_dfl(cgrp)) { + cgroup_put(cgrp); + return ERR_PTR(-EBADF); + } + return cgrp; +} EXPORT_SYMBOL_GPL(cgroup_get_from_fd); static u64 power_of_ten(int power) diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index 460c12b7dfea..7971e989e425 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -30,6 +30,13 @@ #define GCOV_TAG_FUNCTION_LENGTH 3 +/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). */ +#if (__GNUC__ >= 12) +#define GCOV_UNIT_SIZE 4 +#else +#define GCOV_UNIT_SIZE 1 +#endif + static struct gcov_info *gcov_info_head; /** @@ -383,12 +390,18 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info) pos += store_gcov_u32(buffer, pos, info->version); pos += store_gcov_u32(buffer, pos, info->stamp); +#if (__GNUC__ >= 12) + /* Use zero as checksum of the compilation unit. */ + pos += store_gcov_u32(buffer, pos, 0); +#endif + for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) { fi_ptr = info->functions[fi_idx]; /* Function record. */ pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION); - pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH); + pos += store_gcov_u32(buffer, pos, + GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE); pos += store_gcov_u32(buffer, pos, fi_ptr->ident); pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum); pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum); @@ -402,7 +415,8 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info) /* Counter record. */ pos += store_gcov_u32(buffer, pos, GCOV_TAG_FOR_COUNTER(ct_idx)); - pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2); + pos += store_gcov_u32(buffer, pos, + ci_ptr->num * 2 * GCOV_UNIT_SIZE); for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) { pos += store_gcov_u64(buffer, pos, diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7f5eb295fe19..a995ea1ef849 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -346,8 +346,40 @@ static void put_probe_ref(void) mutex_unlock(&blk_probe_mutex); } +static int blk_trace_start(struct blk_trace *bt) +{ + if (bt->trace_state != Blktrace_setup && + bt->trace_state != Blktrace_stopped) + return -EINVAL; + + blktrace_seq++; + smp_mb(); + bt->trace_state = Blktrace_running; + raw_spin_lock_irq(&running_trace_lock); + list_add(&bt->running_list, &running_trace_list); + raw_spin_unlock_irq(&running_trace_lock); + trace_note_time(bt); + + return 0; +} + +static int blk_trace_stop(struct blk_trace *bt) +{ + if (bt->trace_state != Blktrace_running) + return -EINVAL; + + bt->trace_state = Blktrace_stopped; + raw_spin_lock_irq(&running_trace_lock); + list_del_init(&bt->running_list); + raw_spin_unlock_irq(&running_trace_lock); + relay_flush(bt->rchan); + + return 0; +} + static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt) { + blk_trace_stop(bt); synchronize_rcu(); blk_trace_free(q, bt); put_probe_ref(); @@ -362,8 +394,7 @@ static int __blk_trace_remove(struct request_queue *q) if (!bt) return -EINVAL; - if (bt->trace_state != Blktrace_running) - blk_trace_cleanup(q, bt); + blk_trace_cleanup(q, bt); return 0; } @@ -658,7 +689,6 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, static int __blk_trace_startstop(struct request_queue *q, int start) { - int ret; struct blk_trace *bt; bt = rcu_dereference_protected(q->blk_trace, @@ -666,36 
+696,10 @@ static int __blk_trace_startstop(struct request_queue *q, int start) if (bt == NULL) return -EINVAL; - /* - * For starting a trace, we can transition from a setup or stopped - * trace. For stopping a trace, the state must be running - */ - ret = -EINVAL; - if (start) { - if (bt->trace_state == Blktrace_setup || - bt->trace_state == Blktrace_stopped) { - blktrace_seq++; - smp_mb(); - bt->trace_state = Blktrace_running; - raw_spin_lock_irq(&running_trace_lock); - list_add(&bt->running_list, &running_trace_list); - raw_spin_unlock_irq(&running_trace_lock); - - trace_note_time(bt); - ret = 0; - } - } else { - if (bt->trace_state == Blktrace_running) { - bt->trace_state = Blktrace_stopped; - raw_spin_lock_irq(&running_trace_lock); - list_del_init(&bt->running_list); - raw_spin_unlock_irq(&running_trace_lock); - relay_flush(bt->rchan); - ret = 0; - } - } - - return ret; + if (start) + return blk_trace_start(bt); + else + return blk_trace_stop(bt); } int blk_trace_startstop(struct request_queue *q, int start) @@ -772,10 +776,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) void blk_trace_shutdown(struct request_queue *q) { if (rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->debugfs_mutex))) { - __blk_trace_startstop(q, 0); + lockdep_is_held(&q->debugfs_mutex))) __blk_trace_remove(q); - } } #ifdef CONFIG_BLK_CGROUP @@ -1614,13 +1616,7 @@ static int blk_trace_remove_queue(struct request_queue *q) if (bt == NULL) return -EINVAL; - if (bt->trace_state == Blktrace_running) { - bt->trace_state = Blktrace_stopped; - raw_spin_lock_irq(&running_trace_lock); - list_del_init(&bt->running_list); - raw_spin_unlock_irq(&running_trace_lock); - relay_flush(bt->rchan); - } + blk_trace_stop(bt); put_probe_ref(); synchronize_rcu(); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1cc4a5f4791e..03fc7e5edf07 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2455,7 +2455,16 @@ static void __split_huge_page_tail(struct page *head, int tail, page_tail); page_tail->mapping = head->mapping; page_tail->index = head->index + tail; - page_tail->private = 0; + + /* + * page->private should not be set in tail pages with the exception + * of swap cache pages that store the swp_entry_t in tail pages. + * Fix up and warn once if private is unexpectedly set. + */ + if (!folio_test_swapcache(page_folio(head))) { + VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, head); + page_tail->private = 0; + } /* Page flags must be visible before we make the page non-compound. */ smp_wmb(); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b586cdd75930..546df97c31e4 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1014,15 +1014,23 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma) VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); /* * Clear vm_private_data + * - For shared mappings this is a per-vma semaphore that may be + * allocated in a subsequent call to hugetlb_vm_op_open. + * Before clearing, make sure pointer is not associated with vma + * as this will leak the structure. This is the case when called + * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already + * been called to allocate a new structure. * - For MAP_PRIVATE mappings, this is the reserve map which does * not apply to children. Faults generated by the children are * not guaranteed to succeed, even if read-only. - * - For shared mappings this is a per-vma semaphore that may be - * allocated in a subsequent call to hugetlb_vm_op_open. 
*/ - vma->vm_private_data = (void *)0; - if (!(vma->vm_flags & VM_MAYSHARE)) - return; + if (vma->vm_flags & VM_MAYSHARE) { + struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; + + if (vma_lock && vma_lock->vma != vma) + vma->vm_private_data = NULL; + } else + vma->vm_private_data = NULL; } /* @@ -2924,11 +2932,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, page = alloc_buddy_huge_page_with_mpol(h, vma, addr); if (!page) goto out_uncharge_cgroup; + spin_lock_irq(&hugetlb_lock); if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { SetHPageRestoreReserve(page); h->resv_huge_pages--; } - spin_lock_irq(&hugetlb_lock); list_add(&page->lru, &h->hugepage_activelist); set_page_refcounted(page); /* Fall through */ @@ -4601,6 +4609,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) struct resv_map *resv = vma_resv_map(vma); /* + * HPAGE_RESV_OWNER indicates a private mapping. * This new VMA should share its siblings reservation map if present. * The VMA will only ever have a valid reservation map pointer where * it is being copied for another still existing VMA. As that VMA @@ -4615,11 +4624,21 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) /* * vma_lock structure for sharable mappings is vma specific. - * Clear old pointer (if copied via vm_area_dup) and create new. + * Clear old pointer (if copied via vm_area_dup) and allocate + * new structure. Before clearing, make sure vma_lock is not + * for this vma. */ if (vma->vm_flags & VM_MAYSHARE) { - vma->vm_private_data = NULL; - hugetlb_vma_lock_alloc(vma); + struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; + + if (vma_lock) { + if (vma_lock->vma != vma) { + vma->vm_private_data = NULL; + hugetlb_vma_lock_alloc(vma); + } else + pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); + } else + hugetlb_vma_lock_alloc(vma); } } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index a937eaec5b68..61aa9aedb728 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -787,17 +787,22 @@ static int vma_replace_policy(struct vm_area_struct *vma, static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { - MA_STATE(mas, &mm->mm_mt, start - 1, start - 1); + MA_STATE(mas, &mm->mm_mt, start, start); struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; - prev = mas_find_rev(&mas, 0); - if (prev && (start < prev->vm_end)) - vma = prev; - else - vma = mas_next(&mas, end - 1); + prev = mas_prev(&mas, 0); + if (unlikely(!prev)) + mas_set(&mas, start); + + vma = mas_find(&mas, end - 1); + if (WARN_ON(!vma)) + return 0; + + if (start > vma->vm_start) + prev = vma; for (; vma; vma = mas_next(&mas, end - 1)) { unsigned long vmstart = max(start, vma->vm_start); diff --git a/mm/mmap.c b/mm/mmap.c index bf2122af94e7..e270057ed04e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -618,7 +618,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, struct vm_area_struct *expand) { struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end); + struct vm_area_struct *next_next = NULL; /* uninit var warning */ + struct vm_area_struct *next = find_vma(mm, vma->vm_end); struct vm_area_struct *orig_vma = vma; struct address_space *mapping = NULL; struct rb_root_cached *root = NULL; @@ -2625,14 +2626,14 @@ cannot_expand: if (error) goto unmap_and_free_vma; - /* Can addr have changed?? - * - * Answer: Yes, several device drivers can do it in their - * f_op->mmap method. 
-DaveM + /* + * Expansion is handled above, merging is handled below. + * Drivers should not alter the address of the VMA. */ - WARN_ON_ONCE(addr != vma->vm_start); - - addr = vma->vm_start; + if (WARN_ON((addr != vma->vm_start))) { + error = -EINVAL; + goto close_and_free_vma; + } mas_reset(&mas); /* @@ -2654,7 +2655,6 @@ cannot_expand: vm_area_free(vma); vma = merge; /* Update vm_flags to pick up the change. */ - addr = vma->vm_start; vm_flags = vma->vm_flags; goto unmap_writable; } @@ -2681,7 +2681,7 @@ cannot_expand: if (mas_preallocate(&mas, vma, GFP_KERNEL)) { error = -ENOMEM; if (file) - goto unmap_and_free_vma; + goto close_and_free_vma; else goto free_vma; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e20ade858e71..b5a6c815ae28 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5784,14 +5784,18 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, size_t size) { if (addr) { - unsigned long alloc_end = addr + (PAGE_SIZE << order); - unsigned long used = addr + PAGE_ALIGN(size); - - split_page(virt_to_page((void *)addr), order); - while (used < alloc_end) { - free_page(used); - used += PAGE_SIZE; - } + unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); + struct page *page = virt_to_page((void *)addr); + struct page *last = page + nr; + + split_page_owner(page, 1 << order); + split_page_memcg(page, 1 << order); + while (page < --last) + set_page_refcounted(last); + + last = page + (1UL << order); + for (page += nr; page < last; page++) + __free_pages_ok(page, 0, FPI_TO_TAIL); } return (void *)addr; } diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 525758713a55..d03941cace2c 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -2311,6 +2311,9 @@ void zs_destroy_pool(struct zs_pool *pool) int fg; struct size_class *class = pool->size_class[i]; + if (!class) + continue; + if (class->index != i) continue; diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index 829db9eba0cb..aaf64b953915 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff, if (!page) return -ENOMEM; - for (p = page, len = 0; len < nbytes; p++, len++) { + for (p = page, len = 0; len < nbytes; p++) { if (get_user(*p, buff++)) { free_page((unsigned long)page); return -EFAULT; } + len += 1; if (*p == '\0' || *p == '\n') break; } diff --git a/net/core/dev.c b/net/core/dev.c index fa53830d0683..3be256051e99 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5136,11 +5136,13 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, case TC_ACT_SHOT: mini_qdisc_qstats_cpu_drop(miniq); kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS); + *ret = NET_RX_DROP; return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: case TC_ACT_TRAP: consume_skb(skb); + *ret = NET_RX_SUCCESS; return NULL; case TC_ACT_REDIRECT: /* skb_mac_header check was done by cls/act_bpf, so @@ -5153,8 +5155,10 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, *another = true; break; } + *ret = NET_RX_SUCCESS; return NULL; case TC_ACT_CONSUMED: + *ret = NET_RX_SUCCESS; return NULL; default: break; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index ca70525621c7..1efdc47a999b 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -500,11 +500,11 @@ bool sk_msg_is_readable(struct sock *sk) } EXPORT_SYMBOL_GPL(sk_msg_is_readable); -static struct sk_msg *alloc_sk_msg(void) +static struct sk_msg *alloc_sk_msg(gfp_t gfp) { struct sk_msg *msg; - msg = kzalloc(sizeof(*msg), 
__GFP_NOWARN | GFP_KERNEL); + msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN); if (unlikely(!msg)) return NULL; sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS); @@ -520,7 +520,7 @@ static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk, if (!sk_rmem_schedule(sk, skb, skb->truesize)) return NULL; - return alloc_sk_msg(); + return alloc_sk_msg(GFP_KERNEL); } static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, @@ -597,7 +597,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len) { - struct sk_msg *msg = alloc_sk_msg(); + struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC); struct sock *sk = psock->sk; int err; diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 5daa1fa54249..fb90e1e00773 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -21,6 +21,22 @@ static DEFINE_IDA(reuseport_ida); static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse, struct sock_reuseport *reuse, bool bind_inany); +void reuseport_has_conns_set(struct sock *sk) +{ + struct sock_reuseport *reuse; + + if (!rcu_access_pointer(sk->sk_reuseport_cb)) + return; + + spin_lock_bh(&reuseport_lock); + reuse = rcu_dereference_protected(sk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + if (likely(reuse)) + reuse->has_conns = 1; + spin_unlock_bh(&reuseport_lock); +} +EXPORT_SYMBOL(reuseport_has_conns_set); + static int reuseport_sock_index(struct sock *sk, const struct sock_reuseport *reuse, bool closed) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 1a59918d3b30..a9fde48cffd4 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -3145,7 +3145,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, case NETDEV_CHANGELOWERSTATE: { struct netdev_notifier_changelowerstate_info *info = ptr; struct dsa_port *dp; - int err; + int err = 0; if (dsa_slave_dev_check(dev)) { dp = dsa_slave_to_port(dev); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 5bf357734b11..a50429a62f74 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -150,15 +150,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, struct hsr_port *port) { if (!frame->skb_std) { - if (frame->skb_hsr) { + if (frame->skb_hsr) frame->skb_std = create_stripped_skb_hsr(frame->skb_hsr, frame); - } else { - /* Unexpected */ - WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n", - __FILE__, __LINE__, port->dev->name); + else + netdev_warn_once(port->dev, + "Unexpected frame received in hsr_get_untagged_frame()\n"); + + if (!frame->skb_std) return NULL; - } } return skb_clone(frame->skb_std, GFP_ATOMIC); diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 0ee7fd259730..4d1af0cd7d99 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -70,7 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len } inet->inet_daddr = fl4->daddr; inet->inet_dport = usin->sin_port; - reuseport_has_conns(sk, true); + reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); inet->inet_id = get_random_u16(); diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index ff85db52b2e5..ded5bef02f77 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) flow.flowi4_tos = iph->tos & 
IPTOS_RT_MASK; flow.flowi4_scope = RT_SCOPE_UNIVERSE; flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par)); + flow.flowi4_uid = sock_net_uid(xt_net(par), NULL); return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert; } diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index e886147eed11..fc65d69f23e1 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -65,6 +65,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, struct flowi4 fl4 = { .flowi4_scope = RT_SCOPE_UNIVERSE, .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_uid = sock_net_uid(nft_net(pkt), NULL), }; const struct net_device *oif; const struct net_device *found; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 662d717d5123..c83e5271030b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -448,7 +448,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); /* Fall back to scoring if group has connections */ - if (result && !reuseport_has_conns(sk, false)) + if (result && !reuseport_has_conns(sk)) return result; result = result ? : sk; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 417834b7169d..9c3f5202a97b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -7214,9 +7214,11 @@ err_reg_dflt: __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL); err_reg_all: kfree(dflt); + net->ipv6.devconf_dflt = NULL; #endif err_alloc_dflt: kfree(all); + net->ipv6.devconf_all = NULL; err_alloc_all: kfree(net->ipv6.inet6_addr_lst); err_alloc_addr: diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index df665d4e8f0f..5ecb56522f9d 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -256,7 +256,7 @@ ipv4_connected: goto out; } - reuseport_has_conns(sk, true); + reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); out: diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 69d86b040a6a..a01d9b842bd0 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -40,6 +40,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, .flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev), .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK, .flowi6_proto = iph->nexthdr, + .flowi6_uid = sock_net_uid(net, NULL), .daddr = iph->saddr, }; int lookup_flags; diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 91faac610e03..36dc14b34388 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -66,6 +66,7 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv, struct flowi6 fl6 = { .flowi6_iif = LOOPBACK_IFINDEX, .flowi6_proto = pkt->tprot, + .flowi6_uid = sock_net_uid(nft_net(pkt), NULL), }; u32 ret = 0; @@ -163,6 +164,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, struct flowi6 fl6 = { .flowi6_iif = LOOPBACK_IFINDEX, .flowi6_proto = pkt->tprot, + .flowi6_uid = sock_net_uid(nft_net(pkt), NULL), }; struct rt6_info *rt; int lookup_flags; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8d09f0ea5b8c..129ec5a9b0eb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -195,7 +195,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); /* Fall back to scoring if group has connections */ - if (result && !reuseport_has_conns(sk, false)) + if (result && 
!reuseport_has_conns(sk)) return result; result = result ? : sk; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index a0653a8dfa82..58d9cbc9ccdc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5865,8 +5865,9 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set, (NFT_SET_CONCAT | NFT_SET_INTERVAL)) { if (flags & NFT_SET_ELEM_INTERVAL_END) return false; - if (!nla[NFTA_SET_ELEM_KEY_END] && - !(flags & NFT_SET_ELEM_CATCHALL)) + + if (nla[NFTA_SET_ELEM_KEY_END] && + flags & NFT_SET_ELEM_CATCHALL) return false; } else { if (nla[NFTA_SET_ELEM_KEY_END]) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index c98af0ada706..4a27dfb1ba0f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1099,12 +1099,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, skip: if (!ingress) { - notify_and_destroy(net, skb, n, classid, - rtnl_dereference(dev->qdisc), new); + old = rtnl_dereference(dev->qdisc); if (new && !new->ops->attach) qdisc_refcount_inc(new); rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc); + notify_and_destroy(net, skb, n, classid, old, new); + if (new && new->ops->attach) new->ops->attach(new); } else { diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 817cd0695b35..3ed0c3342189 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -2224,8 +2224,12 @@ retry: static void cake_reset(struct Qdisc *sch) { + struct cake_sched_data *q = qdisc_priv(sch); u32 c; + if (!q->tins) + return; + for (c = 0; c < CAKE_MAX_TINS; c++) cake_clear_tin(sch, c); } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 99d318b60568..8c4fee063436 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -478,24 +478,26 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, if (opt) { err = fq_codel_change(sch, opt, extack); if (err) - return err; + goto init_failure; } err = tcf_block_get(&q->block, &q->filter_list, sch, extack); if (err) - return err; + goto init_failure; if (!q->flows) { q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_codel_flow), GFP_KERNEL); - if (!q->flows) - return -ENOMEM; - + if (!q->flows) { + err = -ENOMEM; + goto init_failure; + } q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL); - if (!q->backlogs) - return -ENOMEM; - + if (!q->backlogs) { + err = -ENOMEM; + goto alloc_failure; + } for (i = 0; i < q->flows_cnt; i++) { struct fq_codel_flow *flow = q->flows + i; @@ -508,6 +510,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, else sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; + +alloc_failure: + kvfree(q->flows); + q->flows = NULL; +init_failure: + q->flows_cnt = 0; + return err; } static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 0366a1a029a9..1871a1c0224d 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -455,7 +455,8 @@ static void sfb_reset(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); - qdisc_reset(q->qdisc); + if (likely(q->qdisc)) + qdisc_reset(q->qdisc); q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index e6ee797640b4..c305d8dd23f8 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -896,7 +896,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) } memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1], SMC_MAX_PNETID_LEN); - if (smc_wr_alloc_lgr_mem(lgr)) + rc = 
smc_wr_alloc_lgr_mem(lgr); + if (rc) goto free_wq; smc_llc_lgr_init(lgr, smc); diff --git a/net/tipc/discover.c b/net/tipc/discover.c index da69e1abf68f..e8630707901e 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -148,8 +148,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, { struct net *net = d->net; struct tipc_net *tn = tipc_net(net); - bool trial = time_before(jiffies, tn->addr_trial_end); u32 self = tipc_own_addr(net); + bool trial = time_before(jiffies, tn->addr_trial_end) && !self; if (mtyp == DSC_TRIAL_FAIL_MSG) { if (!trial) diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 5522865deae9..14fd05fd6107 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -568,7 +568,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower, sub.seq.upper = upper; sub.timeout = TIPC_WAIT_FOREVER; sub.filter = filter; - *(u32 *)&sub.usr_handle = port; + *(u64 *)&sub.usr_handle = (u64)port; con = tipc_conn_alloc(tipc_topsrv(net)); if (IS_ERR(con)) diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 9b79e334dbd9..955ac3e0bf4d 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -273,7 +273,7 @@ static int tls_strp_read_copyin(struct tls_strparser *strp) return desc.error; } -static int tls_strp_read_short(struct tls_strparser *strp) +static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort) { struct skb_shared_info *shinfo; struct page *page; @@ -283,7 +283,7 @@ static int tls_strp_read_short(struct tls_strparser *strp) * to read the data out. Otherwise the connection will stall. * Without pressure threshold of INT_MAX will never be ready. */ - if (likely(!tcp_epollin_ready(strp->sk, INT_MAX))) + if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX))) return 0; shinfo = skb_shinfo(strp->anchor); @@ -315,6 +315,27 @@ static int tls_strp_read_short(struct tls_strparser *strp) return 0; } +static bool tls_strp_check_no_dup(struct tls_strparser *strp) +{ + unsigned int len = strp->stm.offset + strp->stm.full_len; + struct sk_buff *skb; + u32 seq; + + skb = skb_shinfo(strp->anchor)->frag_list; + seq = TCP_SKB_CB(skb)->seq; + + while (skb->len < len) { + seq += skb->len; + len -= skb->len; + skb = skb->next; + + if (TCP_SKB_CB(skb)->seq != seq) + return false; + } + + return true; +} + static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len) { struct tcp_sock *tp = tcp_sk(strp->sk); @@ -373,7 +394,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp) return tls_strp_read_copyin(strp); if (inq < strp->stm.full_len) - return tls_strp_read_short(strp); + return tls_strp_read_copy(strp, true); if (!strp->stm.full_len) { tls_strp_load_anchor_with_queue(strp, inq); @@ -387,9 +408,12 @@ static int tls_strp_read_sock(struct tls_strparser *strp) strp->stm.full_len = sz; if (!strp->stm.full_len || inq < strp->stm.full_len) - return tls_strp_read_short(strp); + return tls_strp_read_copy(strp, true); } + if (!tls_strp_check_no_dup(strp)) + return tls_strp_read_copy(strp, false); + strp->msg_ready = 1; tls_rx_msg_ready(strp); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index fe5fcf571c56..64a6a37dc36d 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2022,7 +2022,8 @@ static inline int convert_context_handle_invalid_context( * in `newc'. Verify that the context is valid * under the new policy. 
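The convert_context() change that follows threads a gfp_t from the sidtab code into the callback: sidtab_context_to_sid() invokes it under a spinlock and must pass GFP_ATOMIC, while the bulk sidtab_convert_tree() walk may sleep and keeps GFP_KERNEL. A stripped-down sketch of the pattern, with simplified types standing in for the real SELinux structures:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

/* A conversion callback that may allocate takes the caller's gfp mode. */
typedef int (*convert_fn_t)(const char *oldc, char **newc, gfp_t gfp);

static int convert_copy(const char *oldc, char **newc, gfp_t gfp)
{
	*newc = kstrdup(oldc, gfp);
	return *newc ? 0 : -ENOMEM;
}

static DEFINE_SPINLOCK(tab_lock);

/* Live-lookup path: the lock is held, so the callback must not sleep. */
static int convert_one_locked(const char *oldc, char **newc, convert_fn_t fn)
{
	int rc;

	spin_lock(&tab_lock);
	rc = fn(oldc, newc, GFP_ATOMIC);
	spin_unlock(&tab_lock);
	return rc;
}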
*/ -static int convert_context(struct context *oldc, struct context *newc, void *p) +static int convert_context(struct context *oldc, struct context *newc, void *p, + gfp_t gfp_flags) { struct convert_context_args *args; struct ocontext *oc; @@ -2036,7 +2037,7 @@ static int convert_context(struct context *oldc, struct context *newc, void *p) args = p; if (oldc->str) { - s = kstrdup(oldc->str, GFP_KERNEL); + s = kstrdup(oldc->str, gfp_flags); if (!s) return -ENOMEM; diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index a54b8652bfb5..db5cce385bf8 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -325,7 +325,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context, } rc = convert->func(context, &dst_convert->context, - convert->args); + convert->args, GFP_ATOMIC); if (rc) { context_destroy(&dst->context); goto out_unlock; @@ -404,7 +404,7 @@ static int sidtab_convert_tree(union sidtab_entry_inner *edst, while (i < SIDTAB_LEAF_ENTRIES && *pos < count) { rc = convert->func(&esrc->ptr_leaf->entries[i].context, &edst->ptr_leaf->entries[i].context, - convert->args); + convert->args, GFP_KERNEL); if (rc) return rc; (*pos)++; diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h index 4eff0e49dcb2..9fce0d553fe2 100644 --- a/security/selinux/ss/sidtab.h +++ b/security/selinux/ss/sidtab.h @@ -65,7 +65,7 @@ struct sidtab_isid_entry { }; struct sidtab_convert_params { - int (*func)(struct context *oldc, struct context *newc, void *args); + int (*func)(struct context *oldc, struct context *newc, void *args, gfp_t gfp_flags); void *args; struct sidtab *target; }; diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 2a6b0bc648c4..69c58362c0ed 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -70,6 +70,7 @@ TEST_PROGS += io_uring_zerocopy_tx.sh TEST_GEN_FILES += bind_bhash TEST_GEN_PROGS += sk_bind_sendto_listen TEST_GEN_PROGS += sk_connect_zero_addr +TEST_PROGS += test_ingress_egress_chaining.sh TEST_FILES := settings diff --git a/tools/testing/selftests/net/test_ingress_egress_chaining.sh b/tools/testing/selftests/net/test_ingress_egress_chaining.sh new file mode 100644 index 000000000000..08adff6bb3b6 --- /dev/null +++ b/tools/testing/selftests/net/test_ingress_egress_chaining.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test runs a simple ingress tc setup between two veth pairs, +# and chains a single egress rule to test ingress chaining to egress. +# +# Kselftest framework requirement - SKIP code is 4. 
+ksft_skip=4 + +if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" + exit $ksft_skip +fi + +needed_mods="act_mirred cls_flower sch_ingress" +for mod in $needed_mods; do + modinfo $mod &>/dev/null || { echo "SKIP: Need act_mirred module"; exit $ksft_skip; } +done + +ns="ns$((RANDOM%899+100))" +veth1="veth1$((RANDOM%899+100))" +veth2="veth2$((RANDOM%899+100))" +peer1="peer1$((RANDOM%899+100))" +peer2="peer2$((RANDOM%899+100))" +ip_peer1=198.51.100.5 +ip_peer2=198.51.100.6 + +function fail() { + echo "FAIL: $@" >> /dev/stderr + exit 1 +} + +function cleanup() { + killall -q -9 udpgso_bench_rx + ip link del $veth1 &> /dev/null + ip link del $veth2 &> /dev/null + ip netns del $ns &> /dev/null +} +trap cleanup EXIT + +function config() { + echo "Setup veth pairs [$veth1, $peer1], and veth pair [$veth2, $peer2]" + ip link add $veth1 type veth peer name $peer1 + ip link add $veth2 type veth peer name $peer2 + ip addr add $ip_peer1/24 dev $peer1 + ip link set $peer1 up + ip netns add $ns + ip link set dev $peer2 netns $ns + ip netns exec $ns ip addr add $ip_peer2/24 dev $peer2 + ip netns exec $ns ip link set $peer2 up + ip link set $veth1 up + ip link set $veth2 up + + echo "Add tc filter ingress->egress forwarding $veth1 <-> $veth2" + tc qdisc add dev $veth2 ingress + tc qdisc add dev $veth1 ingress + tc filter add dev $veth2 ingress prio 1 proto all flower \ + action mirred egress redirect dev $veth1 + tc filter add dev $veth1 ingress prio 1 proto all flower \ + action mirred egress redirect dev $veth2 + + echo "Add tc filter egress->ingress forwarding $peer1 -> $veth1, bypassing the veth pipe" + tc qdisc add dev $peer1 clsact + tc filter add dev $peer1 egress prio 20 proto ip flower \ + action mirred ingress redirect dev $veth1 +} + +function test_run() { + echo "Run tcp traffic" + ./udpgso_bench_rx -t & + sleep 1 + ip netns exec $ns timeout -k 2 10 ./udpgso_bench_tx -t -l 2 -4 -D $ip_peer1 || fail "traffic failed" + echo "Test passed" +} + +config +test_run +trap - EXIT +cleanup diff --git a/tools/verification/dot2/dot2c.py b/tools/verification/dot2/dot2c.py index fa73353f7e56..be8a364a469b 100644 --- a/tools/verification/dot2/dot2c.py +++ b/tools/verification/dot2/dot2c.py @@ -111,7 +111,7 @@ class Dot2c(Automata): def format_aut_init_header(self): buff = [] - buff.append("struct %s %s = {" % (self.struct_automaton_def, self.var_automaton_def)) + buff.append("static struct %s %s = {" % (self.struct_automaton_def, self.var_automaton_def)) return buff def __get_string_vector_per_line_content(self, buff):
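The dot2c.py hunk above marks the generated automaton definition static: each generated header is included by exactly one RV monitor, so the symbol should not leak into the global namespace. Roughly what format_aut_init_header() now opens for a hypothetical monitor named "example"; the states, events and table values below are purely illustrative:

static struct automaton_example automaton_example = {
	.state_names = { "state_a", "state_b" },
	.event_names = { "event_x", "event_y" },
	.function = {
		{ state_b,       INVALID_STATE },
		{ INVALID_STATE, state_a       },
	},
	.initial_state = state_a,
	.final_states = { 1, 0 },
};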