466 files changed, 5866 insertions, 5187 deletions
@@ -104,6 +104,7 @@ Christoph Hellwig <hch@lst.de> Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com> Corey Minyard <minyard@acm.org> Damian Hobson-Garcia <dhobsong@igel.co.jp> +Dan Carpenter <error27@gmail.com> <dan.carpenter@oracle.com> Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com> Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net> Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch> @@ -353,7 +354,8 @@ Peter Oruba <peter@oruba.de> Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com> Praveen BP <praveenbp@ti.com> Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com> -Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com> +Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com> +Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com> Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com> Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com> Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl> diff --git a/Documentation/admin-guide/acpi/index.rst b/Documentation/admin-guide/acpi/index.rst index 71277689ad97..b078fdb8f4c9 100644 --- a/Documentation/admin-guide/acpi/index.rst +++ b/Documentation/admin-guide/acpi/index.rst @@ -9,7 +9,6 @@ the Linux ACPI support. :maxdepth: 1 initrd_table_override - dsdt-override ssdt-overlays cppc_sysfs fan_performance_states diff --git a/Documentation/admin-guide/device-mapper/verity.rst b/Documentation/admin-guide/device-mapper/verity.rst index 1a6b91368e59..a65c1602cb23 100644 --- a/Documentation/admin-guide/device-mapper/verity.rst +++ b/Documentation/admin-guide/device-mapper/verity.rst @@ -141,6 +141,10 @@ root_hash_sig_key_desc <key_description> also gain new certificates at run time if they are signed by a certificate already in the secondary trusted keyring. +try_verify_in_tasklet + If verity hashes are in cache, verify data blocks in kernel tasklet instead + of workqueue. This option can reduce IO latency. + Theory of operation =================== diff --git a/Documentation/block/ublk.rst b/Documentation/block/ublk.rst index 2122d1a4a541..ba45c46cc0da 100644 --- a/Documentation/block/ublk.rst +++ b/Documentation/block/ublk.rst @@ -144,6 +144,42 @@ managing and controlling ublk devices with help of several control commands: For retrieving device info via ``ublksrv_ctrl_dev_info``. It is the server's responsibility to save IO target specific info in userspace. +- ``UBLK_CMD_START_USER_RECOVERY`` + + This command is valid if ``UBLK_F_USER_RECOVERY`` feature is enabled. This + command is accepted after the old process has exited, ublk device is quiesced + and ``/dev/ublkc*`` is released. User should send this command before he starts + a new process which re-opens ``/dev/ublkc*``. When this command returns, the + ublk device is ready for the new process. + +- ``UBLK_CMD_END_USER_RECOVERY`` + + This command is valid if ``UBLK_F_USER_RECOVERY`` feature is enabled. This + command is accepted after ublk device is quiesced and a new process has + opened ``/dev/ublkc*`` and get all ublk queues be ready. When this command + returns, ublk device is unquiesced and new I/O requests are passed to the + new process. + +- user recovery feature description + + Two new features are added for user recovery: ``UBLK_F_USER_RECOVERY`` and + ``UBLK_F_USER_RECOVERY_REISSUE``. 
+ + With ``UBLK_F_USER_RECOVERY`` set, after one ubq_daemon(ublk server's io + handler) is dying, ublk does not delete ``/dev/ublkb*`` during the whole + recovery stage and ublk device ID is kept. It is ublk server's + responsibility to recover the device context by its own knowledge. + Requests which have not been issued to userspace are requeued. Requests + which have been issued to userspace are aborted. + + With ``UBLK_F_USER_RECOVERY_REISSUE`` set, after one ubq_daemon(ublk + server's io handler) is dying, contrary to ``UBLK_F_USER_RECOVERY``, + requests which have been issued to userspace are requeued and will be + re-issued to the new process after handling ``UBLK_CMD_END_USER_RECOVERY``. + ``UBLK_F_USER_RECOVERY_REISSUE`` is designed for backends who tolerate + double-write since the driver may issue the same I/O request twice. It + might be useful to a read-only FS or a VM backend. + Data plane ---------- diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt deleted file mode 100644 index b88dcdd41def..000000000000 --- a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt +++ /dev/null @@ -1,9 +0,0 @@ -Dongwoon Anatech DW9714 camera voice coil lens driver - -DW9174 is a 10-bit DAC with current sink capability. It is intended -for driving voice coil lenses in camera modules. - -Mandatory properties: - -- compatible: "dongwoon,dw9714" -- reg: I²C slave address diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml new file mode 100644 index 000000000000..66229a3dc05d --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/media/i2c/dongwoon,dw9714.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Dongwoon Anatech DW9714 camera voice coil lens driver + +maintainers: + - Krzysztof Kozlowski <krzk@kernel.org> + +description: + DW9174 is a 10-bit DAC with current sink capability. It is intended for + driving voice coil lenses in camera modules. 
+ +properties: + compatible: + const: dongwoon,dw9714 + + reg: + maxItems: 1 + + powerdown-gpios: + description: + XSD pin for shutdown (active low) + + vcc-supply: + description: VDD power supply + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + camera-lens@c { + compatible = "dongwoon,dw9714"; + reg = <0x0c>; + vcc-supply = <®_csi_1v8>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml index 64995cbb0f97..41c9760227cd 100644 --- a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml +++ b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml @@ -8,7 +8,6 @@ title: Samsung S3FWRN5 NCI NFC Controller maintainers: - Krzysztof Kozlowski <krzk@kernel.org> - - Krzysztof Opasiak <k.opasiak@samsung.com> properties: compatible: diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml index 58022ae7d5dd..dfdb8dfb6b65 100644 --- a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml +++ b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml @@ -81,6 +81,9 @@ properties: power-supply: true + power-domains: + maxItems: 1 + resets: description: | A number of phandles to resets that need to be asserted during diff --git a/Documentation/driver-api/media/mc-core.rst b/Documentation/driver-api/media/mc-core.rst index 84aa7cdb5341..400b8ca29367 100644 --- a/Documentation/driver-api/media/mc-core.rst +++ b/Documentation/driver-api/media/mc-core.rst @@ -214,18 +214,29 @@ Link properties can be modified at runtime by calling Pipelines and media streams ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +A media stream is a stream of pixels or metadata originating from one or more +source devices (such as a sensors) and flowing through media entity pads +towards the final sinks. The stream can be modified on the route by the +devices (e.g. scaling or pixel format conversions), or it can be split into +multiple branches, or multiple branches can be merged. + +A media pipeline is a set of media streams which are interdependent. This +interdependency can be caused by the hardware (e.g. configuration of a second +stream cannot be changed if the first stream has been enabled) or by the driver +due to the software design. Most commonly a media pipeline consists of a single +stream which does not branch. + When starting streaming, drivers must notify all entities in the pipeline to prevent link states from being modified during streaming by calling :c:func:`media_pipeline_start()`. -The function will mark all entities connected to the given entity through -enabled links, either directly or indirectly, as streaming. +The function will mark all the pads which are part of the pipeline as streaming. The struct media_pipeline instance pointed to by -the pipe argument will be stored in every entity in the pipeline. +the pipe argument will be stored in every pad in the pipeline. Drivers should embed the struct media_pipeline in higher-level pipeline structures and can then access the -pipeline through the struct media_entity +pipeline through the struct media_pad pipe field. Calls to :c:func:`media_pipeline_start()` can be nested. 
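A minimal driver-side sketch of the pattern the mc-core.rst paragraphs above describe: embedding struct media_pipeline in a higher-level structure and bracketing streaming with media_pipeline_start() and media_pipeline_stop(). This is only an illustration, not code from the patch; the pad-based prototypes are an assumption drawn from the pad-centric wording above (older kernels pass a struct media_entity instead), and my_video_device with its helpers are hypothetical names.

/*
 * Hedged sketch only: my_video_device and the start/stop helpers are
 * hypothetical, and the pad-based media_pipeline_start()/_stop()
 * prototypes are assumed from the pad-centric text above (older
 * kernels take a struct media_entity instead).
 */
#include <media/media-entity.h>

struct my_video_device {
	struct media_pad pad;		/* sink pad of the capture entity */
	struct media_pipeline pipe;	/* embedded, as the documentation recommends */
};

static int my_start_streaming(struct my_video_device *vdev)
{
	/*
	 * Marks every pad reachable through enabled links as streaming
	 * and stores &vdev->pipe in each of them.  Calls may be nested.
	 */
	return media_pipeline_start(&vdev->pad, &vdev->pipe);
}

static void my_stop_streaming(struct my_video_device *vdev)
{
	/* Drops the reference taken above and clears the pads' pipe field. */
	media_pipeline_stop(&vdev->pad);
}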
diff --git a/Documentation/hwmon/corsair-psu.rst b/Documentation/hwmon/corsair-psu.rst index 3c1b164eb3c0..6a03edb551a8 100644 --- a/Documentation/hwmon/corsair-psu.rst +++ b/Documentation/hwmon/corsair-psu.rst @@ -19,6 +19,8 @@ Supported devices: Corsair HX1200i + Corsair HX1500i + Corsair RM550i Corsair RM650i diff --git a/Documentation/userspace-api/media/cec.h.rst.exceptions b/Documentation/userspace-api/media/cec.h.rst.exceptions index 13de01d9555e..15fa1752d4ef 100644 --- a/Documentation/userspace-api/media/cec.h.rst.exceptions +++ b/Documentation/userspace-api/media/cec.h.rst.exceptions @@ -239,6 +239,7 @@ ignore define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL ignore define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE ignore define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX ignore define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX +ignore define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_VOLUME_LEVEL ignore define CEC_MSG_GIVE_FEATURES @@ -487,6 +488,7 @@ ignore define CEC_OP_SYS_AUD_STATUS_ON ignore define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST ignore define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS +ignore define CEC_MSG_SET_AUDIO_VOLUME_LEVEL ignore define CEC_OP_AUD_FMT_ID_CEA861 ignore define CEC_OP_AUD_FMT_ID_CEA861_CXT diff --git a/Documentation/userspace-api/media/v4l/libv4l-introduction.rst b/Documentation/userspace-api/media/v4l/libv4l-introduction.rst index 90215313b965..7c8bf160e1c6 100644 --- a/Documentation/userspace-api/media/v4l/libv4l-introduction.rst +++ b/Documentation/userspace-api/media/v4l/libv4l-introduction.rst @@ -136,9 +136,9 @@ V4L2 functions operates like the :c:func:`read()` function. -.. c:function:: void v4l2_mmap(void *start, size_t length, int prot, int flags, int fd, int64_t offset); +.. c:function:: void *v4l2_mmap(void *start, size_t length, int prot, int flags, int fd, int64_t offset); - operates like the :c:func:`munmap()` function. + operates like the :c:func:`mmap()` function. .. 
c:function:: int v4l2_munmap(void *_start, size_t length); diff --git a/MAINTAINERS b/MAINTAINERS index a119500e1946..7f1b6a02f0e3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3984,7 +3984,7 @@ M: RafaÅ‚ MiÅ‚ecki <rafal@milecki.pl> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://github.com/broadcom/stblinux.git +T: git https://github.com/broadcom/stblinux.git F: Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml F: arch/arm64/boot/dts/broadcom/bcmbca/* N: bcmbca @@ -4009,7 +4009,7 @@ R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://github.com/broadcom/stblinux.git +T: git https://github.com/broadcom/stblinux.git F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml F: drivers/pci/controller/pcie-brcmstb.c F: drivers/staging/vc04_services @@ -4023,7 +4023,7 @@ M: Ray Jui <rjui@broadcom.com> M: Scott Branden <sbranden@broadcom.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> S: Maintained -T: git git://github.com/broadcom/mach-bcm +T: git https://github.com/broadcom/mach-bcm F: arch/arm/mach-bcm/ N: bcm281* N: bcm113* @@ -4088,7 +4088,7 @@ M: Florian Fainelli <f.fainelli@gmail.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://github.com/broadcom/stblinux.git +T: git https://github.com/broadcom/stblinux.git F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml F: arch/arm/boot/dts/bcm7*.dts* F: arch/arm/include/asm/hardware/cache-b15-rac.h @@ -4120,7 +4120,7 @@ M: Florian Fainelli <f.fainelli@gmail.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: linux-mips@vger.kernel.org S: Maintained -T: git git://github.com/broadcom/stblinux.git +T: git https://github.com/broadcom/stblinux.git F: arch/mips/bmips/* F: arch/mips/boot/dts/brcm/bcm*.dts* F: arch/mips/include/asm/mach-bmips/* @@ -4259,7 +4259,7 @@ M: Scott Branden <sbranden@broadcom.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://github.com/broadcom/stblinux.git +T: git https://github.com/broadcom/stblinux.git F: arch/arm64/boot/dts/broadcom/northstar2/* F: arch/arm64/boot/dts/broadcom/stingray/* F: drivers/clk/bcm/clk-ns* @@ -4329,7 +4329,7 @@ M: Florian Fainelli <f.fainelli@gmail.com> R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> L: linux-pm@vger.kernel.org S: Maintained -T: git git://github.com/broadcom/stblinux.git +T: git https://github.com/broadcom/stblinux.git F: drivers/soc/bcm/bcm63xx/bcm-pmb.c F: include/dt-bindings/soc/bcm-pmb.h @@ -4459,13 +4459,15 @@ M: Josef Bacik <josef@toxicpanda.com> M: David Sterba <dsterba@suse.com> L: linux-btrfs@vger.kernel.org S: Maintained -W: http://btrfs.wiki.kernel.org/ -Q: http://patchwork.kernel.org/project/linux-btrfs/list/ +W: https://btrfs.readthedocs.io +W: https://btrfs.wiki.kernel.org/ +Q: https://patchwork.kernel.org/project/linux-btrfs/list/ C: irc://irc.libera.chat/btrfs T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git F: 
Documentation/filesystems/btrfs.rst F: fs/btrfs/ F: include/linux/btrfs* +F: include/trace/events/btrfs.h F: include/uapi/linux/btrfs* BTTV VIDEO4LINUX DRIVER @@ -5266,6 +5268,7 @@ F: tools/testing/selftests/cgroup/ CONTROL GROUP - BLOCK IO CONTROLLER (BLKIO) M: Tejun Heo <tj@kernel.org> +M: Josef Bacik <josef@toxicpanda.com> M: Jens Axboe <axboe@kernel.dk> L: cgroups@vger.kernel.org L: linux-block@vger.kernel.org @@ -5273,6 +5276,7 @@ T: git git://git.kernel.dk/linux-block F: Documentation/admin-guide/cgroup-v1/blkio-controller.rst F: block/bfq-cgroup.c F: block/blk-cgroup.c +F: block/blk-iocost.c F: block/blk-iolatency.c F: block/blk-throttle.c F: include/linux/blk-cgroup.h @@ -6280,7 +6284,7 @@ M: Sakari Ailus <sakari.ailus@linux.intel.com> L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git -F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt +F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml F: drivers/media/i2c/dw9714.c DONGWOON DW9768 LENS VOICE COIL DRIVER @@ -9210,7 +9214,7 @@ W: https://www.hisilicon.com F: drivers/i2c/busses/i2c-hisi.c HISILICON LPC BUS DRIVER -M: john.garry@huawei.com +M: Jay Fang <f.fangjian@huawei.com> S: Maintained W: http://www.hisilicon.com F: Documentation/devicetree/bindings/arm/hisilicon/low-pin-count.yaml @@ -13618,6 +13622,12 @@ S: Supported F: drivers/misc/atmel-ssc.c F: include/linux/atmel-ssc.h +MICROCHIP SOC DRIVERS +M: Conor Dooley <conor@kernel.org> +S: Supported +T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/ +F: drivers/soc/microchip/ + MICROCHIP USB251XB DRIVER M: Richard Leitner <richard.leitner@skidata.com> L: linux-usb@vger.kernel.org @@ -14709,6 +14719,12 @@ F: drivers/nvme/target/auth.c F: drivers/nvme/target/fabrics-cmd-auth.c F: include/linux/nvme-auth.h +NVM EXPRESS HARDWARE MONITORING SUPPORT +M: Guenter Roeck <linux@roeck-us.net> +L: linux-nvme@lists.infradead.org +S: Supported +F: drivers/nvme/host/hwmon.c + NVM EXPRESS FC TRANSPORT DRIVERS M: James Smart <james.smart@broadcom.com> L: linux-nvme@lists.infradead.org @@ -15839,7 +15855,7 @@ F: Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml F: drivers/pci/controller/dwc/*designware* PCI DRIVER FOR TI DRA7XX/J721E -M: Kishon Vijay Abraham I <kishon@ti.com> +M: Vignesh Raghavendra <vigneshr@ti.com> L: linux-omap@vger.kernel.org L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -15856,10 +15872,10 @@ F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt F: drivers/pci/controller/pci-v3-semi.c PCI ENDPOINT SUBSYSTEM -M: Kishon Vijay Abraham I <kishon@ti.com> M: Lorenzo Pieralisi <lpieralisi@kernel.org> R: Krzysztof WilczyÅ„ski <kw@linux.com> R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> +R: Kishon Vijay Abraham I <kishon@kernel.org> L: linux-pci@vger.kernel.org S: Supported Q: https://patchwork.kernel.org/project/linux-pci/list/ @@ -17709,7 +17725,7 @@ F: arch/riscv/ N: riscv K: riscv -RISC-V/MICROCHIP POLARFIRE SOC SUPPORT +RISC-V MICROCHIP FPGA SUPPORT M: Conor Dooley <conor.dooley@microchip.com> M: Daire McNamara <daire.mcnamara@microchip.com> L: linux-riscv@lists.infradead.org @@ -17727,17 +17743,26 @@ F: Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml F: arch/riscv/boot/dts/microchip/ F: drivers/char/hw_random/mpfs-rng.c F: drivers/clk/microchip/clk-mpfs.c -F: drivers/i2c/busses/i2c-microchip-core.c +F: drivers/i2c/busses/i2c-microchip-corei2c.c F: drivers/mailbox/mailbox-mpfs.c F: 
drivers/pci/controller/pcie-microchip-host.c F: drivers/reset/reset-mpfs.c F: drivers/rtc/rtc-mpfs.c -F: drivers/soc/microchip/ +F: drivers/soc/microchip/mpfs-sys-controller.c F: drivers/spi/spi-microchip-core-qspi.c F: drivers/spi/spi-microchip-core.c F: drivers/usb/musb/mpfs.c F: include/soc/microchip/mpfs.h +RISC-V MISC SOC SUPPORT +M: Conor Dooley <conor@kernel.org> +L: linux-riscv@lists.infradead.org +S: Maintained +Q: https://patchwork.kernel.org/project/linux-riscv/list/ +T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/ +F: Documentation/devicetree/bindings/riscv/ +F: arch/riscv/boot/dts/ + RNBD BLOCK DRIVERS M: Md. Haris Iqbal <haris.iqbal@ionos.com> M: Jack Wang <jinpu.wang@ionos.com> @@ -18131,7 +18156,6 @@ L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git F: drivers/staging/media/deprecated/saa7146/ -F: include/media/drv-intf/saa7146* SAFESETID SECURITY MODULE M: Micah Morton <mortonm@chromium.org> @@ -18211,7 +18235,6 @@ F: include/media/drv-intf/s3c_camif.h SAMSUNG S3FWRN5 NFC DRIVER M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org> -M: Krzysztof Opasiak <k.opasiak@samsung.com> L: linux-nfc@lists.01.org (subscribers-only) S: Maintained F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml @@ -18777,7 +18800,6 @@ M: Palmer Dabbelt <palmer@dabbelt.com> M: Paul Walmsley <paul.walmsley@sifive.com> L: linux-riscv@lists.infradead.org S: Supported -T: git git://github.com/sifive/riscv-linux.git N: sifive K: [^@]sifive @@ -18796,6 +18818,13 @@ S: Maintained F: Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml F: drivers/dma/sf-pdma/ +SIFIVE SOC DRIVERS +M: Conor Dooley <conor@kernel.org> +L: linux-riscv@lists.infradead.org +S: Maintained +T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/ +F: drivers/soc/sifive/ + SILEAD TOUCHSCREEN DRIVER M: Hans de Goede <hdegoede@redhat.com> L: linux-input@vger.kernel.org @@ -19597,6 +19626,11 @@ M: Ion Badulescu <ionut@badula.org> S: Odd Fixes F: drivers/net/ethernet/adaptec/starfire* +STARFIVE DEVICETREES +M: Emil Renner Berthing <kernel@esmil.dk> +S: Maintained +F: arch/riscv/boot/dts/starfive/ + STARFIVE JH7100 CLOCK DRIVERS M: Emil Renner Berthing <kernel@esmil.dk> S: Maintained @@ -21293,7 +21327,7 @@ L: linux-usb@vger.kernel.org L: netdev@vger.kernel.org S: Maintained W: https://github.com/petkan/pegasus -T: git git://github.com/petkan/pegasus.git +T: git https://github.com/petkan/pegasus.git F: drivers/net/usb/pegasus.* USB PHY LAYER @@ -21330,7 +21364,7 @@ L: linux-usb@vger.kernel.org L: netdev@vger.kernel.org S: Maintained W: https://github.com/petkan/rtl8150 -T: git git://github.com/petkan/rtl8150.git +T: git https://github.com/petkan/rtl8150.git F: drivers/net/usb/rtl8150.c USB SERIAL SUBSYSTEM @@ -22121,6 +22155,7 @@ F: Documentation/watchdog/ F: drivers/watchdog/ F: include/linux/watchdog.h F: include/uapi/linux/watchdog.h +F: include/trace/events/watchdog.h WHISKEYCOVE PMIC GPIO DRIVER M: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com> @@ -22761,7 +22796,7 @@ S: Maintained W: http://mjpeg.sourceforge.net/driver-zoran/ Q: https://patchwork.linuxtv.org/project/linux-media/list/ F: Documentation/driver-api/media/drivers/zoran.rst -F: drivers/staging/media/zoran/ +F: drivers/media/pci/zoran/ ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER M: Minchan Kim <minchan@kernel.org> @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Hurr durr I'ma ninja sloth # 
*DOCUMENTATION* diff --git a/arch/arm/boot/dts/imx6q-yapp4-crux.dts b/arch/arm/boot/dts/imx6q-yapp4-crux.dts index 15f4824a5142..bddf3822ebf7 100644 --- a/arch/arm/boot/dts/imx6q-yapp4-crux.dts +++ b/arch/arm/boot/dts/imx6q-yapp4-crux.dts @@ -33,6 +33,10 @@ status = "okay"; }; +®_pu { + regulator-always-on; +}; + ®_usb_h1_vbus { status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi index 68e5ab2e27e2..6bb4855d13ce 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi @@ -29,7 +29,7 @@ user-pb { label = "user_pb"; - gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; + gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; linux,code = <BTN_0>; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi index 8e23cec7149e..696427b487f0 100644 --- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi @@ -26,7 +26,7 @@ user-pb { label = "user_pb"; - gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; + gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; linux,code = <BTN_0>; }; diff --git a/arch/arm/boot/dts/imx6qp-yapp4-crux-plus.dts b/arch/arm/boot/dts/imx6qp-yapp4-crux-plus.dts index cea165f2161a..afaf4a6759d4 100644 --- a/arch/arm/boot/dts/imx6qp-yapp4-crux-plus.dts +++ b/arch/arm/boot/dts/imx6qp-yapp4-crux-plus.dts @@ -33,6 +33,10 @@ status = "okay"; }; +®_pu { + regulator-always-on; +}; + ®_usb_h1_vbus { status = "okay"; }; diff --git a/arch/arm/boot/dts/lan966x-pcb8291.dts b/arch/arm/boot/dts/lan966x-pcb8291.dts index f4f054cdf2a8..3a3d76af8612 100644 --- a/arch/arm/boot/dts/lan966x-pcb8291.dts +++ b/arch/arm/boot/dts/lan966x-pcb8291.dts @@ -69,6 +69,12 @@ pins = "GPIO_35", "GPIO_36"; function = "can0_b"; }; + + sgpio_a_pins: sgpio-a-pins { + /* SCK, D0, D1, LD */ + pins = "GPIO_32", "GPIO_33", "GPIO_34", "GPIO_35"; + function = "sgpio_a"; + }; }; &can0 { @@ -118,6 +124,20 @@ status = "okay"; }; +&sgpio { + pinctrl-0 = <&sgpio_a_pins>; + pinctrl-names = "default"; + microchip,sgpio-port-ranges = <0 3>, <8 11>; + status = "okay"; + + gpio@0 { + ngpios = <64>; + }; + gpio@1 { + ngpios = <64>; + }; +}; + &switch { status = "okay"; }; diff --git a/arch/arm/boot/dts/sama7g5-pinfunc.h b/arch/arm/boot/dts/sama7g5-pinfunc.h index 4eb30445d205..6e87f0d4b8fc 100644 --- a/arch/arm/boot/dts/sama7g5-pinfunc.h +++ b/arch/arm/boot/dts/sama7g5-pinfunc.h @@ -261,7 +261,7 @@ #define PIN_PB2__FLEXCOM6_IO0 PINMUX_PIN(PIN_PB2, 2, 1) #define PIN_PB2__ADTRG PINMUX_PIN(PIN_PB2, 3, 1) #define PIN_PB2__A20 PINMUX_PIN(PIN_PB2, 4, 1) -#define PIN_PB2__FLEXCOM11_IO0 PINMUX_PIN(PIN_PB2, 6, 3) +#define PIN_PB2__FLEXCOM11_IO1 PINMUX_PIN(PIN_PB2, 6, 3) #define PIN_PB3 35 #define PIN_PB3__GPIO PINMUX_PIN(PIN_PB3, 0, 0) #define PIN_PB3__RF1 PINMUX_PIN(PIN_PB3, 1, 1) diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index fbaa0ce46427..8f1bb78fc1e4 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -24,6 +24,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index 1c9094f24893..e2f0cdacba7d 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -28,6 +28,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + 
temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-ux500-samsung-codina-tmo.dts b/arch/arm/boot/dts/ste-ux500-samsung-codina-tmo.dts index d6940e0afa86..27a3ab7e25e1 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-codina-tmo.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-codina-tmo.dts @@ -44,6 +44,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-ux500-samsung-codina.dts b/arch/arm/boot/dts/ste-ux500-samsung-codina.dts index 5f41256d7f4b..b88f0c07873d 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-codina.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-codina.dts @@ -57,6 +57,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-ux500-samsung-gavini.dts b/arch/arm/boot/dts/ste-ux500-samsung-gavini.dts index 806da3fc33cd..7231bc745200 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-gavini.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-gavini.dts @@ -30,6 +30,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts index b0dce91aff4b..9604695edf53 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts @@ -35,6 +35,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts index ed5c79c3d04b..69387e8754a9 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts @@ -30,6 +30,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-ux500-samsung-kyle.dts b/arch/arm/boot/dts/ste-ux500-samsung-kyle.dts index c57676faf181..167846df3104 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-kyle.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-kyle.dts @@ -34,6 +34,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts index 81b341a5ae45..93e5f5ed888d 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts @@ -30,6 +30,14 @@ polling-delay = <0>; polling-delay-passive = <0>; thermal-sensors = <&bat_therm>; + + trips { + battery-crit-hi { + temperature = <70000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; }; diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S index ffed4d949042..e4904faf1753 100644 --- 
a/arch/arm/mach-at91/pm_suspend.S +++ b/arch/arm/mach-at91/pm_suspend.S @@ -169,10 +169,15 @@ sr_ena_2: cmp tmp1, #UDDRC_STAT_SELFREF_TYPE_SW bne sr_ena_2 - /* Put DDR PHY's DLL in bypass mode for non-backup modes. */ + /* Disable DX DLLs for non-backup modes. */ cmp r7, #AT91_PM_BACKUP beq sr_ena_3 + /* Do not soft reset the AC DLL. */ + ldr tmp1, [r3, DDR3PHY_ACDLLCR] + bic tmp1, tmp1, DDR3PHY_ACDLLCR_DLLSRST + str tmp1, [r3, DDR3PHY_ACDLLCR] + /* Disable DX DLLs. */ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR] orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 2f27619d8abd..8b4d280b1e7e 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -751,12 +751,26 @@ polling-delay = <1000>; polling-delay-passive = <100>; thermal-sensors = <&scpi_sensors0 0>; + trips { + pmic_crit0: trip0 { + temperature = <90000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; soc { polling-delay = <1000>; polling-delay-passive = <100>; thermal-sensors = <&scpi_sensors0 3>; + trips { + soc_crit0: trip0 { + temperature = <80000>; + hysteresis = <2000>; + type = "critical"; + }; + }; }; big_cluster_thermal_zone: big-cluster { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi index 421d879013d7..260d045dbd9a 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi @@ -779,6 +779,9 @@ little-endian; #address-cells = <1>; #size-cells = <0>; + clock-frequency = <2500000>; + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL + QORIQ_CLK_PLL_DIV(1)>; status = "disabled"; }; @@ -788,6 +791,9 @@ little-endian; #address-cells = <1>; #size-cells = <0>; + clock-frequency = <2500000>; + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL + QORIQ_CLK_PLL_DIV(1)>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi index f1b9cc8714dc..348d9e3a9125 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi @@ -532,6 +532,9 @@ little-endian; #address-cells = <1>; #size-cells = <0>; + clock-frequency = <2500000>; + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL + QORIQ_CLK_PLL_DIV(2)>; status = "disabled"; }; @@ -541,6 +544,9 @@ little-endian; #address-cells = <1>; #size-cells = <0>; + clock-frequency = <2500000>; + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL + QORIQ_CLK_PLL_DIV(2)>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi index 6680fb2a6dc9..8c76d86cb756 100644 --- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi @@ -1385,6 +1385,9 @@ #address-cells = <1>; #size-cells = <0>; little-endian; + clock-frequency = <2500000>; + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL + QORIQ_CLK_PLL_DIV(2)>; status = "disabled"; }; @@ -1395,6 +1398,9 @@ little-endian; #address-cells = <1>; #size-cells = <0>; + clock-frequency = <2500000>; + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL + QORIQ_CLK_PLL_DIV(2)>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi index 82a1c4488378..10370d1a6c6d 100644 --- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi @@ -38,9 +38,9 @@ conn_subsys: bus@5b000000 { interrupts = <GIC_SPI 
232 IRQ_TYPE_LEVEL_HIGH>; reg = <0x5b010000 0x10000>; clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>, - <&sdhc0_lpcg IMX_LPCG_CLK_5>, - <&sdhc0_lpcg IMX_LPCG_CLK_0>; - clock-names = "ipg", "per", "ahb"; + <&sdhc0_lpcg IMX_LPCG_CLK_0>, + <&sdhc0_lpcg IMX_LPCG_CLK_5>; + clock-names = "ipg", "ahb", "per"; power-domains = <&pd IMX_SC_R_SDHC_0>; status = "disabled"; }; @@ -49,9 +49,9 @@ conn_subsys: bus@5b000000 { interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>; reg = <0x5b020000 0x10000>; clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>, - <&sdhc1_lpcg IMX_LPCG_CLK_5>, - <&sdhc1_lpcg IMX_LPCG_CLK_0>; - clock-names = "ipg", "per", "ahb"; + <&sdhc1_lpcg IMX_LPCG_CLK_0>, + <&sdhc1_lpcg IMX_LPCG_CLK_5>; + clock-names = "ipg", "ahb", "per"; power-domains = <&pd IMX_SC_R_SDHC_1>; fsl,tuning-start-tap = <20>; fsl,tuning-step = <2>; @@ -62,9 +62,9 @@ conn_subsys: bus@5b000000 { interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>; reg = <0x5b030000 0x10000>; clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>, - <&sdhc2_lpcg IMX_LPCG_CLK_5>, - <&sdhc2_lpcg IMX_LPCG_CLK_0>; - clock-names = "ipg", "per", "ahb"; + <&sdhc2_lpcg IMX_LPCG_CLK_0>, + <&sdhc2_lpcg IMX_LPCG_CLK_5>; + clock-names = "ipg", "ahb", "per"; power-domains = <&pd IMX_SC_R_SDHC_2>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts index 32f6f2f50c10..43e89859c044 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts @@ -250,21 +250,21 @@ /* SODIMM 96 */ MX8MM_IOMUXC_SAI1_RXD2_GPIO4_IO4 0x1c4 /* CPLD_D[7] */ - MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x1c4 + MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x184 /* CPLD_D[6] */ - MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x1c4 + MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x184 /* CPLD_D[5] */ - MX8MM_IOMUXC_SAI1_TXC_GPIO4_IO11 0x1c4 + MX8MM_IOMUXC_SAI1_TXC_GPIO4_IO11 0x184 /* CPLD_D[4] */ - MX8MM_IOMUXC_SAI1_TXD0_GPIO4_IO12 0x1c4 + MX8MM_IOMUXC_SAI1_TXD0_GPIO4_IO12 0x184 /* CPLD_D[3] */ - MX8MM_IOMUXC_SAI1_TXD1_GPIO4_IO13 0x1c4 + MX8MM_IOMUXC_SAI1_TXD1_GPIO4_IO13 0x184 /* CPLD_D[2] */ - MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x1c4 + MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x184 /* CPLD_D[1] */ - MX8MM_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x1c4 + MX8MM_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x184 /* CPLD_D[0] */ - MX8MM_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x1c4 + MX8MM_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x184 /* KBD_intK */ MX8MM_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x1c4 /* DISP_reset */ diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index afb90f59c83c..dabd94dc30c4 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -276,6 +276,7 @@ assigned-clocks = <&clk IMX8MM_CLK_USB_PHY_REF>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_100M>; clock-names = "main_clk"; + power-domains = <&pgc_otg1>; }; usbphynop2: usbphynop2 { @@ -285,6 +286,7 @@ assigned-clocks = <&clk IMX8MM_CLK_USB_PHY_REF>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_100M>; clock-names = "main_clk"; + power-domains = <&pgc_otg2>; }; soc: soc@0 { @@ -674,13 +676,11 @@ pgc_otg1: power-domain@2 { #power-domain-cells = <0>; reg = <IMX8MM_POWER_DOMAIN_OTG1>; - power-domains = <&pgc_hsiomix>; }; pgc_otg2: power-domain@3 { #power-domain-cells = <0>; reg = <IMX8MM_POWER_DOMAIN_OTG2>; - power-domains = <&pgc_hsiomix>; }; pgc_gpumix: power-domain@4 { @@ -1186,7 +1186,7 @@ assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>; phys = <&usbphynop1>; fsl,usbmisc = <&usbmisc1 0>; - power-domains = <&pgc_otg1>; + 
power-domains = <&pgc_hsiomix>; status = "disabled"; }; @@ -1206,7 +1206,7 @@ assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>; phys = <&usbphynop2>; fsl,usbmisc = <&usbmisc2 0>; - power-domains = <&pgc_otg2>; + power-domains = <&pgc_hsiomix>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi index cb2836bfbd95..ad0b99adf691 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@ -662,7 +662,6 @@ pgc_otg1: power-domain@1 { #power-domain-cells = <0>; reg = <IMX8MN_POWER_DOMAIN_OTG1>; - power-domains = <&pgc_hsiomix>; }; pgc_gpumix: power-domain@2 { @@ -1076,7 +1075,7 @@ assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>; phys = <&usbphynop1>; fsl,usbmisc = <&usbmisc1 0>; - power-domains = <&pgc_otg1>; + power-domains = <&pgc_hsiomix>; status = "disabled"; }; @@ -1175,5 +1174,6 @@ assigned-clocks = <&clk IMX8MN_CLK_USB_PHY_REF>; assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>; clock-names = "main_clk"; + power-domains = <&pgc_otg1>; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi index 7b712d1888ea..5dcd1de586b5 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi @@ -354,16 +354,6 @@ "SODIMM_82", "SODIMM_70", "SODIMM_72"; - - ctrl-sleep-moci-hog { - gpio-hog; - /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */ - gpios = <29 GPIO_ACTIVE_HIGH>; - line-name = "CTRL_SLEEP_MOCI#"; - output-high; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ctrl_sleep_moci>; - }; }; &gpio3 { @@ -432,6 +422,16 @@ "SODIMM_256", "SODIMM_48", "SODIMM_44"; + + ctrl-sleep-moci-hog { + gpio-hog; + /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */ + gpios = <29 GPIO_ACTIVE_HIGH>; + line-name = "CTRL_SLEEP_MOCI#"; + output-high; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ctrl_sleep_moci>; + }; }; /* On-module I2C */ diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi index 3a5713bb4880..0247866fc86b 100644 --- a/arch/arm64/boot/dts/freescale/imx93.dtsi +++ b/arch/arm64/boot/dts/freescale/imx93.dtsi @@ -451,7 +451,7 @@ clocks = <&clk IMX93_CLK_GPIO2_GATE>, <&clk IMX93_CLK_GPIO2_GATE>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc 0 32 32>; + gpio-ranges = <&iomuxc 0 4 30>; }; gpio3: gpio@43820080 { @@ -465,7 +465,8 @@ clocks = <&clk IMX93_CLK_GPIO3_GATE>, <&clk IMX93_CLK_GPIO3_GATE>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc 0 64 32>; + gpio-ranges = <&iomuxc 0 84 8>, <&iomuxc 8 66 18>, + <&iomuxc 26 34 2>, <&iomuxc 28 0 4>; }; gpio4: gpio@43830080 { @@ -479,7 +480,7 @@ clocks = <&clk IMX93_CLK_GPIO4_GATE>, <&clk IMX93_CLK_GPIO4_GATE>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc 0 96 32>; + gpio-ranges = <&iomuxc 0 38 28>, <&iomuxc 28 36 2>; }; gpio1: gpio@47400080 { @@ -493,7 +494,7 @@ clocks = <&clk IMX93_CLK_GPIO1_GATE>, <&clk IMX93_CLK_GPIO1_GATE>; clock-names = "gpio", "port"; - gpio-ranges = <&iomuxc 0 0 32>; + gpio-ranges = <&iomuxc 0 92 16>; }; s4muap: mailbox@47520000 { @@ -501,7 +502,7 @@ reg = <0x47520000 0x10000>; interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "txirq", "rxirq"; + interrupt-names = "tx", "rx"; #mbox-cells = <2>; }; diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi index a47acf9bdf24..a721cdd80489 100644 --- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi +++ 
b/arch/arm64/boot/dts/qcom/ipq8074.dtsi @@ -668,7 +668,7 @@ apcs_glb: mailbox@b111000 { compatible = "qcom,ipq8074-apcs-apps-global"; - reg = <0x0b111000 0x6000>; + reg = <0x0b111000 0x1000>; #clock-cells = <1>; #mbox-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index c0a2baffa49d..aba717644391 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -3504,7 +3504,7 @@ }; saw3: syscon@9a10000 { - compatible = "qcom,tcsr-msm8996", "syscon"; + compatible = "syscon"; reg = <0x09a10000 0x1000>; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts index 87ab0e1ecd16..4dee790f1049 100644 --- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts +++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts @@ -43,7 +43,6 @@ regulator-always-on; regulator-boot-on; - regulator-allow-set-load; vin-supply = <&vreg_3p3>; }; @@ -137,6 +136,9 @@ regulator-max-microvolt = <880000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l7a_1p8: ldo7 { @@ -152,6 +154,9 @@ regulator-max-microvolt = <2960000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l11a_0p8: ldo11 { @@ -258,6 +263,9 @@ regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l7c_1p8: ldo7 { @@ -273,6 +281,9 @@ regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l10c_3p3: ldo10 { diff --git a/arch/arm64/boot/dts/qcom/sa8295p-adp.dts b/arch/arm64/boot/dts/qcom/sa8295p-adp.dts index b608b82dff03..2c62ba6a49c5 100644 --- a/arch/arm64/boot/dts/qcom/sa8295p-adp.dts +++ b/arch/arm64/boot/dts/qcom/sa8295p-adp.dts @@ -83,6 +83,9 @@ regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l4c: ldo4 { @@ -98,6 +101,9 @@ regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l7c: ldo7 { @@ -113,6 +119,9 @@ regulator-max-microvolt = <2504000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l17c: ldo17 { @@ -121,6 +130,9 @@ regulator-max-microvolt = <2504000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index 212580316d3e..4cdc88d33944 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -2296,7 +2296,8 @@ lpass_audiocc: clock-controller@3300000 { compatible = "qcom,sc7280-lpassaudiocc"; - reg = <0 0x03300000 0 0x30000>; + reg = <0 0x03300000 0 0x30000>, + <0 0x032a9000 0 0x1000>; clocks = <&rpmhcc RPMH_CXO_CLK>, <&lpass_aon 
LPASS_AON_CC_MAIN_RCG_CLK_SRC>; clock-names = "bi_tcxo", "lpass_aon_cc_main_rcg_clk_src"; diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts index fea7d8273ccd..5e30349efd20 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts +++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts @@ -124,6 +124,9 @@ regulator-max-microvolt = <2504000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l13c: ldo13 { @@ -146,6 +149,9 @@ regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l4d: ldo4 { diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi index c32bcded2aef..212d63d5cbf2 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi @@ -885,13 +885,13 @@ ufs_mem_phy: phy@1d87000 { compatible = "qcom,sc8280xp-qmp-ufs-phy"; - reg = <0 0x01d87000 0 0xe10>; + reg = <0 0x01d87000 0 0x1c8>; #address-cells = <2>; #size-cells = <2>; ranges; clock-names = "ref", "ref_aux"; - clocks = <&rpmhcc RPMH_CXO_CLK>, + clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>, <&gcc GCC_UFS_PHY_PHY_AUX_CLK>; resets = <&ufs_mem_hc 0>; @@ -953,13 +953,13 @@ ufs_card_phy: phy@1da7000 { compatible = "qcom,sc8280xp-qmp-ufs-phy"; - reg = <0 0x01da7000 0 0xe10>; + reg = <0 0x01da7000 0 0x1c8>; #address-cells = <2>; #size-cells = <2>; ranges; clock-names = "ref", "ref_aux"; - clocks = <&gcc GCC_UFS_1_CARD_CLKREF_CLK>, + clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>, <&gcc GCC_UFS_CARD_PHY_AUX_CLK>; resets = <&ufs_card_hc 0>; @@ -1181,26 +1181,16 @@ usb_0_ssphy: usb3-phy@88eb400 { reg = <0 0x088eb400 0 0x100>, <0 0x088eb600 0 0x3ec>, - <0 0x088ec400 0 0x1f0>, + <0 0x088ec400 0 0x364>, <0 0x088eba00 0 0x100>, <0 0x088ebc00 0 0x3ec>, - <0 0x088ec700 0 0x64>; + <0 0x088ec200 0 0x18>; #phy-cells = <0>; #clock-cells = <0>; clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; clock-names = "pipe0"; clock-output-names = "usb0_phy_pipe_clk_src"; }; - - usb_0_dpphy: dp-phy@88ed200 { - reg = <0 0x088ed200 0 0x200>, - <0 0x088ed400 0 0x200>, - <0 0x088eda00 0 0x200>, - <0 0x088ea600 0 0x200>, - <0 0x088ea800 0 0x200>; - #clock-cells = <1>; - #phy-cells = <0>; - }; }; usb_1_hsphy: phy@8902000 { @@ -1242,8 +1232,8 @@ usb_1_ssphy: usb3-phy@8903400 { reg = <0 0x08903400 0 0x100>, - <0 0x08903c00 0 0x3ec>, - <0 0x08904400 0 0x1f0>, + <0 0x08903600 0 0x3ec>, + <0 0x08904400 0 0x364>, <0 0x08903a00 0 0x100>, <0 0x08903c00 0 0x3ec>, <0 0x08904200 0 0x18>; @@ -1253,16 +1243,6 @@ clock-names = "pipe0"; clock-output-names = "usb1_phy_pipe_clk_src"; }; - - usb_1_dpphy: dp-phy@8904200 { - reg = <0 0x08904200 0 0x200>, - <0 0x08904400 0 0x200>, - <0 0x08904a00 0 0x200>, - <0 0x08904600 0 0x200>, - <0 0x08904800 0 0x200>; - #clock-cells = <1>; - #phy-cells = <0>; - }; }; system-cache-controller@9200000 { diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi index 014fe3a31548..fb6e5a140c9f 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi @@ -348,6 +348,9 @@ regulator-max-microvolt = <2960000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + 
RPMH_REGULATOR_MODE_HPM>; }; vreg_l7c_3p0: ldo7 { @@ -367,6 +370,9 @@ regulator-max-microvolt = <2960000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l10c_3p3: ldo10 { diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi index 549e0a2aa9fe..5428aab3058d 100644 --- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi @@ -317,6 +317,9 @@ regulator-max-microvolt = <2960000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l7c_2p85: ldo7 { @@ -339,6 +342,9 @@ regulator-max-microvolt = <2960000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l10c_3p3: ldo10 { diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index a5b62cadb129..e276eed1f8e2 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -334,6 +334,7 @@ exit-latency-us = <6562>; min-residency-us = <9987>; local-timer-stop; + status = "disabled"; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts index 0fcf5bd88fc7..69ae6503c2f6 100644 --- a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts +++ b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts @@ -107,6 +107,9 @@ regulator-max-microvolt = <888000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l6b_1p2: ldo6 { @@ -115,6 +118,9 @@ regulator-max-microvolt = <1208000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l7b_2p96: ldo7 { @@ -123,6 +129,9 @@ regulator-max-microvolt = <2504000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; vreg_l9b_1p2: ldo9 { @@ -131,6 +140,9 @@ regulator-max-microvolt = <1200000>; regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; regulator-allow-set-load; + regulator-allowed-modes = + <RPMH_REGULATOR_MODE_LPM + RPMH_REGULATOR_MODE_HPM>; }; }; diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 1b098bd4cd37..3252eb50ecfe 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -13,6 +13,18 @@ #define KVM_PGTABLE_MAX_LEVELS 4U +/* + * The largest supported block sizes for KVM (no 52-bit PA support): + * - 4K (level 1): 1GB + * - 16K (level 2): 32MB + * - 64K (level 2): 512MB + */ +#ifdef CONFIG_ARM64_4K_PAGES +#define KVM_PGTABLE_MIN_BLOCK_LEVEL 1U +#else +#define KVM_PGTABLE_MIN_BLOCK_LEVEL 2U +#endif + static inline u64 kvm_get_parange(u64 mmfr0) { u64 parange = cpuid_feature_extract_unsigned_field(mmfr0, @@ -58,11 +70,7 @@ static inline u64 kvm_granule_size(u32 level) static inline bool kvm_level_supports_block_mapping(u32 level) { - /* - * Reject invalid block mappings and don't bother with 4TB mappings for - * 52-bit PAs. 
- */ - return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1)); + return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL; } /** diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index fe341a6578c3..c8dca8ae359c 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -11,13 +11,6 @@ #include <linux/pgtable.h> /* - * PGDIR_SHIFT determines the size a top-level page table entry can map - * and depends on the number of levels in the page table. Compute the - * PGDIR_SHIFT for a given number of levels. - */ -#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls)) - -/* * The hardware supports concatenation of up to 16 tables at stage2 entry * level and we use the feature whenever possible, which means we resolve 4 * additional bits of address at the entry level. @@ -30,11 +23,6 @@ #define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4) #define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr) -/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */ -#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm)) -#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm)) -#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1) - /* * kvm_mmmu_cache_min_pages() is the number of pages required to install * a stage-2 translation. We pre-allocate the entry level page table at @@ -42,12 +30,4 @@ */ #define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1) -static inline phys_addr_t -stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) -{ - phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm); - - return (boundary - 1 < end - 1) ? boundary : end; -} - #endif /* __ARM64_S2_PGTABLE_H_ */ diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index bd5df50e4643..795344ab4ec4 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -7,6 +7,7 @@ */ #include <linux/linkage.h> +#include <linux/cfi_types.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> #include <asm/ftrace.h> @@ -294,10 +295,14 @@ SYM_FUNC_END(ftrace_graph_caller) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ -SYM_FUNC_START(ftrace_stub) +SYM_TYPED_FUNC_START(ftrace_stub) ret SYM_FUNC_END(ftrace_stub) +SYM_TYPED_FUNC_START(ftrace_stub_graph) + ret +SYM_FUNC_END(ftrace_stub_graph) + #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * void return_to_handler(void) diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 687598e41b21..a38dea6186c9 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -5,9 +5,6 @@ incdir := $(srctree)/$(src)/include subdir-asflags-y := -I$(incdir) -subdir-ccflags-y := -I$(incdir) \ - -fno-stack-protector \ - -DDISABLE_BRANCH_PROFILING \ - $(DISABLE_STACKLEAK_PLUGIN) +subdir-ccflags-y := -I$(incdir) obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index b5c5119c7396..be0a2bc3e20d 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -10,6 +10,9 @@ asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS # will explode instantly (Words of Marc Zyngier). So introduce a generic flag # __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM. 
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__ +ccflags-y += -fno-stack-protector \ + -DDISABLE_BRANCH_PROFILING \ + $(DISABLE_STACKLEAK_PLUGIN) hostprogs := gen-hyprel HOST_EXTRACFLAGS += -I$(objtree)/include @@ -89,6 +92,10 @@ quiet_cmd_hypcopy = HYPCOPY $@ # Remove ftrace, Shadow Call Stack, and CFI CFLAGS. # This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations. KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS)) +# Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile' +# when profile optimization is applied. gen-hyprel does not support SHT_REL and +# causes a build failure. Remove profile optimization flags. +KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS)) # KVM nVHE code is run at a different exception code with a different map, so # compiler instrumentation that inserts callbacks or checks into the code may diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 34c5feed9dc1..60ee3d9f01f8 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -31,6 +31,13 @@ static phys_addr_t hyp_idmap_vector; static unsigned long io_map_base; +static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL); + phys_addr_t boundary = ALIGN_DOWN(addr + size, size); + + return (boundary - 1 < end - 1) ? boundary : end; +} /* * Release kvm_mmu_lock periodically if the memory region is large. Otherwise, @@ -52,7 +59,7 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr, if (!pgt) return -EINVAL; - next = stage2_pgd_addr_end(kvm, addr, end); + next = stage2_range_addr_end(addr, end); ret = fn(pgt, addr, next - addr); if (ret) break; diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 24d7778d1ce6..733b53055f97 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2149,7 +2149,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, memset(entry, 0, esz); - while (len > 0) { + while (true) { int next_offset; size_t byte_offset; @@ -2162,6 +2162,9 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, return next_offset; byte_offset = next_offset * esz; + if (byte_offset >= len) + break; + id += next_offset; gpa += byte_offset; len -= byte_offset; diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index 8a5c246b0a21..f6fbe7042f1c 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -42,16 +42,8 @@ void flush_icache_mm(struct mm_struct *mm, bool local); #endif /* CONFIG_SMP */ -/* - * The T-Head CMO errata internally probe the CBOM block size, but otherwise - * don't depend on Zicbom. 
- */ extern unsigned int riscv_cbom_block_size; -#ifdef CONFIG_RISCV_ISA_ZICBOM void riscv_init_cbom_blocksize(void); -#else -static inline void riscv_init_cbom_blocksize(void) { } -#endif #ifdef CONFIG_RISCV_DMA_NONCOHERENT void riscv_noncoherent_supported(void); diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h index 0d8fdb8ec63a..82f7260301da 100644 --- a/arch/riscv/include/asm/kvm_vcpu_timer.h +++ b/arch/riscv/include/asm/kvm_vcpu_timer.h @@ -45,6 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu); int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu); void kvm_riscv_guest_timer_init(struct kvm *kvm); +void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu); bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu); diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index a032c4f0d600..71ebbc4821f0 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -708,6 +708,9 @@ void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu) clear_bit(IRQ_VS_SOFT, &v->irqs_pending); } } + + /* Sync-up timer CSRs */ + kvm_riscv_vcpu_timer_sync(vcpu); } int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq) diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c index 185f2386a747..ad34519c8a13 100644 --- a/arch/riscv/kvm/vcpu_timer.c +++ b/arch/riscv/kvm/vcpu_timer.c @@ -320,20 +320,33 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu) kvm_riscv_vcpu_timer_unblocking(vcpu); } -void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu) +void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu) { struct kvm_vcpu_timer *t = &vcpu->arch.timer; if (!t->sstc_enabled) return; - t = &vcpu->arch.timer; #if defined(CONFIG_32BIT) t->next_cycles = csr_read(CSR_VSTIMECMP); t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32; #else t->next_cycles = csr_read(CSR_VSTIMECMP); #endif +} + +void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + + if (!t->sstc_enabled) + return; + + /* + * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync() + * upon every VM exit so no need to save here. 
+ */ + /* timer should be enabled for the remaining operations */ if (unlikely(!t->init_done)) return; diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 6cb7d96ad9c7..57b40a350420 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -3,6 +3,7 @@ * Copyright (C) 2017 SiFive */ +#include <linux/of.h> #include <asm/cacheflush.h> #ifdef CONFIG_SMP @@ -86,3 +87,40 @@ void flush_icache_pte(pte_t pte) flush_icache_all(); } #endif /* CONFIG_MMU */ + +unsigned int riscv_cbom_block_size; +EXPORT_SYMBOL_GPL(riscv_cbom_block_size); + +void riscv_init_cbom_blocksize(void) +{ + struct device_node *node; + unsigned long cbom_hartid; + u32 val, probed_block_size; + int ret; + + probed_block_size = 0; + for_each_of_cpu_node(node) { + unsigned long hartid; + + ret = riscv_of_processor_hartid(node, &hartid); + if (ret) + continue; + + /* set block-size for cbom extension if available */ + ret = of_property_read_u32(node, "riscv,cbom-block-size", &val); + if (ret) + continue; + + if (!probed_block_size) { + probed_block_size = val; + cbom_hartid = hartid; + } else { + if (probed_block_size != val) + pr_warn("cbom-block-size mismatched between harts %lu and %lu\n", + cbom_hartid, hartid); + } + } + + if (probed_block_size) + riscv_cbom_block_size = probed_block_size; +} diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index b0add983530a..d919efab6eba 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -8,13 +8,8 @@ #include <linux/dma-direct.h> #include <linux/dma-map-ops.h> #include <linux/mm.h> -#include <linux/of.h> -#include <linux/of_device.h> #include <asm/cacheflush.h> -unsigned int riscv_cbom_block_size; -EXPORT_SYMBOL_GPL(riscv_cbom_block_size); - static bool noncoherent_supported; void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, @@ -77,42 +72,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, dev->dma_coherent = coherent; } -#ifdef CONFIG_RISCV_ISA_ZICBOM -void riscv_init_cbom_blocksize(void) -{ - struct device_node *node; - unsigned long cbom_hartid; - u32 val, probed_block_size; - int ret; - - probed_block_size = 0; - for_each_of_cpu_node(node) { - unsigned long hartid; - - ret = riscv_of_processor_hartid(node, &hartid); - if (ret) - continue; - - /* set block-size for cbom extension if available */ - ret = of_property_read_u32(node, "riscv,cbom-block-size", &val); - if (ret) - continue; - - if (!probed_block_size) { - probed_block_size = val; - cbom_hartid = hartid; - } else { - if (probed_block_size != val) - pr_warn("cbom-block-size mismatched between harts %lu and %lu\n", - cbom_hartid, hartid); - } - } - - if (probed_block_size) - riscv_cbom_block_size = probed_block_size; -} -#endif - void riscv_noncoherent_supported(void) { WARN(!riscv_cbom_block_size, diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6d1879ef933a..67745ceab0db 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1973,7 +1973,6 @@ config EFI config EFI_STUB bool "EFI stub support" depends on EFI - depends on $(cc-option,-mabi=ms) || X86_32 select RELOCATABLE help This kernel feature allows a bzImage to be loaded directly diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 4fce1a4226e3..8259d725054d 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1596,7 +1596,7 @@ void __init intel_pmu_arch_lbr_init(void) return; clear_arch_lbr: - clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR); + 
setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR); } /** diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 0bef44d30a27..2fd52b65deac 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -25,8 +25,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { u64 start = rmrr->base_address; u64 end = rmrr->end_address + 1; + int entry_type; - if (e820__mapped_all(start, end, E820_TYPE_RESERVED)) + entry_type = e820__get_entry_type(start, end); + if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS) return 0; pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n", diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index e7410e98fc1f..3a35dec3ec55 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -440,7 +440,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p return ret; native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - if (rev >= mc->hdr.patch_id) + + /* + * Allow application of the same revision to pick up SMT-specific + * changes even if the revision of the other SMT thread is already + * up-to-date. + */ + if (rev > mc->hdr.patch_id) return ret; if (!__apply_microcode_amd(mc)) { @@ -528,8 +534,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax) native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - /* Check whether we have saved a new patch already: */ - if (*new_rev && rev < mc->hdr.patch_id) { + /* + * Check whether a new patch has been saved already. Also, allow application of + * the same revision in order to pick up SMT-thread-specific configuration even + * if the sibling SMT thread already has an up-to-date revision. 
+ */ + if (*new_rev && rev <= mc->hdr.patch_id) { if (!__apply_microcode_amd(mc)) { *new_rev = mc->hdr.patch_id; return; diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index de62b0b87ced..3266ea36667c 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -66,9 +66,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L3, .name = "L3", .cache_level = 3, - .cache = { - .min_cbm_bits = 1, - }, .domains = domain_init(RDT_RESOURCE_L3), .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", @@ -83,9 +80,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .rid = RDT_RESOURCE_L2, .name = "L2", .cache_level = 2, - .cache = { - .min_cbm_bits = 1, - }, .domains = domain_init(RDT_RESOURCE_L2), .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", @@ -836,6 +830,7 @@ static __init void rdt_init_res_defs_intel(void) r->cache.arch_has_sparse_bitmaps = false; r->cache.arch_has_empty_bitmaps = false; r->cache.arch_has_per_cpu_cfg = false; + r->cache.min_cbm_bits = 1; } else if (r->rid == RDT_RESOURCE_MBA) { hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE; hw_res->msr_update = mba_wrmsr_intel; @@ -856,6 +851,7 @@ static __init void rdt_init_res_defs_amd(void) r->cache.arch_has_sparse_bitmaps = true; r->cache.arch_has_empty_bitmaps = true; r->cache.arch_has_per_cpu_cfg = true; + r->cache.min_cbm_bits = 0; } else if (r->rid == RDT_RESOURCE_MBA) { hw_res->msr_base = MSR_IA32_MBA_BW_BASE; hw_res->msr_update = mba_wrmsr_amd; diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 132a2de44d2f..5e868b62a7c4 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c) unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width; unsigned int core_select_mask, core_level_siblings; unsigned int die_select_mask, die_level_siblings; + unsigned int pkg_mask_width; bool die_level_present = false; int leaf; @@ -111,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c) core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); - die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); sub_index = 1; - do { + while (true) { cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx); /* @@ -132,10 +133,15 @@ int detect_extended_topology(struct cpuinfo_x86 *c) die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } + if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE) + pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); + else + break; + sub_index++; - } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE); + } - core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; + core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width; die_select_mask = (~(-1 << die_plus_mask_width)) >> core_plus_mask_width; @@ -148,7 +154,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c) } c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, - die_plus_mask_width); + pkg_mask_width); /* * Reinit the apicid, now that we have extended initial_apicid. 
*/ diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 621f4b6cac4a..8946f89761cc 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -210,13 +210,6 @@ static void __init fpu__init_system_xstate_size_legacy(void) fpstate_reset(¤t->thread.fpu); } -static void __init fpu__init_init_fpstate(void) -{ - /* Bring init_fpstate size and features up to date */ - init_fpstate.size = fpu_kernel_cfg.max_size; - init_fpstate.xfeatures = fpu_kernel_cfg.max_features; -} - /* * Called on the boot CPU once per system bootup, to set up the initial * FPU state that is later cloned into all processes: @@ -236,5 +229,4 @@ void __init fpu__init_system(struct cpuinfo_x86 *c) fpu__init_system_xstate_size_legacy(); fpu__init_system_xstate(fpu_kernel_cfg.max_size); fpu__init_task_struct_size(); - fpu__init_init_fpstate(); } diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index c8340156bfd2..59e543b95a3c 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -360,7 +360,7 @@ static void __init setup_init_fpu_buf(void) print_xstate_features(); - xstate_init_xcomp_bv(&init_fpstate.regs.xsave, fpu_kernel_cfg.max_features); + xstate_init_xcomp_bv(&init_fpstate.regs.xsave, init_fpstate.xfeatures); /* * Init all the features state with header.xfeatures being 0x0 @@ -678,20 +678,6 @@ static unsigned int __init get_xsave_size_user(void) return ebx; } -/* - * Will the runtime-enumerated 'xstate_size' fit in the init - * task's statically-allocated buffer? - */ -static bool __init is_supported_xstate_size(unsigned int test_xstate_size) -{ - if (test_xstate_size <= sizeof(init_fpstate.regs)) - return true; - - pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n", - sizeof(init_fpstate.regs), test_xstate_size); - return false; -} - static int __init init_xstate_size(void) { /* Recompute the context size for enabled features: */ @@ -717,10 +703,6 @@ static int __init init_xstate_size(void) kernel_default_size = xstate_calculate_size(fpu_kernel_cfg.default_features, compacted); - /* Ensure we have the space to store all default enabled features. */ - if (!is_supported_xstate_size(kernel_default_size)) - return -EINVAL; - if (!paranoid_xstate_size_valid(kernel_size)) return -EINVAL; @@ -875,6 +857,19 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) update_regset_xstate_info(fpu_user_cfg.max_size, fpu_user_cfg.max_features); + /* + * init_fpstate excludes dynamic states as they are large but init + * state is zero. + */ + init_fpstate.size = fpu_kernel_cfg.default_size; + init_fpstate.xfeatures = fpu_kernel_cfg.default_features; + + if (init_fpstate.size > sizeof(init_fpstate.regs)) { + pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d), disabling XSAVE\n", + sizeof(init_fpstate.regs), init_fpstate.size); + goto out_disable; + } + setup_init_fpu_buf(); /* @@ -1130,6 +1125,15 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, */ mask = fpstate->user_xfeatures; + /* + * Dynamic features are not present in init_fpstate. When they are + * in an all zeros init state, remove those from 'mask' to zero + * those features in the user buffer instead of retrieving them + * from init_fpstate. 
+ */ + if (fpu_state_size_dynamic()) + mask &= (header.xfeatures | xinit->header.xcomp_bv); + for_each_extended_xfeature(i, mask) { /* * If there was a feature or alignment gap, zero the space diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index dfeb227de561..2a4be92fd144 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -4,6 +4,7 @@ */ #include <linux/linkage.h> +#include <linux/cfi_types.h> #include <asm/ptrace.h> #include <asm/ftrace.h> #include <asm/export.h> @@ -129,6 +130,14 @@ .endm +SYM_TYPED_FUNC_START(ftrace_stub) + RET +SYM_FUNC_END(ftrace_stub) + +SYM_TYPED_FUNC_START(ftrace_stub_graph) + RET +SYM_FUNC_END(ftrace_stub_graph) + #ifdef CONFIG_DYNAMIC_FTRACE SYM_FUNC_START(__fentry__) @@ -172,21 +181,10 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) */ SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL) ANNOTATE_NOENDBR - - jmp ftrace_epilogue + RET SYM_FUNC_END(ftrace_caller); STACK_FRAME_NON_STANDARD_FP(ftrace_caller) -SYM_FUNC_START(ftrace_epilogue) -/* - * This is weak to keep gas from relaxing the jumps. - */ -SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) - UNWIND_HINT_FUNC - ENDBR - RET -SYM_FUNC_END(ftrace_epilogue) - SYM_FUNC_START(ftrace_regs_caller) /* Save the current flags before any operations that can change them */ pushfq @@ -262,14 +260,11 @@ SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL) popfq /* - * As this jmp to ftrace_epilogue can be a short jump - * it must not be copied into the trampoline. - * The trampoline will add the code to jump - * to the return. + * The trampoline will add the return. */ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) ANNOTATE_NOENDBR - jmp ftrace_epilogue + RET /* Swap the flags with orig_rax */ 1: movq MCOUNT_REG_SIZE(%rsp), %rdi @@ -280,7 +275,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) /* Restore flags */ popfq UNWIND_HINT_FUNC - jmp ftrace_epilogue + RET SYM_FUNC_END(ftrace_regs_caller) STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) @@ -291,9 +286,6 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) SYM_FUNC_START(__fentry__) cmpq $ftrace_stub, ftrace_trace_function jnz trace - -SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL) - ENDBR RET trace: diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 0ea57da92940..c059820dfaea 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -713,7 +713,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, /* Otherwise, skip ahead to the user-specified starting frame: */ while (!unwind_done(state) && (!on_stack(&state->stack_info, first_frame, sizeof(long)) || - state->sp < (unsigned long)first_frame)) + state->sp <= (unsigned long)first_frame)) unwind_next_frame(state); return; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4bd5f8a751de..9cf1ba865562 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6442,26 +6442,22 @@ static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter, return 0; } -static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) +static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, + struct kvm_msr_filter *filter) { - struct kvm_msr_filter __user *user_msr_filter = argp; struct kvm_x86_msr_filter *new_filter, *old_filter; - struct kvm_msr_filter filter; bool default_allow; bool empty = true; int r = 0; u32 i; - if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) - return -EFAULT; - - if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY) + if (filter->flags & 
~KVM_MSR_FILTER_DEFAULT_DENY) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) - empty &= !filter.ranges[i].nmsrs; + for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) + empty &= !filter->ranges[i].nmsrs; - default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY); + default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); if (empty && !default_allow) return -EINVAL; @@ -6469,8 +6465,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) if (!new_filter) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { - r = kvm_add_msr_filter(new_filter, &filter.ranges[i]); + for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { + r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); if (r) { kvm_free_msr_filter(new_filter); return r; @@ -6493,6 +6489,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp) return 0; } +#ifdef CONFIG_KVM_COMPAT +/* for KVM_X86_SET_MSR_FILTER */ +struct kvm_msr_filter_range_compat { + __u32 flags; + __u32 nmsrs; + __u32 base; + __u32 bitmap; +}; + +struct kvm_msr_filter_compat { + __u32 flags; + struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES]; +}; + +#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat) + +long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct kvm *kvm = filp->private_data; + long r = -ENOTTY; + + switch (ioctl) { + case KVM_X86_SET_MSR_FILTER_COMPAT: { + struct kvm_msr_filter __user *user_msr_filter = argp; + struct kvm_msr_filter_compat filter_compat; + struct kvm_msr_filter filter; + int i; + + if (copy_from_user(&filter_compat, user_msr_filter, + sizeof(filter_compat))) + return -EFAULT; + + filter.flags = filter_compat.flags; + for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) { + struct kvm_msr_filter_range_compat *cr; + + cr = &filter_compat.ranges[i]; + filter.ranges[i] = (struct kvm_msr_filter_range) { + .flags = cr->flags, + .nmsrs = cr->nmsrs, + .base = cr->base, + .bitmap = (__u8 *)(ulong)cr->bitmap, + }; + } + + r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); + break; + } + } + + return r; +} +#endif + #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER static int kvm_arch_suspend_notifier(struct kvm *kvm) { @@ -6915,9 +6967,16 @@ set_pit2_out: case KVM_SET_PMU_EVENT_FILTER: r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp); break; - case KVM_X86_SET_MSR_FILTER: - r = kvm_vm_ioctl_set_msr_filter(kvm, argp); + case KVM_X86_SET_MSR_FILTER: { + struct kvm_msr_filter __user *user_msr_filter = argp; + struct kvm_msr_filter filter; + + if (copy_from_user(&filter, user_msr_filter, sizeof(filter))) + return -EFAULT; + + r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); break; + } default: r = -ENOTTY; } diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 64ee618064ba..71f721670ab6 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -369,12 +369,8 @@ struct bfq_queue { unsigned long split_time; /* time of last split */ unsigned long first_IO_time; /* time of first I/O for this queue */ - unsigned long creation_time; /* when this queue is created */ - /* max service rate measured so far */ - u32 max_service_rate; - /* * Pointer to the waker queue for this queue, i.e., to the * queue Q such that this queue happens to get new I/O right diff --git a/block/bio.c b/block/bio.c index 633a902468ec..57c2f327225b 100644 --- a/block/bio.c +++ b/block/bio.c @@ -741,7 +741,7 @@ void bio_put(struct bio *bio) return; } - if (bio->bi_opf & 
REQ_ALLOC_CACHE) { + if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) { struct bio_alloc_cache *cache; bio_uninit(bio); diff --git a/block/blk-mq.c b/block/blk-mq.c index 8070b6c10e8d..33292c01875d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3112,8 +3112,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, struct page *page; unsigned long flags; - /* There is no need to clear a driver tags own mapping */ - if (drv_tags == tags) + /* + * There is no need to clear mapping if driver tags is not initialized + * or the mapping belongs to the driver tags. + */ + if (!drv_tags || drv_tags == tags) return; list_for_each_entry(page, &tags->page_list, lru) { diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index 72f1fb77abcd..e648158368a7 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -12,6 +12,7 @@ #include <linux/ratelimit.h> #include <linux/edac.h> #include <linux/ras.h> +#include <acpi/ghes.h> #include <asm/cpu.h> #include <asm/mce.h> @@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, int cpu = mce->extcpu; struct acpi_hest_generic_status *estatus, *tmp; struct acpi_hest_generic_data *gdata; - const guid_t *fru_id = &guid_null; - char *fru_text = ""; + const guid_t *fru_id; + char *fru_text; guid_t *sec_type; static u32 err_seq; @@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, /* log event via trace */ err_seq++; - gdata = (struct acpi_hest_generic_data *)(tmp + 1); - if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) - fru_id = (guid_t *)gdata->fru_id; - if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) - fru_text = gdata->fru_text; - sec_type = (guid_t *)gdata->section_type; - if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { - struct cper_sec_mem_err *mem = (void *)(gdata + 1); - if (gdata->error_data_length >= sizeof(*mem)) - trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, - (u8)gdata->error_severity); + apei_estatus_for_each_section(tmp, gdata) { + if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) + fru_id = (guid_t *)gdata->fru_id; + else + fru_id = &guid_null; + if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) + fru_text = gdata->fru_text; + else + fru_text = ""; + sec_type = (guid_t *)gdata->section_type; + if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { + struct cper_sec_mem_err *mem = (void *)(gdata + 1); + + if (gdata->error_data_length >= sizeof(*mem)) + trace_extlog_mem_event(mem, err_seq, fru_id, fru_text, + (u8)gdata->error_severity); + } } out: diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 80ad530583c9..9952f3a792ba 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx) clear_fixmap(fixmap_idx); } -int ghes_estatus_pool_init(int num_ghes) +int ghes_estatus_pool_init(unsigned int num_ghes) { unsigned long addr, len; int rc; diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index ca2aed86b540..8059baf4ef27 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1142,7 +1142,8 @@ static void iort_iommu_msi_get_resv_regions(struct device *dev, struct iommu_resv_region *region; region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K, - prot, IOMMU_RESV_MSI); + prot, IOMMU_RESV_MSI, + GFP_KERNEL); if (region) list_add_tail(®ion->list, head); } diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 
c8385ef54c37..4e3db20e9cbb 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -323,6 +323,7 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle) list_for_each_entry(pn, &adev->physical_node_list, node) { if (dev_is_pci(pn->dev)) { + get_device(pn->dev); pci_dev = to_pci_dev(pn->dev); break; } diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 6f9489edfb4e..78c2804164c6 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -428,17 +428,31 @@ static const struct dmi_system_id asus_laptop[] = { { } }; +static const struct dmi_system_id lenovo_82ra[] = { + { + .ident = "LENOVO IdeaPad Flex 5 16ALC7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82RA"), + }, + }, + { } +}; + struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; unsigned char triggering; unsigned char polarity; unsigned char shareable; + bool override; }; -static const struct irq_override_cmp skip_override_table[] = { - { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 }, - { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 }, +static const struct irq_override_cmp override_table[] = { + { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, + { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, + { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, }; static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, @@ -446,6 +460,17 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, { int i; + for (i = 0; i < ARRAY_SIZE(override_table); i++) { + const struct irq_override_cmp *entry = &override_table[i]; + + if (dmi_check_system(entry->system) && + entry->irq == gsi && + entry->triggering == triggering && + entry->polarity == polarity && + entry->shareable == shareable) + return entry->override; + } + #ifdef CONFIG_X86 /* * IRQ override isn't needed on modern AMD Zen systems and @@ -456,17 +481,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, return false; #endif - for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) { - const struct irq_override_cmp *entry = &skip_override_table[i]; - - if (dmi_check_system(entry->system) && - entry->irq == gsi && - entry->triggering == triggering && - entry->polarity == polarity && - entry->shareable == shareable) - return false; - } - return true; } @@ -498,8 +512,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; if (triggering != trig || polarity != pol) { - pr_warn("ACPI: IRQ %d override to %s, %s\n", gsi, - t ? "level" : "edge", p ? "low" : "high"); + pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi, + t ? "level" : "edge", + trig == triggering ? "" : "(!)", + p ? "low" : "high", + pol == polarity ? 
"" : "(!)"); triggering = trig; polarity = pol; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 558664d169fc..024cc373a197 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1509,9 +1509,12 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) goto out; } + *map = r; + list_for_each_entry(rentry, &list, node) { if (rentry->res->start >= rentry->res->end) { - kfree(r); + kfree(*map); + *map = NULL; ret = -EINVAL; dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); goto out; @@ -1523,8 +1526,6 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) r->offset = rentry->offset; r++; } - - *map = r; } out: acpi_dev_free_resource_list(&list); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index da7ee8bec165..7add8e79912b 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -257,7 +257,7 @@ enum { PCS_7 = 0x94, /* 7+ port PCS (Denverton) */ /* em constants */ - EM_MAX_SLOTS = 8, + EM_MAX_SLOTS = SATA_PMP_MAX_PORTS, EM_MAX_RETRY = 5, /* em_ctl bits */ diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c index f61795c546cf..6f216eb25610 100644 --- a/drivers/ata/ahci_brcm.c +++ b/drivers/ata/ahci_brcm.c @@ -448,7 +448,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (!of_id) return -ENODEV; - priv->version = (enum brcm_ahci_version)of_id->data; + priv->version = (unsigned long)of_id->data; priv->dev = dev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl"); diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index b734e069034d..a950767f7948 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -1067,7 +1067,7 @@ static int imx_ahci_probe(struct platform_device *pdev) imxpriv->ahci_pdev = pdev; imxpriv->no_device = false; imxpriv->first_time = true; - imxpriv->type = (enum ahci_imx_type)of_id->data; + imxpriv->type = (unsigned long)of_id->data; imxpriv->sata_clk = devm_clk_get(dev, "sata"); if (IS_ERR(imxpriv->sata_clk)) { @@ -1235,4 +1235,4 @@ module_platform_driver(imx_ahci_driver); MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver"); MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("ahci:imx"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 6cd61842ad48..9cf9bf36a874 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -280,7 +280,7 @@ static int ahci_qoriq_probe(struct platform_device *pdev) return -ENOMEM; if (of_id) - qoriq_priv->type = (enum ahci_qoriq_type)of_id->data; + qoriq_priv->type = (unsigned long)of_id->data; else qoriq_priv->type = (enum ahci_qoriq_type)acpi_id->driver_data; diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c index 5a2cac60a29a..8607b68eee53 100644 --- a/drivers/ata/ahci_st.c +++ b/drivers/ata/ahci_st.c @@ -236,7 +236,7 @@ static struct platform_driver st_ahci_driver = { .driver = { .name = DRV_NAME, .pm = &st_ahci_pm_ops, - .of_match_table = of_match_ptr(st_ahci_match), + .of_match_table = st_ahci_match, }, .probe = st_ahci_probe, .remove = ata_platform_remove_one, diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index 7bb5db17f864..1e08704d5117 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -785,7 +785,7 @@ static int xgene_ahci_probe(struct platform_device *pdev) of_devid = of_match_device(xgene_ahci_of_match, dev); if (of_devid) { if (of_devid->data) - version = (enum xgene_ahci_version) of_devid->data; + version = 
(unsigned long) of_devid->data; } #ifdef CONFIG_ACPI else { diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 590ebea99601..0195eb29f6c2 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -875,7 +875,7 @@ static int sata_rcar_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->type = (enum sata_rcar_type)of_device_get_match_data(dev); + priv->type = (unsigned long)of_device_get_match_data(dev); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 8f7f144e54f3..7f9bcc82fc9c 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -30,11 +30,6 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio return NULL; memset(req, 0, sizeof(*req)); - req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src, - GFP_NOIO, &drbd_io_bio_set); - req->private_bio->bi_private = req; - req->private_bio->bi_end_io = drbd_request_endio; - req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0) | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0) | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0); @@ -1219,9 +1214,12 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio) /* Update disk stats */ req->start_jif = bio_start_io_acct(req->master_bio); - if (!get_ldev(device)) { - bio_put(req->private_bio); - req->private_bio = NULL; + if (get_ldev(device)) { + req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, + bio, GFP_NOIO, + &drbd_io_bio_set); + req->private_bio->bi_private = req; + req->private_bio->bi_end_io = drbd_request_endio; } /* process discards always from our submitter thread */ diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 2651bf41dde3..5afce6ffaadf 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -124,7 +124,7 @@ struct ublk_queue { bool force_abort; unsigned short nr_io_ready; /* how many ios setup */ struct ublk_device *dev; - struct ublk_io ios[0]; + struct ublk_io ios[]; }; #define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ) diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index e7dd457e9b22..e98fcac578d6 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { if (!wait) return 0; - cpu_relax(); + hwrng_msleep(rng, 1000); } num_words = rng_readl(priv, RNG_STATUS) >> 24; diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index d69d13a26414..4aec4b2a5225 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -222,10 +222,8 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) if (reg_name[0]) { priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name); if (priv->opp_token < 0) { - ret = priv->opp_token; - if (ret != -EPROBE_DEFER) - dev_err(cpu_dev, "failed to set regulators: %d\n", - ret); + ret = dev_err_probe(cpu_dev, priv->opp_token, + "failed to set regulators\n"); goto free_cpumask; } } diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 90beb26ed34e..ad4ce8493144 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -396,9 +396,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) ret = imx6q_opp_check_speed_grading(cpu_dev); } if (ret) { - if (ret != 
-EPROBE_DEFER) - dev_err(cpu_dev, "failed to read ocotp: %d\n", - ret); + dev_err_probe(cpu_dev, ret, "failed to read ocotp\n"); goto out_free_opp; } diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 863548f59c3e..a577586b23be 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -64,7 +64,7 @@ static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev; static void get_krait_bin_format_a(struct device *cpu_dev, int *speed, int *pvs, int *pvs_ver, - struct nvmem_cell *pvs_nvmem, u8 *buf) + u8 *buf) { u32 pte_efuse; @@ -95,7 +95,7 @@ static void get_krait_bin_format_a(struct device *cpu_dev, static void get_krait_bin_format_b(struct device *cpu_dev, int *speed, int *pvs, int *pvs_ver, - struct nvmem_cell *pvs_nvmem, u8 *buf) + u8 *buf) { u32 pte_efuse, redundant_sel; @@ -213,6 +213,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, int speed = 0, pvs = 0, pvs_ver = 0; u8 *speedbin; size_t len; + int ret = 0; speedbin = nvmem_cell_read(speedbin_nvmem, &len); @@ -222,15 +223,16 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, switch (len) { case 4: get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver, - speedbin_nvmem, speedbin); + speedbin); break; case 8: get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver, - speedbin_nvmem, speedbin); + speedbin); break; default: dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n"); - return -ENODEV; + ret = -ENODEV; + goto len_error; } snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d", @@ -238,8 +240,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, drv->versions = (1 << speed); +len_error: kfree(speedbin); - return 0; + return ret; } static const struct qcom_cpufreq_match_data match_data_kryo = { @@ -262,7 +265,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) struct nvmem_cell *speedbin_nvmem; struct device_node *np; struct device *cpu_dev; - char *pvs_name = "speedXX-pvsXX-vXX"; + char pvs_name_buffer[] = "speedXX-pvsXX-vXX"; + char *pvs_name = pvs_name_buffer; unsigned cpu; const struct of_device_id *match; int ret; @@ -295,11 +299,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) if (drv->data->get_version) { speedbin_nvmem = of_nvmem_cell_get(np, NULL); if (IS_ERR(speedbin_nvmem)) { - if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER) - dev_err(cpu_dev, - "Could not get nvmem cell: %ld\n", - PTR_ERR(speedbin_nvmem)); - ret = PTR_ERR(speedbin_nvmem); + ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), + "Could not get nvmem cell\n"); goto free_drv; } diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index a4922580ce06..1583a370da39 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -56,12 +56,9 @@ static int sun50i_cpufreq_get_efuse(u32 *versions) speedbin_nvmem = of_nvmem_cell_get(np, NULL); of_node_put(np); - if (IS_ERR(speedbin_nvmem)) { - if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER) - pr_err("Could not get nvmem cell: %ld\n", - PTR_ERR(speedbin_nvmem)); - return PTR_ERR(speedbin_nvmem); - } + if (IS_ERR(speedbin_nvmem)) + return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), + "Could not get nvmem cell\n"); speedbin = nvmem_cell_read(speedbin_nvmem, &len); nvmem_cell_put(speedbin_nvmem); diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index c2004cae3f02..4596c3e323aa 100644 --- 
a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -589,6 +589,7 @@ static const struct of_device_id tegra194_cpufreq_of_match[] = { { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match); static struct platform_driver tegra194_ccplex_driver = { .driver = { diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index d4e23101448a..35bb70724d44 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -216,9 +216,20 @@ void scmi_device_destroy(struct scmi_device *scmi_dev) device_unregister(&scmi_dev->dev); } +void scmi_device_link_add(struct device *consumer, struct device *supplier) +{ + struct device_link *link; + + link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER); + + WARN_ON(!link); +} + void scmi_set_handle(struct scmi_device *scmi_dev) { scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); + if (scmi_dev->handle) + scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); } int scmi_protocol_register(const struct scmi_protocol *proto) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 61aba7447c32..a1c0154c31c6 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -97,6 +97,7 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) struct scmi_revision_info * scmi_revision_area_get(const struct scmi_protocol_handle *ph); int scmi_handle_put(const struct scmi_handle *handle); +void scmi_device_link_add(struct device *consumer, struct device *supplier); struct scmi_handle *scmi_handle_get(struct device *dev); void scmi_set_handle(struct scmi_device *scmi_dev); void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, @@ -117,6 +118,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * * @dev: Reference to device in the SCMI hierarchy corresponding to this * channel + * @rx_timeout_ms: The configured RX timeout in milliseconds. * @handle: Pointer to SCMI entity handle * @no_completion_irq: Flag to indicate that this channel has no completion * interrupt mechanism for synchronous commands. 
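The scmi_device_link_add() helper added above is a thin wrapper around the driver core's device_link_add(); by linking every scmi_device (consumer) to its transport device (supplier), the core can unbind protocol users automatically before the transport goes away, which is presumably what lets the later scmi_remove() change warn about still-active users instead of refusing to unbind. A minimal sketch of the underlying driver-core pattern, separate from the patch itself and using illustrative names:

#include <linux/device.h>
#include <linux/errno.h>

/*
 * Illustrative sketch only: make 'consumer' depend on 'supplier'.
 * With DL_FLAG_AUTOREMOVE_CONSUMER the link is dropped automatically
 * when the consumer unbinds, and unbinding the supplier forces the
 * consumer to be unbound first, so the consumer never outlives the
 * device it relies on.
 */
static int example_link_to_supplier(struct device *consumer,
				    struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)	/* device_link_add() returns NULL on failure */
		return -ENODEV;

	return 0;
}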
@@ -126,6 +128,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); */ struct scmi_chan_info { struct device *dev; + unsigned int rx_timeout_ms; struct scmi_handle *handle; bool no_completion_irq; void *transport_info; @@ -232,7 +235,7 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); struct scmi_shared_mem; void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer); + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo); u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 609ebedee9cb..f818d00bb2c6 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -2013,6 +2013,7 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev, return -ENOMEM; cinfo->dev = dev; + cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); if (ret) @@ -2044,8 +2045,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) { int ret = scmi_chan_setup(info, dev, prot_id, true); - if (!ret) /* Rx is optional, hence no error check */ - scmi_chan_setup(info, dev, prot_id, false); + if (!ret) { + /* Rx is optional, report only memory errors */ + ret = scmi_chan_setup(info, dev, prot_id, false); + if (ret && ret != -ENOMEM) + ret = 0; + } return ret; } @@ -2273,10 +2278,16 @@ int scmi_protocol_device_request(const struct scmi_device_id *id_table) sdev = scmi_get_protocol_device(child, info, id_table->protocol_id, id_table->name); - /* Set handle if not already set: device existed */ - if (sdev && !sdev->handle) - sdev->handle = - scmi_handle_get_from_info_unlocked(info); + if (sdev) { + /* Set handle if not already set: device existed */ + if (!sdev->handle) + sdev->handle = + scmi_handle_get_from_info_unlocked(info); + /* Relink consumer and suppliers */ + if (sdev->handle) + scmi_device_link_add(&sdev->dev, + sdev->handle->dev); + } } else { dev_err(info->dev, "Failed. 
SCMI protocol %d not active.\n", @@ -2475,20 +2486,17 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) static int scmi_remove(struct platform_device *pdev) { - int ret = 0, id; + int ret, id; struct scmi_info *info = platform_get_drvdata(pdev); struct device_node *child; mutex_lock(&scmi_list_mutex); if (info->users) - ret = -EBUSY; - else - list_del(&info->node); + dev_warn(&pdev->dev, + "Still active SCMI users will be forcibly unbound.\n"); + list_del(&info->node); mutex_unlock(&scmi_list_mutex); - if (ret) - return ret; - scmi_notification_exit(&info->handle); mutex_lock(&info->protocols_mtx); @@ -2500,7 +2508,11 @@ static int scmi_remove(struct platform_device *pdev) idr_destroy(&info->active_protocols); /* Safe to free channels since no more users */ - return scmi_cleanup_txrx_channels(info); + ret = scmi_cleanup_txrx_channels(info); + if (ret) + dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n"); + + return 0; } static ssize_t protocol_version_show(struct device *dev, @@ -2571,6 +2583,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match); static struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", + .suppress_bind_attrs = true, .of_match_table = scmi_of_match, .dev_groups = versions_groups, }, diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index 08ff4d110beb..1e40cb035044 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -36,7 +36,7 @@ static void tx_prepare(struct mbox_client *cl, void *m) { struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - shmem_tx_prepare(smbox->shmem, m); + shmem_tx_prepare(smbox->shmem, m, smbox->cinfo); } static void rx_callback(struct mbox_client *cl, void *m) diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index f42dad997ac9..2a7aeab40e54 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -498,7 +498,7 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo, msg_tx_prepare(channel->req.msg, xfer); ret = invoke_process_msg_channel(channel, msg_command_size(xfer)); } else { - shmem_tx_prepare(channel->req.shmem, xfer); + shmem_tx_prepare(channel->req.shmem, xfer, cinfo); ret = invoke_process_smt_channel(channel); } diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 0e3eaea5d852..1dfe534b8518 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -5,10 +5,13 @@ * Copyright (C) 2019 ARM Ltd. */ +#include <linux/ktime.h> #include <linux/io.h> #include <linux/processor.h> #include <linux/types.h> +#include <asm-generic/bug.h> + #include "common.h" /* @@ -30,16 +33,36 @@ struct scmi_shared_mem { }; void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer) + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo) { + ktime_t stop; + /* * Ideally channel must be free by now unless OS timeout last * request and platform continued to process the same, wait * until it releases the shared memory, otherwise we may endup - * overwriting its response with new message payload or vice-versa + * overwriting its response with new message payload or vice-versa. + * Giving up anyway after twice the expected channel timeout so as + * not to bail-out on intermittent issues where the platform is + * occasionally a bit slower to answer. 
+ * + * Note that after a timeout is detected we bail-out and carry on but + * the transport functionality is probably permanently compromised: + * this is just to ease debugging and avoid complete hangs on boot + * due to a misbehaving SCMI firmware. */ - spin_until_cond(ioread32(&shmem->channel_status) & - SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); + stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms); + spin_until_cond((ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) || + ktime_after(ktime_get(), stop)); + if (!(ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) { + WARN_ON_ONCE(1); + dev_err(cinfo->dev, + "Timeout waiting for a free TX channel !\n"); + return; + } + /* Mark channel busy + clear error */ iowrite32(0x0, &shmem->channel_status); iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 745acfdd0b3d..87a7b13cf868 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -188,7 +188,7 @@ static int smc_send_message(struct scmi_chan_info *cinfo, */ smc_channel_lock_acquire(scmi_info, xfer); - shmem_tx_prepare(scmi_info->shmem, xfer); + shmem_tx_prepare(scmi_info->shmem, xfer, cinfo); arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 14709dbc96a1..33c9b81a55cd 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -148,7 +148,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) { unsigned long flags; DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); - void *deferred_wq = NULL; /* * Prepare to wait for the last release if not already released @@ -162,16 +161,11 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) vioch->shutdown_done = &vioch_shutdown_done; virtio_break_device(vioch->vqueue->vdev); - if (!vioch->is_rx && vioch->deferred_tx_wq) { - deferred_wq = vioch->deferred_tx_wq; + if (!vioch->is_rx && vioch->deferred_tx_wq) /* Cannot be kicked anymore after this...*/ vioch->deferred_tx_wq = NULL; - } spin_unlock_irqrestore(&vioch->lock, flags); - if (deferred_wq) - destroy_workqueue(deferred_wq); - scmi_vio_channel_release(vioch); /* Let any possibly concurrent RX path release the channel */ @@ -416,6 +410,11 @@ static bool virtio_chan_available(struct device *dev, int idx) return vioch && !vioch->cinfo; } +static void scmi_destroy_tx_workqueue(void *deferred_tx_wq) +{ + destroy_workqueue(deferred_tx_wq); +} + static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { @@ -430,6 +429,8 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, /* Setup a deferred worker for polling. 
*/ if (tx && !vioch->deferred_tx_wq) { + int ret; + vioch->deferred_tx_wq = alloc_workqueue(dev_name(&scmi_vdev->dev), WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, @@ -437,6 +438,11 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!vioch->deferred_tx_wq) return -ENOMEM; + ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue, + vioch->deferred_tx_wq); + if (ret) + return ret; + INIT_WORK(&vioch->deferred_tx_work, scmi_vio_deferred_tx_worker); } @@ -444,12 +450,12 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, for (i = 0; i < vioch->max_msg; i++) { struct scmi_vio_msg *msg; - msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); + msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; if (tx) { - msg->request = devm_kzalloc(cinfo->dev, + msg->request = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, GFP_KERNEL); if (!msg->request) @@ -458,7 +464,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, refcount_set(&msg->users, 1); } - msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, + msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, GFP_KERNEL); if (!msg->input) return -ENOMEM; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 5b79a4a4a88d..6787ed8dfacf 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -124,28 +124,6 @@ config EFI_ZBOOT is supported by the encapsulated image. (The compression algorithm used is described in the zboot image header) -config EFI_ZBOOT_SIGNED - def_bool y - depends on EFI_ZBOOT_SIGNING_CERT != "" - depends on EFI_ZBOOT_SIGNING_KEY != "" - -config EFI_ZBOOT_SIGNING - bool "Sign the EFI decompressor for UEFI secure boot" - depends on EFI_ZBOOT - help - Use the 'sbsign' command line tool (which must exist on the host - path) to sign both the EFI decompressor PE/COFF image, as well as the - encapsulated PE/COFF image, which is subsequently compressed and - wrapped by the former image. 
- -config EFI_ZBOOT_SIGNING_CERT - string "Certificate to use for signing the compressed EFI boot image" - depends on EFI_ZBOOT_SIGNING - -config EFI_ZBOOT_SIGNING_KEY - string "Private key to use for signing the compressed EFI boot image" - depends on EFI_ZBOOT_SIGNING - config EFI_ARMSTUB_DTB_LOADER bool "Enable the DTB loader" depends on EFI_GENERIC_STUB && !RISCV && !LOONGARCH diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 3359ae2adf24..7c48c380d722 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -63,7 +63,7 @@ static bool __init efi_virtmap_init(void) if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; - if (md->virt_addr == 0) + if (md->virt_addr == U64_MAX) return false; ret = efi_create_mapping(&efi_mm, md); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 9624735f1575..3ecdc43a3f2b 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -271,6 +271,8 @@ static __init int efivar_ssdt_load(void) acpi_status ret = acpi_load_table(data, NULL); if (ret) pr_err("failed to load table: %u\n", ret); + else + continue; } else { pr_err("failed to get var data: 0x%lx\n", status); } diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 35f234ad8738..3340b385a05b 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -20,22 +20,11 @@ zboot-size-len-y := 4 zboot-method-$(CONFIG_KERNEL_GZIP) := gzip zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0 -quiet_cmd_sbsign = SBSIGN $@ - cmd_sbsign = sbsign --out $@ $< \ - --key $(CONFIG_EFI_ZBOOT_SIGNING_KEY) \ - --cert $(CONFIG_EFI_ZBOOT_SIGNING_CERT) - -$(obj)/$(EFI_ZBOOT_PAYLOAD).signed: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE - $(call if_changed,sbsign) - -ZBOOT_PAYLOAD-y := $(EFI_ZBOOT_PAYLOAD) -ZBOOT_PAYLOAD-$(CONFIG_EFI_ZBOOT_SIGNED) := $(EFI_ZBOOT_PAYLOAD).signed - -$(obj)/vmlinuz: $(obj)/$(ZBOOT_PAYLOAD-y) FORCE +$(obj)/vmlinuz: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE $(call if_changed,$(zboot-method-y)) OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) \ - --rename-section .data=.gzdata,load,alloc,readonly,contents + --rename-section .data=.gzdata,load,alloc,readonly,contents $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE $(call if_changed,objcopy) @@ -53,18 +42,8 @@ LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds $(obj)/vmlinuz.efi.elf: $(obj)/vmlinuz.o $(ZBOOT_DEPS) FORCE $(call if_changed,ld) -ZBOOT_EFI-y := vmlinuz.efi -ZBOOT_EFI-$(CONFIG_EFI_ZBOOT_SIGNED) := vmlinuz.efi.unsigned - -OBJCOPYFLAGS_$(ZBOOT_EFI-y) := -O binary -$(obj)/$(ZBOOT_EFI-y): $(obj)/vmlinuz.efi.elf FORCE +OBJCOPYFLAGS_vmlinuz.efi := -O binary +$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE $(call if_changed,objcopy) targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi - -ifneq ($(CONFIG_EFI_ZBOOT_SIGNED),) -$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.unsigned FORCE - $(call if_changed,sbsign) -endif - -targets += $(EFI_ZBOOT_PAYLOAD).signed vmlinuz.efi.unsigned diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 4f4d98e51fbf..70e9789ff9de 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -313,16 +313,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle, /* * Set the virtual address field of all - * EFI_MEMORY_RUNTIME entries to 0. 
This will signal - * the incoming kernel that no virtual translation has - * been installed. + * EFI_MEMORY_RUNTIME entries to U64_MAX. This will + * signal the incoming kernel that no virtual + * translation has been installed. */ for (l = 0; l < priv.boot_memmap->map_size; l += priv.boot_memmap->desc_size) { p = (void *)priv.boot_memmap->map + l; if (p->attribute & EFI_MEMORY_RUNTIME) - p->virt_addr = 0; + p->virt_addr = U64_MAX; } } return EFI_SUCCESS; diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index b9ce6393e353..33a7811e12c6 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -765,9 +765,9 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) * relocated by efi_relocate_kernel. * On failure, we exit to the firmware via efi_exit instead of returning. */ -unsigned long efi_main(efi_handle_t handle, - efi_system_table_t *sys_table_arg, - struct boot_params *boot_params) +asmlinkage unsigned long efi_main(efi_handle_t handle, + efi_system_table_t *sys_table_arg, + struct boot_params *boot_params) { unsigned long bzimage_addr = (unsigned long)startup_32; unsigned long buffer_start, buffer_end; diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds index 87a62765bafd..93d33f68333b 100644 --- a/drivers/firmware/efi/libstub/zboot.lds +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -38,7 +38,8 @@ SECTIONS } } -PROVIDE(__efistub__gzdata_size = ABSOLUTE(. - __efistub__gzdata_start)); +PROVIDE(__efistub__gzdata_size = + ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start)); PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext)); PROVIDE(__data_size = ABSOLUTE(_end - _etext)); diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c index d28e715d2bcc..d0daacd2c903 100644 --- a/drivers/firmware/efi/riscv-runtime.c +++ b/drivers/firmware/efi/riscv-runtime.c @@ -41,7 +41,7 @@ static bool __init efi_virtmap_init(void) if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; - if (md->virt_addr == 0) + if (md->virt_addr == U64_MAX) return false; ret = efi_create_mapping(&efi_mm, md); diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index dd74d2ad3184..433b61587139 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -7,6 +7,7 @@ */ #include <linux/types.h> +#include <linux/sizes.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> @@ -20,19 +21,19 @@ static struct efivars *__efivars; static DEFINE_SEMAPHORE(efivars_lock); -efi_status_t check_var_size(u32 attributes, unsigned long size) +static efi_status_t check_var_size(u32 attributes, unsigned long size) { const struct efivar_operations *fops; fops = __efivars->ops; if (!fops->query_variable_store) - return EFI_UNSUPPORTED; + return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES; return fops->query_variable_store(attributes, size, false); } -EXPORT_SYMBOL_NS_GPL(check_var_size, EFIVAR); +static efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size) { const struct efivar_operations *fops; @@ -40,11 +41,10 @@ efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size) fops = __efivars->ops; if (!fops->query_variable_store) - return EFI_UNSUPPORTED; + return (size <= SZ_64K) ? 
EFI_SUCCESS : EFI_OUT_OF_RESOURCES; return fops->query_variable_store(attributes, size, true); } -EXPORT_SYMBOL_NS_GPL(check_var_size_nonblocking, EFIVAR); /** * efivars_kobject - get the kobject for the registered efivars diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ae9371b172e3..8639a4f9c6e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -274,9 +274,6 @@ extern int amdgpu_vcnfw_log; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) -#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0) -#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1) - /* max cursor sizes (in pixels) */ #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 @@ -1065,7 +1062,6 @@ struct amdgpu_device { struct work_struct reset_work; - uint32_t amdgpu_reset_level_mask; bool job_hang; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 03bbfaa51cbc..0561812aa0a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -134,7 +134,6 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 0b0a72ca5695..7e80caa05060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -111,7 +111,7 @@ static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id) lock_srbm(adev, mec, pipe, 0, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, regCPC_INT_CNTL), + WREG32_SOC15(GC, 0, regCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6066aebf491c..de61a85c4b02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1954,8 +1954,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) return PTR_ERR(ent); } - debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask); - /* Register debugfs entries for amdgpu_ttm */ amdgpu_ttm_debugfs_init(adev); amdgpu_debugfs_pm_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ab8f970b2849..e0445e8cc342 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2928,6 +2928,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + /* + * Per PMFW team's suggestion, driver needs to handle gfxoff + * and df cstate features disablement for gpu reset(e.g. Mode1Reset) + * scenario. Add the missing df cstate disablement here. 
+ */ + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + dev_warn(adev->dev, "Failed to disallow df cstate"); + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; @@ -5210,7 +5218,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, reset_context->job = job; reset_context->hive = hive; - /* * Build list of devices to reset. * In case we are in XGMI hive mode, resort the device list @@ -5337,11 +5344,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ amdgpu_ras_resume(adev); } else { r = amdgpu_do_asic_reset(device_list_handle, reset_context); - if (r && r == -EAGAIN) { - set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags); - adev->asic_reset_res = 0; + if (r && r == -EAGAIN) goto retry; - } if (!r && gpu_reset_for_dev_remove) goto recover_end; @@ -5777,7 +5781,6 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) reset_context.reset_req_dev = adev; set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); - set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); adev->no_hw_access = true; r = amdgpu_device_pre_asic_reset(adev, &reset_context); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 46c99331d7f1..cd968e781077 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -72,7 +72,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 2dad7aa9a03b..a4b47e1bd111 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1950,7 +1950,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); } @@ -2268,6 +2267,25 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) { + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 2): + return true; + default: + return false; + } + } + + if (adev->asic_type == CHIP_IP_DISCOVERY) { + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 10): + return true; + default: + return false; + } + } + return adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS || @@ -2311,11 +2329,6 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) !amdgpu_ras_asic_supported(adev)) return; - /* If driver run on sriov guest side, only enable ras for aldebaran */ - if (amdgpu_sriov_vf(adev) && - adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) - return; - if (!adev->gmc.xgmi.connected_to_cpu) { if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { dev_info(adev->dev, "MEM ECC is active.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 
9da5ead50c90..f778466bb9db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -37,8 +37,6 @@ int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; - adev->amdgpu_reset_level_mask = 0x1; - switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 2): ret = aldebaran_reset_init(adev); @@ -76,12 +74,6 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev, { struct amdgpu_reset_handler *reset_handler = NULL; - if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2)) - return -ENOSYS; - - if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags)) - return -ENOSYS; - if (adev->reset_cntl && adev->reset_cntl->get_reset_handler) reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); @@ -98,12 +90,6 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev, int ret; struct amdgpu_reset_handler *reset_handler = NULL; - if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2)) - return -ENOSYS; - - if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags)) - return -ENOSYS; - if (adev->reset_cntl) reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index f5318fedf2f0..f4a501ff87d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -30,8 +30,7 @@ enum AMDGPU_RESET_FLAGS { AMDGPU_NEED_FULL_RESET = 0, AMDGPU_SKIP_HW_RESET = 1, - AMDGPU_SKIP_MODE2_RESET = 2, - AMDGPU_RESET_FOR_DEVICE_REMOVE = 3, + AMDGPU_RESET_FOR_DEVICE_REMOVE = 2, }; struct amdgpu_reset_context { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3e316b013fd9..d3558c34d406 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -405,9 +405,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { ktime_t deadline = ktime_add_us(ktime_get(), 10000); - if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY)) - return false; - if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dc262d2c2925..57277b1cf183 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -439,6 +439,9 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, while (cursor.remaining) { amdgpu_res_next(&cursor, cursor.size); + if (!cursor.remaining) + break; + /* ttm_resource_ioremap only supports contiguous memory */ if (end != cursor.start) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e4af40b9a8aa..9c765b04aae3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -726,6 +726,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } + if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) + /* VF MMIO access (except mailbox range) from CPU + * will be blocked during sriov runtime + */ + adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; + /* we have the ability to check now */ if (amdgpu_sriov_vf(adev)) { switch (adev->asic_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h 
index d94c31e68a14..49c4347d154c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -31,6 +31,7 @@ #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ +#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */ /* flags for indirect register access path supported by rlcg for sriov */ #define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28) @@ -297,6 +298,9 @@ struct amdgpu_video_codec_info; #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) +#define amdgpu_sriov_vf_mmio_access_protection(adev) \ +((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 83b0c5d86e48..2291aa14d888 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2338,7 +2338,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) */ #ifdef CONFIG_X86_64 if (amdgpu_vm_update_mode == -1) { - if (amdgpu_gmc_vram_full_visible(&adev->gmc)) + /* For asic with VF MMIO access protection + * avoid using CPU for VM table updates + */ + if (amdgpu_gmc_vram_full_visible(&adev->gmc) && + !amdgpu_sriov_vf_mmio_access_protection(adev)) adev->vm_manager.vm_update_mode = AMDGPU_VM_USE_CPU_FOR_COMPUTE; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 2b0669c464f6..69e105fa41f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, DMA_RESV_USAGE_BOOKKEEP); } - if (fence && !p->immediate) + if (fence && !p->immediate) { + /* + * Most hw generations now have a separate queue for page table + * updates, but when the queue is shared with userspace we need + * the extra CPU round trip to correctly flush the TLB. + */ + set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags); swap(*fence, f); + } dma_fence_put(f); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 251109723ab6..671ca5a0f208 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1571,7 +1571,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); /* Enable trap for each kfd vmid. 
*/ - data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL)); + data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); } soc21_grbm_select(adev, 0, 0, 0, 0); @@ -5076,6 +5076,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle, case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): gfx_v11_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE); break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 846ccb6cf07d..66dfb574cc7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -186,6 +186,10 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; + unsigned char hub_ip = 0; + + hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); /* @@ -199,8 +203,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (i = 0; i < adev->usec_timeout; i++) { /* a read return value of 1 means semaphore acuqire */ - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, hub_ip); if (tmp & 0x1) break; udelay(1); @@ -210,12 +214,12 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip); /* Wait for ACK with a delay.*/ for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + - hub->eng_distance * eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack + + hub->eng_distance * eng, hub_ip); tmp &= 1 << vmid; if (tmp) break; @@ -229,8 +233,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - WREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, 0); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0, hub_ip); /* Issue additional private vm invalidation to MMHUB */ if ((vmhub != AMDGPU_GFXHUB_0) && diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 5cec6b259b7f..fef7d020bc5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -1156,6 +1156,42 @@ static int mes_v11_0_sw_fini(void *handle) return 0; } +static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev) +{ + uint32_t data; + int i; + + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0); + + /* disable the queue if it's active */ + if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { + WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + } + data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 0); + data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 1); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data); + + WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0); + + 
WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0); + + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + adev->mes.ring.sched.ready = false; +} + static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring) { uint32_t tmp; @@ -1207,6 +1243,9 @@ failure: static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) { + if (adev->mes.ring.sched.ready) + mes_v11_0_kiq_dequeue_sched(adev); + mes_v11_0_enable(adev, false); return 0; } @@ -1262,9 +1301,6 @@ failure: static int mes_v11_0_hw_fini(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->mes.ring.sched.ready = false; return 0; } @@ -1296,7 +1332,8 @@ static int mes_v11_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_in_reset(adev)) + if (!amdgpu_in_reset(adev) && + (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))) amdgpu_mes_self_test(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index a2f04b249132..12906ba74462 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -290,7 +290,6 @@ flr_done: reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index a977f0027928..e07757eea7ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -317,7 +317,6 @@ flr_done: reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index fd14fa9b9cd7..288c414babdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -529,7 +529,6 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 298fa11702e7..1122bd4eae98 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1417,11 +1417,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_CNTL, temp); if (!amdgpu_sriov_vf(adev)) { - ring = &adev->sdma.instance[i].ring; - adev->nbio.funcs->sdma_doorbell_range(adev, i, - ring->use_doorbell, ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); - /* unhalt engine */ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL); temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c index 7aa570c1ce4a..81a6d5b94987 100644 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c +++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c @@ -31,12 +31,23 @@ #include "amdgpu_psp.h" #include 
"amdgpu_xgmi.h" +static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_ctl) +{ +#if 0 + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) && + adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev)) + return true; +#endif + return false; +} + static struct amdgpu_reset_handler * sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl, struct amdgpu_reset_context *reset_context) { struct amdgpu_reset_handler *handler; - struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; if (reset_context->method != AMD_RESET_METHOD_NONE) { list_for_each_entry(handler, &reset_ctl->reset_handlers, @@ -44,15 +55,13 @@ sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl, if (handler->reset_method == reset_context->method) return handler; } - } else { - list_for_each_entry(handler, &reset_ctl->reset_handlers, + } + + if (sienna_cichlid_is_mode2_default(reset_ctl)) { + list_for_each_entry (handler, &reset_ctl->reset_handlers, handler_list) { - if (handler->reset_method == AMD_RESET_METHOD_MODE2 && - adev->pm.fw_version >= 0x3a5500 && - !amdgpu_sriov_vf(adev)) { - reset_context->method = AMD_RESET_METHOD_MODE2; + if (handler->reset_method == AMD_RESET_METHOD_MODE2) return handler; - } } } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 183024d7c184..e3b2b6b4f1a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1211,6 +1211,20 @@ static int soc15_common_sw_fini(void *handle) return 0; } +static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev) +{ + int i; + + /* sdma doorbell range is programed by hypervisor */ + if (!amdgpu_sriov_vf(adev)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + adev->nbio.funcs->sdma_doorbell_range(adev, i, + true, adev->doorbell_index.sdma_engine[i] << 1, + adev->doorbell_index.sdma_doorbell_range); + } + } +} + static int soc15_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1230,6 +1244,13 @@ static int soc15_common_hw_init(void *handle) /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); + /* HW doorbell routing policy: doorbell writing not + * in SDMA/IH/MM/ACV range will be routed to CP. So + * we need to init SDMA doorbell range prior + * to CP ip block init and ring test. IH already + * happens before CP. 
+ */ + soc15_sdma_doorbell_range_init(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 795706b3b092..e08044008186 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -423,6 +423,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev) case IP_VERSION(11, 0, 0): return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): return false; default: return true; @@ -636,7 +637,11 @@ static int soc21_common_early_init(void *handle) break; case IP_VERSION(11, 0, 3): adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG | - AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_REPEATER_FGCG | + AMD_CG_SUPPORT_GFX_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index d70838edba80..ca7d24000621 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -77,7 +77,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index e85364dff4e0..5cb3e8634739 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -262,8 +262,9 @@ struct kfd2kgd_calls { uint32_t queue_id); int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd, - uint32_t reset_type, unsigned int timeout, - uint32_t pipe_id, uint32_t queue_id); + enum kfd_preempt_type reset_type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id); bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 948cc75376f8..236657eece47 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3362,11 +3362,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) if (adev->pm.sysfs_initialized) return 0; + INIT_LIST_HEAD(&adev->pm.pm_attr_list); + if (adev->pm.dpm_enabled == 0) return 0; - INIT_LIST_HEAD(&adev->pm.pm_attr_list); - adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, DRIVER_NAME, adev, hwmon_groups); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index 190af79f3236..dad3e3741a4e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t *speed) { - struct amdgpu_device *adev = 
hwmgr->adev; - uint32_t duty100, duty; - uint64_t tmp64; + uint32_t current_rpm; + uint32_t percent = 0; - duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), - CG_FDO_CTRL1, FMAX_DUTY100); - duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS), - CG_THERMAL_STATUS, FDO_PWM_DUTY); + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; - if (!duty100) - return -EINVAL; + if (vega10_get_current_rpm(hwmgr, ¤t_rpm)) + return -1; + + if (hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM != 0) + percent = current_rpm * 255 / + hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM; - tmp64 = (uint64_t)duty * 255; - do_div(tmp64, duty100); - *speed = MIN((uint32_t)tmp64, 255); + *speed = MIN(percent, 255); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 13c5c7f1ecb9..4fe75dd2b329 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1314,8 +1314,8 @@ static int smu_smc_hw_setup(struct smu_context *smu) ret = smu_enable_thermal_alert(smu); if (ret) { - dev_err(adev->dev, "Failed to enable thermal alert!\n"); - return ret; + dev_err(adev->dev, "Failed to enable thermal alert!\n"); + return ret; } ret = smu_notify_display_change(smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h index ae2d337158f3..f77401709d83 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define PMFW_DRIVER_IF_VERSION 5 +#define PMFW_DRIVER_IF_VERSION 7 typedef struct { int32_t value; @@ -163,8 +163,8 @@ typedef struct { uint16_t DclkFrequency; //[MHz] uint16_t MemclkFrequency; //[MHz] uint16_t spare; //[centi] - uint16_t UvdActivity; //[centi] uint16_t GfxActivity; //[centi] + uint16_t UvdActivity; //[centi] uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC @@ -199,6 +199,19 @@ typedef struct { uint16_t DeviceState; uint16_t CurTemp; //[centi-Celsius] uint16_t spare2; + + uint16_t AverageGfxclkFrequency; + uint16_t AverageFclkFrequency; + uint16_t AverageGfxActivity; + uint16_t AverageSocclkFrequency; + uint16_t AverageVclkFrequency; + uint16_t AverageVcnActivity; + uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads + uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes + uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower + uint16_t AverageCorePower; //Filtered of [sum of CorePower[8]]) + uint16_t AverageCoreC0Residency[8]; //Filtered of [average C0 residency % per core] + uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing. 
} SmuMetrics_t; typedef struct { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 9d62ea2af132..8f72202aea8e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -28,7 +28,7 @@ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05 +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 445005571f76..9cd005131f56 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2242,9 +2242,17 @@ static void arcturus_get_unique_id(struct smu_context *smu) static int arcturus_set_df_cstate(struct smu_context *smu, enum pp_df_cstate state) { + struct amdgpu_device *adev = smu->adev; uint32_t smu_version; int ret; + /* + * Arcturus does not need the cstate disablement + * prerequisite for gpu reset. + */ + if (amdgpu_in_reset(adev) || adev->in_suspend) + return 0; + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); if (ret) { dev_err(smu->adev->dev, "Failed to get smu version!\n"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 619aee51b123..d30ec3005ea1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1640,6 +1640,15 @@ static bool aldebaran_is_baco_supported(struct smu_context *smu) static int aldebaran_set_df_cstate(struct smu_context *smu, enum pp_df_cstate state) { + struct amdgpu_device *adev = smu->adev; + + /* + * Aldebaran does not need the cstate disablement + * prerequisite for gpu reset. 
+ */ + if (amdgpu_in_reset(adev) || adev->in_suspend) + return 0; + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 93fffdbab4f0..c4552ade8d44 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -211,7 +211,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu) return 0; if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) || - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))) + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) || + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))) return 0; /* override pptable_id from driver parameter */ @@ -454,9 +455,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu) dev_info(adev->dev, "override pptable id %d\n", pptable_id); } else { pptable_id = smu->smu_table.boot_values.pp_table_id; - - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) - pptable_id = 6666; } /* force using vbios pptable in sriov mode */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 1d454485e0d9..29529328152d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -119,6 +119,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), }; static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { @@ -1753,6 +1754,15 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, return ret; } +static int smu_v13_0_0_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -1822,6 +1832,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_0_set_mp1_state, + .set_df_cstate = smu_v13_0_0_set_df_cstate, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c422bf8a09b1..c4102cfb734c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -121,6 +121,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { @@ -1587,6 +1588,16 @@ static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) return true; } + +static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + 
return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, @@ -1649,6 +1660,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_7_set_mp1_state, + .set_df_cstate = smu_v13_0_7_set_df_cstate, }; void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index e3142c8142b3..61c29ce74b03 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -435,7 +435,7 @@ int drmm_connector_init(struct drm_device *dev, if (drm_WARN_ON(dev, funcs && funcs->destroy)) return -EINVAL; - ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL); + ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 5fe209107246..20fe53815b20 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -176,6 +176,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) .src = &src, .dst = &dst, .pgmap_owner = drm->dev, + .fault_page = vmf->page, .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE, }; diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c index 89056a1aac7d..6bd0634e2d58 100644 --- a/drivers/gpu/drm/panfrost/panfrost_dump.c +++ b/drivers/gpu/drm/panfrost/panfrost_dump.c @@ -63,13 +63,13 @@ static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter, { struct panfrost_dump_object_header *hdr = iter->hdr; - hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC); - hdr->type = cpu_to_le32(type); - hdr->file_offset = cpu_to_le32(iter->data - iter->start); - hdr->file_size = cpu_to_le32(data_end - iter->data); + hdr->magic = PANFROSTDUMP_MAGIC; + hdr->type = type; + hdr->file_offset = iter->data - iter->start; + hdr->file_size = data_end - iter->data; iter->hdr++; - iter->data += le32_to_cpu(hdr->file_size); + iter->data += hdr->file_size; } static void @@ -93,8 +93,8 @@ panfrost_core_dump_registers(struct panfrost_dump_iterator *iter, reg = panfrost_dump_registers[i] + js_as_offset; - dumpreg->reg = cpu_to_le32(reg); - dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg)); + dumpreg->reg = reg; + dumpreg->value = gpu_read(pfdev, reg); } panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg); @@ -106,7 +106,7 @@ void panfrost_core_dump(struct panfrost_job *job) struct panfrost_dump_iterator iter; struct drm_gem_object *dbo; unsigned int n_obj, n_bomap_pages; - __le64 *bomap, *bomap_start; + u64 *bomap, *bomap_start; size_t file_size; u32 as_nr; int slot; @@ -177,11 +177,11 @@ void panfrost_core_dump(struct panfrost_job *job) * For now, we write the job identifier in the register dump header, * so that we can decode the entire dump later with pandecode */ - iter.hdr->reghdr.jc = cpu_to_le64(job->jc); - iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR); - iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR); - iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id); - iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count); + iter.hdr->reghdr.jc = job->jc; + iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR; + 
iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR; + iter.hdr->reghdr.gpu_id = pfdev->features.id; + iter.hdr->reghdr.nbos = job->bo_count; panfrost_core_dump_registers(&iter, pfdev, as_nr, slot); @@ -218,27 +218,27 @@ void panfrost_core_dump(struct panfrost_job *job) WARN_ON(!mapping->active); - iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start)); + iter.hdr->bomap.data[0] = bomap - bomap_start; for_each_sgtable_page(bo->base.sgt, &page_iter, 0) { struct page *page = sg_page_iter_page(&page_iter); if (!IS_ERR(page)) { - *bomap++ = cpu_to_le64(page_to_phys(page)); + *bomap++ = page_to_phys(page); } else { dev_err(pfdev->dev, "Panfrost Dump: wrong page\n"); - *bomap++ = ~cpu_to_le64(0); + *bomap++ = 0; } } - iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT); + iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT; vaddr = map.vaddr; memcpy(iter.data, vaddr, bo->base.base.size); drm_gem_shmem_vunmap(&bo->base, &map); - iter.hdr->bomap.valid = cpu_to_le32(1); + iter.hdr->bomap.valid = 1; dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data + bo->base.base.size); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 6b25b2f4f5a3..6137537aaea4 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -385,7 +385,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) } s_fence = to_drm_sched_fence(fence); - if (s_fence && s_fence->sched == sched) { + if (s_fence && s_fence->sched == sched && + !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) { /* * Fence is from the same scheduler, only need to wait for diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 8d86c250c2ec..2191e57f2297 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -438,7 +438,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) iosys_map_set_vaddr(&src, xrgb8888); drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); - buf = le32buf_to_cpu(test, buf, TEST_BUF_SIZE); + buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32)); KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index ffbbb454c9e8..2027063fdc30 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -490,6 +490,7 @@ module_init(vc4_drm_register); module_exit(vc4_drm_unregister); MODULE_ALIAS("platform:vc4-drm"); +MODULE_SOFTDEP("pre: snd-soc-hdmi-codec"); MODULE_DESCRIPTION("Broadcom VC4 DRM Driver"); MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 64f9feabf43e..596e311d6e58 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -3318,12 +3318,37 @@ static int vc4_hdmi_runtime_resume(struct device *dev) struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); unsigned long __maybe_unused flags; u32 __maybe_unused value; + unsigned long rate; int ret; + /* + * The HSM clock is in the HDMI power domain, so we need to set + * its frequency while the power domain is active so that it + * keeps its rate. 
+ */ + ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ); + if (ret) + return ret; + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); if (ret) return ret; + /* + * Whenever the RaspberryPi boots without an HDMI monitor + * plugged in, the firmware won't have initialized the HSM clock + * rate and it will be reported as 0. + * + * If we try to access a register of the controller in such a + * case, it will lead to a silent CPU stall. Let's make sure we + * prevent such a case. + */ + rate = clk_get_rate(vc4_hdmi->hsm_clock); + if (!rate) { + ret = -EINVAL; + goto err_disable_clk; + } + if (vc4_hdmi->variant->reset) vc4_hdmi->variant->reset(vc4_hdmi); @@ -3345,6 +3370,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev) #endif return 0; + +err_disable_clk: + clk_disable_unprepare(vc4_hdmi->hsm_clock); + return ret; } static void vc4_hdmi_put_ddc_device(void *ptr) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index da86565f04d4..dad953f66996 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -867,6 +867,7 @@ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 #define USB_DEVICE_ID_MADCATZ_RAT5 0x1705 #define USB_DEVICE_ID_MADCATZ_RAT9 0x1709 +#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713 #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 @@ -1142,6 +1143,7 @@ #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0 #define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6 +#define USB_DEVICE_ID_SONY_PS5_CONTROLLER_2 0x0df2 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 9dabd6323234..44763c0da444 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -985,7 +985,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev, struct device *dev = led_cdev->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); - u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED }; + static const u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED }; int led_nr = 0; int ret = 0; diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 664a624a363d..c9c968d4b36a 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -480,7 +480,7 @@ static int magicmouse_raw_event(struct hid_device *hdev, magicmouse_raw_event(hdev, report, data + 2, data[1]); magicmouse_raw_event(hdev, report, data + 2 + data[1], size - 2 - data[1]); - break; + return 0; default: return 0; } diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c index 40050eb85c0a..0b58763bfd30 100644 --- a/drivers/hid/hid-playstation.c +++ b/drivers/hid/hid-playstation.c @@ -46,6 +46,7 @@ struct ps_device { uint32_t fw_version; int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size); + void (*remove)(struct ps_device *dev); }; /* Calibration data for playstation motion sensors. */ @@ -107,6 +108,9 @@ struct ps_led_info { #define DS_STATUS_CHARGING GENMASK(7, 4) #define DS_STATUS_CHARGING_SHIFT 4 +/* Feature version from DualSense Firmware Info report. */ +#define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff)) + /* * Status of a DualSense touch point contact. 
* Contact IDs, with highest bit set are 'inactive' @@ -125,6 +129,7 @@ struct ps_led_info { #define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3) #define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4) #define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1) +#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2) #define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4) #define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1) @@ -142,6 +147,9 @@ struct dualsense { struct input_dev *sensors; struct input_dev *touchpad; + /* Update version is used as a feature/capability version. */ + uint16_t update_version; + /* Calibration data for accelerometer and gyroscope. */ struct ps_calibration_data accel_calib_data[3]; struct ps_calibration_data gyro_calib_data[3]; @@ -152,6 +160,7 @@ struct dualsense { uint32_t sensor_timestamp_us; /* Compatible rumble state */ + bool use_vibration_v2; bool update_rumble; uint8_t motor_left; uint8_t motor_right; @@ -174,6 +183,7 @@ struct dualsense { struct led_classdev player_leds[5]; struct work_struct output_worker; + bool output_worker_initialized; void *output_report_dmabuf; uint8_t output_seq; /* Sequence number for output report. */ }; @@ -299,6 +309,7 @@ static const struct {int x; int y; } ps_gamepad_hat_mapping[] = { {0, 0}, }; +static inline void dualsense_schedule_work(struct dualsense *ds); static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue); /* @@ -789,6 +800,7 @@ err_free: return ret; } + static int dualsense_get_firmware_info(struct dualsense *ds) { uint8_t *buf; @@ -808,6 +820,15 @@ static int dualsense_get_firmware_info(struct dualsense *ds) ds->base.hw_version = get_unaligned_le32(&buf[24]); ds->base.fw_version = get_unaligned_le32(&buf[28]); + /* Update version is some kind of feature version. It is distinct from + * the firmware version as there can be many different variations of a + * controller over time with the same physical shell, but with different + * PCBs and other internal changes. The update version (internal name) is + * used as a means to detect what features are available and change behavior. + * Note: the version is different between DualSense and DualSense Edge. + */ + ds->update_version = get_unaligned_le16(&buf[44]); + err_free: kfree(buf); return ret; @@ -878,7 +899,7 @@ static int dualsense_player_led_set_brightness(struct led_classdev *led, enum le ds->update_player_leds = true; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); return 0; } @@ -922,6 +943,16 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_ } } +static inline void dualsense_schedule_work(struct dualsense *ds) +{ + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); + if (ds->output_worker_initialized) + schedule_work(&ds->output_worker); + spin_unlock_irqrestore(&ds->base.lock, flags); +} + /* * Helper function to send DualSense output reports. Applies a CRC at the end of a report * for Bluetooth reports. @@ -960,7 +991,10 @@ static void dualsense_output_worker(struct work_struct *work) if (ds->update_rumble) { /* Select classic rumble style haptics and enable it. 
*/ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT; - common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; + if (ds->use_vibration_v2) + common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2; + else + common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; common->motor_left = ds->motor_left; common->motor_right = ds->motor_right; ds->update_rumble = false; @@ -1082,7 +1116,7 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r spin_unlock_irqrestore(&ps_dev->lock, flags); /* Schedule updating of microphone state at hardware level. */ - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } ds->last_btn_mic_state = btn_mic_state; @@ -1197,10 +1231,22 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef ds->motor_right = effect->u.rumble.weak_magnitude / 256; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); return 0; } +static void dualsense_remove(struct ps_device *ps_dev) +{ + struct dualsense *ds = container_of(ps_dev, struct dualsense, base); + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); + ds->output_worker_initialized = false; + spin_unlock_irqrestore(&ds->base.lock, flags); + + cancel_work_sync(&ds->output_worker); +} + static int dualsense_reset_leds(struct dualsense *ds) { struct dualsense_output_report report; @@ -1237,7 +1283,7 @@ static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t gr ds->lightbar_blue = blue; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } static void dualsense_set_player_leds(struct dualsense *ds) @@ -1260,7 +1306,7 @@ static void dualsense_set_player_leds(struct dualsense *ds) ds->update_player_leds = true; ds->player_leds_state = player_ids[player_id]; - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } static struct ps_device *dualsense_create(struct hid_device *hdev) @@ -1299,7 +1345,9 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) ps_dev->battery_capacity = 100; /* initial value until parse_report. */ ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; ps_dev->parse_report = dualsense_parse_report; + ps_dev->remove = dualsense_remove; INIT_WORK(&ds->output_worker, dualsense_output_worker); + ds->output_worker_initialized = true; hid_set_drvdata(hdev, ds); max_output_report_size = sizeof(struct dualsense_output_report_bt); @@ -1320,6 +1368,21 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) return ERR_PTR(ret); } + /* Original DualSense firmware simulated classic controller rumble through + * its new haptics hardware. It felt different from classic rumble users + * were used to. Since then new firmwares were introduced to change behavior + * and make this new 'v2' behavior default on PlayStation and other platforms. + * The original DualSense requires a new enough firmware as bundled with PS5 + * software released in 2021. DualSense edge supports it out of the box. + * Both devices also support the old mode, but it is not really used. + */ + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + /* Feature version 2.21 introduced new vibration method. 
*/ + ds->use_vibration_v2 = ds->update_version >= DS_FEATURE_VERSION(2, 21); + } else if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { + ds->use_vibration_v2 = true; + } + ret = ps_devices_list_add(ps_dev); if (ret) return ERR_PTR(ret); @@ -1436,7 +1499,8 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id) goto err_stop; } - if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER || + hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { dev = dualsense_create(hdev); if (IS_ERR(dev)) { hid_err(hdev, "Failed to create dualsense.\n"); @@ -1461,6 +1525,9 @@ static void ps_remove(struct hid_device *hdev) ps_devices_list_remove(dev); ps_device_release_player_id(dev); + if (dev->remove) + dev->remove(dev); + hid_hw_close(hdev); hid_hw_stop(hdev); } @@ -1468,6 +1535,8 @@ static void ps_remove(struct hid_device *hdev) static const struct hid_device_id ps_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) }, { } }; MODULE_DEVICE_TABLE(hid, ps_devices); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 70f602c64fd1..50e1c717fc0a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -620,6 +620,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) }, #endif #if IS_ENABLED(CONFIG_HID_SAMSUNG) { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index c7bf14c01960..b84e975977c4 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = { .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7), .driver_data = SAITEK_RELEASE_MODE_MMO7 }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7), + .driver_data = SAITEK_RELEASE_MODE_MMO7 }, { } }; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index ccf0af5b988a..8bf32c6c85d9 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -46,9 +46,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) -#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id) -#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) - #ifdef CONFIG_SMP #define for_each_sibling(i, cpu) \ for_each_cpu(i, topology_sibling_cpumask(cpu)) @@ -91,6 +88,8 @@ struct temp_data { struct platform_data { struct device *hwmon_dev; u16 pkg_id; + u16 cpu_map[NUM_REAL_CORES]; + struct ida ida; struct cpumask cpumask; struct temp_data *core_data[MAX_CORE_DATA]; struct device_attribute name_attr; @@ -441,7 +440,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) MSR_IA32_THERM_STATUS; tdata->is_pkg_data = pkg_flag; tdata->cpu = cpu; - tdata->cpu_core_id = TO_CORE_ID(cpu); + tdata->cpu_core_id = 
topology_core_id(cpu); tdata->attr_size = MAX_CORE_ATTRS; mutex_init(&tdata->update_lock); return tdata; @@ -454,7 +453,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; - int err, attr_no; + int err, index, attr_no; /* * Find attr number for sysfs: @@ -462,14 +461,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, * The attr number is always core id + 2 * The Pkgtemp will always show up as temp1_*, if available */ - attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu); + if (pkg_flag) { + attr_no = PKG_SYSFS_ATTR_NO; + } else { + index = ida_alloc(&pdata->ida, GFP_KERNEL); + if (index < 0) + return index; + pdata->cpu_map[index] = topology_core_id(cpu); + attr_no = index + BASE_SYSFS_ATTR_NO; + } - if (attr_no > MAX_CORE_DATA - 1) - return -ERANGE; + if (attr_no > MAX_CORE_DATA - 1) { + err = -ERANGE; + goto ida_free; + } tdata = init_temp_data(cpu, pkg_flag); - if (!tdata) - return -ENOMEM; + if (!tdata) { + err = -ENOMEM; + goto ida_free; + } /* Test if we can access the status register */ err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx); @@ -505,6 +516,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, exit_free: pdata->core_data[attr_no] = NULL; kfree(tdata); +ida_free: + if (!pkg_flag) + ida_free(&pdata->ida, index); return err; } @@ -524,6 +538,9 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx) kfree(pdata->core_data[indx]); pdata->core_data[indx] = NULL; + + if (indx >= BASE_SYSFS_ATTR_NO) + ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO); } static int coretemp_probe(struct platform_device *pdev) @@ -537,6 +554,7 @@ static int coretemp_probe(struct platform_device *pdev) return -ENOMEM; pdata->pkg_id = pdev->id; + ida_init(&pdata->ida); platform_set_drvdata(pdev, pdata); pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME, @@ -553,6 +571,7 @@ static int coretemp_remove(struct platform_device *pdev) if (pdata->core_data[i]) coretemp_remove_core(pdata, i); + ida_destroy(&pdata->ida); return 0; } @@ -647,7 +666,7 @@ static int coretemp_cpu_offline(unsigned int cpu) struct platform_device *pdev = coretemp_get_pdev(cpu); struct platform_data *pd; struct temp_data *tdata; - int indx, target; + int i, indx = -1, target; /* * Don't execute this on suspend as the device remove locks @@ -660,12 +679,19 @@ static int coretemp_cpu_offline(unsigned int cpu) if (!pdev) return 0; - /* The core id is too big, just return */ - indx = TO_ATTR_NO(cpu); - if (indx > MAX_CORE_DATA - 1) + pd = platform_get_drvdata(pdev); + + for (i = 0; i < NUM_REAL_CORES; i++) { + if (pd->cpu_map[i] == topology_core_id(cpu)) { + indx = i + BASE_SYSFS_ATTR_NO; + break; + } + } + + /* Too many cores and this core is not populated, just return */ + if (indx < 0) return 0; - pd = platform_get_drvdata(pdev); tdata = pd->core_data[indx]; cpumask_clear_cpu(cpu, &pd->cpumask); diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c index 345d883ab044..2210aa62e3d0 100644 --- a/drivers/hwmon/corsair-psu.c +++ b/drivers/hwmon/corsair-psu.c @@ -820,7 +820,8 @@ static const struct hid_device_id corsairpsu_idtable[] = { { HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */ { HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */ { HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */ - { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsaur HX1000i revision 2 
*/ + { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i revision 2 */ + { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i */ { }, }; MODULE_DEVICE_TABLE(hid, corsairpsu_idtable); diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index dc3d9a22d917..83a347ca35da 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -257,7 +257,10 @@ static int pwm_fan_update_enable(struct pwm_fan_ctx *ctx, long val) if (val == 0) { /* Disable pwm-fan unconditionally */ - ret = __set_pwm(ctx, 0); + if (ctx->enabled) + ret = __set_pwm(ctx, 0); + else + ret = pwm_fan_switch_power(ctx, false); if (ret) ctx->enable_mode = old_val; pwm_fan_update_state(ctx, 0); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 264e780ae32e..e50f9603d189 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -764,6 +764,7 @@ config I2C_LPC2K config I2C_MLXBF tristate "Mellanox BlueField I2C controller" depends on MELLANOX_PLATFORM && ARM64 + depends on ACPI select I2C_SLAVE help Enabling this option will add I2C SMBus support for Mellanox BlueField diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index e68e775f187e..1810d5791b3d 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -2247,7 +2247,6 @@ static struct i2c_adapter_quirks mlxbf_i2c_quirks = { .max_write_len = MLXBF_I2C_MASTER_DATA_W_LENGTH, }; -#ifdef CONFIG_ACPI static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = { { "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] }, { "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] }, @@ -2282,12 +2281,6 @@ static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) return 0; } -#else -static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) -{ - return -ENOENT; -} -#endif /* CONFIG_ACPI */ static int mlxbf_i2c_probe(struct platform_device *pdev) { @@ -2490,9 +2483,7 @@ static struct platform_driver mlxbf_i2c_driver = { .remove = mlxbf_i2c_remove, .driver = { .name = "i2c-mlxbf", -#ifdef CONFIG_ACPI .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids), -#endif /* CONFIG_ACPI */ }, }; diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 72fcfb17dd67..081f51ef0551 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -40,7 +40,7 @@ #define MLXCPLD_LPCI2C_STATUS_REG 0x9 #define MLXCPLD_LPCI2C_DATA_REG 0xa -/* LPC I2C masks and parametres */ +/* LPC I2C masks and parameters */ #define MLXCPLD_LPCI2C_RST_SEL_MASK 0x1 #define MLXCPLD_LPCI2C_TRANS_END 0x1 #define MLXCPLD_LPCI2C_STATUS_NACK 0x10 diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index 87739fb4388b..a4b97fe3c3a5 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -639,6 +639,11 @@ static int cci_probe(struct platform_device *pdev) if (ret < 0) goto error; + pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + for (i = 0; i < cci->data->num_masters; i++) { if (!cci->master[i].cci) continue; @@ -650,14 +655,12 @@ static int cci_probe(struct platform_device *pdev) } } - pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); - pm_runtime_use_autosuspend(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - return 0; error_i2c: + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + for (--i ; i >= 0; i--) { if 
(cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index cfb8e04a2a83..87d56250d78a 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -97,7 +97,7 @@ MODULE_PARM_DESC(high_clock, module_param(force, bool, 0); MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!"); -/* SMBus base adress */ +/* SMBus base address */ static unsigned short smbus_base; /* supported chips */ diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index b3fe6b2aa3ca..277a02455cdd 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -920,6 +920,7 @@ static struct platform_driver xiic_i2c_driver = { module_platform_driver(xiic_i2c_driver); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("info@mocean-labs.com"); MODULE_DESCRIPTION("Xilinx I2C bus driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 65856e401949..d3b39d0416fa 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2330,7 +2330,8 @@ static void amd_iommu_get_resv_regions(struct device *dev, type = IOMMU_RESV_RESERVED; region = iommu_alloc_resv_region(entry->address_start, - length, prot, type); + length, prot, type, + GFP_KERNEL); if (!region) { dev_err(dev, "Out of memory allocating dm-regions\n"); return; } @@ -2340,14 +2341,14 @@ static void amd_iommu_get_resv_regions(struct device *dev, region = iommu_alloc_resv_region(MSI_RANGE_START, MSI_RANGE_END - MSI_RANGE_START + 1, - 0, IOMMU_RESV_MSI); + 0, IOMMU_RESV_MSI, GFP_KERNEL); if (!region) return; list_add_tail(&region->list, head); region = iommu_alloc_resv_region(HT_RANGE_START, HT_RANGE_END - HT_RANGE_START + 1, - 0, IOMMU_RESV_RESERVED); + 0, IOMMU_RESV_RESERVED, GFP_KERNEL); if (!region) return; list_add_tail(&region->list, head); diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 4526575b999e..4f4a323be0d0 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -758,7 +758,7 @@ static void apple_dart_get_resv_regions(struct device *dev, region = iommu_alloc_resv_region(DOORBELL_ADDR, PAGE_SIZE, prot, - IOMMU_RESV_MSI); + IOMMU_RESV_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index ba47c73f5b8c..6d5df91c5c46 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2757,7 +2757,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 6c1114a4d6cc..30dab1418e3f 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1534,7 +1534,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index a8b36c3fddf1..48cdcd0a5cf3 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2410,6 +2410,7 @@ static int
__init si_domain_init(int hw) if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { domain_exit(si_domain); + si_domain = NULL; return -EFAULT; } @@ -3052,6 +3053,10 @@ free_iommu: disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } + if (si_domain) { + domain_exit(si_domain); + si_domain = NULL; + } return ret; } @@ -4534,7 +4539,7 @@ static void intel_iommu_get_resv_regions(struct device *device, struct device *i_dev; int i; - down_read(&dmar_global_lock); + rcu_read_lock(); for_each_rmrr_units(rmrr) { for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, i_dev) { @@ -4552,14 +4557,15 @@ static void intel_iommu_get_resv_regions(struct device *device, IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT; resv = iommu_alloc_resv_region(rmrr->base_address, - length, prot, type); + length, prot, type, + GFP_ATOMIC); if (!resv) break; list_add_tail(&resv->list, head); } } - up_read(&dmar_global_lock); + rcu_read_unlock(); #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA if (dev_is_pci(device)) { @@ -4567,7 +4573,8 @@ static void intel_iommu_get_resv_regions(struct device *device, if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { reg = iommu_alloc_resv_region(0, 1UL << 24, prot, - IOMMU_RESV_DIRECT_RELAXABLE); + IOMMU_RESV_DIRECT_RELAXABLE, + GFP_KERNEL); if (reg) list_add_tail(&reg->list, head); } @@ -4576,7 +4583,7 @@ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, - 0, IOMMU_RESV_MSI); + 0, IOMMU_RESV_MSI, GFP_KERNEL); if (!reg) return; list_add_tail(&reg->list, head); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 4893c2429ca5..65a3b3d886dc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -504,7 +504,7 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new, LIST_HEAD(stack); nr = iommu_alloc_resv_region(new->start, new->length, - new->prot, new->type); + new->prot, new->type, GFP_KERNEL); if (!nr) return -ENOMEM; @@ -2579,11 +2579,12 @@ EXPORT_SYMBOL(iommu_put_resv_regions); struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, - enum iommu_resv_type type) + enum iommu_resv_type type, + gfp_t gfp) { struct iommu_resv_region *region; - region = kzalloc(sizeof(*region), GFP_KERNEL); + region = kzalloc(sizeof(*region), gfp); if (!region) return NULL; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 5a4e00e4bbbc..2ab2ecfe01f8 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -917,7 +917,8 @@ static void mtk_iommu_get_resv_regions(struct device *dev, continue; region = iommu_alloc_resv_region(resv->iova_base, resv->size, - prot, IOMMU_RESV_RESERVED); + prot, IOMMU_RESV_RESERVED, + GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index b7c22802f57c..8b1b5c270e50 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -490,11 +490,13 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, fallthrough; case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: region = iommu_alloc_resv_region(start, size, 0, - IOMMU_RESV_RESERVED, + GFP_KERNEL); break; case VIRTIO_IOMMU_RESV_MEM_T_MSI: region = iommu_alloc_resv_region(start, size, prot, - IOMMU_RESV_MSI); + IOMMU_RESV_MSI, + GFP_KERNEL); break; } if (!region) @@ -909,7 +911,8 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head) */ if (!msi) { msi = iommu_alloc_resv_region(MSI_IOVA_BASE,
MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, + GFP_KERNEL); if (!msi) return; diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 09c7ed2650ca..9c5ef818ca36 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b) { BUG_ON(b->hold_count); - if (!b->state) /* fast case */ + /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */ + if (!smp_load_acquire(&b->state)) /* fast case */ return; wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); @@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) BUG_ON(test_bit(B_DIRTY, &b->state)); if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && - unlikely(test_bit(B_READING, &b->state))) + unlikely(test_bit_acquire(B_READING, &b->state))) continue; if (!b->hold_count) { @@ -1058,7 +1059,7 @@ found_buffer: * If the user called both dm_bufio_prefetch and dm_bufio_get on * the same buffer, it would deadlock if we waited. */ - if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) + if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) return NULL; b->hold_count++; @@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b) * invalid buffer. */ if ((b->read_error || b->write_error) && - !test_bit(B_READING, &b->state) && + !test_bit_acquire(B_READING, &b->state) && !test_bit(B_WRITING, &b->state) && !test_bit(B_DIRTY, &b->state)) { __unlink_buffer(b); @@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move); static void forget_buffer_locked(struct dm_buffer *b) { - if (likely(!b->hold_count) && likely(!b->state)) { + if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) { __unlink_buffer(b); __free_buffer_wake(b); } @@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) { if (!(gfp & __GFP_FS) || (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { - if (test_bit(B_READING, &b->state) || + if (test_bit_acquire(B_READING, &b->state) || test_bit(B_WRITING, &b->state) || test_bit(B_DIRTY, &b->state)) return false; diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index c05fc3436cef..06eb31af626f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -166,7 +166,7 @@ struct dm_cache_policy_type { struct dm_cache_policy_type *real; /* - * Policies may store a hint for each each cache block. + * Policies may store a hint for each cache block. * Currently the size of this hint must be 0 or 4 bytes but we * expect to relax this in future. 
*/ diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 811b0a5379d0..2f1cc66d2641 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone) reason = "max discard sectors smaller than a region"; if (reason) { - DMWARN("Destination device (%pd) %s: Disabling discard passdown.", + DMWARN("Destination device (%pg) %s: Disabling discard passdown.", dest_dev, reason); clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags); } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 98976aaa9db9..6b3f867d0b70 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, hc = __get_name_cell(new); if (hc) { - DMWARN("Unable to change %s on mapped device %s to one that " - "already exists: %s", - change_uuid ? "uuid" : "name", - param->name, new); + DMERR("Unable to change %s on mapped device %s to one that " + "already exists: %s", + change_uuid ? "uuid" : "name", + param->name, new); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); @@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, */ hc = __get_name_cell(param->name); if (!hc) { - DMWARN("Unable to rename non-existent device, %s to %s%s", - param->name, change_uuid ? "uuid " : "", new); + DMERR("Unable to rename non-existent device, %s to %s%s", + param->name, change_uuid ? "uuid " : "", new); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-ENXIO); @@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, * Does this device already have a uuid? */ if (change_uuid && hc->uuid) { - DMWARN("Unable to change uuid of mapped device %s to %s " - "because uuid is already set to %s", - param->name, new, hc->uuid); + DMERR("Unable to change uuid of mapped device %s to %s " + "because uuid is already set to %s", + param->name, new, hc->uuid); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); @@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t static int check_name(const char *name) { if (strchr(name, '/')) { - DMWARN("invalid device name"); + DMERR("invalid device name"); return -EINVAL; } @@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src down_read(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { - DMWARN("device has been removed from the dev hash table."); + DMERR("device has been removed from the dev hash table."); goto out; } @@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si if (new_data < param->data || invalid_str(new_data, (void *) param + param_size) || !*new_data || strlen(new_data) > (change_uuid ? 
DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { - DMWARN("Invalid new mapped device name or uuid string supplied."); + DMERR("Invalid new mapped device name or uuid string supplied."); return -EINVAL; } @@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa if (geostr < param->data || invalid_str(geostr, (void *) param + param_size)) { - DMWARN("Invalid geometry supplied."); + DMERR("Invalid geometry supplied."); goto out; } @@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa indata + 1, indata + 2, indata + 3, &dummy); if (x != 4) { - DMWARN("Unable to interpret geometry settings."); + DMERR("Unable to interpret geometry settings."); goto out; } if (indata[0] > 65535 || indata[1] > 255 || indata[2] > 255 || indata[3] > ULONG_MAX) { - DMWARN("Geometry exceeds range limits."); + DMERR("Geometry exceeds range limits."); goto out; } @@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table, char *target_params; if (!param->target_count) { - DMWARN("populate_table: no targets specified"); + DMERR("populate_table: no targets specified"); return -EINVAL; } @@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table, r = next_target(spec, next, end, &spec, &target_params); if (r) { - DMWARN("unable to find target"); + DMERR("unable to find target"); return r; } @@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table, (sector_t) spec->length, target_params); if (r) { - DMWARN("error adding target to table"); + DMERR("error adding target to table"); return r; } @@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si if (immutable_target_type && (immutable_target_type != dm_table_get_immutable_target_type(t)) && !dm_table_get_wildcard_target(t)) { - DMWARN("can't replace immutable target type %s", - immutable_target_type->name); + DMERR("can't replace immutable target type %s", + immutable_target_type->name); r = -EINVAL; goto err_unlock_md_type; } @@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { - DMWARN("unable to set up device queue for new table."); + DMERR("unable to set up device queue for new table."); goto err_unlock_md_type; } } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) { - DMWARN("can't change device type (old=%u vs new=%u) after initial table load.", - dm_get_md_type(md), dm_table_get_type(t)); + DMERR("can't change device type (old=%u vs new=%u) after initial table load.", + dm_get_md_type(md), dm_table_get_type(t)); r = -EINVAL; goto err_unlock_md_type; } @@ -1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si down_write(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { - DMWARN("device has been removed from the dev hash table."); + DMERR("device has been removed from the dev hash table."); up_write(&_hash_lock); r = -ENXIO; goto err_destroy_table; @@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para if (tmsg < (struct dm_target_msg *) param->data || invalid_str(tmsg->message, (void *) param + param_size)) { - DMWARN("Invalid target message parameters."); + DMERR("Invalid target message parameters."); r = -EINVAL; goto out; } r = dm_split_args(&argc, &argv, tmsg->message); if (r) { - DMWARN("Failed to split target message 
parameters"); + DMERR("Failed to split target message parameters"); goto out; } if (!argc) { - DMWARN("Empty message received."); + DMERR("Empty message received."); r = -EINVAL; goto out_argv; } @@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para ti = dm_table_find_target(table, tmsg->sector); if (!ti) { - DMWARN("Target message sector outside device."); + DMERR("Target message sector outside device."); r = -EINVAL; } else if (ti->type->message) r = ti->type->message(ti, argc, argv, result, maxlen); else { - DMWARN("Target type does not support messages"); + DMERR("Target type does not support messages"); r = -EINVAL; } @@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) if ((DM_VERSION_MAJOR != version[0]) || (DM_VERSION_MINOR < version[1])) { - DMWARN("ioctl interface mismatch: " - "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", - DM_VERSION_MAJOR, DM_VERSION_MINOR, - DM_VERSION_PATCHLEVEL, - version[0], version[1], version[2], cmd); + DMERR("ioctl interface mismatch: " + "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", + DM_VERSION_MAJOR, DM_VERSION_MINOR, + DM_VERSION_PATCHLEVEL, + version[0], version[1], version[2], cmd); r = -EINVAL; } @@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param) if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) { - DMWARN("name not supplied when creating device"); + DMERR("name not supplied when creating device"); return -EINVAL; } } else if (*param->uuid && *param->name) { - DMWARN("only supply one of name or uuid, cmd(%u)", cmd); + DMERR("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL; } @@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us fn = lookup_ioctl(cmd, &ioctl_flags); if (!fn) { - DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); + DMERR("dm_ctl_ioctl: unknown command 0x%x", command); return -ENOTTY; } @@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, (sector_t) spec_array[i]->length, target_params_array[i]); if (r) { - DMWARN("error adding target to table"); + DMERR("error adding target to table"); goto err_destroy_table; } } @@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { - DMWARN("unable to set up device queue for new table."); + DMERR("unable to set up device queue for new table."); goto err_destroy_table; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index c640be453313..54263679a7b1 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) * of the "sync" directive. * * With reshaping capability added, we must ensure that - * that the "sync" directive is disallowed during the reshape. + * the "sync" directive is disallowed during the reshape. 
*/ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) continue; @@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) /* * Adjust data_offset and new_data_offset on all disk members of @rs - * for out of place reshaping if requested by contructor + * for out of place reshaping if requested by constructor * * We need free space at the beginning of each raid disk for forward * and at the end for backward reshapes which userspace has to provide diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 3001b10a3fbf..a41209a43506 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) dm_requeue_original_request(tio, true); break; default: - DMWARN("unimplemented target endio return value: %d", r); + DMCRIT("unimplemented target endio return value: %d", r); BUG(); } } @@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio) dm_kill_unmapped_request(rq, BLK_STS_IOERR); break; default: - DMWARN("unimplemented target map return value: %d", r); + DMCRIT("unimplemented target map return value: %d", r); BUG(); } diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 8326f9fe0e91..f105a71915ab 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, return 2; /* this wasn't a stats message */ if (r == -EINVAL) - DMWARN("Invalid parameters for message %s", argv[0]); + DMCRIT("Invalid parameters for message %s", argv[0]); return r; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index d8034ff0cb24..078da18bb86d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, return 0; if ((start >= dev_size) || (start + len > dev_size)) { - DMWARN("%s: %pg too small for target: " - "start=%llu, len=%llu, dev_size=%llu", - dm_device_name(ti->table->md), bdev, - (unsigned long long)start, - (unsigned long long)len, - (unsigned long long)dev_size); + DMERR("%s: %pg too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdev, + (unsigned long long)start, + (unsigned long long)len, + (unsigned long long)dev_size); return 1; } @@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, unsigned int zone_sectors = bdev_zone_sectors(bdev); if (start & (zone_sectors - 1)) { - DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)start, - zone_sectors, bdev); + DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)start, + zone_sectors, bdev); return 1; } @@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, * the sector range. 
*/ if (len & (zone_sectors - 1)) { - DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)len, - zone_sectors, bdev); + DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)len, + zone_sectors, bdev); return 1; } } @@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, return 0; if (start & (logical_block_size_sectors - 1)) { - DMWARN("%s: start=%llu not aligned to h/w " - "logical block size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)start, - limits->logical_block_size, bdev); + DMERR("%s: start=%llu not aligned to h/w " + "logical block size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)start, + limits->logical_block_size, bdev); return 1; } if (len & (logical_block_size_sectors - 1)) { - DMWARN("%s: len=%llu not aligned to h/w " - "logical block size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)len, - limits->logical_block_size, bdev); + DMERR("%s: len=%llu not aligned to h/w " + "logical block size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)len, + limits->logical_block_size, bdev); return 1; } @@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d) } } if (!found) { - DMWARN("%s: device %s not in table devices list", - dm_device_name(ti->table->md), d->name); + DMERR("%s: device %s not in table devices list", + dm_device_name(ti->table->md), d->name); return; } if (refcount_dec_and_test(&dd->count)) { @@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t, } if (remaining) { - DMWARN("%s: table line %u (start sect %llu len %llu) " - "not aligned to h/w logical block size %u", - dm_device_name(t->md), i, - (unsigned long long) ti->begin, - (unsigned long long) ti->len, - limits->logical_block_size); + DMERR("%s: table line %u (start sect %llu len %llu) " + "not aligned to h/w logical block size %u", + dm_device_name(t->md), i, + (unsigned long long) ti->begin, + (unsigned long long) ti->len, + limits->logical_block_size); return -EINVAL; } @@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * struct dm_md_mempools *pools; if (unlikely(type == DM_TYPE_NONE)) { - DMWARN("no table type is set, can't allocate mempools"); + DMERR("no table type is set, can't allocate mempools"); return -EINVAL; } @@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk) * Get a disk whose integrity profile reflects the table's profile. * Returns NULL if integrity support was inconsistent or unavailable. */ -static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) +static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t) { struct list_head *devices = dm_table_get_devices(t); struct dm_dev_internal *dd = NULL; @@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t) * profile the new profile should not conflict. 
*/ if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { - DMWARN("%s: conflict with existing integrity profile: " - "%s profile mismatch", - dm_device_name(t->md), - template_disk->disk_name); + DMERR("%s: conflict with existing integrity profile: " + "%s profile mismatch", + dm_device_name(t->md), + template_disk->disk_name); return 1; } @@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t) if (t->md->queue && !blk_crypto_has_capabilities(profile, t->md->queue->crypto_profile)) { - DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); + DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); dm_destroy_crypto_profile(profile); return -EINVAL; } diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 8a00cc42e498..ccf5b852fbf7 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) /* WQ_UNBOUND greatly improves performance when running on ramdisk */ wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND; - if (v->use_tasklet) { - /* - * Allow verify_wq to preempt softirq since verification in - * tasklet will fall-back to using it for error handling - * (or if the bufio cache doesn't have required hashes). - */ - wq_flags |= WQ_HIGHPRI; - } + /* + * Using WQ_HIGHPRI improves throughput and completion latency by + * reducing wait times when reading from a dm-verity device. + * + * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI + * allows verify_wq to preempt softirq since verification in tasklet + * will fall-back to using it for error handling (or if the bufio cache + * doesn't have required hashes). 
+ */ + wq_flags |= WQ_HIGHPRI; v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus()); if (!v->verify_wq) { ti->error = "Cannot allocate workqueue"; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 60549b65c799..95a1ee3d314e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; if (geo->start > sz) { - DMWARN("Start sector is beyond the geometry limits."); + DMERR("Start sector is beyond the geometry limits."); return -EINVAL; } @@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio) /* The target will handle the io */ return; default: - DMWARN("unimplemented target endio return value: %d", r); + DMCRIT("unimplemented target endio return value: %d", r); BUG(); } } @@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone) dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); break; default: - DMWARN("unimplemented target map return value: %d", r); + DMCRIT("unimplemented target map return value: %d", r); BUG(); } } @@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor) md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { - DMWARN("unable to allocate device, out of memory."); + DMERR("unable to allocate device, out of memory."); return NULL; } @@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor) md->disk->minors = 1; md->disk->flags |= GENHD_FL_NO_PART; md->disk->fops = &dm_blk_dops; - md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index ba6592b3dab2..283b78b5766e 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -24,7 +24,7 @@ if MEDIA_SUPPORT config MEDIA_SUPPORT_FILTER bool "Filter media drivers" - default y if !EMBEDDED && !EXPERT + default y if !EXPERT help Configuring the media subsystem can be complex, as there are hundreds of drivers and other config options. 
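For readers following the drivers/md/dm-verity-target.c hunk above, here is a minimal, self-contained sketch (not part of the patch) of the workqueue pattern it adjusts: allocating a workqueue with WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI, the flag combination the patch now applies unconditionally, and queueing deferred work on it. The demo_* names are hypothetical and exist only for illustration.

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical module-local state, for illustration only. */
static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* Deferred verification-style work would run here. */
}

static int __init demo_init(void)
{
	/*
	 * Same flags the dm-verity change above now uses unconditionally:
	 * high-priority, unbound, and safe to use under memory reclaim.
	 */
	demo_wq = alloc_workqueue("demo_verify_wq",
				  WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
				  num_online_cpus());
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");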
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 41a79293ee02..4f5ab3cae8a7 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1027,6 +1027,7 @@ static const u8 cec_msg_size[256] = { [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED, [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED, [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH, + [CEC_MSG_SET_AUDIO_VOLUME_LEVEL] = 3 | DIRECTED, [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED, [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED, [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED, diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c index 3b583ed4da9d..6ebedc71d67d 100644 --- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c +++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c @@ -44,6 +44,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec) uint8_t *cec_message = cros_ec->event_data.data.cec_message; unsigned int len = cros_ec->event_size; + if (len > CEC_MAX_MSG_SIZE) + len = CEC_MAX_MSG_SIZE; cros_ec_cec->rx_msg.len = len; memcpy(cros_ec_cec->rx_msg.msg, cec_message, len); @@ -221,6 +223,8 @@ static const struct cec_dmi_match cec_dmi_match_table[] = { { "Google", "Moli", "0000:00:02.0", "Port B" }, /* Google Kinox */ { "Google", "Kinox", "0000:00:02.0", "Port B" }, + /* Google Kuldax */ + { "Google", "Kuldax", "0000:00:02.0", "Port B" }, }; static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev, diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c index ce9a9d922f11..0a30e7acdc10 100644 --- a/drivers/media/cec/platform/s5p/s5p_cec.c +++ b/drivers/media/cec/platform/s5p/s5p_cec.c @@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n"); cec->rx = STATE_BUSY; cec->msg.len = status >> 24; + if (cec->msg.len > CEC_MAX_MSG_SIZE) + cec->msg.len = CEC_MAX_MSG_SIZE; cec->msg.rx_status = CEC_RX_STATUS_OK; s5p_cec_get_rx_buf(cec, cec->msg.len, cec->msg.msg); diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 47d83e0a470c..9807f5411996 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -6660,7 +6660,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr) static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct drxk_state *state = fe->demodulator_priv; - u16 err; + u16 err = 0; dprintk(1, "\n"); diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c index c6ab531532be..e408049f6312 100644 --- a/drivers/media/i2c/ar0521.c +++ b/drivers/media/i2c/ar0521.c @@ -406,7 +406,6 @@ static int ar0521_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_format *format) { struct ar0521_dev *sensor = to_ar0521_dev(sd); - int ret = 0; ar0521_adj_fmt(&format->format); @@ -423,7 +422,7 @@ static int ar0521_set_fmt(struct v4l2_subdev *sd, } mutex_unlock(&sensor->lock); - return ret; + return 0; } static int ar0521_s_ctrl(struct v4l2_ctrl *ctrl) @@ -756,10 +755,12 @@ static int ar0521_power_on(struct device *dev) gpiod_set_value(sensor->reset_gpio, 0); usleep_range(4500, 5000); /* min 45000 clocks */ - for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) - if (ar0521_write_regs(sensor, initial_regs[cnt].data, - initial_regs[cnt].count)) + for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) { + ret = 
ar0521_write_regs(sensor, initial_regs[cnt].data, + initial_regs[cnt].count); + if (ret) goto off; + } ret = ar0521_write_reg(sensor, AR0521_REG_SERIAL_FORMAT, AR0521_REG_SERIAL_FORMAT_MIPI | diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index ee6bbbb977f7..25bf1132dbff 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -238,6 +238,43 @@ static int get_key_knc1(struct IR_i2c *ir, enum rc_proto *protocol, return 1; } +static int get_key_geniatech(struct IR_i2c *ir, enum rc_proto *protocol, + u32 *scancode, u8 *toggle) +{ + int i, rc; + unsigned char b; + + /* poll IR chip */ + for (i = 0; i < 4; i++) { + rc = i2c_master_recv(ir->c, &b, 1); + if (rc == 1) + break; + msleep(20); + } + if (rc != 1) { + dev_dbg(&ir->rc->dev, "read error\n"); + if (rc < 0) + return rc; + return -EIO; + } + + /* don't repeat the key */ + if (ir->old == b) + return 0; + ir->old = b; + + /* decode to RC5 */ + b &= 0x7f; + b = (b - 1) / 2; + + dev_dbg(&ir->rc->dev, "key %02x\n", b); + + *protocol = RC_PROTO_RC5; + *scancode = b; + *toggle = ir->old >> 7; + return 1; +} + static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { @@ -766,6 +803,13 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) rc_proto = RC_PROTO_BIT_OTHER; ir_codes = RC_MAP_EMPTY; break; + case 0x33: + name = "Geniatech"; + ir->get_key = get_key_geniatech; + rc_proto = RC_PROTO_BIT_RC5; + ir_codes = RC_MAP_TOTAL_MEDIA_IN_HAND_02; + ir->old = 0xfc; + break; case 0x6b: name = "FusionHDTV"; ir->get_key = get_key_fusionhdtv; @@ -825,6 +869,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) case IR_KBD_GET_KEY_KNC1: ir->get_key = get_key_knc1; break; + case IR_KBD_GET_KEY_GENIATECH: + ir->get_key = get_key_geniatech; + break; case IR_KBD_GET_KEY_FUSIONHDTV: ir->get_key = get_key_fusionhdtv; break; diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c index 246d8d182a8e..20f548a8a054 100644 --- a/drivers/media/i2c/isl7998x.c +++ b/drivers/media/i2c/isl7998x.c @@ -8,7 +8,7 @@ #include <linux/bitfield.h> #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of_graph.h> diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index fe18e5258d7a..46d91cd0870c 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c @@ -633,7 +633,7 @@ static int mt9v111_hw_config(struct mt9v111_dev *mt9v111) /* * Set pixel integration time to the whole frame time. - * This value controls the the shutter delay when running with AE + * This value controls the shutter delay when running with AE * disabled. If longer than frame time, it affects the output * frame rate. 
*/ diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 1852e1cfc7df..2d740397a5d4 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/types.h> @@ -447,8 +448,6 @@ struct ov5640_dev { /* lock to protect all members below */ struct mutex lock; - int power_count; - struct v4l2_mbus_framefmt fmt; bool pending_fmt_change; @@ -2696,39 +2695,24 @@ power_off: return ret; } -/* --------------- Subdev Operations --------------- */ - -static int ov5640_s_power(struct v4l2_subdev *sd, int on) +static int ov5640_sensor_suspend(struct device *dev) { - struct ov5640_dev *sensor = to_ov5640_dev(sd); - int ret = 0; - - mutex_lock(&sensor->lock); - - /* - * If the power count is modified from 0 to != 0 or from != 0 to 0, - * update the power state. - */ - if (sensor->power_count == !on) { - ret = ov5640_set_power(sensor, !!on); - if (ret) - goto out; - } + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov5640_dev *ov5640 = to_ov5640_dev(sd); - /* Update the power count. */ - sensor->power_count += on ? 1 : -1; - WARN_ON(sensor->power_count < 0); -out: - mutex_unlock(&sensor->lock); + return ov5640_set_power(ov5640, false); +} - if (on && !ret && sensor->power_count == 1) { - /* restore controls */ - ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler); - } +static int ov5640_sensor_resume(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov5640_dev *ov5640 = to_ov5640_dev(sd); - return ret; + return ov5640_set_power(ov5640, true); } +/* --------------- Subdev Operations --------------- */ + static int ov5640_try_frame_interval(struct ov5640_dev *sensor, struct v4l2_fract *fi, u32 width, u32 height) @@ -3314,6 +3298,9 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl) /* v4l2_ctrl_lock() locks our own mutex */ + if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev)) + return 0; + switch (ctrl->id) { case V4L2_CID_AUTOGAIN: val = ov5640_get_gain(sensor); @@ -3329,6 +3316,8 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl) break; } + pm_runtime_put_autosuspend(&sensor->i2c_client->dev); + return 0; } @@ -3358,9 +3347,9 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl) /* * If the device is not powered up by the host driver do * not apply any controls to H/W at this time. Instead - * the controls will be restored right after power-up. + * the controls will be restored at start streaming time. 
*/ - if (sensor->power_count == 0) + if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev)) return 0; switch (ctrl->id) { @@ -3402,6 +3391,8 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl) break; } + pm_runtime_put_autosuspend(&sensor->i2c_client->dev); + return ret; } @@ -3677,6 +3668,18 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable) struct ov5640_dev *sensor = to_ov5640_dev(sd); int ret = 0; + if (enable) { + ret = pm_runtime_resume_and_get(&sensor->i2c_client->dev); + if (ret < 0) + return ret; + + ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler); + if (ret) { + pm_runtime_put(&sensor->i2c_client->dev); + return ret; + } + } + mutex_lock(&sensor->lock); if (sensor->streaming == !enable) { @@ -3701,8 +3704,13 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable) if (!ret) sensor->streaming = enable; } + out: mutex_unlock(&sensor->lock); + + if (!enable || ret) + pm_runtime_put_autosuspend(&sensor->i2c_client->dev); + return ret; } @@ -3724,7 +3732,6 @@ static int ov5640_init_cfg(struct v4l2_subdev *sd, } static const struct v4l2_subdev_core_ops ov5640_core_ops = { - .s_power = ov5640_s_power, .log_status = v4l2_ctrl_subdev_log_status, .subscribe_event = v4l2_ctrl_subdev_subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, @@ -3770,26 +3777,20 @@ static int ov5640_check_chip_id(struct ov5640_dev *sensor) int ret = 0; u16 chip_id; - ret = ov5640_set_power_on(sensor); - if (ret) - return ret; - ret = ov5640_read_reg16(sensor, OV5640_REG_CHIP_ID, &chip_id); if (ret) { dev_err(&client->dev, "%s: failed to read chip identifier\n", __func__); - goto power_off; + return ret; } if (chip_id != 0x5640) { dev_err(&client->dev, "%s: wrong chip identifier, expected 0x5640, got 0x%x\n", __func__, chip_id); - ret = -ENXIO; + return -ENXIO; } -power_off: - ov5640_set_power_off(sensor); - return ret; + return 0; } static int ov5640_probe(struct i2c_client *client) @@ -3880,26 +3881,43 @@ static int ov5640_probe(struct i2c_client *client) ret = ov5640_get_regulators(sensor); if (ret) - return ret; + goto entity_cleanup; mutex_init(&sensor->lock); - ret = ov5640_check_chip_id(sensor); + ret = ov5640_init_controls(sensor); if (ret) goto entity_cleanup; - ret = ov5640_init_controls(sensor); - if (ret) + ret = ov5640_sensor_resume(dev); + if (ret) { + dev_err(dev, "failed to power on\n"); goto entity_cleanup; + } + + pm_runtime_set_active(dev); + pm_runtime_get_noresume(dev); + pm_runtime_enable(dev); + + ret = ov5640_check_chip_id(sensor); + if (ret) + goto err_pm_runtime; ret = v4l2_async_register_subdev_sensor(&sensor->sd); if (ret) - goto free_ctrls; + goto err_pm_runtime; + + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_runtime_put_autosuspend(dev); return 0; -free_ctrls: +err_pm_runtime: + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); v4l2_ctrl_handler_free(&sensor->ctrls.handler); + ov5640_sensor_suspend(dev); entity_cleanup: media_entity_cleanup(&sensor->sd.entity); mutex_destroy(&sensor->lock); @@ -3910,6 +3928,12 @@ static void ov5640_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov5640_dev *sensor = to_ov5640_dev(sd); + struct device *dev = &client->dev; + + pm_runtime_disable(dev); + if (!pm_runtime_status_suspended(dev)) + ov5640_sensor_suspend(dev); + pm_runtime_set_suspended(dev); v4l2_async_unregister_subdev(&sensor->sd); media_entity_cleanup(&sensor->sd.entity); @@ -3917,6 +3941,10 @@ static void ov5640_remove(struct i2c_client *client) 
mutex_destroy(&sensor->lock); } +static const struct dev_pm_ops ov5640_pm_ops = { + SET_RUNTIME_PM_OPS(ov5640_sensor_suspend, ov5640_sensor_resume, NULL) +}; + static const struct i2c_device_id ov5640_id[] = { {"ov5640", 0}, {}, @@ -3933,6 +3961,7 @@ static struct i2c_driver ov5640_i2c_driver = { .driver = { .name = "ov5640", .of_match_table = ov5640_dt_ids, + .pm = &ov5640_pm_ops, }, .id_table = ov5640_id, .probe_new = ov5640_probe, diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c index a233c34b168e..cae1866134a0 100644 --- a/drivers/media/i2c/ov8865.c +++ b/drivers/media/i2c/ov8865.c @@ -3034,11 +3034,13 @@ static int ov8865_probe(struct i2c_client *client) &rate); if (!ret && sensor->extclk) { ret = clk_set_rate(sensor->extclk, rate); - if (ret) - return dev_err_probe(dev, ret, - "failed to set clock rate\n"); + if (ret) { + dev_err_probe(dev, ret, "failed to set clock rate\n"); + goto error_endpoint; + } } else if (ret && !sensor->extclk) { - return dev_err_probe(dev, ret, "invalid clock config\n"); + dev_err_probe(dev, ret, "invalid clock config\n"); + goto error_endpoint; } sensor->extclk_rate = rate ? rate : clk_get_rate(sensor->extclk); diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c index b8176a3b76d3..25020d58eb06 100644 --- a/drivers/media/mc/mc-device.c +++ b/drivers/media/mc/mc-device.c @@ -581,7 +581,7 @@ static void __media_device_unregister_entity(struct media_entity *entity) struct media_device *mdev = entity->graph_obj.mdev; struct media_link *link, *tmp; struct media_interface *intf; - unsigned int i; + struct media_pad *iter; ida_free(&mdev->entity_internal_idx, entity->internal_idx); @@ -597,8 +597,8 @@ static void __media_device_unregister_entity(struct media_entity *entity) __media_entity_remove_links(entity); /* Remove all pads that belong to this entity */ - for (i = 0; i < entity->num_pads; i++) - media_gobj_destroy(&entity->pads[i].graph_obj); + media_entity_for_each_pad(entity, iter) + media_gobj_destroy(&iter->graph_obj); /* Remove the entity */ media_gobj_destroy(&entity->graph_obj); @@ -610,7 +610,7 @@ int __must_check media_device_register_entity(struct media_device *mdev, struct media_entity *entity) { struct media_entity_notify *notify, *next; - unsigned int i; + struct media_pad *iter; int ret; if (entity->function == MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN || @@ -639,9 +639,8 @@ int __must_check media_device_register_entity(struct media_device *mdev, media_gobj_create(mdev, MEDIA_GRAPH_ENTITY, &entity->graph_obj); /* Initialize objects at the pads */ - for (i = 0; i < entity->num_pads; i++) - media_gobj_create(mdev, MEDIA_GRAPH_PAD, - &entity->pads[i].graph_obj); + media_entity_for_each_pad(entity, iter) + media_gobj_create(mdev, MEDIA_GRAPH_PAD, &iter->graph_obj); /* invoke entity_notify callbacks */ list_for_each_entry_safe(notify, next, &mdev->entity_notify, list) diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index afd1bd7ff7b6..b8bcbc734eaf 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -59,10 +59,12 @@ static inline const char *link_type_name(struct media_link *link) } } -__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum, - int idx_max) +__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum, + struct media_device *mdev) { - idx_max = ALIGN(idx_max, BITS_PER_LONG); + int idx_max; + + idx_max = ALIGN(mdev->entity_internal_idx_max + 1, BITS_PER_LONG); ent_enum->bmap = bitmap_zalloc(idx_max, GFP_KERNEL); if 
(!ent_enum->bmap) return -ENOMEM; @@ -71,7 +73,7 @@ __must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum, return 0; } -EXPORT_SYMBOL_GPL(__media_entity_enum_init); +EXPORT_SYMBOL_GPL(media_entity_enum_init); void media_entity_enum_cleanup(struct media_entity_enum *ent_enum) { @@ -193,7 +195,8 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads, struct media_pad *pads) { struct media_device *mdev = entity->graph_obj.mdev; - unsigned int i; + struct media_pad *iter; + unsigned int i = 0; if (num_pads >= MEDIA_ENTITY_MAX_PADS) return -E2BIG; @@ -204,12 +207,12 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads, if (mdev) mutex_lock(&mdev->graph_mutex); - for (i = 0; i < num_pads; i++) { - pads[i].entity = entity; - pads[i].index = i; + media_entity_for_each_pad(entity, iter) { + iter->entity = entity; + iter->index = i++; if (mdev) media_gobj_create(mdev, MEDIA_GRAPH_PAD, - &entity->pads[i].graph_obj); + &iter->graph_obj); } if (mdev) @@ -223,6 +226,33 @@ EXPORT_SYMBOL_GPL(media_entity_pads_init); * Graph traversal */ +/* + * This function checks the interdependency inside the entity between @pad0 + * and @pad1. If two pads are interdependent they are part of the same pipeline + * and enabling one of the pads means that the other pad will become "locked" + * and doesn't allow configuration changes. + * + * This function uses the &media_entity_operations.has_pad_interdep() operation + * to check the dependency inside the entity between @pad0 and @pad1. If the + * has_pad_interdep operation is not implemented, all pads of the entity are + * considered to be interdependent. + */ +static bool media_entity_has_pad_interdep(struct media_entity *entity, + unsigned int pad0, unsigned int pad1) +{ + if (pad0 >= entity->num_pads || pad1 >= entity->num_pads) + return false; + + if (entity->pads[pad0].flags & entity->pads[pad1].flags & + (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE)) + return false; + + if (!entity->ops || !entity->ops->has_pad_interdep) + return true; + + return entity->ops->has_pad_interdep(entity, pad0, pad1); +} + static struct media_entity * media_entity_other(struct media_entity *entity, struct media_link *link) { @@ -367,139 +397,435 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph) } EXPORT_SYMBOL_GPL(media_graph_walk_next); -int media_entity_get_fwnode_pad(struct media_entity *entity, - struct fwnode_handle *fwnode, - unsigned long direction_flags) +/* ----------------------------------------------------------------------------- + * Pipeline management + */ + +/* + * The pipeline traversal stack stores pads that are reached during graph + * traversal, with a list of links to be visited to continue the traversal. + * When a new pad is reached, an entry is pushed on the top of the stack and + * points to the incoming pad and the first link of the entity. + * + * To find further pads in the pipeline, the traversal algorithm follows + * internal pad dependencies in the entity, and then links in the graph. It + * does so by iterating over all links of the entity, and following enabled + * links that originate from a pad that is internally connected to the incoming + * pad, as reported by the media_entity_has_pad_interdep() function. 
+ */ + +/** + * struct media_pipeline_walk_entry - Entry in the pipeline traversal stack + * + * @pad: The media pad being visited + * @links: Links left to be visited + */ +struct media_pipeline_walk_entry { + struct media_pad *pad; + struct list_head *links; +}; + +/** + * struct media_pipeline_walk - State used by the media pipeline traversal + * algorithm + * + * @mdev: The media device + * @stack: Depth-first search stack + * @stack.size: Number of allocated entries in @stack.entries + * @stack.top: Index of the top stack entry (-1 if the stack is empty) + * @stack.entries: Stack entries + */ +struct media_pipeline_walk { + struct media_device *mdev; + + struct { + unsigned int size; + int top; + struct media_pipeline_walk_entry *entries; + } stack; +}; + +#define MEDIA_PIPELINE_STACK_GROW_STEP 16 + +static struct media_pipeline_walk_entry * +media_pipeline_walk_top(struct media_pipeline_walk *walk) { - struct fwnode_endpoint endpoint; - unsigned int i; + return &walk->stack.entries[walk->stack.top]; +} + +static bool media_pipeline_walk_empty(struct media_pipeline_walk *walk) +{ + return walk->stack.top == -1; +} + +/* Increase the stack size by MEDIA_PIPELINE_STACK_GROW_STEP elements. */ +static int media_pipeline_walk_resize(struct media_pipeline_walk *walk) +{ + struct media_pipeline_walk_entry *entries; + unsigned int new_size; + + /* Safety check, to avoid stack overflows in case of bugs. */ + if (walk->stack.size >= 256) + return -E2BIG; + + new_size = walk->stack.size + MEDIA_PIPELINE_STACK_GROW_STEP; + + entries = krealloc(walk->stack.entries, + new_size * sizeof(*walk->stack.entries), + GFP_KERNEL); + if (!entries) + return -ENOMEM; + + walk->stack.entries = entries; + walk->stack.size = new_size; + + return 0; +} + +/* Push a new entry on the stack. */ +static int media_pipeline_walk_push(struct media_pipeline_walk *walk, + struct media_pad *pad) +{ + struct media_pipeline_walk_entry *entry; int ret; - if (!entity->ops || !entity->ops->get_fwnode_pad) { - for (i = 0; i < entity->num_pads; i++) { - if (entity->pads[i].flags & direction_flags) - return i; + if (walk->stack.top + 1 >= walk->stack.size) { + ret = media_pipeline_walk_resize(walk); + if (ret) + return ret; + } + + walk->stack.top++; + entry = media_pipeline_walk_top(walk); + entry->pad = pad; + entry->links = pad->entity->links.next; + + dev_dbg(walk->mdev->dev, + "media pipeline: pushed entry %u: '%s':%u\n", + walk->stack.top, pad->entity->name, pad->index); + + return 0; +} + +/* + * Move the top entry link cursor to the next link. If all links of the entry + * have been visited, pop the entry itself. + */ +static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) +{ + struct media_pipeline_walk_entry *entry; + + if (WARN_ON(walk->stack.top < 0)) + return; + + entry = media_pipeline_walk_top(walk); + + if (entry->links->next == &entry->pad->entity->links) { + dev_dbg(walk->mdev->dev, + "media pipeline: entry %u has no more links, popping\n", + walk->stack.top); + + walk->stack.top--; + return; + } + + entry->links = entry->links->next; + + dev_dbg(walk->mdev->dev, + "media pipeline: moved entry %u to next link\n", + walk->stack.top); +} + +/* Free all memory allocated while walking the pipeline. */ +static void media_pipeline_walk_destroy(struct media_pipeline_walk *walk) +{ + kfree(walk->stack.entries); +} + +/* Add a pad to the pipeline and push it to the stack. 
*/ +static int media_pipeline_add_pad(struct media_pipeline *pipe, + struct media_pipeline_walk *walk, + struct media_pad *pad) +{ + struct media_pipeline_pad *ppad; + + list_for_each_entry(ppad, &pipe->pads, list) { + if (ppad->pad == pad) { + dev_dbg(pad->graph_obj.mdev->dev, + "media pipeline: already contains pad '%s':%u\n", + pad->entity->name, pad->index); + return 0; } + } - return -ENXIO; + ppad = kzalloc(sizeof(*ppad), GFP_KERNEL); + if (!ppad) + return -ENOMEM; + + ppad->pipe = pipe; + ppad->pad = pad; + + list_add_tail(&ppad->list, &pipe->pads); + + dev_dbg(pad->graph_obj.mdev->dev, + "media pipeline: added pad '%s':%u\n", + pad->entity->name, pad->index); + + return media_pipeline_walk_push(walk, pad); +} + +/* Explore the next link of the entity at the top of the stack. */ +static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + struct media_pipeline_walk *walk) +{ + struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk); + struct media_pad *pad; + struct media_link *link; + struct media_pad *local; + struct media_pad *remote; + int ret; + + pad = entry->pad; + link = list_entry(entry->links, typeof(*link), list); + media_pipeline_walk_pop(walk); + + dev_dbg(walk->mdev->dev, + "media pipeline: exploring link '%s':%u -> '%s':%u\n", + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + + /* Skip links that are not enabled. */ + if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (disabled)\n"); + return 0; } - ret = fwnode_graph_parse_endpoint(fwnode, &endpoint); + /* Get the local pad and remote pad. */ + if (link->source->entity == pad->entity) { + local = link->source; + remote = link->sink; + } else { + local = link->sink; + remote = link->source; + } + + /* + * Skip links that originate from a different pad than the incoming pad + * that is not connected internally in the entity to the incoming pad. + */ + if (pad != local && + !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (no route)\n"); + return 0; + } + + /* + * Add the local and remote pads of the link to the pipeline and push + * them to the stack, if they're not already present. + */ + ret = media_pipeline_add_pad(pipe, walk, local); if (ret) return ret; - ret = entity->ops->get_fwnode_pad(entity, &endpoint); - if (ret < 0) + ret = media_pipeline_add_pad(pipe, walk, remote); + if (ret) return ret; - if (ret >= entity->num_pads) - return -ENXIO; + return 0; +} - if (!(entity->pads[ret].flags & direction_flags)) - return -ENXIO; +static void media_pipeline_cleanup(struct media_pipeline *pipe) +{ + while (!list_empty(&pipe->pads)) { + struct media_pipeline_pad *ppad; - return ret; + ppad = list_first_entry(&pipe->pads, typeof(*ppad), list); + list_del(&ppad->list); + kfree(ppad); + } } -EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad); -/* ----------------------------------------------------------------------------- - * Pipeline management - */ +static int media_pipeline_populate(struct media_pipeline *pipe, + struct media_pad *pad) +{ + struct media_pipeline_walk walk = { }; + struct media_pipeline_pad *ppad; + int ret; + + /* + * Populate the media pipeline by walking the media graph, starting + * from @pad. 
+ */ + INIT_LIST_HEAD(&pipe->pads); + pipe->mdev = pad->graph_obj.mdev; + + walk.mdev = pipe->mdev; + walk.stack.top = -1; + ret = media_pipeline_add_pad(pipe, &walk, pad); + if (ret) + goto done; + + /* + * Use a depth-first search algorithm: as long as the stack is not + * empty, explore the next link of the top entry. The + * media_pipeline_explore_next_link() function will either move to the + * next link, pop the entry if fully visited, or add new entries on + * top. + */ + while (!media_pipeline_walk_empty(&walk)) { + ret = media_pipeline_explore_next_link(pipe, &walk); + if (ret) + goto done; + } + + dev_dbg(pad->graph_obj.mdev->dev, + "media pipeline populated, found pads:\n"); + + list_for_each_entry(ppad, &pipe->pads, list) + dev_dbg(pad->graph_obj.mdev->dev, "- '%s':%u\n", + ppad->pad->entity->name, ppad->pad->index); + + WARN_ON(walk.stack.top != -1); -__must_check int __media_pipeline_start(struct media_entity *entity, + ret = 0; + +done: + media_pipeline_walk_destroy(&walk); + + if (ret) + media_pipeline_cleanup(pipe); + + return ret; +} + +__must_check int __media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe) { - struct media_device *mdev = entity->graph_obj.mdev; - struct media_graph *graph = &pipe->graph; - struct media_entity *entity_err = entity; - struct media_link *link; + struct media_device *mdev = pad->entity->graph_obj.mdev; + struct media_pipeline_pad *err_ppad; + struct media_pipeline_pad *ppad; int ret; - if (pipe->streaming_count) { - pipe->streaming_count++; + lockdep_assert_held(&mdev->graph_mutex); + + /* + * If the entity is already part of a pipeline, that pipeline must + * be the same as the pipe given to media_pipeline_start(). + */ + if (WARN_ON(pad->pipe && pad->pipe != pipe)) + return -EINVAL; + + /* + * If the pipeline has already been started, it is guaranteed to be + * valid, so just increase the start count. + */ + if (pipe->start_count) { + pipe->start_count++; return 0; } - ret = media_graph_walk_init(&pipe->graph, mdev); + /* + * Populate the pipeline. This populates the media_pipeline pads list + * with media_pipeline_pad instances for each pad found during graph + * walk. + */ + ret = media_pipeline_populate(pipe, pad); if (ret) return ret; - media_graph_walk_start(&pipe->graph, entity); + /* + * Now that all the pads in the pipeline have been gathered, perform + * the validation steps. + */ + + list_for_each_entry(ppad, &pipe->pads, list) { + struct media_pad *pad = ppad->pad; + struct media_entity *entity = pad->entity; + bool has_enabled_link = false; + bool has_link = false; + struct media_link *link; - while ((entity = media_graph_walk_next(graph))) { - DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS); - DECLARE_BITMAP(has_no_links, MEDIA_ENTITY_MAX_PADS); + dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name, + pad->index); - if (entity->pipe && entity->pipe != pipe) { - pr_err("Pipe active for %s. Can't start for %s\n", - entity->name, - entity_err->name); + /* + * 1. Ensure that the pad doesn't already belong to a different + * pipeline. + */ + if (pad->pipe) { + dev_dbg(mdev->dev, "Failed to start pipeline: pad '%s':%u busy\n", + pad->entity->name, pad->index); ret = -EBUSY; goto error; } - /* Already streaming --- no need to check. */ - if (entity->pipe) - continue; - - entity->pipe = pipe; - - if (!entity->ops || !entity->ops->link_validate) - continue; - - bitmap_zero(active, entity->num_pads); - bitmap_fill(has_no_links, entity->num_pads); - + /* + * 2. 
Validate all active links whose sink is the current pad. + * Validation of the source pads is performed in the context of + * the connected sink pad to avoid duplicating checks. + */ for_each_media_entity_data_link(entity, link) { - struct media_pad *pad = link->sink->entity == entity - ? link->sink : link->source; + /* Skip links unrelated to the current pad. */ + if (link->sink != pad && link->source != pad) + continue; - /* Mark that a pad is connected by a link. */ - bitmap_clear(has_no_links, pad->index, 1); + /* Record if the pad has links and enabled links. */ + if (link->flags & MEDIA_LNK_FL_ENABLED) + has_enabled_link = true; + has_link = true; /* - * Pads that either do not need to connect or - * are connected through an enabled link are - * fine. + * Validate the link if it's enabled and has the + * current pad as its sink. */ - if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) || - link->flags & MEDIA_LNK_FL_ENABLED) - bitmap_set(active, pad->index, 1); + if (!(link->flags & MEDIA_LNK_FL_ENABLED)) + continue; - /* - * Link validation will only take place for - * sink ends of the link that are enabled. - */ - if (link->sink != pad || - !(link->flags & MEDIA_LNK_FL_ENABLED)) + if (link->sink != pad) + continue; + + if (!entity->ops || !entity->ops->link_validate) continue; ret = entity->ops->link_validate(link); - if (ret < 0 && ret != -ENOIOCTLCMD) { - dev_dbg(entity->graph_obj.mdev->dev, - "link validation failed for '%s':%u -> '%s':%u, error %d\n", + if (ret) { + dev_dbg(mdev->dev, + "Link '%s':%u -> '%s':%u failed validation: %d\n", link->source->entity->name, link->source->index, - entity->name, link->sink->index, ret); + link->sink->entity->name, + link->sink->index, ret); goto error; } - } - /* Either no links or validated links are fine. */ - bitmap_or(active, active, has_no_links, entity->num_pads); + dev_dbg(mdev->dev, + "Link '%s':%u -> '%s':%u is valid\n", + link->source->entity->name, + link->source->index, + link->sink->entity->name, + link->sink->index); + } - if (!bitmap_full(active, entity->num_pads)) { + /* + * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set, + * ensure that it has either no link or an enabled link. + */ + if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link && + !has_enabled_link) { + dev_dbg(mdev->dev, + "Pad '%s':%u must be connected by an enabled link\n", + pad->entity->name, pad->index); ret = -ENOLINK; - dev_dbg(entity->graph_obj.mdev->dev, - "'%s':%u must be connected by an enabled link\n", - entity->name, - (unsigned)find_first_zero_bit( - active, entity->num_pads)); goto error; } + + /* Validation passed, store the pipe pointer in the pad. */ + pad->pipe = pipe; } - pipe->streaming_count++; + pipe->start_count++; return 0; @@ -508,42 +834,37 @@ error: * Link validation on graph failed. We revert what we did and * return the error. */ - media_graph_walk_start(graph, entity_err); - while ((entity_err = media_graph_walk_next(graph))) { - entity_err->pipe = NULL; - - /* - * We haven't started entities further than this so we quit - * here. 
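The validation steps above are what drivers get when they call the new pad-based media_pipeline_start(). A rough usage sketch under the prototypes introduced by this patch, media_pipeline_start(pad, pipe) and media_pipeline_stop(pad), is shown below; struct my_video and the my_hw_*() helpers are hypothetical placeholders, and most drivers converted later in this series use the video_device_pipeline_start()/video_device_pipeline_stop() wrappers instead, which derive the starting pad from the video device.

/* Illustration only: struct my_video and my_hw_*() are hypothetical;
 * only the media_pipeline_start()/_stop() calls follow the new
 * pad-based prototypes from this patch.
 */
static int my_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct my_video *video = vb2_get_drv_priv(q);
	struct media_pad *pad = &video->vdev.entity.pads[0];
	int ret;

	ret = media_pipeline_start(pad, &video->pipe);
	if (ret)
		return ret;

	ret = my_hw_start(video);
	if (ret)
		media_pipeline_stop(pad);

	return ret;
}

static void my_stop_streaming(struct vb2_queue *q)
{
	struct my_video *video = vb2_get_drv_priv(q);

	my_hw_stop(video);
	media_pipeline_stop(&video->vdev.entity.pads[0]);
}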
- */ - if (entity_err == entity) + list_for_each_entry(err_ppad, &pipe->pads, list) { + if (err_ppad == ppad) break; + + err_ppad->pad->pipe = NULL; } - media_graph_walk_cleanup(graph); + media_pipeline_cleanup(pipe); return ret; } EXPORT_SYMBOL_GPL(__media_pipeline_start); -__must_check int media_pipeline_start(struct media_entity *entity, +__must_check int media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe) { - struct media_device *mdev = entity->graph_obj.mdev; + struct media_device *mdev = pad->entity->graph_obj.mdev; int ret; mutex_lock(&mdev->graph_mutex); - ret = __media_pipeline_start(entity, pipe); + ret = __media_pipeline_start(pad, pipe); mutex_unlock(&mdev->graph_mutex); return ret; } EXPORT_SYMBOL_GPL(media_pipeline_start); -void __media_pipeline_stop(struct media_entity *entity) +void __media_pipeline_stop(struct media_pad *pad) { - struct media_graph *graph = &entity->pipe->graph; - struct media_pipeline *pipe = entity->pipe; + struct media_pipeline *pipe = pad->pipe; + struct media_pipeline_pad *ppad; /* * If the following check fails, the driver has performed an @@ -552,29 +873,65 @@ void __media_pipeline_stop(struct media_entity *entity) if (WARN_ON(!pipe)) return; - if (--pipe->streaming_count) + if (--pipe->start_count) return; - media_graph_walk_start(graph, entity); - - while ((entity = media_graph_walk_next(graph))) - entity->pipe = NULL; + list_for_each_entry(ppad, &pipe->pads, list) + ppad->pad->pipe = NULL; - media_graph_walk_cleanup(graph); + media_pipeline_cleanup(pipe); + if (pipe->allocated) + kfree(pipe); } EXPORT_SYMBOL_GPL(__media_pipeline_stop); -void media_pipeline_stop(struct media_entity *entity) +void media_pipeline_stop(struct media_pad *pad) { - struct media_device *mdev = entity->graph_obj.mdev; + struct media_device *mdev = pad->entity->graph_obj.mdev; mutex_lock(&mdev->graph_mutex); - __media_pipeline_stop(entity); + __media_pipeline_stop(pad); mutex_unlock(&mdev->graph_mutex); } EXPORT_SYMBOL_GPL(media_pipeline_stop); +__must_check int media_pipeline_alloc_start(struct media_pad *pad) +{ + struct media_device *mdev = pad->entity->graph_obj.mdev; + struct media_pipeline *new_pipe = NULL; + struct media_pipeline *pipe; + int ret; + + mutex_lock(&mdev->graph_mutex); + + /* + * Is the entity already part of a pipeline? If not, we need to allocate + * a pipe. 
+ */ + pipe = media_pad_pipeline(pad); + if (!pipe) { + new_pipe = kzalloc(sizeof(*new_pipe), GFP_KERNEL); + if (!new_pipe) { + ret = -ENOMEM; + goto out; + } + + pipe = new_pipe; + pipe->allocated = true; + } + + ret = __media_pipeline_start(pad, pipe); + if (ret) + kfree(new_pipe); + +out: + mutex_unlock(&mdev->graph_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(media_pipeline_alloc_start); + /* ----------------------------------------------------------------------------- * Links management */ @@ -829,7 +1186,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags) { const u32 mask = MEDIA_LNK_FL_ENABLED; struct media_device *mdev; - struct media_entity *source, *sink; + struct media_pad *source, *sink; int ret = -EBUSY; if (link == NULL) @@ -845,12 +1202,11 @@ int __media_entity_setup_link(struct media_link *link, u32 flags) if (link->flags == flags) return 0; - source = link->source->entity; - sink = link->sink->entity; + source = link->source; + sink = link->sink; if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) && - (media_entity_is_streaming(source) || - media_entity_is_streaming(sink))) + (media_pad_is_streaming(source) || media_pad_is_streaming(sink))) return -EBUSY; mdev = source->graph_obj.mdev; @@ -991,6 +1347,60 @@ struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad) } EXPORT_SYMBOL_GPL(media_pad_remote_pad_unique); +int media_entity_get_fwnode_pad(struct media_entity *entity, + struct fwnode_handle *fwnode, + unsigned long direction_flags) +{ + struct fwnode_endpoint endpoint; + unsigned int i; + int ret; + + if (!entity->ops || !entity->ops->get_fwnode_pad) { + for (i = 0; i < entity->num_pads; i++) { + if (entity->pads[i].flags & direction_flags) + return i; + } + + return -ENXIO; + } + + ret = fwnode_graph_parse_endpoint(fwnode, &endpoint); + if (ret) + return ret; + + ret = entity->ops->get_fwnode_pad(entity, &endpoint); + if (ret < 0) + return ret; + + if (ret >= entity->num_pads) + return -ENXIO; + + if (!(entity->pads[ret].flags & direction_flags)) + return -ENXIO; + + return ret; +} +EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad); + +struct media_pipeline *media_entity_pipeline(struct media_entity *entity) +{ + struct media_pad *pad; + + media_entity_for_each_pad(entity, pad) { + if (pad->pipe) + return pad->pipe; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(media_entity_pipeline); + +struct media_pipeline *media_pad_pipeline(struct media_pad *pad) +{ + return pad->pipe; +} +EXPORT_SYMBOL_GPL(media_pad_pipeline); + static void media_interface_init(struct media_device *mdev, struct media_interface *intf, u32 gobj_type, diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c index d3358643fb7d..ee6e71157786 100644 --- a/drivers/media/pci/cx18/cx18-av-core.c +++ b/drivers/media/pci/cx18/cx18-av-core.c @@ -339,7 +339,7 @@ void cx18_av_std_setup(struct cx18 *cx) /* * For a 13.5 Mpps clock and 15,625 Hz line rate, a line is - * is 864 pixels = 720 active + 144 blanking. ITU-R BT.601 + * 864 pixels = 720 active + 144 blanking. ITU-R BT.601 * specifies 12 luma clock periods or ~ 0.9 * 13.5 Mpps after * the end of active video to start a horizontal line, so that * leaves 132 pixels of hblank to ignore. @@ -399,7 +399,7 @@ void cx18_av_std_setup(struct cx18 *cx) /* * For a 13.5 Mpps clock and 15,734.26 Hz line rate, a line is - * is 858 pixels = 720 active + 138 blanking. The Hsync leading + * 858 pixels = 720 active + 138 blanking. 
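The two cx18 comment fixes above simply drop a duplicated "is", and the arithmetic they describe holds up: 13.5 MHz / 15,625 Hz = 864 total pixels per PAL line, of which 864 - 720 = 144 are blanking, and subtracting the 12 luma clock periods after active video leaves the 132 pixels of hblank the driver ignores. For NTSC, 13.5 MHz / 15,734.26 Hz ≈ 858 pixels per line, 858 - 720 = 138 blanking, and the roughly 1.2 µs × 13.5 MHz ≈ 16 pixel Hsync lead-in leaves the 122 pixels of hblank quoted in the second comment.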
The Hsync leading * edge should happen 1.2 us * 13.5 Mpps ~= 16 pixels after the * end of active video, leaving 122 pixels of hblank to ignore * before active video starts. diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c index ce0ef0b8186f..a04a1d33fadb 100644 --- a/drivers/media/pci/cx88/cx88-input.c +++ b/drivers/media/pci/cx88/cx88-input.c @@ -586,7 +586,7 @@ void cx88_i2c_init_ir(struct cx88_core *core) { struct i2c_board_info info; static const unsigned short default_addr_list[] = { - 0x18, 0x6b, 0x71, + 0x18, 0x33, 0x6b, 0x71, I2C_CLIENT_END }; static const unsigned short pvr2000_addr_list[] = { diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index b509c2a03852..c0ef03ed74f9 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1388,6 +1388,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, } fallthrough; case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: + case CX88_BOARD_NOTONLYTV_LV3H: request_module("ir-kbd-i2c"); } diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c index a3fe547b7fce..390bd5ea3472 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c @@ -989,7 +989,7 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) return r; } - r = media_pipeline_start(&q->vdev.entity, &q->pipe); + r = video_device_pipeline_start(&q->vdev, &q->pipe); if (r) goto fail_pipeline; @@ -1009,7 +1009,7 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) fail_csi2_subdev: cio2_hw_exit(cio2, q); fail_hw: - media_pipeline_stop(&q->vdev.entity); + video_device_pipeline_stop(&q->vdev); fail_pipeline: dev_dbg(dev, "failed to start streaming (%d)\n", r); cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED); @@ -1030,7 +1030,7 @@ static void cio2_vb2_stop_streaming(struct vb2_queue *vq) cio2_hw_exit(cio2, q); synchronize_irq(cio2->pci_dev->irq); cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR); - media_pipeline_stop(&q->vdev.entity); + video_device_pipeline_stop(&q->vdev); pm_runtime_put(dev); cio2->streaming = false; } diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c index 8a3eed957ae6..b779e0ba916c 100644 --- a/drivers/media/platform/amphion/vpu_v4l2.c +++ b/drivers/media/platform/amphion/vpu_v4l2.c @@ -603,6 +603,10 @@ static int vpu_v4l2_release(struct vpu_inst *inst) inst->workqueue = NULL; } + if (inst->fh.m2m_ctx) { + v4l2_m2m_ctx_release(inst->fh.m2m_ctx); + inst->fh.m2m_ctx = NULL; + } v4l2_ctrl_handler_free(&inst->ctrl_handler); mutex_destroy(&inst->lock); v4l2_fh_del(&inst->fh); @@ -685,13 +689,6 @@ int vpu_v4l2_close(struct file *file) vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst); - vpu_inst_lock(inst); - if (inst->fh.m2m_ctx) { - v4l2_m2m_ctx_release(inst->fh.m2m_ctx); - inst->fh.m2m_ctx = NULL; - } - vpu_inst_unlock(inst); - call_void_vop(inst, release); vpu_inst_unregister(inst); vpu_inst_put(inst); diff --git a/drivers/media/platform/chips-media/coda-jpeg.c b/drivers/media/platform/chips-media/coda-jpeg.c index a0b22b07f69a..435e7030fc2a 100644 --- a/drivers/media/platform/chips-media/coda-jpeg.c +++ b/drivers/media/platform/chips-media/coda-jpeg.c @@ -421,7 +421,7 @@ static inline void coda9_jpeg_write_huff_values(struct coda_dev *dev, u8 *bits, coda_write(dev, (s32)values[i], CODA9_REG_JPEG_HUFF_DATA); } -static 
int coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx) +static void coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx) { struct coda_huff_tab *huff_tab = ctx->params.jpeg_huff_tab; struct coda_dev *dev = ctx->dev; @@ -455,7 +455,6 @@ static int coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx) coda9_jpeg_write_huff_values(dev, huff_tab->luma_ac, 162); coda9_jpeg_write_huff_values(dev, huff_tab->chroma_ac, 162); coda_write(dev, 0x000, CODA9_REG_JPEG_HUFF_CTRL); - return 0; } static inline void coda9_jpeg_write_qmat_tab(struct coda_dev *dev, @@ -1394,14 +1393,8 @@ static int coda9_jpeg_prepare_decode(struct coda_ctx *ctx) coda_write(dev, ctx->params.jpeg_restart_interval, CODA9_REG_JPEG_RST_INTVAL); - if (ctx->params.jpeg_huff_tab) { - ret = coda9_jpeg_dec_huff_setup(ctx); - if (ret < 0) { - v4l2_err(&dev->v4l2_dev, - "failed to set up Huffman tables: %d\n", ret); - return ret; - } - } + if (ctx->params.jpeg_huff_tab) + coda9_jpeg_dec_huff_setup(ctx); coda9_jpeg_qmat_setup(ctx); diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c index 29f6c1cd3de7..86c054600a08 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c @@ -457,7 +457,7 @@ err_cmdq_data: kfree(path); atomic_dec(&mdp->job_count); wake_up(&mdp->callback_wq); - if (cmd->pkt.buf_size > 0) + if (cmd && cmd->pkt.buf_size > 0) mdp_cmdq_pkt_destroy(&cmd->pkt); kfree(comps); kfree(cmd); diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c index e62abf3587bf..d3eaf8884412 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c @@ -682,7 +682,7 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp) int i, ret; if (comp->comp_dev) { - ret = pm_runtime_get_sync(comp->comp_dev); + ret = pm_runtime_resume_and_get(comp->comp_dev); if (ret < 0) { dev_err(dev, "Failed to get power, err %d. type:%d id:%d\n", @@ -699,6 +699,7 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp) dev_err(dev, "Failed to enable clk %d. 
type:%d id:%d\n", i, comp->type, comp->id); + pm_runtime_put(comp->comp_dev); return ret; } } @@ -869,7 +870,7 @@ static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp, ret = mdp_comp_init(mdp, node, comp, id); if (ret) { - kfree(comp); + devm_kfree(dev, comp); return ERR_PTR(ret); } mdp->comp[id] = comp; @@ -930,7 +931,7 @@ void mdp_comp_destroy(struct mdp_dev *mdp) if (mdp->comp[i]) { pm_runtime_disable(mdp->comp[i]->comp_dev); mdp_comp_deinit(mdp->comp[i]); - kfree(mdp->comp[i]); + devm_kfree(mdp->comp[i]->comp_dev, mdp->comp[i]); mdp->comp[i] = NULL; } } diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c index cde59579b7ae..c413e59d4286 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c @@ -289,7 +289,8 @@ err_deinit_comp: mdp_comp_destroy(mdp); err_return: for (i = 0; i < MDP_PIPE_MAX; i++) - mtk_mutex_put(mdp->mdp_mutex[i]); + if (mdp) + mtk_mutex_put(mdp->mdp_mutex[i]); kfree(mdp); dev_dbg(dev, "Errno %d\n", ret); return ret; diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c index 9f5844385c8f..a72bed927bb6 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c @@ -173,7 +173,8 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp, /* vpu work_size was set in mdp_vpu_ipi_handle_init_ack */ mem_size = vpu_alloc_size; - if (mdp_vpu_shared_mem_alloc(vpu)) { + err = mdp_vpu_shared_mem_alloc(vpu); + if (err) { dev_err(&mdp->pdev->dev, "VPU memory alloc fail!"); goto err_mem_alloc; } diff --git a/drivers/media/platform/nxp/dw100/dw100.c b/drivers/media/platform/nxp/dw100/dw100.c index b3b057798ab6..f6d48c36f386 100644 --- a/drivers/media/platform/nxp/dw100/dw100.c +++ b/drivers/media/platform/nxp/dw100/dw100.c @@ -373,7 +373,7 @@ static const struct v4l2_ctrl_ops dw100_ctrl_ops = { * The coordinates are saved in UQ12.4 fixed point format. 
*/ static void dw100_ctrl_dewarping_map_init(const struct v4l2_ctrl *ctrl, - u32 from_idx, u32 elems, + u32 from_idx, union v4l2_ctrl_ptr ptr) { struct dw100_ctx *ctx = @@ -398,7 +398,7 @@ static void dw100_ctrl_dewarping_map_init(const struct v4l2_ctrl *ctrl, ctx->map_height = mh; ctx->map_size = mh * mw * sizeof(u32); - for (idx = from_idx; idx < elems; idx++) { + for (idx = from_idx; idx < ctrl->elems; idx++) { qy = min_t(u32, (idx / mw) * qdy, qsh); qx = min_t(u32, (idx % mw) * qdx, qsw); map[idx] = dw100_map_format_coordinates(qx, qy); diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index 290df04c4d02..81fb3a5bc1d5 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -493,7 +493,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) struct v4l2_subdev *subdev; int ret; - ret = media_pipeline_start(&vdev->entity, &video->pipe); + ret = video_device_pipeline_start(vdev, &video->pipe); if (ret < 0) return ret; @@ -522,7 +522,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) return 0; error: - media_pipeline_stop(&vdev->entity); + video_device_pipeline_stop(vdev); video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); @@ -553,7 +553,7 @@ static void video_stop_streaming(struct vb2_queue *q) v4l2_subdev_call(subdev, video, s_stream, 0); } - media_pipeline_stop(&vdev->entity); + video_device_pipeline_stop(vdev); video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR); } diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 60de4200375d..ab6a29ffc81e 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -1800,7 +1800,7 @@ bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt) struct venus_core *core = inst->core; u32 fmt = to_hfi_raw_fmt(v4l2_pixfmt); struct hfi_plat_caps *caps; - u32 buftype; + bool found; if (!fmt) return false; @@ -1809,12 +1809,13 @@ bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt) if (!caps) return false; - if (inst->session_type == VIDC_SESSION_TYPE_DEC) - buftype = HFI_BUFFER_OUTPUT2; - else - buftype = HFI_BUFFER_OUTPUT; + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt); + if (found) + goto done; - return find_fmt_from_caps(caps, buftype, fmt); + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt); +done: + return found; } EXPORT_SYMBOL_GPL(venus_helper_check_format); diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c index 1968f09ad177..e00aedb41d16 100644 --- a/drivers/media/platform/qcom/venus/hfi.c +++ b/drivers/media/platform/qcom/venus/hfi.c @@ -569,8 +569,6 @@ irqreturn_t hfi_isr(int irq, void *dev) int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops) { - int ret; - if (!ops) return -EINVAL; @@ -579,9 +577,8 @@ int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops) core->state = CORE_UNINIT; init_completion(&core->done); pkt_set_version(core->res->hfi_version); - ret = venus_hfi_create(core); - return ret; + return venus_hfi_create(core); } void hfi_destroy(struct venus_core *core) diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index ac0bb45d07f4..4ceaba37e2e5 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -183,6 +183,8 @@ 
vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) else return NULL; fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) + return NULL; } pixmp->width = clamp(pixmp->width, frame_width_min(inst), diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 86918aea1d24..cdb12546c4fa 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -192,10 +192,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) pixmp->height = clamp(pixmp->height, frame_height_min(inst), frame_height_max(inst)); - if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { - pixmp->width = ALIGN(pixmp->width, 128); - pixmp->height = ALIGN(pixmp->height, 32); - } + pixmp->width = ALIGN(pixmp->width, 128); + pixmp->height = ALIGN(pixmp->height, 32); pixmp->width = ALIGN(pixmp->width, 2); pixmp->height = ALIGN(pixmp->height, 2); @@ -392,7 +390,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) struct v4l2_fract *timeperframe = &out->timeperframe; u64 us_per_frame, fps; - if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return -EINVAL; @@ -424,7 +422,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct venus_inst *inst = to_inst(file); - if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return -EINVAL; @@ -509,6 +507,19 @@ static int venc_enum_frameintervals(struct file *file, void *fh, return 0; } +static int venc_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_EOS: + return v4l2_event_subscribe(fh, sub, 2, NULL); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + static const struct v4l2_ioctl_ops venc_ioctl_ops = { .vidioc_querycap = venc_querycap, .vidioc_enum_fmt_vid_cap = venc_enum_fmt, @@ -534,8 +545,9 @@ static const struct v4l2_ioctl_ops venc_ioctl_ops = { .vidioc_g_parm = venc_g_parm, .vidioc_enum_framesizes = venc_enum_framesizes, .vidioc_enum_frameintervals = venc_enum_frameintervals, - .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_subscribe_event = venc_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd, }; static int venc_pm_get(struct venus_inst *inst) @@ -686,7 +698,8 @@ static int venc_set_properties(struct venus_inst *inst) return ret; } - if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) { + if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC && + ctr->profile.hevc == V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) { struct hfi_hdr10_pq_sei hdr10; unsigned int c; diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c index ed44e5800759..7468e43800a9 100644 --- a/drivers/media/platform/qcom/venus/venc_ctrls.c +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c @@ -8,6 +8,7 @@ #include "core.h" #include "venc.h" +#include "helpers.h" #define BITRATE_MIN 32000 #define BITRATE_MAX 160000000 @@ -336,8 +337,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) * if we disable 8x8 transform for HP. 
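The venc_try_fmt_common() change above makes the 128/32 alignment unconditional instead of applying it only to the OUTPUT queue. As a quick worked example, a requested 1920x1080 keeps its width, since 1920 is already a multiple of 128, while the height rounds up to ALIGN(1080, 32) = 1088; similarly 1280x720 becomes 1280x736.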
*/ - if (ctrl->val == 0) - return -EINVAL; ctr->h264_8x8_transform = ctrl->val; break; @@ -348,15 +347,41 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } +static int venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct hfi_buffer_requirements bufreq; + enum hfi_version ver = inst->core->res->hfi_version; + int ret; + + switch (ctrl->id) { + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (!ret) + ctrl->val = HFI_BUFREQ_COUNT_MIN(&bufreq, ver); + break; + default: + return -EINVAL; + } + + return 0; +} + static const struct v4l2_ctrl_ops venc_ctrl_ops = { .s_ctrl = venc_op_s_ctrl, + .g_volatile_ctrl = venc_op_g_volatile_ctrl, }; int venc_ctrl_init(struct venus_inst *inst) { int ret; + struct v4l2_ctrl_hdr10_mastering_display p_hdr10_mastering = { + { 34000, 13250, 7500 }, + { 16000, 34500, 3000 }, 15635, 16450, 10000000, 500, + }; + struct v4l2_ctrl_hdr10_cll_info p_hdr10_cll = { 1000, 400 }; - ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 58); + ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 59); if (ret) return ret; @@ -437,6 +462,9 @@ int venc_ctrl_init(struct venus_inst *inst) 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0); v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 4, 11, 1, 4); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX, BITRATE_STEP, BITRATE_DEFAULT); @@ -579,11 +607,11 @@ int venc_ctrl_init(struct venus_inst *inst) v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_COLORIMETRY_HDR10_CLL_INFO, - v4l2_ctrl_ptr_create(NULL)); + v4l2_ctrl_ptr_create(&p_hdr10_cll)); v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY, - v4l2_ctrl_ptr_create(NULL)); + v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering)); v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE, diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c index 968a74234e92..2f7daa853ed8 100644 --- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c +++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c @@ -786,9 +786,8 @@ static int rvin_csi2_link_notify(struct media_link *link, u32 flags, return 0; /* - * Don't allow link changes if any entity in the graph is - * streaming, modifying the CHSEL register fields can disrupt - * running streams. + * Don't allow link changes if any stream in the graph is active as + * modifying the CHSEL register fields can disrupt running streams. 
*/ media_device_for_each_entity(entity, &group->mdev) if (media_entity_is_streaming(entity)) diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c index 8d37fbdc266a..3aea96d85165 100644 --- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c +++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c @@ -1244,8 +1244,6 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd, static int rvin_set_stream(struct rvin_dev *vin, int on) { - struct media_pipeline *pipe; - struct media_device *mdev; struct v4l2_subdev *sd; struct media_pad *pad; int ret; @@ -1265,7 +1263,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on) sd = media_entity_to_v4l2_subdev(pad->entity); if (!on) { - media_pipeline_stop(&vin->vdev.entity); + video_device_pipeline_stop(&vin->vdev); return v4l2_subdev_call(sd, video, s_stream, 0); } @@ -1273,17 +1271,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on) if (ret) return ret; - /* - * The graph lock needs to be taken to protect concurrent - * starts of multiple VIN instances as they might share - * a common subdevice down the line and then should use - * the same pipe. - */ - mdev = vin->vdev.entity.graph_obj.mdev; - mutex_lock(&mdev->graph_mutex); - pipe = sd->entity.pipe ? sd->entity.pipe : &vin->vdev.pipe; - ret = __media_pipeline_start(&vin->vdev.entity, pipe); - mutex_unlock(&mdev->graph_mutex); + ret = video_device_pipeline_alloc_start(&vin->vdev); if (ret) return ret; @@ -1291,7 +1279,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on) if (ret == -ENOIOCTLCMD) ret = 0; if (ret) - media_pipeline_stop(&vin->vdev.entity); + video_device_pipeline_stop(&vin->vdev); return ret; } diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c index df1606b49d77..9d24647c8f32 100644 --- a/drivers/media/platform/renesas/vsp1/vsp1_video.c +++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c @@ -927,7 +927,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq) } mutex_unlock(&pipe->lock); - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); vsp1_video_release_buffers(video); vsp1_video_pipeline_put(pipe); } @@ -1046,7 +1046,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) return PTR_ERR(pipe); } - ret = __media_pipeline_start(&video->video.entity, &pipe->pipe); + ret = __video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) { mutex_unlock(&mdev->graph_mutex); goto err_pipe; @@ -1070,7 +1070,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) return 0; err_stop: - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); err_pipe: vsp1_video_pipeline_put(pipe); return ret; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c index d5904c96ff3f..d4540684ea9a 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c @@ -913,7 +913,7 @@ static void rkisp1_cap_stream_disable(struct rkisp1_capture *cap) * * Call s_stream(false) in the reverse order from * rkisp1_pipeline_stream_enable() and disable the DMA engine. 
- * Should be called before media_pipeline_stop() + * Should be called before video_device_pipeline_stop() */ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap) __must_hold(&cap->rkisp1->stream_lock) @@ -926,7 +926,7 @@ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap) * If the other capture is streaming, isp and sensor nodes shouldn't * be disabled, skip them. */ - if (rkisp1->pipe.streaming_count < 2) + if (rkisp1->pipe.start_count < 2) v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, false); v4l2_subdev_call(&rkisp1->resizer_devs[cap->id].sd, video, s_stream, @@ -937,7 +937,7 @@ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap) * rkisp1_pipeline_stream_enable - enable nodes in the pipeline * * Enable the DMA Engine and call s_stream(true) through the pipeline. - * Should be called after media_pipeline_start() + * Should be called after video_device_pipeline_start() */ static int rkisp1_pipeline_stream_enable(struct rkisp1_capture *cap) __must_hold(&cap->rkisp1->stream_lock) @@ -956,7 +956,7 @@ static int rkisp1_pipeline_stream_enable(struct rkisp1_capture *cap) * If the other capture is streaming, isp and sensor nodes are already * enabled, skip them. */ - if (rkisp1->pipe.streaming_count > 1) + if (rkisp1->pipe.start_count > 1) return 0; ret = v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, true); @@ -994,7 +994,7 @@ static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue) rkisp1_dummy_buf_destroy(cap); - media_pipeline_stop(&node->vdev.entity); + video_device_pipeline_stop(&node->vdev); mutex_unlock(&cap->rkisp1->stream_lock); } @@ -1008,7 +1008,7 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count) mutex_lock(&cap->rkisp1->stream_lock); - ret = media_pipeline_start(entity, &cap->rkisp1->pipe); + ret = video_device_pipeline_start(&cap->vnode.vdev, &cap->rkisp1->pipe); if (ret) { dev_err(cap->rkisp1->dev, "start pipeline failed %d\n", ret); goto err_ret_buffers; @@ -1044,7 +1044,7 @@ err_pipe_pm_put: err_destroy_dummy: rkisp1_dummy_buf_destroy(cap); err_pipeline_stop: - media_pipeline_stop(entity); + video_device_pipeline_stop(&cap->vnode.vdev); err_ret_buffers: rkisp1_return_all_buffers(cap, VB2_BUF_STATE_QUEUED); mutex_unlock(&cap->rkisp1->stream_lock); @@ -1273,11 +1273,12 @@ static int rkisp1_capture_link_validate(struct media_link *link) struct rkisp1_capture *cap = video_get_drvdata(vdev); const struct rkisp1_capture_fmt_cfg *fmt = rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat); - struct v4l2_subdev_format sd_fmt; + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = link->source->index, + }; int ret; - sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; - sd_fmt.pad = link->source->index; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt); if (ret) return ret; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h index 8056997d5c29..a1293c45aae1 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h @@ -378,6 +378,7 @@ struct rkisp1_params { struct v4l2_format vdev_fmt; enum v4l2_quantization quantization; + enum v4l2_ycbcr_encoding ycbcr_encoding; enum rkisp1_fmt_raw_pat_type raw_type; }; @@ -556,17 +557,32 @@ void rkisp1_sd_adjust_crop(struct v4l2_rect *crop, */ const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_code(u32 mbus_code); -/* rkisp1_params_configure - configure the params when stream starts. 
- * This function is called by the isp entity upon stream starts. - * The function applies the initial configuration of the parameters. +/* + * rkisp1_params_pre_configure - Configure the params before stream start * - * @params: pointer to rkisp1_params. + * @params: pointer to rkisp1_params * @bayer_pat: the bayer pattern on the isp video sink pad * @quantization: the quantization configured on the isp's src pad + * @ycbcr_encoding: the ycbcr_encoding configured on the isp's src pad + * + * This function is called by the ISP entity just before the ISP gets started. + * It applies the initial ISP parameters from the first params buffer, but + * skips LSC as it needs to be configured after the ISP is started. + */ +void rkisp1_params_pre_configure(struct rkisp1_params *params, + enum rkisp1_fmt_raw_pat_type bayer_pat, + enum v4l2_quantization quantization, + enum v4l2_ycbcr_encoding ycbcr_encoding); + +/* + * rkisp1_params_post_configure - Configure the params after stream start + * + * @params: pointer to rkisp1_params + * + * This function is called by the ISP entity just after the ISP gets started. + * It applies the initial ISP LSC parameters from the first params buffer. */ -void rkisp1_params_configure(struct rkisp1_params *params, - enum rkisp1_fmt_raw_pat_type bayer_pat, - enum v4l2_quantization quantization); +void rkisp1_params_post_configure(struct rkisp1_params *params); /* rkisp1_params_disable - disable all parameters. * This function is called by the isp entity upon stream start diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c index 383a3ec83ca9..585cf3f53469 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c @@ -231,10 +231,11 @@ static int rkisp1_config_isp(struct rkisp1_isp *isp, struct v4l2_mbus_framefmt *src_frm; src_frm = rkisp1_isp_get_pad_fmt(isp, NULL, - RKISP1_ISP_PAD_SINK_VIDEO, + RKISP1_ISP_PAD_SOURCE_VIDEO, V4L2_SUBDEV_FORMAT_ACTIVE); - rkisp1_params_configure(&rkisp1->params, sink_fmt->bayer_pat, - src_frm->quantization); + rkisp1_params_pre_configure(&rkisp1->params, sink_fmt->bayer_pat, + src_frm->quantization, + src_frm->ycbcr_enc); } return 0; @@ -340,6 +341,9 @@ static void rkisp1_isp_start(struct rkisp1_isp *isp) RKISP1_CIF_ISP_CTRL_ISP_ENABLE | RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE; rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val); + + if (isp->src_fmt->pixel_enc != V4L2_PIXEL_ENC_BAYER) + rkisp1_params_post_configure(&rkisp1->params); } /* ---------------------------------------------------------------------------- @@ -431,12 +435,17 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *sink_fmt, *src_fmt; struct v4l2_rect *sink_crop, *src_crop; + /* Video. 
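The pre/post split documented above exists because the lens shading tables can only be programmed once the ISP is running. Condensed from the rkisp1_config_isp() and rkisp1_isp_start() changes in this patch, with error handling and the other register setup omitted, the resulting start-up order is roughly:

/* Ordering sketch only, not a literal copy of the driver code. */
rkisp1_params_pre_configure(&rkisp1->params, sink_fmt->bayer_pat,
			    src_frm->quantization, src_frm->ycbcr_enc);

/* ... remaining ISP setup ... */

rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL,
	     val | RKISP1_CIF_ISP_CTRL_ISP_ENABLE |
	     RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE);	/* start the ISP */

if (isp->src_fmt->pixel_enc != V4L2_PIXEL_ENC_BAYER)
	rkisp1_params_post_configure(&rkisp1->params);	/* now program LSC */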
*/ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, RKISP1_ISP_PAD_SINK_VIDEO); sink_fmt->width = RKISP1_DEFAULT_WIDTH; sink_fmt->height = RKISP1_DEFAULT_HEIGHT; sink_fmt->field = V4L2_FIELD_NONE; sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT; + sink_fmt->colorspace = V4L2_COLORSPACE_RAW; + sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE; + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; sink_crop = v4l2_subdev_get_try_crop(sd, sd_state, RKISP1_ISP_PAD_SINK_VIDEO); @@ -449,11 +458,16 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd, RKISP1_ISP_PAD_SOURCE_VIDEO); *src_fmt = *sink_fmt; src_fmt->code = RKISP1_DEF_SRC_PAD_FMT; + src_fmt->colorspace = V4L2_COLORSPACE_SRGB; + src_fmt->xfer_func = V4L2_XFER_FUNC_SRGB; + src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; src_crop = v4l2_subdev_get_try_crop(sd, sd_state, RKISP1_ISP_PAD_SOURCE_VIDEO); *src_crop = *sink_crop; + /* Parameters and statistics. */ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, RKISP1_ISP_PAD_SINK_PARAMS); src_fmt = v4l2_subdev_get_try_format(sd, sd_state, @@ -472,40 +486,105 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp, struct v4l2_mbus_framefmt *format, unsigned int which) { - const struct rkisp1_mbus_info *mbus_info; + const struct rkisp1_mbus_info *sink_info; + const struct rkisp1_mbus_info *src_info; + struct v4l2_mbus_framefmt *sink_fmt; struct v4l2_mbus_framefmt *src_fmt; const struct v4l2_rect *src_crop; + bool set_csc; + sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, + RKISP1_ISP_PAD_SINK_VIDEO, which); src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, RKISP1_ISP_PAD_SOURCE_VIDEO, which); src_crop = rkisp1_isp_get_pad_crop(isp, sd_state, RKISP1_ISP_PAD_SOURCE_VIDEO, which); + /* + * Media bus code. The ISP can operate in pass-through mode (Bayer in, + * Bayer out or YUV in, YUV out) or process Bayer data to YUV, but + * can't convert from YUV to Bayer. + */ + sink_info = rkisp1_mbus_info_get_by_code(sink_fmt->code); + src_fmt->code = format->code; - mbus_info = rkisp1_mbus_info_get_by_code(src_fmt->code); - if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SRC)) { + src_info = rkisp1_mbus_info_get_by_code(src_fmt->code); + if (!src_info || !(src_info->direction & RKISP1_ISP_SD_SRC)) { src_fmt->code = RKISP1_DEF_SRC_PAD_FMT; - mbus_info = rkisp1_mbus_info_get_by_code(src_fmt->code); + src_info = rkisp1_mbus_info_get_by_code(src_fmt->code); } - if (which == V4L2_SUBDEV_FORMAT_ACTIVE) - isp->src_fmt = mbus_info; + + if (sink_info->pixel_enc == V4L2_PIXEL_ENC_YUV && + src_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) { + src_fmt->code = sink_fmt->code; + src_info = sink_info; + } + + /* + * The source width and height must be identical to the source crop + * size. + */ src_fmt->width = src_crop->width; src_fmt->height = src_crop->height; /* - * The CSC API is used to allow userspace to force full - * quantization on YUV formats. + * Copy the color space for the sink pad. When converting from Bayer to + * YUV, default to a limited quantization range. 
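The limited-range default described here can be overridden from userspace through the V4L2 CSC API, i.e. by setting V4L2_MBUS_FRAMEFMT_SET_CSC in the format flags when calling VIDIOC_SUBDEV_S_FMT on the ISP source pad; the comment block that follows spells out which fields the driver then honours. A minimal userspace sketch, with a placeholder subdev node and pad number, might look like:

/* Userspace sketch: ask the ISP source pad for full-range YUV.
 * "/dev/v4l-subdev0" and pad 2 are placeholders for the real topology.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

static int request_full_range(void)
{
	struct v4l2_subdev_format fmt;
	int fd, ret;

	fd = open("/dev/v4l-subdev0", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = 2;				/* ISP source pad (placeholder) */

	ret = ioctl(fd, VIDIOC_SUBDEV_G_FMT, &fmt);
	if (!ret) {
		fmt.format.flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
		fmt.format.quantization = V4L2_QUANTIZATION_FULL_RANGE;
		ret = ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt);
	}

	close(fd);
	return ret;
}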
*/ - if (format->flags & V4L2_MBUS_FRAMEFMT_SET_CSC && - format->quantization == V4L2_QUANTIZATION_FULL_RANGE && - mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV) - src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; - else if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV) + src_fmt->colorspace = sink_fmt->colorspace; + src_fmt->xfer_func = sink_fmt->xfer_func; + src_fmt->ycbcr_enc = sink_fmt->ycbcr_enc; + + if (sink_info->pixel_enc == V4L2_PIXEL_ENC_BAYER && + src_info->pixel_enc == V4L2_PIXEL_ENC_YUV) src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; else - src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; + src_fmt->quantization = sink_fmt->quantization; + + /* + * Allow setting the source color space fields when the SET_CSC flag is + * set and the source format is YUV. If the sink format is YUV, don't + * set the color primaries, transfer function or YCbCr encoding as the + * ISP is bypassed in that case and passes YUV data through without + * modifications. + * + * The color primaries and transfer function are configured through the + * cross-talk matrix and tone curve respectively. Settings for those + * hardware blocks are conveyed through the ISP parameters buffer, as + * they need to combine color space information with other image tuning + * characteristics and can't thus be computed by the kernel based on the + * color space. The source pad colorspace and xfer_func fields are thus + * ignored by the driver, but can be set by userspace to propagate + * accurate color space information down the pipeline. + */ + set_csc = format->flags & V4L2_MBUS_FRAMEFMT_SET_CSC; + + if (set_csc && src_info->pixel_enc == V4L2_PIXEL_ENC_YUV) { + if (sink_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) { + if (format->colorspace != V4L2_COLORSPACE_DEFAULT) + src_fmt->colorspace = format->colorspace; + if (format->xfer_func != V4L2_XFER_FUNC_DEFAULT) + src_fmt->xfer_func = format->xfer_func; + if (format->ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT) + src_fmt->ycbcr_enc = format->ycbcr_enc; + } + + if (format->quantization != V4L2_QUANTIZATION_DEFAULT) + src_fmt->quantization = format->quantization; + } *format = *src_fmt; + + /* + * Restore the SET_CSC flag if it was set to indicate support for the + * CSC setting API. + */ + if (set_csc) + format->flags |= V4L2_MBUS_FRAMEFMT_SET_CSC; + + /* Store the source format info when setting the active format. */ + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) + isp->src_fmt = src_info; } static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp, @@ -573,6 +652,7 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp, const struct rkisp1_mbus_info *mbus_info; struct v4l2_mbus_framefmt *sink_fmt; struct v4l2_rect *sink_crop; + bool is_yuv; sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, RKISP1_ISP_PAD_SINK_VIDEO, @@ -593,6 +673,36 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp, RKISP1_ISP_MIN_HEIGHT, RKISP1_ISP_MAX_HEIGHT); + /* + * Adjust the color space fields. Accept any color primaries and + * transfer function for both YUV and Bayer. For YUV any YCbCr encoding + * and quantization range is also accepted. For Bayer formats, the YCbCr + * encoding isn't applicable, and the quantization range can only be + * full. + */ + is_yuv = mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV; + + sink_fmt->colorspace = format->colorspace ? : + (is_yuv ? V4L2_COLORSPACE_SRGB : + V4L2_COLORSPACE_RAW); + sink_fmt->xfer_func = format->xfer_func ? : + V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace); + if (is_yuv) { + sink_fmt->ycbcr_enc = format->ycbcr_enc ? 
: + V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace); + sink_fmt->quantization = format->quantization ? : + V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace, + sink_fmt->ycbcr_enc); + } else { + /* + * The YCbCr encoding isn't applicable for non-YUV formats, but + * V4L2 has no "no encoding" value. Hardcode it to Rec. 601, it + * should be ignored by userspace. + */ + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; + } + *format = *sink_fmt; /* Propagate to in crop */ diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c index 9da7dc1bc690..d8731ebbf479 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c @@ -18,6 +18,8 @@ #define RKISP1_ISP_PARAMS_REQ_BUFS_MIN 2 #define RKISP1_ISP_PARAMS_REQ_BUFS_MAX 8 +#define RKISP1_ISP_DPCC_METHODS_SET(n) \ + (RKISP1_CIF_ISP_DPCC_METHODS_SET_1 + 0x4 * (n)) #define RKISP1_ISP_DPCC_LINE_THRESH(n) \ (RKISP1_CIF_ISP_DPCC_LINE_THRESH_1 + 0x14 * (n)) #define RKISP1_ISP_DPCC_LINE_MAD_FAC(n) \ @@ -56,39 +58,47 @@ static void rkisp1_dpcc_config(struct rkisp1_params *params, unsigned int i; u32 mode; - /* avoid to override the old enable value */ + /* + * The enable bit is controlled in rkisp1_isp_isr_other_config() and + * must be preserved. The grayscale mode should be configured + * automatically based on the media bus code on the ISP sink pad, so + * only the STAGE1_ENABLE bit can be set by userspace. + */ mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE); - mode &= RKISP1_CIF_ISP_DPCC_ENA; - mode |= arg->mode & ~RKISP1_CIF_ISP_DPCC_ENA; + mode &= RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE; + mode |= arg->mode & RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE; rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE, mode); + rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_OUTPUT_MODE, - arg->output_mode); + arg->output_mode & RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_MASK); rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_SET_USE, - arg->set_use); - - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_1, - arg->methods[0].method); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_2, - arg->methods[1].method); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_3, - arg->methods[2].method); + arg->set_use & RKISP1_CIF_ISP_DPCC_SET_USE_MASK); + for (i = 0; i < RKISP1_CIF_ISP_DPCC_METHODS_MAX; i++) { + rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_METHODS_SET(i), + arg->methods[i].method & + RKISP1_CIF_ISP_DPCC_METHODS_SET_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_THRESH(i), - arg->methods[i].line_thresh); + arg->methods[i].line_thresh & + RKISP1_CIF_ISP_DPCC_LINE_THRESH_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_MAD_FAC(i), - arg->methods[i].line_mad_fac); + arg->methods[i].line_mad_fac & + RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_PG_FAC(i), - arg->methods[i].pg_fac); + arg->methods[i].pg_fac & + RKISP1_CIF_ISP_DPCC_PG_FAC_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RND_THRESH(i), - arg->methods[i].rnd_thresh); + arg->methods[i].rnd_thresh & + RKISP1_CIF_ISP_DPCC_RND_THRESH_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RG_FAC(i), - arg->methods[i].rg_fac); + arg->methods[i].rg_fac & + RKISP1_CIF_ISP_DPCC_RG_FAC_MASK); } rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RND_OFFS, - arg->rnd_offs); + arg->rnd_offs & RKISP1_CIF_ISP_DPCC_RND_OFFS_MASK); 
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RO_LIMITS, - arg->ro_limits); + arg->ro_limits & RKISP1_CIF_ISP_DPCC_RO_LIMIT_MASK); } /* ISP black level subtraction interface function */ @@ -188,149 +198,131 @@ static void rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params, const struct rkisp1_cif_isp_lsc_config *pconfig) { - unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data; + struct rkisp1_device *rkisp1 = params->rkisp1; + u32 lsc_status, sram_addr, lsc_table_sel; + unsigned int i, j; - isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS); + lsc_status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_STATUS); /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */ - sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ? + sram_addr = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ? RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 : RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153; - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr); /* program data tables (table size is 9 * 17 = 153) */ for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) { + const __u16 *r_tbl = pconfig->r_data_tbl[i]; + const __u16 *gr_tbl = pconfig->gr_data_tbl[i]; + const __u16 *gb_tbl = pconfig->gb_data_tbl[i]; + const __u16 *b_tbl = pconfig->b_data_tbl[i]; + /* * 17 sectors with 2 values in one DWORD = 9 * DWORDs (2nd value of last DWORD unused) */ for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) { - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], - pconfig->r_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_R_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], - pconfig->gr_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], - pconfig->gb_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], - pconfig->b_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_B_TABLE_DATA, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10( + r_tbl[j], r_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10( + gr_tbl[j], gr_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10( + gb_tbl[j], gb_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10( + b_tbl[j], b_tbl[j + 1])); } - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, - data); - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], 0); - 
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, - data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(r_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(gr_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(gb_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(b_tbl[j], 0)); } - isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ? - RKISP1_CIF_ISP_LSC_TABLE_0 : - RKISP1_CIF_ISP_LSC_TABLE_1; - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, - isp_lsc_table_sel); + + lsc_table_sel = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ? + RKISP1_CIF_ISP_LSC_TABLE_0 : RKISP1_CIF_ISP_LSC_TABLE_1; + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, lsc_table_sel); } static void rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params, const struct rkisp1_cif_isp_lsc_config *pconfig) { - unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data; + struct rkisp1_device *rkisp1 = params->rkisp1; + u32 lsc_status, sram_addr, lsc_table_sel; + unsigned int i, j; - isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS); + lsc_status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_STATUS); /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */ - sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ? - RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 : - RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153; - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr); + sram_addr = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ? 
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 : + RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153; + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr); /* program data tables (table size is 9 * 17 = 153) */ for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) { + const __u16 *r_tbl = pconfig->r_data_tbl[i]; + const __u16 *gr_tbl = pconfig->gr_data_tbl[i]; + const __u16 *gb_tbl = pconfig->gb_data_tbl[i]; + const __u16 *b_tbl = pconfig->b_data_tbl[i]; + /* * 17 sectors with 2 values in one DWORD = 9 * DWORDs (2nd value of last DWORD unused) */ for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) { - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( - pconfig->r_data_tbl[i][j], - pconfig->r_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_R_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( - pconfig->gr_data_tbl[i][j], - pconfig->gr_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( - pconfig->gb_data_tbl[i][j], - pconfig->gb_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( - pconfig->b_data_tbl[i][j], - pconfig->b_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_B_TABLE_DATA, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( + r_tbl[j], r_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( + gr_tbl[j], gr_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( + gb_tbl[j], gb_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( + b_tbl[j], b_tbl[j + 1])); } - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->r_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gr_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gb_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->b_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, - data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(r_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(gr_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(gb_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(b_tbl[j], 0)); } - isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ? - RKISP1_CIF_ISP_LSC_TABLE_0 : - RKISP1_CIF_ISP_LSC_TABLE_1; - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, - isp_lsc_table_sel); + + lsc_table_sel = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ? 
+ RKISP1_CIF_ISP_LSC_TABLE_0 : RKISP1_CIF_ISP_LSC_TABLE_1; + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, lsc_table_sel); } static void rkisp1_lsc_config(struct rkisp1_params *params, const struct rkisp1_cif_isp_lsc_config *arg) { - unsigned int i, data; - u32 lsc_ctrl; + struct rkisp1_device *rkisp1 = params->rkisp1; + u32 lsc_ctrl, data; + unsigned int i; /* To config must be off , store the current status firstly */ - lsc_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_CTRL); + lsc_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_CTRL); rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL, RKISP1_CIF_ISP_LSC_CTRL_ENA); params->ops->lsc_matrix_config(params, arg); @@ -339,38 +331,31 @@ static void rkisp1_lsc_config(struct rkisp1_params *params, /* program x size tables */ data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_size_tbl[i * 2], arg->x_size_tbl[i * 2 + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_XSIZE_01 + i * 4, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_XSIZE(i), data); /* program x grad tables */ - data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_grad_tbl[i * 2], + data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->x_grad_tbl[i * 2], arg->x_grad_tbl[i * 2 + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_XGRAD_01 + i * 4, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_XGRAD(i), data); /* program y size tables */ data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_size_tbl[i * 2], arg->y_size_tbl[i * 2 + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_YSIZE_01 + i * 4, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_YSIZE(i), data); /* program y grad tables */ - data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_grad_tbl[i * 2], + data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->y_grad_tbl[i * 2], arg->y_grad_tbl[i * 2 + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_YGRAD_01 + i * 4, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_YGRAD(i), data); } /* restore the lsc ctrl status */ - if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA) { - rkisp1_param_set_bits(params, - RKISP1_CIF_ISP_LSC_CTRL, + if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_LSC_CTRL, RKISP1_CIF_ISP_LSC_CTRL_ENA); - } else { - rkisp1_param_clear_bits(params, - RKISP1_CIF_ISP_LSC_CTRL, + else + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL, RKISP1_CIF_ISP_LSC_CTRL_ENA); - } } /* ISP Filtering function */ @@ -1066,39 +1051,96 @@ static void rkisp1_ie_enable(struct rkisp1_params *params, bool en) } } -static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range) +static void rkisp1_csm_config(struct rkisp1_params *params) { - static const u16 full_range_coeff[] = { - 0x0026, 0x004b, 0x000f, - 0x01ea, 0x01d6, 0x0040, - 0x0040, 0x01ca, 0x01f6 + struct csm_coeffs { + u16 limited[9]; + u16 full[9]; + }; + static const struct csm_coeffs rec601_coeffs = { + .limited = { + 0x0021, 0x0042, 0x000d, + 0x01ed, 0x01db, 0x0038, + 0x0038, 0x01d1, 0x01f7, + }, + .full = { + 0x0026, 0x004b, 0x000f, + 0x01ea, 0x01d6, 0x0040, + 0x0040, 0x01ca, 0x01f6, + }, }; - static const u16 limited_range_coeff[] = { - 0x0021, 0x0040, 0x000d, - 0x01ed, 0x01db, 0x0038, - 0x0038, 0x01d1, 0x01f7, + static const struct csm_coeffs rec709_coeffs = { + .limited = { + 0x0018, 0x0050, 0x0008, + 0x01f3, 0x01d5, 0x0038, + 0x0038, 0x01cd, 0x01fb, + }, + .full = { + 0x001b, 0x005c, 0x0009, + 0x01f1, 0x01cf, 0x0040, + 0x0040, 0x01c6, 0x01fa, + }, }; + static const struct csm_coeffs rec2020_coeffs = { + .limited = { + 0x001d, 0x004c, 0x0007, + 0x01f0, 0x01d8, 0x0038, + 0x0038, 
0x01cd, 0x01fb, + }, + .full = { + 0x0022, 0x0057, 0x0008, + 0x01ee, 0x01d2, 0x0040, + 0x0040, 0x01c5, 0x01fb, + }, + }; + static const struct csm_coeffs smpte240m_coeffs = { + .limited = { + 0x0018, 0x004f, 0x000a, + 0x01f3, 0x01d5, 0x0038, + 0x0038, 0x01ce, 0x01fa, + }, + .full = { + 0x001b, 0x005a, 0x000b, + 0x01f1, 0x01cf, 0x0040, + 0x0040, 0x01c7, 0x01f9, + }, + }; + + const struct csm_coeffs *coeffs; + const u16 *csm; unsigned int i; - if (full_range) { - for (i = 0; i < ARRAY_SIZE(full_range_coeff); i++) - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_CC_COEFF_0 + i * 4, - full_range_coeff[i]); + switch (params->ycbcr_encoding) { + case V4L2_YCBCR_ENC_601: + default: + coeffs = &rec601_coeffs; + break; + case V4L2_YCBCR_ENC_709: + coeffs = &rec709_coeffs; + break; + case V4L2_YCBCR_ENC_BT2020: + coeffs = &rec2020_coeffs; + break; + case V4L2_YCBCR_ENC_SMPTE240M: + coeffs = &smpte240m_coeffs; + break; + } + if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE) { + csm = coeffs->full; rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA | RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA); } else { - for (i = 0; i < ARRAY_SIZE(limited_range_coeff); i++) - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_CC_COEFF_0 + i * 4, - limited_range_coeff[i]); - + csm = coeffs->limited; rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL, RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA | RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA); } + + for (i = 0; i < 9; i++) + rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CC_COEFF_0 + i * 4, + csm[i]); } /* ISP De-noise Pre-Filter(DPF) function */ @@ -1216,11 +1258,11 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params, if (module_ens & RKISP1_CIF_ISP_MODULE_DPCC) rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPCC_MODE, - RKISP1_CIF_ISP_DPCC_ENA); + RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE); else rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE, - RKISP1_CIF_ISP_DPCC_ENA); + RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE); } /* update bls config */ @@ -1255,22 +1297,6 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params, RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA); } - /* update lsc config */ - if (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC) - rkisp1_lsc_config(params, - &new_params->others.lsc_config); - - if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) { - if (module_ens & RKISP1_CIF_ISP_MODULE_LSC) - rkisp1_param_set_bits(params, - RKISP1_CIF_ISP_LSC_CTRL, - RKISP1_CIF_ISP_LSC_CTRL_ENA); - else - rkisp1_param_clear_bits(params, - RKISP1_CIF_ISP_LSC_CTRL, - RKISP1_CIF_ISP_LSC_CTRL_ENA); - } - /* update awb gains */ if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) params->ops->awb_gain_config(params, &new_params->others.awb_gain_config); @@ -1387,6 +1413,33 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params, } } +static void +rkisp1_isp_isr_lsc_config(struct rkisp1_params *params, + const struct rkisp1_params_cfg *new_params) +{ + unsigned int module_en_update, module_cfg_update, module_ens; + + module_en_update = new_params->module_en_update; + module_cfg_update = new_params->module_cfg_update; + module_ens = new_params->module_ens; + + /* update lsc config */ + if (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC) + rkisp1_lsc_config(params, + &new_params->others.lsc_config); + + if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) { + if (module_ens & RKISP1_CIF_ISP_MODULE_LSC) + rkisp1_param_set_bits(params, + RKISP1_CIF_ISP_LSC_CTRL, + RKISP1_CIF_ISP_LSC_CTRL_ENA); + else + rkisp1_param_clear_bits(params, + 
RKISP1_CIF_ISP_LSC_CTRL, + RKISP1_CIF_ISP_LSC_CTRL_ENA); + } +} + static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params, struct rkisp1_params_cfg *new_params) { @@ -1448,47 +1501,60 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params, } } -static void rkisp1_params_apply_params_cfg(struct rkisp1_params *params, - unsigned int frame_sequence) +static bool rkisp1_params_get_buffer(struct rkisp1_params *params, + struct rkisp1_buffer **buf, + struct rkisp1_params_cfg **cfg) { - struct rkisp1_params_cfg *new_params; - struct rkisp1_buffer *cur_buf = NULL; - if (list_empty(¶ms->params)) - return; - - cur_buf = list_first_entry(¶ms->params, - struct rkisp1_buffer, queue); + return false; - new_params = (struct rkisp1_params_cfg *)vb2_plane_vaddr(&cur_buf->vb.vb2_buf, 0); + *buf = list_first_entry(¶ms->params, struct rkisp1_buffer, queue); + *cfg = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0); - rkisp1_isp_isr_other_config(params, new_params); - rkisp1_isp_isr_meas_config(params, new_params); - - /* update shadow register immediately */ - rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD); + return true; +} - list_del(&cur_buf->queue); +static void rkisp1_params_complete_buffer(struct rkisp1_params *params, + struct rkisp1_buffer *buf, + unsigned int frame_sequence) +{ + list_del(&buf->queue); - cur_buf->vb.sequence = frame_sequence; - vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + buf->vb.sequence = frame_sequence; + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); } void rkisp1_params_isr(struct rkisp1_device *rkisp1) { - /* - * This isr is called when the ISR finishes processing a frame (RKISP1_CIF_ISP_FRAME). - * Configurations performed here will be applied on the next frame. - * Since frame_sequence is updated on the vertical sync signal, we should use - * frame_sequence + 1 here to indicate to userspace on which frame these parameters - * are being applied. - */ - unsigned int frame_sequence = rkisp1->isp.frame_sequence + 1; struct rkisp1_params *params = &rkisp1->params; + struct rkisp1_params_cfg *new_params; + struct rkisp1_buffer *cur_buf; spin_lock(¶ms->config_lock); - rkisp1_params_apply_params_cfg(params, frame_sequence); + if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + goto unlock; + + rkisp1_isp_isr_other_config(params, new_params); + rkisp1_isp_isr_lsc_config(params, new_params); + rkisp1_isp_isr_meas_config(params, new_params); + + /* update shadow register immediately */ + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD); + + /* + * This isr is called when the ISR finishes processing a frame + * (RKISP1_CIF_ISP_FRAME). Configurations performed here will be + * applied on the next frame. Since frame_sequence is updated on the + * vertical sync signal, we should use frame_sequence + 1 here to + * indicate to userspace on which frame these parameters are being + * applied. 
+ */ + rkisp1_params_complete_buffer(params, cur_buf, + rkisp1->isp.frame_sequence + 1); + +unlock: spin_unlock(¶ms->config_lock); } @@ -1531,9 +1597,18 @@ static const struct rkisp1_cif_isp_afc_config rkisp1_afc_params_default_config = 14 }; -static void rkisp1_params_config_parameter(struct rkisp1_params *params) +void rkisp1_params_pre_configure(struct rkisp1_params *params, + enum rkisp1_fmt_raw_pat_type bayer_pat, + enum v4l2_quantization quantization, + enum v4l2_ycbcr_encoding ycbcr_encoding) { struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config; + struct rkisp1_params_cfg *new_params; + struct rkisp1_buffer *cur_buf; + + params->quantization = quantization; + params->ycbcr_encoding = ycbcr_encoding; + params->raw_type = bayer_pat; params->ops->awb_meas_config(params, &rkisp1_awb_params_default_config); params->ops->awb_meas_enable(params, &rkisp1_awb_params_default_config, @@ -1552,27 +1627,55 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params) rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10, rkisp1_hst_params_default_config.mode); - /* set the range */ - if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE) - rkisp1_csm_config(params, true); - else - rkisp1_csm_config(params, false); + rkisp1_csm_config(params); spin_lock_irq(¶ms->config_lock); /* apply the first buffer if there is one already */ - rkisp1_params_apply_params_cfg(params, 0); + if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + goto unlock; + + rkisp1_isp_isr_other_config(params, new_params); + rkisp1_isp_isr_meas_config(params, new_params); + + /* update shadow register immediately */ + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD); + +unlock: spin_unlock_irq(¶ms->config_lock); } -void rkisp1_params_configure(struct rkisp1_params *params, - enum rkisp1_fmt_raw_pat_type bayer_pat, - enum v4l2_quantization quantization) +void rkisp1_params_post_configure(struct rkisp1_params *params) { - params->quantization = quantization; - params->raw_type = bayer_pat; - rkisp1_params_config_parameter(params); + struct rkisp1_params_cfg *new_params; + struct rkisp1_buffer *cur_buf; + + spin_lock_irq(¶ms->config_lock); + + /* + * Apply LSC parameters from the first buffer (if any is already + * available. This must be done after the ISP gets started in the + * ISP8000Nano v18.02 (found in the i.MX8MP) as access to the LSC RAM + * is gated by the ISP_CTRL.ISP_ENABLE bit. As this initialization + * ordering doesn't affect other ISP versions negatively, do so + * unconditionally. 
+ */ + + if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + goto unlock; + + rkisp1_isp_isr_lsc_config(params, new_params); + + /* update shadow register immediately */ + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD); + + rkisp1_params_complete_buffer(params, cur_buf, 0); + +unlock: + spin_unlock_irq(¶ms->config_lock); } /* @@ -1582,7 +1685,7 @@ void rkisp1_params_configure(struct rkisp1_params *params, void rkisp1_params_disable(struct rkisp1_params *params) { rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE, - RKISP1_CIF_ISP_DPCC_ENA); + RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE); rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL, RKISP1_CIF_ISP_LSC_CTRL_ENA); rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL, diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h index dd3e6c38be67..421cc73355db 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h @@ -576,7 +576,7 @@ (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 13)) #define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16)) -#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1) \ +#define RKISP1_CIF_ISP_LSC_SECT_GRAD(v0, v1) \ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16)) /* LSC: ISP_LSC_TABLE_SEL */ @@ -618,19 +618,18 @@ #define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA_READ(x) (((x) >> 11) & 1) /* DPCC */ -/* ISP_DPCC_MODE */ -#define RKISP1_CIF_ISP_DPCC_ENA BIT(0) -#define RKISP1_CIF_ISP_DPCC_MODE_MAX 0x07 -#define RKISP1_CIF_ISP_DPCC_OUTPUTMODE_MAX 0x0F -#define RKISP1_CIF_ISP_DPCC_SETUSE_MAX 0x0F -#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RESERVED 0xFFFFE000 -#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RESERVED 0xFFFF0000 -#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RESERVED 0xFFFFC0C0 -#define RKISP1_CIF_ISP_DPCC_PG_FAC_RESERVED 0xFFFFC0C0 -#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RESERVED 0xFFFF0000 -#define RKISP1_CIF_ISP_DPCC_RG_FAC_RESERVED 0xFFFFC0C0 -#define RKISP1_CIF_ISP_DPCC_RO_LIMIT_RESERVED 0xFFFFF000 -#define RKISP1_CIF_ISP_DPCC_RND_OFFS_RESERVED 0xFFFFF000 +#define RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE BIT(0) +#define RKISP1_CIF_ISP_DPCC_MODE_GRAYSCALE_MODE BIT(1) +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_MASK GENMASK(3, 0) +#define RKISP1_CIF_ISP_DPCC_SET_USE_MASK GENMASK(3, 0) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_MASK 0x00001f1f +#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_MASK 0x0000ffff +#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_MASK 0x00003f3f +#define RKISP1_CIF_ISP_DPCC_PG_FAC_MASK 0x00003f3f +#define RKISP1_CIF_ISP_DPCC_RND_THRESH_MASK 0x0000ffff +#define RKISP1_CIF_ISP_DPCC_RG_FAC_MASK 0x00003f3f +#define RKISP1_CIF_ISP_DPCC_RO_LIMIT_MASK 0x00000fff +#define RKISP1_CIF_ISP_DPCC_RND_OFFS_MASK 0x00000fff /* BLS */ /* ISP_BLS_CTRL */ @@ -1073,22 +1072,10 @@ #define RKISP1_CIF_ISP_LSC_GR_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000018) #define RKISP1_CIF_ISP_LSC_B_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x0000001C) #define RKISP1_CIF_ISP_LSC_GB_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000020) -#define RKISP1_CIF_ISP_LSC_XGRAD_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000024) -#define RKISP1_CIF_ISP_LSC_XGRAD_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000028) -#define RKISP1_CIF_ISP_LSC_XGRAD_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000002C) -#define RKISP1_CIF_ISP_LSC_XGRAD_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000030) -#define RKISP1_CIF_ISP_LSC_YGRAD_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000034) -#define RKISP1_CIF_ISP_LSC_YGRAD_23 
(RKISP1_CIF_ISP_LSC_BASE + 0x00000038) -#define RKISP1_CIF_ISP_LSC_YGRAD_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000003C) -#define RKISP1_CIF_ISP_LSC_YGRAD_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000040) -#define RKISP1_CIF_ISP_LSC_XSIZE_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000044) -#define RKISP1_CIF_ISP_LSC_XSIZE_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000048) -#define RKISP1_CIF_ISP_LSC_XSIZE_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000004C) -#define RKISP1_CIF_ISP_LSC_XSIZE_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000050) -#define RKISP1_CIF_ISP_LSC_YSIZE_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000054) -#define RKISP1_CIF_ISP_LSC_YSIZE_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000058) -#define RKISP1_CIF_ISP_LSC_YSIZE_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000005C) -#define RKISP1_CIF_ISP_LSC_YSIZE_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000060) +#define RKISP1_CIF_ISP_LSC_XGRAD(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000024 + (n) * 4) +#define RKISP1_CIF_ISP_LSC_YGRAD(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000034 + (n) * 4) +#define RKISP1_CIF_ISP_LSC_XSIZE(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000044 + (n) * 4) +#define RKISP1_CIF_ISP_LSC_YSIZE(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000054 + (n) * 4) #define RKISP1_CIF_ISP_LSC_TABLE_SEL (RKISP1_CIF_ISP_LSC_BASE + 0x00000064) #define RKISP1_CIF_ISP_LSC_STATUS (RKISP1_CIF_ISP_LSC_BASE + 0x00000068) diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c index f4caa8f684aa..f76afd8112b2 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c @@ -411,6 +411,10 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd, sink_fmt->height = RKISP1_DEFAULT_HEIGHT; sink_fmt->field = V4L2_FIELD_NONE; sink_fmt->code = RKISP1_DEF_FMT; + sink_fmt->colorspace = V4L2_COLORSPACE_SRGB; + sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB; + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; sink_crop = v4l2_subdev_get_try_crop(sd, sd_state, RKISP1_RSZ_PAD_SINK); @@ -503,6 +507,7 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, const struct rkisp1_mbus_info *mbus_info; struct v4l2_mbus_framefmt *sink_fmt, *src_fmt; struct v4l2_rect *sink_crop; + bool is_yuv; sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK, which); @@ -524,9 +529,6 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, if (which == V4L2_SUBDEV_FORMAT_ACTIVE) rsz->pixel_enc = mbus_info->pixel_enc; - /* Propagete to source pad */ - src_fmt->code = sink_fmt->code; - sink_fmt->width = clamp_t(u32, format->width, RKISP1_ISP_MIN_WIDTH, RKISP1_ISP_MAX_WIDTH); @@ -534,8 +536,45 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, RKISP1_ISP_MIN_HEIGHT, RKISP1_ISP_MAX_HEIGHT); + /* + * Adjust the color space fields. Accept any color primaries and + * transfer function for both YUV and Bayer. For YUV any YCbCr encoding + * and quantization range is also accepted. For Bayer formats, the YCbCr + * encoding isn't applicable, and the quantization range can only be + * full. + */ + is_yuv = mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV; + + sink_fmt->colorspace = format->colorspace ? : + (is_yuv ? V4L2_COLORSPACE_SRGB : + V4L2_COLORSPACE_RAW); + sink_fmt->xfer_func = format->xfer_func ? : + V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace); + if (is_yuv) { + sink_fmt->ycbcr_enc = format->ycbcr_enc ? : + V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace); + sink_fmt->quantization = format->quantization ? 
: + V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace, + sink_fmt->ycbcr_enc); + } else { + /* + * The YCbCr encoding isn't applicable for non-YUV formats, but + * V4L2 has no "no encoding" value. Hardcode it to Rec. 601, it + * should be ignored by userspace. + */ + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; + } + *format = *sink_fmt; + /* Propagate the media bus code and color space to the source pad. */ + src_fmt->code = sink_fmt->code; + src_fmt->colorspace = sink_fmt->colorspace; + src_fmt->xfer_func = sink_fmt->xfer_func; + src_fmt->ycbcr_enc = sink_fmt->ycbcr_enc; + src_fmt->quantization = sink_fmt->quantization; + /* Update sink crop */ rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop, which); } diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c index 03638c8f772d..e3b95a2b7e04 100644 --- a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c +++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c @@ -524,7 +524,7 @@ static int fimc_capture_release(struct file *file) mutex_lock(&fimc->lock); if (close && vc->streaming) { - media_pipeline_stop(&vc->ve.vdev.entity); + video_device_pipeline_stop(&vc->ve.vdev); vc->streaming = false; } @@ -1176,7 +1176,6 @@ static int fimc_cap_streamon(struct file *file, void *priv, { struct fimc_dev *fimc = video_drvdata(file); struct fimc_vid_cap *vc = &fimc->vid_cap; - struct media_entity *entity = &vc->ve.vdev.entity; struct fimc_source_info *si = NULL; struct v4l2_subdev *sd; int ret; @@ -1184,7 +1183,7 @@ static int fimc_cap_streamon(struct file *file, void *priv, if (fimc_capture_active(fimc)) return -EBUSY; - ret = media_pipeline_start(entity, &vc->ve.pipe->mp); + ret = video_device_pipeline_start(&vc->ve.vdev, &vc->ve.pipe->mp); if (ret < 0) return ret; @@ -1218,7 +1217,7 @@ static int fimc_cap_streamon(struct file *file, void *priv, } err_p_stop: - media_pipeline_stop(entity); + video_device_pipeline_stop(&vc->ve.vdev); return ret; } @@ -1234,7 +1233,7 @@ static int fimc_cap_streamoff(struct file *file, void *priv, return ret; if (vc->streaming) { - media_pipeline_stop(&vc->ve.vdev.entity); + video_device_pipeline_stop(&vc->ve.vdev); vc->streaming = false; } diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c index 8f12240b0eb7..f6a302fa8d37 100644 --- a/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c @@ -312,7 +312,7 @@ static int isp_video_release(struct file *file) is_singular_file = v4l2_fh_is_singular_file(file); if (is_singular_file && ivc->streaming) { - media_pipeline_stop(entity); + video_device_pipeline_stop(&ivc->ve.vdev); ivc->streaming = 0; } @@ -490,10 +490,9 @@ static int isp_video_streamon(struct file *file, void *priv, { struct fimc_isp *isp = video_drvdata(file); struct exynos_video_entity *ve = &isp->video_capture.ve; - struct media_entity *me = &ve->vdev.entity; int ret; - ret = media_pipeline_start(me, &ve->pipe->mp); + ret = video_device_pipeline_start(&ve->vdev, &ve->pipe->mp); if (ret < 0) return ret; @@ -508,7 +507,7 @@ static int isp_video_streamon(struct file *file, void *priv, isp->video_capture.streaming = 1; return 0; p_stop: - media_pipeline_stop(me); + video_device_pipeline_stop(&ve->vdev); return ret; } @@ -523,7 +522,7 @@ static int isp_video_streamoff(struct file *file, void *priv, if (ret < 0) return 
ret; - media_pipeline_stop(&video->ve.vdev.entity); + video_device_pipeline_stop(&video->ve.vdev); video->streaming = 0; return 0; } diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c index 41b0a4a5929a..e185a40305a8 100644 --- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c @@ -516,7 +516,7 @@ static int fimc_lite_release(struct file *file) if (v4l2_fh_is_singular_file(file) && atomic_read(&fimc->out_path) == FIMC_IO_DMA) { if (fimc->streaming) { - media_pipeline_stop(entity); + video_device_pipeline_stop(&fimc->ve.vdev); fimc->streaming = false; } fimc_lite_stop_capture(fimc, false); @@ -812,13 +812,12 @@ static int fimc_lite_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_lite *fimc = video_drvdata(file); - struct media_entity *entity = &fimc->ve.vdev.entity; int ret; if (fimc_lite_active(fimc)) return -EBUSY; - ret = media_pipeline_start(entity, &fimc->ve.pipe->mp); + ret = video_device_pipeline_start(&fimc->ve.vdev, &fimc->ve.pipe->mp); if (ret < 0) return ret; @@ -835,7 +834,7 @@ static int fimc_lite_streamon(struct file *file, void *priv, } err_p_stop: - media_pipeline_stop(entity); + video_device_pipeline_stop(&fimc->ve.vdev); return 0; } @@ -849,7 +848,7 @@ static int fimc_lite_streamoff(struct file *file, void *priv, if (ret < 0) return ret; - media_pipeline_stop(&fimc->ve.vdev.entity); + video_device_pipeline_stop(&fimc->ve.vdev); fimc->streaming = false; return 0; } diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c index c2d8f1e425d8..db106ebdf870 100644 --- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c +++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c @@ -848,13 +848,13 @@ static int s3c_camif_streamon(struct file *file, void *priv, if (s3c_vp_active(vp)) return 0; - ret = media_pipeline_start(sensor, camif->m_pipeline); + ret = media_pipeline_start(sensor->pads, camif->m_pipeline); if (ret < 0) return ret; ret = camif_pipeline_validate(camif); if (ret < 0) { - media_pipeline_stop(sensor); + media_pipeline_stop(sensor->pads); return ret; } @@ -878,7 +878,7 @@ static int s3c_camif_streamoff(struct file *file, void *priv, ret = vb2_streamoff(&vp->vb_queue, type); if (ret == 0) - media_pipeline_stop(&camif->sensor.sd->entity); + media_pipeline_stop(camif->sensor.sd->entity.pads); return ret; } diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c index 2ca95ab2b0fe..37458d4d9564 100644 --- a/drivers/media/platform/st/stm32/stm32-dcmi.c +++ b/drivers/media/platform/st/stm32/stm32-dcmi.c @@ -751,7 +751,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) goto err_unlocked; } - ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline); + ret = video_device_pipeline_start(dcmi->vdev, &dcmi->pipeline); if (ret < 0) { dev_err(dcmi->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n", __func__, ret); @@ -865,7 +865,7 @@ err_pipeline_stop: dcmi_pipeline_stop(dcmi); err_media_pipeline_stop: - media_pipeline_stop(&dcmi->vdev->entity); + video_device_pipeline_stop(dcmi->vdev); err_pm_put: pm_runtime_put(dcmi->dev); @@ -892,7 +892,7 @@ static void dcmi_stop_streaming(struct vb2_queue *vq) dcmi_pipeline_stop(dcmi); - media_pipeline_stop(&dcmi->vdev->entity); + video_device_pipeline_stop(dcmi->vdev); 
spin_lock_irq(&dcmi->irqlock); diff --git a/drivers/media/platform/sunxi/sun4i-csi/Kconfig b/drivers/media/platform/sunxi/sun4i-csi/Kconfig index 7960e6836f41..60610c04d6a7 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/Kconfig +++ b/drivers/media/platform/sunxi/sun4i-csi/Kconfig @@ -3,7 +3,7 @@ config VIDEO_SUN4I_CSI tristate "Allwinner A10 CMOS Sensor Interface Support" depends on V4L_PLATFORM_DRIVERS - depends on VIDEO_DEV && COMMON_CLK && HAS_DMA + depends on VIDEO_DEV && COMMON_CLK && RESET_CONTROLLER && HAS_DMA depends on ARCH_SUNXI || COMPILE_TEST select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c index 0912a1b6d525..a3e826a755fc 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c @@ -266,7 +266,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count) goto err_clear_dma_queue; } - ret = media_pipeline_start(&csi->vdev.entity, &csi->vdev.pipe); + ret = video_device_pipeline_alloc_start(&csi->vdev); if (ret < 0) goto err_free_scratch_buffer; @@ -330,7 +330,7 @@ err_disable_device: sun4i_csi_capture_stop(csi); err_disable_pipeline: - media_pipeline_stop(&csi->vdev.entity); + video_device_pipeline_stop(&csi->vdev); err_free_scratch_buffer: dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr, @@ -359,7 +359,7 @@ static void sun4i_csi_stop_streaming(struct vb2_queue *vq) return_all_buffers(csi, VB2_BUF_STATE_ERROR); spin_unlock_irqrestore(&csi->qlock, flags); - media_pipeline_stop(&csi->vdev.entity); + video_device_pipeline_stop(&csi->vdev); dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr, csi->scratch.paddr); diff --git a/drivers/media/platform/sunxi/sun6i-csi/Kconfig b/drivers/media/platform/sunxi/sun6i-csi/Kconfig index 0345901617d4..886006f6a48a 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/Kconfig +++ b/drivers/media/platform/sunxi/sun6i-csi/Kconfig @@ -1,13 +1,15 @@ # SPDX-License-Identifier: GPL-2.0-only config VIDEO_SUN6I_CSI - tristate "Allwinner V3s Camera Sensor Interface driver" - depends on V4L_PLATFORM_DRIVERS - depends on VIDEO_DEV && COMMON_CLK && HAS_DMA + tristate "Allwinner A31 Camera Sensor Interface (CSI) Driver" + depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST + depends on PM && COMMON_CLK && RESET_CONTROLLER && HAS_DMA select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select VIDEOBUF2_DMA_CONTIG - select REGMAP_MMIO select V4L2_FWNODE + select REGMAP_MMIO help - Support for the Allwinner Camera Sensor Interface Controller on V3s. + Support for the Allwinner A31 Camera Sensor Interface (CSI) + controller, also found on other platforms such as the A83T, H3, + V3/V3s or A64. 
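The sun6i_csi.c rework below replaces the open-coded A64 special case (checking the "allwinner,sun50i-a64-csi" compatible before calling clk_set_rate_exclusive) with per-SoC match data, which is what lets the one driver cover the A31, A83T, H3, V3/V3s and A64 listed in the Kconfig help above. As a rough, condensed illustration of that pattern — not part of the patch itself, with the helper name sun6i_csi_clock_setup invented for the sketch and error paths trimmed — the rate selection boils down to:

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/of_device.h>

/* Per-SoC data attached to each compatible via .data in the OF match table. */
struct sun6i_csi_variant {
	unsigned long clock_mod_rate;
};

static const struct sun6i_csi_variant sun6i_a31_csi_variant = {
	.clock_mod_rate = 297000000,
};

static const struct sun6i_csi_variant sun50i_a64_csi_variant = {
	.clock_mod_rate = 300000000,
};

/* Hypothetical helper: pick the module clock rate from the matched variant. */
static int sun6i_csi_clock_setup(struct device *dev, struct clk *clock_mod)
{
	const struct sun6i_csi_variant *variant;

	variant = of_device_get_match_data(dev);
	if (!variant)
		return -EINVAL;

	/* Hold the rate exclusively for as long as the device is bound. */
	return clk_set_rate_exclusive(clock_mod, variant->clock_mod_rate);
}

With this shape, supporting a further SoC that only differs in its module clock rate is a one-line addition to the OF match table rather than another compatible-string check in the clock path, which is the design choice the refactoring in the following hunks makes.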
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c index a971587dbbd1..8b99c17e8403 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c @@ -23,43 +23,27 @@ #include <linux/sched.h> #include <linux/sizes.h> #include <linux/slab.h> +#include <media/v4l2-mc.h> #include "sun6i_csi.h" #include "sun6i_csi_reg.h" -#define MODULE_NAME "sun6i-csi" - -struct sun6i_csi_dev { - struct sun6i_csi csi; - struct device *dev; - - struct regmap *regmap; - struct clk *clk_mod; - struct clk *clk_ram; - struct reset_control *rstc_bus; - - int planar_offset[3]; -}; - -static inline struct sun6i_csi_dev *sun6i_csi_to_dev(struct sun6i_csi *csi) -{ - return container_of(csi, struct sun6i_csi_dev, csi); -} +/* Helpers */ /* TODO add 10&12 bit YUV, RGB support */ -bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, +bool sun6i_csi_is_format_supported(struct sun6i_csi_device *csi_dev, u32 pixformat, u32 mbus_code) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); + struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; /* * Some video receivers have the ability to be compatible with * 8bit and 16bit bus width. * Identify the media bus format from device tree. */ - if ((sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_PARALLEL - || sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_BT656) - && sdev->csi.v4l2_ep.bus.parallel.bus_width == 16) { + if ((v4l2->v4l2_ep.bus_type == V4L2_MBUS_PARALLEL + || v4l2->v4l2_ep.bus_type == V4L2_MBUS_BT656) + && v4l2->v4l2_ep.bus.parallel.bus_width == 16) { switch (pixformat) { case V4L2_PIX_FMT_NV12_16L16: case V4L2_PIX_FMT_NV12: @@ -76,13 +60,14 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, case MEDIA_BUS_FMT_YVYU8_1X16: return true; default: - dev_dbg(sdev->dev, "Unsupported mbus code: 0x%x\n", + dev_dbg(csi_dev->dev, + "Unsupported mbus code: 0x%x\n", mbus_code); break; } break; default: - dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", + dev_dbg(csi_dev->dev, "Unsupported pixformat: 0x%x\n", pixformat); break; } @@ -139,7 +124,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, case MEDIA_BUS_FMT_YVYU8_2X8: return true; default: - dev_dbg(sdev->dev, "Unsupported mbus code: 0x%x\n", + dev_dbg(csi_dev->dev, "Unsupported mbus code: 0x%x\n", mbus_code); break; } @@ -154,67 +139,37 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, return (mbus_code == MEDIA_BUS_FMT_JPEG_1X8); default: - dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat); + dev_dbg(csi_dev->dev, "Unsupported pixformat: 0x%x\n", + pixformat); break; } return false; } -int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) +int sun6i_csi_set_power(struct sun6i_csi_device *csi_dev, bool enable) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); - struct device *dev = sdev->dev; - struct regmap *regmap = sdev->regmap; + struct device *dev = csi_dev->dev; + struct regmap *regmap = csi_dev->regmap; int ret; if (!enable) { regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0); + pm_runtime_put(dev); - clk_disable_unprepare(sdev->clk_ram); - if (of_device_is_compatible(dev->of_node, - "allwinner,sun50i-a64-csi")) - clk_rate_exclusive_put(sdev->clk_mod); - clk_disable_unprepare(sdev->clk_mod); - reset_control_assert(sdev->rstc_bus); return 0; } - ret = clk_prepare_enable(sdev->clk_mod); - if (ret) { - dev_err(sdev->dev, "Enable csi clk err %d\n", ret); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) return ret; - } - - if 
(of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi")) - clk_set_rate_exclusive(sdev->clk_mod, 300000000); - - ret = clk_prepare_enable(sdev->clk_ram); - if (ret) { - dev_err(sdev->dev, "Enable clk_dram_csi clk err %d\n", ret); - goto clk_mod_disable; - } - - ret = reset_control_deassert(sdev->rstc_bus); - if (ret) { - dev_err(sdev->dev, "reset err %d\n", ret); - goto clk_ram_disable; - } regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, CSI_EN_CSI_EN); return 0; - -clk_ram_disable: - clk_disable_unprepare(sdev->clk_ram); -clk_mod_disable: - if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi")) - clk_rate_exclusive_put(sdev->clk_mod); - clk_disable_unprepare(sdev->clk_mod); - return ret; } -static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev, +static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_device *csi_dev, u32 mbus_code, u32 pixformat) { /* non-YUV */ @@ -232,12 +187,13 @@ static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev, } /* not support YUV420 input format yet */ - dev_dbg(sdev->dev, "Select YUV422 as default input format of CSI.\n"); + dev_dbg(csi_dev->dev, "Select YUV422 as default input format of CSI.\n"); return CSI_INPUT_FORMAT_YUV422; } -static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev, - u32 pixformat, u32 field) +static enum csi_output_fmt +get_csi_output_format(struct sun6i_csi_device *csi_dev, u32 pixformat, + u32 field) { bool buf_interlaced = false; @@ -296,14 +252,14 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev, return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8; default: - dev_warn(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat); + dev_warn(csi_dev->dev, "Unsupported pixformat: 0x%x\n", pixformat); break; } return CSI_FIELD_RAW_8; } -static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, +static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_device *csi_dev, u32 mbus_code, u32 pixformat) { /* Input sequence does not apply to non-YUV formats */ @@ -330,7 +286,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, case MEDIA_BUS_FMT_YVYU8_2X8: return CSI_INPUT_SEQ_YVYU; default: - dev_warn(sdev->dev, "Unsupported mbus code: 0x%x\n", + dev_warn(csi_dev->dev, "Unsupported mbus code: 0x%x\n", mbus_code); break; } @@ -352,7 +308,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, case MEDIA_BUS_FMT_YVYU8_2X8: return CSI_INPUT_SEQ_YUYV; default: - dev_warn(sdev->dev, "Unsupported mbus code: 0x%x\n", + dev_warn(csi_dev->dev, "Unsupported mbus code: 0x%x\n", mbus_code); break; } @@ -362,7 +318,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, return CSI_INPUT_SEQ_YUYV; default: - dev_warn(sdev->dev, "Unsupported pixformat: 0x%x, defaulting to YUYV\n", + dev_warn(csi_dev->dev, "Unsupported pixformat: 0x%x, defaulting to YUYV\n", pixformat); break; } @@ -370,23 +326,23 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, return CSI_INPUT_SEQ_YUYV; } -static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev) +static void sun6i_csi_setup_bus(struct sun6i_csi_device *csi_dev) { - struct v4l2_fwnode_endpoint *endpoint = &sdev->csi.v4l2_ep; - struct sun6i_csi *csi = &sdev->csi; + struct v4l2_fwnode_endpoint *endpoint = &csi_dev->v4l2.v4l2_ep; + struct sun6i_csi_config *config = &csi_dev->config; unsigned char bus_width; u32 flags; u32 cfg; bool input_interlaced = false; - if (csi->config.field == 
V4L2_FIELD_INTERLACED - || csi->config.field == V4L2_FIELD_INTERLACED_TB - || csi->config.field == V4L2_FIELD_INTERLACED_BT) + if (config->field == V4L2_FIELD_INTERLACED + || config->field == V4L2_FIELD_INTERLACED_TB + || config->field == V4L2_FIELD_INTERLACED_BT) input_interlaced = true; bus_width = endpoint->bus.parallel.bus_width; - regmap_read(sdev->regmap, CSI_IF_CFG_REG, &cfg); + regmap_read(csi_dev->regmap, CSI_IF_CFG_REG, &cfg); cfg &= ~(CSI_IF_CFG_CSI_IF_MASK | CSI_IF_CFG_MIPI_IF_MASK | CSI_IF_CFG_IF_DATA_WIDTH_MASK | @@ -434,7 +390,7 @@ static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev) cfg |= CSI_IF_CFG_CLK_POL_FALLING_EDGE; break; default: - dev_warn(sdev->dev, "Unsupported bus type: %d\n", + dev_warn(csi_dev->dev, "Unsupported bus type: %d\n", endpoint->bus_type); break; } @@ -452,54 +408,54 @@ static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev) case 16: /* No need to configure DATA_WIDTH for 16bit */ break; default: - dev_warn(sdev->dev, "Unsupported bus width: %u\n", bus_width); + dev_warn(csi_dev->dev, "Unsupported bus width: %u\n", bus_width); break; } - regmap_write(sdev->regmap, CSI_IF_CFG_REG, cfg); + regmap_write(csi_dev->regmap, CSI_IF_CFG_REG, cfg); } -static void sun6i_csi_set_format(struct sun6i_csi_dev *sdev) +static void sun6i_csi_set_format(struct sun6i_csi_device *csi_dev) { - struct sun6i_csi *csi = &sdev->csi; + struct sun6i_csi_config *config = &csi_dev->config; u32 cfg; u32 val; - regmap_read(sdev->regmap, CSI_CH_CFG_REG, &cfg); + regmap_read(csi_dev->regmap, CSI_CH_CFG_REG, &cfg); cfg &= ~(CSI_CH_CFG_INPUT_FMT_MASK | CSI_CH_CFG_OUTPUT_FMT_MASK | CSI_CH_CFG_VFLIP_EN | CSI_CH_CFG_HFLIP_EN | CSI_CH_CFG_FIELD_SEL_MASK | CSI_CH_CFG_INPUT_SEQ_MASK); - val = get_csi_input_format(sdev, csi->config.code, - csi->config.pixelformat); + val = get_csi_input_format(csi_dev, config->code, + config->pixelformat); cfg |= CSI_CH_CFG_INPUT_FMT(val); - val = get_csi_output_format(sdev, csi->config.pixelformat, - csi->config.field); + val = get_csi_output_format(csi_dev, config->pixelformat, + config->field); cfg |= CSI_CH_CFG_OUTPUT_FMT(val); - val = get_csi_input_seq(sdev, csi->config.code, - csi->config.pixelformat); + val = get_csi_input_seq(csi_dev, config->code, + config->pixelformat); cfg |= CSI_CH_CFG_INPUT_SEQ(val); - if (csi->config.field == V4L2_FIELD_TOP) + if (config->field == V4L2_FIELD_TOP) cfg |= CSI_CH_CFG_FIELD_SEL_FIELD0; - else if (csi->config.field == V4L2_FIELD_BOTTOM) + else if (config->field == V4L2_FIELD_BOTTOM) cfg |= CSI_CH_CFG_FIELD_SEL_FIELD1; else cfg |= CSI_CH_CFG_FIELD_SEL_BOTH; - regmap_write(sdev->regmap, CSI_CH_CFG_REG, cfg); + regmap_write(csi_dev->regmap, CSI_CH_CFG_REG, cfg); } -static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) +static void sun6i_csi_set_window(struct sun6i_csi_device *csi_dev) { - struct sun6i_csi_config *config = &sdev->csi.config; + struct sun6i_csi_config *config = &csi_dev->config; u32 bytesperline_y; u32 bytesperline_c; - int *planar_offset = sdev->planar_offset; + int *planar_offset = csi_dev->planar_offset; u32 width = config->width; u32 height = config->height; u32 hor_len = width; @@ -509,7 +465,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) case V4L2_PIX_FMT_YVYU: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_VYUY: - dev_dbg(sdev->dev, + dev_dbg(csi_dev->dev, "Horizontal length should be 2 times of width for packed YUV formats!\n"); hor_len = width * 2; break; @@ -517,10 +473,10 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) break; } - 
regmap_write(sdev->regmap, CSI_CH_HSIZE_REG, + regmap_write(csi_dev->regmap, CSI_CH_HSIZE_REG, CSI_CH_HSIZE_HOR_LEN(hor_len) | CSI_CH_HSIZE_HOR_START(0)); - regmap_write(sdev->regmap, CSI_CH_VSIZE_REG, + regmap_write(csi_dev->regmap, CSI_CH_VSIZE_REG, CSI_CH_VSIZE_VER_LEN(height) | CSI_CH_VSIZE_VER_START(0)); @@ -552,7 +508,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) bytesperline_c * height; break; default: /* raw */ - dev_dbg(sdev->dev, + dev_dbg(csi_dev->dev, "Calculating pixelformat(0x%x)'s bytesperline as a packed format\n", config->pixelformat); bytesperline_y = (sun6i_csi_get_bpp(config->pixelformat) * @@ -563,46 +519,42 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) break; } - regmap_write(sdev->regmap, CSI_CH_BUF_LEN_REG, + regmap_write(csi_dev->regmap, CSI_CH_BUF_LEN_REG, CSI_CH_BUF_LEN_BUF_LEN_C(bytesperline_c) | CSI_CH_BUF_LEN_BUF_LEN_Y(bytesperline_y)); } -int sun6i_csi_update_config(struct sun6i_csi *csi, +int sun6i_csi_update_config(struct sun6i_csi_device *csi_dev, struct sun6i_csi_config *config) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); - if (!config) return -EINVAL; - memcpy(&csi->config, config, sizeof(csi->config)); + memcpy(&csi_dev->config, config, sizeof(csi_dev->config)); - sun6i_csi_setup_bus(sdev); - sun6i_csi_set_format(sdev); - sun6i_csi_set_window(sdev); + sun6i_csi_setup_bus(csi_dev); + sun6i_csi_set_format(csi_dev); + sun6i_csi_set_window(csi_dev); return 0; } -void sun6i_csi_update_buf_addr(struct sun6i_csi *csi, dma_addr_t addr) +void sun6i_csi_update_buf_addr(struct sun6i_csi_device *csi_dev, + dma_addr_t addr) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); - - regmap_write(sdev->regmap, CSI_CH_F0_BUFA_REG, - (addr + sdev->planar_offset[0]) >> 2); - if (sdev->planar_offset[1] != -1) - regmap_write(sdev->regmap, CSI_CH_F1_BUFA_REG, - (addr + sdev->planar_offset[1]) >> 2); - if (sdev->planar_offset[2] != -1) - regmap_write(sdev->regmap, CSI_CH_F2_BUFA_REG, - (addr + sdev->planar_offset[2]) >> 2); + regmap_write(csi_dev->regmap, CSI_CH_F0_BUFA_REG, + (addr + csi_dev->planar_offset[0]) >> 2); + if (csi_dev->planar_offset[1] != -1) + regmap_write(csi_dev->regmap, CSI_CH_F1_BUFA_REG, + (addr + csi_dev->planar_offset[1]) >> 2); + if (csi_dev->planar_offset[2] != -1) + regmap_write(csi_dev->regmap, CSI_CH_F2_BUFA_REG, + (addr + csi_dev->planar_offset[2]) >> 2); } -void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable) +void sun6i_csi_set_stream(struct sun6i_csi_device *csi_dev, bool enable) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); - struct regmap *regmap = sdev->regmap; + struct regmap *regmap = csi_dev->regmap; if (!enable) { regmap_update_bits(regmap, CSI_CAP_REG, CSI_CAP_CH0_VCAP_ON, 0); @@ -623,10 +575,15 @@ void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable) CSI_CAP_CH0_VCAP_ON); } -/* ----------------------------------------------------------------------------- - * Media Controller and V4L2 - */ -static int sun6i_csi_link_entity(struct sun6i_csi *csi, +/* Media */ + +static const struct media_device_ops sun6i_csi_media_ops = { + .link_notify = v4l2_pipeline_link_notify, +}; + +/* V4L2 */ + +static int sun6i_csi_link_entity(struct sun6i_csi_device *csi_dev, struct media_entity *entity, struct fwnode_handle *fwnode) { @@ -637,24 +594,25 @@ static int sun6i_csi_link_entity(struct sun6i_csi *csi, ret = media_entity_get_fwnode_pad(entity, fwnode, MEDIA_PAD_FL_SOURCE); if (ret < 0) { - dev_err(csi->dev, "%s: no source pad in external entity %s\n", - __func__, 
entity->name); + dev_err(csi_dev->dev, + "%s: no source pad in external entity %s\n", __func__, + entity->name); return -EINVAL; } src_pad_index = ret; - sink = &csi->video.vdev.entity; - sink_pad = &csi->video.pad; + sink = &csi_dev->video.video_dev.entity; + sink_pad = &csi_dev->video.pad; - dev_dbg(csi->dev, "creating %s:%u -> %s:%u link\n", + dev_dbg(csi_dev->dev, "creating %s:%u -> %s:%u link\n", entity->name, src_pad_index, sink->name, sink_pad->index); ret = media_create_pad_link(entity, src_pad_index, sink, sink_pad->index, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret < 0) { - dev_err(csi->dev, "failed to create %s:%u -> %s:%u link\n", + dev_err(csi_dev->dev, "failed to create %s:%u -> %s:%u link\n", entity->name, src_pad_index, sink->name, sink_pad->index); return ret; @@ -665,27 +623,29 @@ static int sun6i_csi_link_entity(struct sun6i_csi *csi, static int sun6i_subdev_notify_complete(struct v4l2_async_notifier *notifier) { - struct sun6i_csi *csi = container_of(notifier, struct sun6i_csi, - notifier); - struct v4l2_device *v4l2_dev = &csi->v4l2_dev; + struct sun6i_csi_device *csi_dev = + container_of(notifier, struct sun6i_csi_device, + v4l2.notifier); + struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; + struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev; struct v4l2_subdev *sd; int ret; - dev_dbg(csi->dev, "notify complete, all subdevs registered\n"); + dev_dbg(csi_dev->dev, "notify complete, all subdevs registered\n"); sd = list_first_entry(&v4l2_dev->subdevs, struct v4l2_subdev, list); if (!sd) return -EINVAL; - ret = sun6i_csi_link_entity(csi, &sd->entity, sd->fwnode); + ret = sun6i_csi_link_entity(csi_dev, &sd->entity, sd->fwnode); if (ret < 0) return ret; - ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev); + ret = v4l2_device_register_subdev_nodes(v4l2_dev); if (ret < 0) return ret; - return media_device_register(&csi->media_dev); + return 0; } static const struct v4l2_async_notifier_operations sun6i_csi_async_ops = { @@ -696,7 +656,7 @@ static int sun6i_csi_fwnode_parse(struct device *dev, struct v4l2_fwnode_endpoint *vep, struct v4l2_async_subdev *asd) { - struct sun6i_csi *csi = dev_get_drvdata(dev); + struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev); if (vep->base.port || vep->base.id) { dev_warn(dev, "Only support a single port with one endpoint\n"); @@ -706,7 +666,7 @@ static int sun6i_csi_fwnode_parse(struct device *dev, switch (vep->bus_type) { case V4L2_MBUS_PARALLEL: case V4L2_MBUS_BT656: - csi->v4l2_ep = *vep; + csi_dev->v4l2.v4l2_ep = *vep; return 0; default: dev_err(dev, "Unsupported media bus type\n"); @@ -714,87 +674,102 @@ static int sun6i_csi_fwnode_parse(struct device *dev, } } -static void sun6i_csi_v4l2_cleanup(struct sun6i_csi *csi) -{ - media_device_unregister(&csi->media_dev); - v4l2_async_nf_unregister(&csi->notifier); - v4l2_async_nf_cleanup(&csi->notifier); - sun6i_video_cleanup(&csi->video); - v4l2_device_unregister(&csi->v4l2_dev); - v4l2_ctrl_handler_free(&csi->ctrl_handler); - media_device_cleanup(&csi->media_dev); -} - -static int sun6i_csi_v4l2_init(struct sun6i_csi *csi) +static int sun6i_csi_v4l2_setup(struct sun6i_csi_device *csi_dev) { + struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; + struct media_device *media_dev = &v4l2->media_dev; + struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev; + struct v4l2_async_notifier *notifier = &v4l2->notifier; + struct device *dev = csi_dev->dev; int ret; - csi->media_dev.dev = csi->dev; - strscpy(csi->media_dev.model, "Allwinner Video Capture Device", - sizeof(csi->media_dev.model)); - 
csi->media_dev.hw_revision = 0; + /* Media Device */ + + strscpy(media_dev->model, SUN6I_CSI_DESCRIPTION, + sizeof(media_dev->model)); + media_dev->hw_revision = 0; + media_dev->ops = &sun6i_csi_media_ops; + media_dev->dev = dev; - media_device_init(&csi->media_dev); - v4l2_async_nf_init(&csi->notifier); + media_device_init(media_dev); - ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 0); + ret = media_device_register(media_dev); if (ret) { - dev_err(csi->dev, "V4L2 controls handler init failed (%d)\n", - ret); - goto clean_media; + dev_err(dev, "failed to register media device: %d\n", ret); + goto error_media; } - csi->v4l2_dev.mdev = &csi->media_dev; - csi->v4l2_dev.ctrl_handler = &csi->ctrl_handler; - ret = v4l2_device_register(csi->dev, &csi->v4l2_dev); + /* V4L2 Device */ + + v4l2_dev->mdev = media_dev; + + ret = v4l2_device_register(dev, v4l2_dev); if (ret) { - dev_err(csi->dev, "V4L2 device registration failed (%d)\n", - ret); - goto free_ctrl; + dev_err(dev, "failed to register v4l2 device: %d\n", ret); + goto error_media; } - ret = sun6i_video_init(&csi->video, csi, "sun6i-csi"); + /* Video */ + + ret = sun6i_video_setup(csi_dev); if (ret) - goto unreg_v4l2; + goto error_v4l2_device; - ret = v4l2_async_nf_parse_fwnode_endpoints(csi->dev, - &csi->notifier, + /* V4L2 Async */ + + v4l2_async_nf_init(notifier); + notifier->ops = &sun6i_csi_async_ops; + + ret = v4l2_async_nf_parse_fwnode_endpoints(dev, notifier, sizeof(struct v4l2_async_subdev), sun6i_csi_fwnode_parse); if (ret) - goto clean_video; + goto error_video; - csi->notifier.ops = &sun6i_csi_async_ops; - - ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier); + ret = v4l2_async_nf_register(v4l2_dev, notifier); if (ret) { - dev_err(csi->dev, "notifier registration failed\n"); - goto clean_video; + dev_err(dev, "failed to register v4l2 async notifier: %d\n", + ret); + goto error_v4l2_async_notifier; } return 0; -clean_video: - sun6i_video_cleanup(&csi->video); -unreg_v4l2: - v4l2_device_unregister(&csi->v4l2_dev); -free_ctrl: - v4l2_ctrl_handler_free(&csi->ctrl_handler); -clean_media: - v4l2_async_nf_cleanup(&csi->notifier); - media_device_cleanup(&csi->media_dev); +error_v4l2_async_notifier: + v4l2_async_nf_cleanup(notifier); + +error_video: + sun6i_video_cleanup(csi_dev); + +error_v4l2_device: + v4l2_device_unregister(&v4l2->v4l2_dev); + +error_media: + media_device_unregister(media_dev); + media_device_cleanup(media_dev); return ret; } -/* ----------------------------------------------------------------------------- - * Resources and IRQ - */ -static irqreturn_t sun6i_csi_isr(int irq, void *dev_id) +static void sun6i_csi_v4l2_cleanup(struct sun6i_csi_device *csi_dev) { - struct sun6i_csi_dev *sdev = (struct sun6i_csi_dev *)dev_id; - struct regmap *regmap = sdev->regmap; + struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; + + media_device_unregister(&v4l2->media_dev); + v4l2_async_nf_unregister(&v4l2->notifier); + v4l2_async_nf_cleanup(&v4l2->notifier); + sun6i_video_cleanup(csi_dev); + v4l2_device_unregister(&v4l2->v4l2_dev); + media_device_cleanup(&v4l2->media_dev); +} + +/* Platform */ + +static irqreturn_t sun6i_csi_interrupt(int irq, void *private) +{ + struct sun6i_csi_device *csi_dev = private; + struct regmap *regmap = csi_dev->regmap; u32 status; regmap_read(regmap, CSI_CH_INT_STA_REG, &status); @@ -814,13 +789,63 @@ static irqreturn_t sun6i_csi_isr(int irq, void *dev_id) } if (status & CSI_CH_INT_STA_FD_PD) - sun6i_video_frame_done(&sdev->csi.video); + sun6i_video_frame_done(csi_dev); regmap_write(regmap, 
CSI_CH_INT_STA_REG, status); return IRQ_HANDLED; } +static int sun6i_csi_suspend(struct device *dev) +{ + struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev); + + reset_control_assert(csi_dev->reset); + clk_disable_unprepare(csi_dev->clock_ram); + clk_disable_unprepare(csi_dev->clock_mod); + + return 0; +} + +static int sun6i_csi_resume(struct device *dev) +{ + struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev); + int ret; + + ret = reset_control_deassert(csi_dev->reset); + if (ret) { + dev_err(dev, "failed to deassert reset\n"); + return ret; + } + + ret = clk_prepare_enable(csi_dev->clock_mod); + if (ret) { + dev_err(dev, "failed to enable module clock\n"); + goto error_reset; + } + + ret = clk_prepare_enable(csi_dev->clock_ram); + if (ret) { + dev_err(dev, "failed to enable ram clock\n"); + goto error_clock_mod; + } + + return 0; + +error_clock_mod: + clk_disable_unprepare(csi_dev->clock_mod); + +error_reset: + reset_control_assert(csi_dev->reset); + + return ret; +} + +static const struct dev_pm_ops sun6i_csi_pm_ops = { + .runtime_suspend = sun6i_csi_suspend, + .runtime_resume = sun6i_csi_resume, +}; + static const struct regmap_config sun6i_csi_regmap_config = { .reg_bits = 32, .reg_stride = 4, @@ -828,106 +853,181 @@ static const struct regmap_config sun6i_csi_regmap_config = { .max_register = 0x9c, }; -static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev, - struct platform_device *pdev) +static int sun6i_csi_resources_setup(struct sun6i_csi_device *csi_dev, + struct platform_device *platform_dev) { + struct device *dev = csi_dev->dev; + const struct sun6i_csi_variant *variant; void __iomem *io_base; int ret; int irq; - io_base = devm_platform_ioremap_resource(pdev, 0); + variant = of_device_get_match_data(dev); + if (!variant) + return -EINVAL; + + /* Registers */ + + io_base = devm_platform_ioremap_resource(platform_dev, 0); if (IS_ERR(io_base)) return PTR_ERR(io_base); - sdev->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "bus", io_base, - &sun6i_csi_regmap_config); - if (IS_ERR(sdev->regmap)) { - dev_err(&pdev->dev, "Failed to init register map\n"); - return PTR_ERR(sdev->regmap); + csi_dev->regmap = devm_regmap_init_mmio_clk(dev, "bus", io_base, + &sun6i_csi_regmap_config); + if (IS_ERR(csi_dev->regmap)) { + dev_err(dev, "failed to init register map\n"); + return PTR_ERR(csi_dev->regmap); } - sdev->clk_mod = devm_clk_get(&pdev->dev, "mod"); - if (IS_ERR(sdev->clk_mod)) { - dev_err(&pdev->dev, "Unable to acquire csi clock\n"); - return PTR_ERR(sdev->clk_mod); + /* Clocks */ + + csi_dev->clock_mod = devm_clk_get(dev, "mod"); + if (IS_ERR(csi_dev->clock_mod)) { + dev_err(dev, "failed to acquire module clock\n"); + return PTR_ERR(csi_dev->clock_mod); } - sdev->clk_ram = devm_clk_get(&pdev->dev, "ram"); - if (IS_ERR(sdev->clk_ram)) { - dev_err(&pdev->dev, "Unable to acquire dram-csi clock\n"); - return PTR_ERR(sdev->clk_ram); + csi_dev->clock_ram = devm_clk_get(dev, "ram"); + if (IS_ERR(csi_dev->clock_ram)) { + dev_err(dev, "failed to acquire ram clock\n"); + return PTR_ERR(csi_dev->clock_ram); } - sdev->rstc_bus = devm_reset_control_get_shared(&pdev->dev, NULL); - if (IS_ERR(sdev->rstc_bus)) { - dev_err(&pdev->dev, "Cannot get reset controller\n"); - return PTR_ERR(sdev->rstc_bus); + ret = clk_set_rate_exclusive(csi_dev->clock_mod, + variant->clock_mod_rate); + if (ret) { + dev_err(dev, "failed to set mod clock rate\n"); + return ret; + } + + /* Reset */ + + csi_dev->reset = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(csi_dev->reset)) { + 
dev_err(dev, "failed to acquire reset\n"); + ret = PTR_ERR(csi_dev->reset); + goto error_clock_rate_exclusive; } - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return -ENXIO; + /* Interrupt */ - ret = devm_request_irq(&pdev->dev, irq, sun6i_csi_isr, 0, MODULE_NAME, - sdev); + irq = platform_get_irq(platform_dev, 0); + if (irq < 0) { + dev_err(dev, "failed to get interrupt\n"); + ret = -ENXIO; + goto error_clock_rate_exclusive; + } + + ret = devm_request_irq(dev, irq, sun6i_csi_interrupt, 0, SUN6I_CSI_NAME, + csi_dev); if (ret) { - dev_err(&pdev->dev, "Cannot request csi IRQ\n"); - return ret; + dev_err(dev, "failed to request interrupt\n"); + goto error_clock_rate_exclusive; } + /* Runtime PM */ + + pm_runtime_enable(dev); + return 0; + +error_clock_rate_exclusive: + clk_rate_exclusive_put(csi_dev->clock_mod); + + return ret; +} + +static void sun6i_csi_resources_cleanup(struct sun6i_csi_device *csi_dev) +{ + pm_runtime_disable(csi_dev->dev); + clk_rate_exclusive_put(csi_dev->clock_mod); } -static int sun6i_csi_probe(struct platform_device *pdev) +static int sun6i_csi_probe(struct platform_device *platform_dev) { - struct sun6i_csi_dev *sdev; + struct sun6i_csi_device *csi_dev; + struct device *dev = &platform_dev->dev; int ret; - sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL); - if (!sdev) + csi_dev = devm_kzalloc(dev, sizeof(*csi_dev), GFP_KERNEL); + if (!csi_dev) return -ENOMEM; - sdev->dev = &pdev->dev; + csi_dev->dev = &platform_dev->dev; + platform_set_drvdata(platform_dev, csi_dev); - ret = sun6i_csi_resource_request(sdev, pdev); + ret = sun6i_csi_resources_setup(csi_dev, platform_dev); if (ret) return ret; - platform_set_drvdata(pdev, sdev); + ret = sun6i_csi_v4l2_setup(csi_dev); + if (ret) + goto error_resources; + + return 0; - sdev->csi.dev = &pdev->dev; - return sun6i_csi_v4l2_init(&sdev->csi); +error_resources: + sun6i_csi_resources_cleanup(csi_dev); + + return ret; } static int sun6i_csi_remove(struct platform_device *pdev) { - struct sun6i_csi_dev *sdev = platform_get_drvdata(pdev); + struct sun6i_csi_device *csi_dev = platform_get_drvdata(pdev); - sun6i_csi_v4l2_cleanup(&sdev->csi); + sun6i_csi_v4l2_cleanup(csi_dev); + sun6i_csi_resources_cleanup(csi_dev); return 0; } +static const struct sun6i_csi_variant sun6i_a31_csi_variant = { + .clock_mod_rate = 297000000, +}; + +static const struct sun6i_csi_variant sun50i_a64_csi_variant = { + .clock_mod_rate = 300000000, +}; + static const struct of_device_id sun6i_csi_of_match[] = { - { .compatible = "allwinner,sun6i-a31-csi", }, - { .compatible = "allwinner,sun8i-a83t-csi", }, - { .compatible = "allwinner,sun8i-h3-csi", }, - { .compatible = "allwinner,sun8i-v3s-csi", }, - { .compatible = "allwinner,sun50i-a64-csi", }, + { + .compatible = "allwinner,sun6i-a31-csi", + .data = &sun6i_a31_csi_variant, + }, + { + .compatible = "allwinner,sun8i-a83t-csi", + .data = &sun6i_a31_csi_variant, + }, + { + .compatible = "allwinner,sun8i-h3-csi", + .data = &sun6i_a31_csi_variant, + }, + { + .compatible = "allwinner,sun8i-v3s-csi", + .data = &sun6i_a31_csi_variant, + }, + { + .compatible = "allwinner,sun50i-a64-csi", + .data = &sun50i_a64_csi_variant, + }, {}, }; + MODULE_DEVICE_TABLE(of, sun6i_csi_of_match); static struct platform_driver sun6i_csi_platform_driver = { - .probe = sun6i_csi_probe, - .remove = sun6i_csi_remove, - .driver = { - .name = MODULE_NAME, - .of_match_table = of_match_ptr(sun6i_csi_of_match), + .probe = sun6i_csi_probe, + .remove = sun6i_csi_remove, + .driver = { + .name = SUN6I_CSI_NAME, + .of_match_table 
= of_match_ptr(sun6i_csi_of_match), + .pm = &sun6i_csi_pm_ops, }, }; + module_platform_driver(sun6i_csi_platform_driver); -MODULE_DESCRIPTION("Allwinner V3s Camera Sensor Interface driver"); +MODULE_DESCRIPTION("Allwinner A31 Camera Sensor Interface driver"); MODULE_AUTHOR("Yong Deng <yong.deng@magewell.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h index 3a38d107ae3f..bab705678280 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h @@ -8,13 +8,22 @@ #ifndef __SUN6I_CSI_H__ #define __SUN6I_CSI_H__ -#include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> +#include <media/videobuf2-v4l2.h> #include "sun6i_video.h" -struct sun6i_csi; +#define SUN6I_CSI_NAME "sun6i-csi" +#define SUN6I_CSI_DESCRIPTION "Allwinner A31 CSI Device" + +struct sun6i_csi_buffer { + struct vb2_v4l2_buffer v4l2_buffer; + struct list_head list; + + dma_addr_t dma_addr; + bool queued_to_csi; +}; /** * struct sun6i_csi_config - configs for sun6i csi @@ -32,59 +41,78 @@ struct sun6i_csi_config { u32 height; }; -struct sun6i_csi { - struct device *dev; - struct v4l2_ctrl_handler ctrl_handler; +struct sun6i_csi_v4l2 { struct v4l2_device v4l2_dev; struct media_device media_dev; struct v4l2_async_notifier notifier; - /* video port settings */ struct v4l2_fwnode_endpoint v4l2_ep; +}; - struct sun6i_csi_config config; +struct sun6i_csi_device { + struct device *dev; + struct sun6i_csi_config config; + struct sun6i_csi_v4l2 v4l2; struct sun6i_video video; + + struct regmap *regmap; + struct clk *clock_mod; + struct clk *clock_ram; + struct reset_control *reset; + + int planar_offset[3]; +}; + +struct sun6i_csi_variant { + unsigned long clock_mod_rate; }; /** * sun6i_csi_is_format_supported() - check if the format supported by csi - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @pixformat: v4l2 pixel format (V4L2_PIX_FMT_*) * @mbus_code: media bus format code (MEDIA_BUS_FMT_*) + * + * Return: true if format is supported, false otherwise. */ -bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, u32 pixformat, - u32 mbus_code); +bool sun6i_csi_is_format_supported(struct sun6i_csi_device *csi_dev, + u32 pixformat, u32 mbus_code); /** * sun6i_csi_set_power() - power on/off the csi - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @enable: on/off + * + * Return: 0 if successful, error code otherwise. */ -int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable); +int sun6i_csi_set_power(struct sun6i_csi_device *csi_dev, bool enable); /** * sun6i_csi_update_config() - update the csi register settings - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @config: see struct sun6i_csi_config + * + * Return: 0 if successful, error code otherwise. 
*/ -int sun6i_csi_update_config(struct sun6i_csi *csi, +int sun6i_csi_update_config(struct sun6i_csi_device *csi_dev, struct sun6i_csi_config *config); /** * sun6i_csi_update_buf_addr() - update the csi frame buffer address - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @addr: frame buffer's physical address */ -void sun6i_csi_update_buf_addr(struct sun6i_csi *csi, dma_addr_t addr); +void sun6i_csi_update_buf_addr(struct sun6i_csi_device *csi_dev, + dma_addr_t addr); /** * sun6i_csi_set_stream() - start/stop csi streaming - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @enable: start/stop */ -void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable); +void sun6i_csi_set_stream(struct sun6i_csi_device *csi_dev, bool enable); /* get bpp form v4l2 pixformat */ static inline int sun6i_csi_get_bpp(unsigned int pixformat) diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c index 74d64a20ba5b..791583d23a65 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c @@ -23,15 +23,27 @@ #define MAX_WIDTH (4800) #define MAX_HEIGHT (4800) -struct sun6i_csi_buffer { - struct vb2_v4l2_buffer vb; - struct list_head list; +/* Helpers */ - dma_addr_t dma_addr; - bool queued_to_csi; -}; +static struct v4l2_subdev * +sun6i_video_remote_subdev(struct sun6i_video *video, u32 *pad) +{ + struct media_pad *remote; + + remote = media_pad_remote_pad_first(&video->pad); + + if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) + return NULL; + + if (pad) + *pad = remote->index; -static const u32 supported_pixformats[] = { + return media_entity_to_v4l2_subdev(remote->entity); +} + +/* Format */ + +static const u32 sun6i_video_formats[] = { V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SGBRG8, V4L2_PIX_FMT_SGRBG8, @@ -61,119 +73,138 @@ static const u32 supported_pixformats[] = { V4L2_PIX_FMT_JPEG, }; -static bool is_pixformat_valid(unsigned int pixformat) +static bool sun6i_video_format_check(u32 format) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(supported_pixformats); i++) - if (supported_pixformats[i] == pixformat) + for (i = 0; i < ARRAY_SIZE(sun6i_video_formats); i++) + if (sun6i_video_formats[i] == format) return true; return false; } -static struct v4l2_subdev * -sun6i_video_remote_subdev(struct sun6i_video *video, u32 *pad) -{ - struct media_pad *remote; +/* Video */ - remote = media_pad_remote_pad_first(&video->pad); +static void sun6i_video_buffer_configure(struct sun6i_csi_device *csi_dev, + struct sun6i_csi_buffer *csi_buffer) +{ + csi_buffer->queued_to_csi = true; + sun6i_csi_update_buf_addr(csi_dev, csi_buffer->dma_addr); +} - if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) - return NULL; +static void sun6i_video_configure(struct sun6i_csi_device *csi_dev) +{ + struct sun6i_video *video = &csi_dev->video; + struct sun6i_csi_config config = { 0 }; - if (pad) - *pad = remote->index; + config.pixelformat = video->format.fmt.pix.pixelformat; + config.code = video->mbus_code; + config.field = video->format.fmt.pix.field; + config.width = video->format.fmt.pix.width; + config.height = video->format.fmt.pix.height; - return media_entity_to_v4l2_subdev(remote->entity); + sun6i_csi_update_config(csi_dev, &config); } -static int sun6i_video_queue_setup(struct vb2_queue *vq, - unsigned int *nbuffers, - unsigned int *nplanes, +/* Queue */ + +static int sun6i_video_queue_setup(struct vb2_queue *queue, + unsigned int 
*buffers_count, + unsigned int *planes_count, unsigned int sizes[], struct device *alloc_devs[]) { - struct sun6i_video *video = vb2_get_drv_priv(vq); - unsigned int size = video->fmt.fmt.pix.sizeimage; + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); + struct sun6i_video *video = &csi_dev->video; + unsigned int size = video->format.fmt.pix.sizeimage; - if (*nplanes) + if (*planes_count) return sizes[0] < size ? -EINVAL : 0; - *nplanes = 1; + *planes_count = 1; sizes[0] = size; return 0; } -static int sun6i_video_buffer_prepare(struct vb2_buffer *vb) +static int sun6i_video_buffer_prepare(struct vb2_buffer *buffer) { - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct sun6i_csi_buffer *buf = - container_of(vbuf, struct sun6i_csi_buffer, vb); - struct sun6i_video *video = vb2_get_drv_priv(vb->vb2_queue); - unsigned long size = video->fmt.fmt.pix.sizeimage; - - if (vb2_plane_size(vb, 0) < size) { - v4l2_err(video->vdev.v4l2_dev, "buffer too small (%lu < %lu)\n", - vb2_plane_size(vb, 0), size); + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(buffer->vb2_queue); + struct sun6i_video *video = &csi_dev->video; + struct v4l2_device *v4l2_dev = &csi_dev->v4l2.v4l2_dev; + struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(buffer); + struct sun6i_csi_buffer *csi_buffer = + container_of(v4l2_buffer, struct sun6i_csi_buffer, v4l2_buffer); + unsigned long size = video->format.fmt.pix.sizeimage; + + if (vb2_plane_size(buffer, 0) < size) { + v4l2_err(v4l2_dev, "buffer too small (%lu < %lu)\n", + vb2_plane_size(buffer, 0), size); return -EINVAL; } - vb2_set_plane_payload(vb, 0, size); - - buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + vb2_set_plane_payload(buffer, 0, size); - vbuf->field = video->fmt.fmt.pix.field; + csi_buffer->dma_addr = vb2_dma_contig_plane_dma_addr(buffer, 0); + v4l2_buffer->field = video->format.fmt.pix.field; return 0; } -static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count) +static void sun6i_video_buffer_queue(struct vb2_buffer *buffer) +{ + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(buffer->vb2_queue); + struct sun6i_video *video = &csi_dev->video; + struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(buffer); + struct sun6i_csi_buffer *csi_buffer = + container_of(v4l2_buffer, struct sun6i_csi_buffer, v4l2_buffer); + unsigned long flags; + + spin_lock_irqsave(&video->dma_queue_lock, flags); + csi_buffer->queued_to_csi = false; + list_add_tail(&csi_buffer->list, &video->dma_queue); + spin_unlock_irqrestore(&video->dma_queue_lock, flags); +} + +static int sun6i_video_start_streaming(struct vb2_queue *queue, + unsigned int count) { - struct sun6i_video *video = vb2_get_drv_priv(vq); + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); + struct sun6i_video *video = &csi_dev->video; + struct video_device *video_dev = &video->video_dev; struct sun6i_csi_buffer *buf; struct sun6i_csi_buffer *next_buf; - struct sun6i_csi_config config; struct v4l2_subdev *subdev; unsigned long flags; int ret; video->sequence = 0; - ret = media_pipeline_start(&video->vdev.entity, &video->vdev.pipe); + ret = video_device_pipeline_alloc_start(video_dev); if (ret < 0) - goto clear_dma_queue; + goto error_dma_queue_flush; if (video->mbus_code == 0) { ret = -EINVAL; - goto stop_media_pipeline; + goto error_media_pipeline; } subdev = sun6i_video_remote_subdev(video, NULL); if (!subdev) { ret = -EINVAL; - goto stop_media_pipeline; + goto error_media_pipeline; } - config.pixelformat = video->fmt.fmt.pix.pixelformat; 
- config.code = video->mbus_code; - config.field = video->fmt.fmt.pix.field; - config.width = video->fmt.fmt.pix.width; - config.height = video->fmt.fmt.pix.height; - - ret = sun6i_csi_update_config(video->csi, &config); - if (ret < 0) - goto stop_media_pipeline; + sun6i_video_configure(csi_dev); spin_lock_irqsave(&video->dma_queue_lock, flags); buf = list_first_entry(&video->dma_queue, struct sun6i_csi_buffer, list); - buf->queued_to_csi = true; - sun6i_csi_update_buf_addr(video->csi, buf->dma_addr); + sun6i_video_buffer_configure(csi_dev, buf); - sun6i_csi_set_stream(video->csi, true); + sun6i_csi_set_stream(csi_dev, true); /* * CSI will lookup the next dma buffer for next frame before the @@ -193,34 +224,37 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count) * would also drop frame when lacking of queued buffer. */ next_buf = list_next_entry(buf, list); - next_buf->queued_to_csi = true; - sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr); + sun6i_video_buffer_configure(csi_dev, next_buf); spin_unlock_irqrestore(&video->dma_queue_lock, flags); ret = v4l2_subdev_call(subdev, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD) - goto stop_csi_stream; + goto error_stream; return 0; -stop_csi_stream: - sun6i_csi_set_stream(video->csi, false); -stop_media_pipeline: - media_pipeline_stop(&video->vdev.entity); -clear_dma_queue: +error_stream: + sun6i_csi_set_stream(csi_dev, false); + +error_media_pipeline: + video_device_pipeline_stop(video_dev); + +error_dma_queue_flush: spin_lock_irqsave(&video->dma_queue_lock, flags); list_for_each_entry(buf, &video->dma_queue, list) - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); + vb2_buffer_done(&buf->v4l2_buffer.vb2_buf, + VB2_BUF_STATE_QUEUED); INIT_LIST_HEAD(&video->dma_queue); spin_unlock_irqrestore(&video->dma_queue_lock, flags); return ret; } -static void sun6i_video_stop_streaming(struct vb2_queue *vq) +static void sun6i_video_stop_streaming(struct vb2_queue *queue) { - struct sun6i_video *video = vb2_get_drv_priv(vq); + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); + struct sun6i_video *video = &csi_dev->video; struct v4l2_subdev *subdev; unsigned long flags; struct sun6i_csi_buffer *buf; @@ -229,45 +263,32 @@ static void sun6i_video_stop_streaming(struct vb2_queue *vq) if (subdev) v4l2_subdev_call(subdev, video, s_stream, 0); - sun6i_csi_set_stream(video->csi, false); + sun6i_csi_set_stream(csi_dev, false); - media_pipeline_stop(&video->vdev.entity); + video_device_pipeline_stop(&video->video_dev); /* Release all active buffers */ spin_lock_irqsave(&video->dma_queue_lock, flags); list_for_each_entry(buf, &video->dma_queue, list) - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&buf->v4l2_buffer.vb2_buf, VB2_BUF_STATE_ERROR); INIT_LIST_HEAD(&video->dma_queue); spin_unlock_irqrestore(&video->dma_queue_lock, flags); } -static void sun6i_video_buffer_queue(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct sun6i_csi_buffer *buf = - container_of(vbuf, struct sun6i_csi_buffer, vb); - struct sun6i_video *video = vb2_get_drv_priv(vb->vb2_queue); - unsigned long flags; - - spin_lock_irqsave(&video->dma_queue_lock, flags); - buf->queued_to_csi = false; - list_add_tail(&buf->list, &video->dma_queue); - spin_unlock_irqrestore(&video->dma_queue_lock, flags); -} - -void sun6i_video_frame_done(struct sun6i_video *video) +void sun6i_video_frame_done(struct sun6i_csi_device *csi_dev) { + struct sun6i_video *video = &csi_dev->video; 
struct sun6i_csi_buffer *buf; struct sun6i_csi_buffer *next_buf; - struct vb2_v4l2_buffer *vbuf; + struct vb2_v4l2_buffer *v4l2_buffer; spin_lock(&video->dma_queue_lock); buf = list_first_entry(&video->dma_queue, struct sun6i_csi_buffer, list); if (list_is_last(&buf->list, &video->dma_queue)) { - dev_dbg(video->csi->dev, "Frame dropped!\n"); - goto unlock; + dev_dbg(csi_dev->dev, "Frame dropped!\n"); + goto complete; } next_buf = list_next_entry(buf, list); @@ -277,200 +298,204 @@ void sun6i_video_frame_done(struct sun6i_video *video) * for next ISR call. */ if (!next_buf->queued_to_csi) { - next_buf->queued_to_csi = true; - sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr); - dev_dbg(video->csi->dev, "Frame dropped!\n"); - goto unlock; + sun6i_video_buffer_configure(csi_dev, next_buf); + dev_dbg(csi_dev->dev, "Frame dropped!\n"); + goto complete; } list_del(&buf->list); - vbuf = &buf->vb; - vbuf->vb2_buf.timestamp = ktime_get_ns(); - vbuf->sequence = video->sequence; - vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); + v4l2_buffer = &buf->v4l2_buffer; + v4l2_buffer->vb2_buf.timestamp = ktime_get_ns(); + v4l2_buffer->sequence = video->sequence; + vb2_buffer_done(&v4l2_buffer->vb2_buf, VB2_BUF_STATE_DONE); /* Prepare buffer for next frame but one. */ if (!list_is_last(&next_buf->list, &video->dma_queue)) { next_buf = list_next_entry(next_buf, list); - next_buf->queued_to_csi = true; - sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr); + sun6i_video_buffer_configure(csi_dev, next_buf); } else { - dev_dbg(video->csi->dev, "Next frame will be dropped!\n"); + dev_dbg(csi_dev->dev, "Next frame will be dropped!\n"); } -unlock: +complete: video->sequence++; spin_unlock(&video->dma_queue_lock); } -static const struct vb2_ops sun6i_csi_vb2_ops = { +static const struct vb2_ops sun6i_video_queue_ops = { .queue_setup = sun6i_video_queue_setup, - .wait_prepare = vb2_ops_wait_prepare, - .wait_finish = vb2_ops_wait_finish, .buf_prepare = sun6i_video_buffer_prepare, + .buf_queue = sun6i_video_buffer_queue, .start_streaming = sun6i_video_start_streaming, .stop_streaming = sun6i_video_stop_streaming, - .buf_queue = sun6i_video_buffer_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, }; -static int vidioc_querycap(struct file *file, void *priv, - struct v4l2_capability *cap) +/* V4L2 Device */ + +static int sun6i_video_querycap(struct file *file, void *private, + struct v4l2_capability *capability) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct video_device *video_dev = &csi_dev->video.video_dev; - strscpy(cap->driver, "sun6i-video", sizeof(cap->driver)); - strscpy(cap->card, video->vdev.name, sizeof(cap->card)); - snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", - video->csi->dev->of_node->name); + strscpy(capability->driver, SUN6I_CSI_NAME, sizeof(capability->driver)); + strscpy(capability->card, video_dev->name, sizeof(capability->card)); + snprintf(capability->bus_info, sizeof(capability->bus_info), + "platform:%s", dev_name(csi_dev->dev)); return 0; } -static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_fmtdesc *f) +static int sun6i_video_enum_fmt(struct file *file, void *private, + struct v4l2_fmtdesc *fmtdesc) { - u32 index = f->index; + u32 index = fmtdesc->index; - if (index >= ARRAY_SIZE(supported_pixformats)) + if (index >= ARRAY_SIZE(sun6i_video_formats)) return -EINVAL; - f->pixelformat = supported_pixformats[index]; + 
fmtdesc->pixelformat = sun6i_video_formats[index]; return 0; } -static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *fmt) +static int sun6i_video_g_fmt(struct file *file, void *private, + struct v4l2_format *format) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; - *fmt = video->fmt; + *format = video->format; return 0; } -static int sun6i_video_try_fmt(struct sun6i_video *video, - struct v4l2_format *f) +static int sun6i_video_format_try(struct sun6i_video *video, + struct v4l2_format *format) { - struct v4l2_pix_format *pixfmt = &f->fmt.pix; + struct v4l2_pix_format *pix_format = &format->fmt.pix; int bpp; - if (!is_pixformat_valid(pixfmt->pixelformat)) - pixfmt->pixelformat = supported_pixformats[0]; + if (!sun6i_video_format_check(pix_format->pixelformat)) + pix_format->pixelformat = sun6i_video_formats[0]; - v4l_bound_align_image(&pixfmt->width, MIN_WIDTH, MAX_WIDTH, 1, - &pixfmt->height, MIN_HEIGHT, MAX_WIDTH, 1, 1); + v4l_bound_align_image(&pix_format->width, MIN_WIDTH, MAX_WIDTH, 1, + &pix_format->height, MIN_HEIGHT, MAX_WIDTH, 1, 1); - bpp = sun6i_csi_get_bpp(pixfmt->pixelformat); - pixfmt->bytesperline = (pixfmt->width * bpp) >> 3; - pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height; + bpp = sun6i_csi_get_bpp(pix_format->pixelformat); + pix_format->bytesperline = (pix_format->width * bpp) >> 3; + pix_format->sizeimage = pix_format->bytesperline * pix_format->height; - if (pixfmt->field == V4L2_FIELD_ANY) - pixfmt->field = V4L2_FIELD_NONE; + if (pix_format->field == V4L2_FIELD_ANY) + pix_format->field = V4L2_FIELD_NONE; - if (pixfmt->pixelformat == V4L2_PIX_FMT_JPEG) - pixfmt->colorspace = V4L2_COLORSPACE_JPEG; + if (pix_format->pixelformat == V4L2_PIX_FMT_JPEG) + pix_format->colorspace = V4L2_COLORSPACE_JPEG; else - pixfmt->colorspace = V4L2_COLORSPACE_SRGB; + pix_format->colorspace = V4L2_COLORSPACE_SRGB; - pixfmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; - pixfmt->quantization = V4L2_QUANTIZATION_DEFAULT; - pixfmt->xfer_func = V4L2_XFER_FUNC_DEFAULT; + pix_format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + pix_format->quantization = V4L2_QUANTIZATION_DEFAULT; + pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT; return 0; } -static int sun6i_video_set_fmt(struct sun6i_video *video, struct v4l2_format *f) +static int sun6i_video_format_set(struct sun6i_video *video, + struct v4l2_format *format) { int ret; - ret = sun6i_video_try_fmt(video, f); + ret = sun6i_video_format_try(video, format); if (ret) return ret; - video->fmt = *f; + video->format = *format; return 0; } -static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) +static int sun6i_video_s_fmt(struct file *file, void *private, + struct v4l2_format *format) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; - if (vb2_is_busy(&video->vb2_vidq)) + if (vb2_is_busy(&video->queue)) return -EBUSY; - return sun6i_video_set_fmt(video, f); + return sun6i_video_format_set(video, format); } -static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) +static int sun6i_video_try_fmt(struct file *file, void *private, + struct v4l2_format *format) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; - return 
sun6i_video_try_fmt(video, f); + return sun6i_video_format_try(video, format); } -static int vidioc_enum_input(struct file *file, void *fh, - struct v4l2_input *inp) +static int sun6i_video_enum_input(struct file *file, void *private, + struct v4l2_input *input) { - if (inp->index != 0) + if (input->index != 0) return -EINVAL; - strscpy(inp->name, "camera", sizeof(inp->name)); - inp->type = V4L2_INPUT_TYPE_CAMERA; + input->type = V4L2_INPUT_TYPE_CAMERA; + strscpy(input->name, "Camera", sizeof(input->name)); return 0; } -static int vidioc_g_input(struct file *file, void *fh, unsigned int *i) +static int sun6i_video_g_input(struct file *file, void *private, + unsigned int *index) { - *i = 0; + *index = 0; return 0; } -static int vidioc_s_input(struct file *file, void *fh, unsigned int i) +static int sun6i_video_s_input(struct file *file, void *private, + unsigned int index) { - if (i != 0) + if (index != 0) return -EINVAL; return 0; } static const struct v4l2_ioctl_ops sun6i_video_ioctl_ops = { - .vidioc_querycap = vidioc_querycap, - .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, - .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, - .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, - .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, + .vidioc_querycap = sun6i_video_querycap, + + .vidioc_enum_fmt_vid_cap = sun6i_video_enum_fmt, + .vidioc_g_fmt_vid_cap = sun6i_video_g_fmt, + .vidioc_s_fmt_vid_cap = sun6i_video_s_fmt, + .vidioc_try_fmt_vid_cap = sun6i_video_try_fmt, - .vidioc_enum_input = vidioc_enum_input, - .vidioc_s_input = vidioc_s_input, - .vidioc_g_input = vidioc_g_input, + .vidioc_enum_input = sun6i_video_enum_input, + .vidioc_g_input = sun6i_video_g_input, + .vidioc_s_input = sun6i_video_s_input, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, - .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, - .vidioc_create_bufs = vb2_ioctl_create_bufs, - .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, - - .vidioc_log_status = v4l2_ctrl_log_status, - .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, - .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -/* ----------------------------------------------------------------------------- - * V4L2 file operations - */ +/* V4L2 File */ + static int sun6i_video_open(struct file *file) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; int ret = 0; if (mutex_lock_interruptible(&video->lock)) @@ -478,45 +503,48 @@ static int sun6i_video_open(struct file *file) ret = v4l2_fh_open(file); if (ret < 0) - goto unlock; + goto error_lock; - ret = v4l2_pipeline_pm_get(&video->vdev.entity); + ret = v4l2_pipeline_pm_get(&video->video_dev.entity); if (ret < 0) - goto fh_release; - - /* check if already powered */ - if (!v4l2_fh_is_singular_file(file)) - goto unlock; + goto error_v4l2_fh; - ret = sun6i_csi_set_power(video->csi, true); - if (ret < 0) - goto fh_release; + /* Power on at first open. 
*/ + if (v4l2_fh_is_singular_file(file)) { + ret = sun6i_csi_set_power(csi_dev, true); + if (ret < 0) + goto error_v4l2_fh; + } mutex_unlock(&video->lock); + return 0; -fh_release: +error_v4l2_fh: v4l2_fh_release(file); -unlock: + +error_lock: mutex_unlock(&video->lock); + return ret; } static int sun6i_video_close(struct file *file) { - struct sun6i_video *video = video_drvdata(file); - bool last_fh; + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; + bool last_close; mutex_lock(&video->lock); - last_fh = v4l2_fh_is_singular_file(file); + last_close = v4l2_fh_is_singular_file(file); _vb2_fop_release(file, NULL); + v4l2_pipeline_pm_put(&video->video_dev.entity); - v4l2_pipeline_pm_put(&video->vdev.entity); - - if (last_fh) - sun6i_csi_set_power(video->csi, false); + /* Power off at last close. */ + if (last_close) + sun6i_csi_set_power(csi_dev, false); mutex_unlock(&video->lock); @@ -532,9 +560,8 @@ static const struct v4l2_file_operations sun6i_video_fops = { .poll = vb2_fop_poll }; -/* ----------------------------------------------------------------------------- - * Media Operations - */ +/* Media Entity */ + static int sun6i_video_link_validate_get_format(struct media_pad *pad, struct v4l2_subdev_format *fmt) { @@ -554,15 +581,16 @@ static int sun6i_video_link_validate(struct media_link *link) { struct video_device *vdev = container_of(link->sink->entity, struct video_device, entity); - struct sun6i_video *video = video_get_drvdata(vdev); + struct sun6i_csi_device *csi_dev = video_get_drvdata(vdev); + struct sun6i_video *video = &csi_dev->video; struct v4l2_subdev_format source_fmt; int ret; video->mbus_code = 0; if (!media_pad_remote_pad_first(link->sink->entity->pads)) { - dev_info(video->csi->dev, - "video node %s pad not connected\n", vdev->name); + dev_info(csi_dev->dev, "video node %s pad not connected\n", + vdev->name); return -ENOLINK; } @@ -570,21 +598,21 @@ static int sun6i_video_link_validate(struct media_link *link) if (ret < 0) return ret; - if (!sun6i_csi_is_format_supported(video->csi, - video->fmt.fmt.pix.pixelformat, + if (!sun6i_csi_is_format_supported(csi_dev, + video->format.fmt.pix.pixelformat, source_fmt.format.code)) { - dev_err(video->csi->dev, + dev_err(csi_dev->dev, "Unsupported pixformat: 0x%x with mbus code: 0x%x!\n", - video->fmt.fmt.pix.pixelformat, + video->format.fmt.pix.pixelformat, source_fmt.format.code); return -EPIPE; } - if (source_fmt.format.width != video->fmt.fmt.pix.width || - source_fmt.format.height != video->fmt.fmt.pix.height) { - dev_err(video->csi->dev, + if (source_fmt.format.width != video->format.fmt.pix.width || + source_fmt.format.height != video->format.fmt.pix.height) { + dev_err(csi_dev->dev, "Wrong width or height %ux%u (%ux%u expected)\n", - video->fmt.fmt.pix.width, video->fmt.fmt.pix.height, + video->format.fmt.pix.width, video->format.fmt.pix.height, source_fmt.format.width, source_fmt.format.height); return -EPIPE; } @@ -598,88 +626,108 @@ static const struct media_entity_operations sun6i_video_media_ops = { .link_validate = sun6i_video_link_validate }; -int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi, - const char *name) +/* Video */ + +int sun6i_video_setup(struct sun6i_csi_device *csi_dev) { - struct video_device *vdev = &video->vdev; - struct vb2_queue *vidq = &video->vb2_vidq; - struct v4l2_format fmt = { 0 }; + struct sun6i_video *video = &csi_dev->video; + struct v4l2_device *v4l2_dev = &csi_dev->v4l2.v4l2_dev; + struct video_device *video_dev 
= &video->video_dev; + struct vb2_queue *queue = &video->queue; + struct media_pad *pad = &video->pad; + struct v4l2_format format = { 0 }; + struct v4l2_pix_format *pix_format = &format.fmt.pix; int ret; - video->csi = csi; + /* Media Entity */ - /* Initialize the media entity... */ - video->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; - vdev->entity.ops = &sun6i_video_media_ops; - ret = media_entity_pads_init(&vdev->entity, 1, &video->pad); + video_dev->entity.ops = &sun6i_video_media_ops; + + /* Media Pad */ + + pad->flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; + + ret = media_entity_pads_init(&video_dev->entity, 1, pad); if (ret < 0) return ret; - mutex_init(&video->lock); + /* DMA queue */ INIT_LIST_HEAD(&video->dma_queue); spin_lock_init(&video->dma_queue_lock); video->sequence = 0; - /* Setup default format */ - fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - fmt.fmt.pix.pixelformat = supported_pixformats[0]; - fmt.fmt.pix.width = 1280; - fmt.fmt.pix.height = 720; - fmt.fmt.pix.field = V4L2_FIELD_NONE; - sun6i_video_set_fmt(video, &fmt); - - /* Initialize videobuf2 queue */ - vidq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - vidq->io_modes = VB2_MMAP | VB2_DMABUF; - vidq->drv_priv = video; - vidq->buf_struct_size = sizeof(struct sun6i_csi_buffer); - vidq->ops = &sun6i_csi_vb2_ops; - vidq->mem_ops = &vb2_dma_contig_memops; - vidq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - vidq->lock = &video->lock; - /* Make sure non-dropped frame */ - vidq->min_buffers_needed = 3; - vidq->dev = csi->dev; - - ret = vb2_queue_init(vidq); + /* Queue */ + + mutex_init(&video->lock); + + queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + queue->io_modes = VB2_MMAP | VB2_DMABUF; + queue->buf_struct_size = sizeof(struct sun6i_csi_buffer); + queue->ops = &sun6i_video_queue_ops; + queue->mem_ops = &vb2_dma_contig_memops; + queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + queue->lock = &video->lock; + queue->dev = csi_dev->dev; + queue->drv_priv = csi_dev; + + /* Make sure non-dropped frame. 
*/ + queue->min_buffers_needed = 3; + + ret = vb2_queue_init(queue); if (ret) { - v4l2_err(&csi->v4l2_dev, "vb2_queue_init failed: %d\n", ret); - goto clean_entity; + v4l2_err(v4l2_dev, "failed to initialize vb2 queue: %d\n", ret); + goto error_media_entity; } - /* Register video device */ - strscpy(vdev->name, name, sizeof(vdev->name)); - vdev->release = video_device_release_empty; - vdev->fops = &sun6i_video_fops; - vdev->ioctl_ops = &sun6i_video_ioctl_ops; - vdev->vfl_type = VFL_TYPE_VIDEO; - vdev->vfl_dir = VFL_DIR_RX; - vdev->v4l2_dev = &csi->v4l2_dev; - vdev->queue = vidq; - vdev->lock = &video->lock; - vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; - video_set_drvdata(vdev, video); - - ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); + /* V4L2 Format */ + + format.type = queue->type; + pix_format->pixelformat = sun6i_video_formats[0]; + pix_format->width = 1280; + pix_format->height = 720; + pix_format->field = V4L2_FIELD_NONE; + + sun6i_video_format_set(video, &format); + + /* Video Device */ + + strscpy(video_dev->name, SUN6I_CSI_NAME, sizeof(video_dev->name)); + video_dev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + video_dev->vfl_dir = VFL_DIR_RX; + video_dev->release = video_device_release_empty; + video_dev->fops = &sun6i_video_fops; + video_dev->ioctl_ops = &sun6i_video_ioctl_ops; + video_dev->v4l2_dev = v4l2_dev; + video_dev->queue = queue; + video_dev->lock = &video->lock; + + video_set_drvdata(video_dev, csi_dev); + + ret = video_register_device(video_dev, VFL_TYPE_VIDEO, -1); if (ret < 0) { - v4l2_err(&csi->v4l2_dev, - "video_register_device failed: %d\n", ret); - goto clean_entity; + v4l2_err(v4l2_dev, "failed to register video device: %d\n", + ret); + goto error_media_entity; } return 0; -clean_entity: - media_entity_cleanup(&video->vdev.entity); +error_media_entity: + media_entity_cleanup(&video_dev->entity); + mutex_destroy(&video->lock); + return ret; } -void sun6i_video_cleanup(struct sun6i_video *video) +void sun6i_video_cleanup(struct sun6i_csi_device *csi_dev) { - vb2_video_unregister_device(&video->vdev); - media_entity_cleanup(&video->vdev.entity); + struct sun6i_video *video = &csi_dev->video; + struct video_device *video_dev = &video->video_dev; + + vb2_video_unregister_device(video_dev); + media_entity_cleanup(&video_dev->entity); mutex_destroy(&video->lock); } diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h index b9cd919c24ac..a917d2da6deb 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h @@ -11,28 +11,25 @@ #include <media/v4l2-dev.h> #include <media/videobuf2-core.h> -struct sun6i_csi; +struct sun6i_csi_device; struct sun6i_video { - struct video_device vdev; + struct video_device video_dev; + struct vb2_queue queue; + struct mutex lock; /* Queue lock. */ struct media_pad pad; - struct sun6i_csi *csi; - struct mutex lock; - - struct vb2_queue vb2_vidq; - spinlock_t dma_queue_lock; struct list_head dma_queue; + spinlock_t dma_queue_lock; /* DMA queue lock. 
*/ - unsigned int sequence; - struct v4l2_format fmt; + struct v4l2_format format; u32 mbus_code; + unsigned int sequence; }; -int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi, - const char *name); -void sun6i_video_cleanup(struct sun6i_video *video); +int sun6i_video_setup(struct sun6i_csi_device *csi_dev); +void sun6i_video_cleanup(struct sun6i_csi_device *csi_dev); -void sun6i_video_frame_done(struct sun6i_video *video); +void sun6i_video_frame_done(struct sun6i_csi_device *csi_dev); #endif /* __SUN6I_VIDEO_H__ */ diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig index eb982466abd3..08852f63692b 100644 --- a/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig +++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig @@ -3,11 +3,11 @@ config VIDEO_SUN6I_MIPI_CSI2 tristate "Allwinner A31 MIPI CSI-2 Controller Driver" depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST - depends on PM && COMMON_CLK + depends on PM && COMMON_CLK && RESET_CONTROLLER + depends on PHY_SUN6I_MIPI_DPHY select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select V4L2_FWNODE - select PHY_SUN6I_MIPI_DPHY select GENERIC_PHY_MIPI_DPHY select REGMAP_MMIO help diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c index a4e3f9a6b2ff..30d6c0c5161f 100644 --- a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c +++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c @@ -661,7 +661,8 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev, csi2_dev->reset = devm_reset_control_get_shared(dev, NULL); if (IS_ERR(csi2_dev->reset)) { dev_err(dev, "failed to get reset controller\n"); - return PTR_ERR(csi2_dev->reset); + ret = PTR_ERR(csi2_dev->reset); + goto error_clock_rate_exclusive; } /* D-PHY */ @@ -669,13 +670,14 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev, csi2_dev->dphy = devm_phy_get(dev, "dphy"); if (IS_ERR(csi2_dev->dphy)) { dev_err(dev, "failed to get MIPI D-PHY\n"); - return PTR_ERR(csi2_dev->dphy); + ret = PTR_ERR(csi2_dev->dphy); + goto error_clock_rate_exclusive; } ret = phy_init(csi2_dev->dphy); if (ret) { dev_err(dev, "failed to initialize MIPI D-PHY\n"); - return ret; + goto error_clock_rate_exclusive; } /* Runtime PM */ @@ -683,6 +685,11 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev, pm_runtime_enable(dev); return 0; + +error_clock_rate_exclusive: + clk_rate_exclusive_put(csi2_dev->clock_mod); + + return ret; } static void @@ -712,9 +719,14 @@ static int sun6i_mipi_csi2_probe(struct platform_device *platform_dev) ret = sun6i_mipi_csi2_bridge_setup(csi2_dev); if (ret) - return ret; + goto error_resources; return 0; + +error_resources: + sun6i_mipi_csi2_resources_cleanup(csi2_dev); + + return ret; } static int sun6i_mipi_csi2_remove(struct platform_device *platform_dev) diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig index 789d58ee12ea..47a8c0fb7eb9 100644 --- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig +++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig @@ -3,7 +3,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2 tristate "Allwinner A83T MIPI CSI-2 Controller and D-PHY Driver" depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST - depends on PM && COMMON_CLK + depends on PM 
&& COMMON_CLK && RESET_CONTROLLER select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select V4L2_FWNODE diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c index d052ee77ef0a..b032ec13a683 100644 --- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c +++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c @@ -719,13 +719,15 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de csi2_dev->clock_mipi = devm_clk_get(dev, "mipi"); if (IS_ERR(csi2_dev->clock_mipi)) { dev_err(dev, "failed to acquire mipi clock\n"); - return PTR_ERR(csi2_dev->clock_mipi); + ret = PTR_ERR(csi2_dev->clock_mipi); + goto error_clock_rate_exclusive; } csi2_dev->clock_misc = devm_clk_get(dev, "misc"); if (IS_ERR(csi2_dev->clock_misc)) { dev_err(dev, "failed to acquire misc clock\n"); - return PTR_ERR(csi2_dev->clock_misc); + ret = PTR_ERR(csi2_dev->clock_misc); + goto error_clock_rate_exclusive; } /* Reset */ @@ -733,7 +735,8 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de csi2_dev->reset = devm_reset_control_get_shared(dev, NULL); if (IS_ERR(csi2_dev->reset)) { dev_err(dev, "failed to get reset controller\n"); - return PTR_ERR(csi2_dev->reset); + ret = PTR_ERR(csi2_dev->reset); + goto error_clock_rate_exclusive; } /* D-PHY */ @@ -741,7 +744,7 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de ret = sun8i_a83t_dphy_register(csi2_dev); if (ret) { dev_err(dev, "failed to initialize MIPI D-PHY\n"); - return ret; + goto error_clock_rate_exclusive; } /* Runtime PM */ @@ -749,6 +752,11 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de pm_runtime_enable(dev); return 0; + +error_clock_rate_exclusive: + clk_rate_exclusive_put(csi2_dev->clock_mod); + + return ret; } static void @@ -778,9 +786,14 @@ static int sun8i_a83t_mipi_csi2_probe(struct platform_device *platform_dev) ret = sun8i_a83t_mipi_csi2_bridge_setup(csi2_dev); if (ret) - return ret; + goto error_resources; return 0; + +error_resources: + sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev); + + return ret; } static int sun8i_a83t_mipi_csi2_remove(struct platform_device *platform_dev) diff --git a/drivers/media/platform/sunxi/sun8i-di/Kconfig b/drivers/media/platform/sunxi/sun8i-di/Kconfig index ff71e06ee2df..f688396913b7 100644 --- a/drivers/media/platform/sunxi/sun8i-di/Kconfig +++ b/drivers/media/platform/sunxi/sun8i-di/Kconfig @@ -4,7 +4,7 @@ config VIDEO_SUN8I_DEINTERLACE depends on V4L_MEM2MEM_DRIVERS depends on VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST - depends on COMMON_CLK && OF + depends on COMMON_CLK && RESET_CONTROLLER && OF depends on PM select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV diff --git a/drivers/media/platform/sunxi/sun8i-rotate/Kconfig b/drivers/media/platform/sunxi/sun8i-rotate/Kconfig index cfba29072d75..ee2c1f248c64 100644 --- a/drivers/media/platform/sunxi/sun8i-rotate/Kconfig +++ b/drivers/media/platform/sunxi/sun8i-rotate/Kconfig @@ -5,7 +5,7 @@ config VIDEO_SUN8I_ROTATE depends on V4L_MEM2MEM_DRIVERS depends on VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST - depends on COMMON_CLK && OF + depends on COMMON_CLK && RESET_CONTROLLER && OF depends on PM select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c index 21e3d0aabf70..4eade409d5d3 100644 --- 
a/drivers/media/platform/ti/cal/cal-video.c +++ b/drivers/media/platform/ti/cal/cal-video.c @@ -708,7 +708,7 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) dma_addr_t addr; int ret; - ret = media_pipeline_start(&ctx->vdev.entity, &ctx->phy->pipe); + ret = video_device_pipeline_alloc_start(&ctx->vdev); if (ret < 0) { ctx_err(ctx, "Failed to start media pipeline: %d\n", ret); goto error_release_buffers; @@ -761,7 +761,7 @@ error_stop: cal_ctx_unprepare(ctx); error_pipeline: - media_pipeline_stop(&ctx->vdev.entity); + video_device_pipeline_stop(&ctx->vdev); error_release_buffers: cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED); @@ -782,7 +782,7 @@ static void cal_stop_streaming(struct vb2_queue *vq) cal_release_buffers(ctx, VB2_BUF_STATE_ERROR); - media_pipeline_stop(&ctx->vdev.entity); + video_device_pipeline_stop(&ctx->vdev); } static const struct vb2_ops cal_video_qops = { diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h index 80f2c9c73c71..de73d6d21b6f 100644 --- a/drivers/media/platform/ti/cal/cal.h +++ b/drivers/media/platform/ti/cal/cal.h @@ -174,7 +174,6 @@ struct cal_camerarx { struct device_node *source_ep_node; struct device_node *source_node; struct v4l2_subdev *source; - struct media_pipeline pipe; struct v4l2_subdev subdev; struct media_pad pads[CAL_CAMERARX_NUM_PADS]; diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c index a6052df9bb19..24d2383400b0 100644 --- a/drivers/media/platform/ti/omap3isp/isp.c +++ b/drivers/media/platform/ti/omap3isp/isp.c @@ -937,10 +937,8 @@ static int isp_pipeline_is_last(struct media_entity *me) struct isp_pipeline *pipe; struct media_pad *pad; - if (!me->pipe) - return 0; pipe = to_isp_pipeline(me); - if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED) + if (!pipe || pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED) return 0; pad = media_pad_remote_pad_first(&pipe->output->pad); return pad->entity == me; diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c index cc9a97d5d505..3e5348c63773 100644 --- a/drivers/media/platform/ti/omap3isp/ispvideo.c +++ b/drivers/media/platform/ti/omap3isp/ispvideo.c @@ -1093,8 +1093,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) /* Start streaming on the pipeline. No link touching an entity in the * pipeline can be activated or deactivated once streaming is started. */ - pipe = video->video.entity.pipe - ? to_isp_pipeline(&video->video.entity) : &video->pipe; + pipe = to_isp_pipeline(&video->video.entity) ? 
: &video->pipe; ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev); if (ret) @@ -1104,7 +1103,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]); pipe->max_rate = pipe->l3_ick; - ret = media_pipeline_start(&video->video.entity, &pipe->pipe); + ret = video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) goto err_pipeline_start; @@ -1161,7 +1160,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) return 0; err_check_format: - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); err_pipeline_start: /* TODO: Implement PM QoS */ /* The DMA queue must be emptied here, otherwise CCDC interrupts that @@ -1228,7 +1227,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) video->error = false; /* TODO: Implement PM QoS */ - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); media_entity_enum_cleanup(&pipe->ent_enum); diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.h b/drivers/media/platform/ti/omap3isp/ispvideo.h index a0908670c0cf..1d23df576e6b 100644 --- a/drivers/media/platform/ti/omap3isp/ispvideo.h +++ b/drivers/media/platform/ti/omap3isp/ispvideo.h @@ -99,8 +99,15 @@ struct isp_pipeline { unsigned int external_width; }; -#define to_isp_pipeline(__e) \ - container_of((__e)->pipe, struct isp_pipeline, pipe) +static inline struct isp_pipeline *to_isp_pipeline(struct media_entity *entity) +{ + struct media_pipeline *pipe = media_entity_pipeline(entity); + + if (!pipe) + return NULL; + + return container_of(pipe, struct isp_pipeline, pipe); +} static inline int isp_pipeline_ready(struct isp_pipeline *pipe) { diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c index 2036f72eeb4a..8cb4a68c9119 100644 --- a/drivers/media/platform/verisilicon/hantro_drv.c +++ b/drivers/media/platform/verisilicon/hantro_drv.c @@ -251,6 +251,11 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) static int hantro_try_ctrl(struct v4l2_ctrl *ctrl) { + struct hantro_ctx *ctx; + + ctx = container_of(ctrl->handler, + struct hantro_ctx, ctrl_handler); + if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) { const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps; @@ -266,12 +271,11 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl) } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) { const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps; - if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8) - /* Luma and chroma bit depth mismatch */ - return -EINVAL; - if (sps->bit_depth_luma_minus8 != 0) - /* Only 8-bit is supported */ + if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2) + /* Only 8-bit and 10-bit are supported */ return -EINVAL; + + ctx->bit_depth = sps->bit_depth_luma_minus8 + 8; } else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) { const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame; diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c index 233ecd863d5f..a9d4ac84a8d8 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c @@ -12,7 +12,7 @@ static size_t hantro_hevc_chroma_offset(struct hantro_ctx *ctx) { - return ctx->dst_fmt.width * ctx->dst_fmt.height; + return 
ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8; } static size_t hantro_hevc_motion_vectors_offset(struct hantro_ctx *ctx) @@ -167,8 +167,6 @@ static void set_params(struct hantro_ctx *ctx) hantro_reg_write(vpu, &g2_bit_depth_y_minus8, sps->bit_depth_luma_minus8); hantro_reg_write(vpu, &g2_bit_depth_c_minus8, sps->bit_depth_chroma_minus8); - hantro_reg_write(vpu, &g2_output_8_bits, 0); - hantro_reg_write(vpu, &g2_hdr_skip_length, compute_header_skip_length(ctx)); min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3; diff --git a/drivers/media/platform/verisilicon/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c index b990bc98164c..9383fb7081f6 100644 --- a/drivers/media/platform/verisilicon/hantro_hevc.c +++ b/drivers/media/platform/verisilicon/hantro_hevc.c @@ -104,7 +104,7 @@ static int tile_buffer_reallocate(struct hantro_ctx *ctx) hevc_dec->tile_bsd.cpu = NULL; } - size = VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1); + size = (VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8; hevc_dec->tile_filter.cpu = dma_alloc_coherent(vpu->dev, size, &hevc_dec->tile_filter.dma, GFP_KERNEL); @@ -112,7 +112,7 @@ static int tile_buffer_reallocate(struct hantro_ctx *ctx) goto err_free_tile_buffers; hevc_dec->tile_filter.size = size; - size = VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1); + size = (VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8; hevc_dec->tile_sao.cpu = dma_alloc_coherent(vpu->dev, size, &hevc_dec->tile_sao.dma, GFP_KERNEL); diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c index a0928c508434..09d8cf942689 100644 --- a/drivers/media/platform/verisilicon/hantro_postproc.c +++ b/drivers/media/platform/verisilicon/hantro_postproc.c @@ -114,6 +114,7 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx) struct hantro_dev *vpu = ctx->dev; struct vb2_v4l2_buffer *dst_buf; int down_scale = down_scale_factor(ctx); + int out_depth; size_t chroma_offset; dma_addr_t dst_dma; @@ -132,8 +133,9 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx) hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma); hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset); } + + out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat); if (ctx->dev->variant->legacy_regs) { - int out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat); u8 pp_shift = 0; if (out_depth > 8) @@ -141,6 +143,9 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx) hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, out_depth); hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift); + } else { + hantro_reg_write(vpu, &g2_output_8_bits, out_depth > 8 ? 0 : 1); + hantro_reg_write(vpu, &g2_output_format, out_depth > 8 ? 
1 : 0); } hantro_reg_write(vpu, &g2_out_rs_e, 1); } diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c index 77f574fdfa77..b390228fd3b4 100644 --- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c +++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c @@ -162,12 +162,39 @@ static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = { .step_height = MB_DIM, }, }, + { + .fourcc = V4L2_PIX_FMT_P010, + .codec_mode = HANTRO_MODE_NONE, + .postprocessed = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = MB_DIM, + }, + }, }; static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12_4L4, .codec_mode = HANTRO_MODE_NONE, + .match_depth = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = TILE_MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = TILE_MB_DIM, + }, + }, + { + .fourcc = V4L2_PIX_FMT_P010_4L4, + .codec_mode = HANTRO_MODE_NONE, + .match_depth = true, .frmsize = { .min_width = FMT_MIN_WIDTH, .max_width = FMT_UHD_WIDTH, diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c index 2d1ef7a25c33..0a7fd8642a65 100644 --- a/drivers/media/platform/xilinx/xilinx-dma.c +++ b/drivers/media/platform/xilinx/xilinx-dma.c @@ -402,10 +402,9 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count) * Use the pipeline object embedded in the first DMA object that starts * streaming. */ - pipe = dma->video.entity.pipe - ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe; + pipe = to_xvip_pipeline(&dma->video) ? : &dma->pipe; - ret = media_pipeline_start(&dma->video.entity, &pipe->pipe); + ret = video_device_pipeline_start(&dma->video, &pipe->pipe); if (ret < 0) goto error; @@ -431,7 +430,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count) return 0; error_stop: - media_pipeline_stop(&dma->video.entity); + video_device_pipeline_stop(&dma->video); error: /* Give back all queued buffers to videobuf2. */ @@ -448,7 +447,7 @@ error: static void xvip_dma_stop_streaming(struct vb2_queue *vq) { struct xvip_dma *dma = vb2_get_drv_priv(vq); - struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity); + struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video); struct xvip_dma_buffer *buf, *nbuf; /* Stop the pipeline. */ @@ -459,7 +458,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq) /* Cleanup the pipeline and mark it as being stopped. */ xvip_pipeline_cleanup(pipe); - media_pipeline_stop(&dma->video.entity); + video_device_pipeline_stop(&dma->video); /* Give back all queued buffers to videobuf2. 
*/ spin_lock_irq(&dma->queued_lock); diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h index 2378bdae57ae..9c6d4c18d1a9 100644 --- a/drivers/media/platform/xilinx/xilinx-dma.h +++ b/drivers/media/platform/xilinx/xilinx-dma.h @@ -45,9 +45,14 @@ struct xvip_pipeline { struct xvip_dma *output; }; -static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e) +static inline struct xvip_pipeline *to_xvip_pipeline(struct video_device *vdev) { - return container_of(e->pipe, struct xvip_pipeline, pipe); + struct media_pipeline *pipe = video_device_pipeline(vdev); + + if (!pipe) + return NULL; + + return container_of(pipe, struct xvip_pipeline, pipe); } /** diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 0bf99e1cd1d8..171f9cc9ee5e 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -1072,7 +1072,6 @@ done: static int si476x_radio_fops_release(struct file *file) { - int err; struct si476x_radio *radio = video_drvdata(file); if (v4l2_fh_is_singular_file(file) && @@ -1080,9 +1079,7 @@ static int si476x_radio_fops_release(struct file *file) si476x_core_set_power_state(radio->core, SI476X_POWER_DOWN); - err = v4l2_fh_release(file); - - return err; + return v4l2_fh_release(file); } static ssize_t si476x_radio_fops_read(struct file *file, char __user *buf, diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c index 2aec642133a1..93d847c294e8 100644 --- a/drivers/media/radio/si4713/si4713.c +++ b/drivers/media/radio/si4713/si4713.c @@ -14,7 +14,7 @@ #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/slab.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 735b925da998..5edfd8a9e849 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -684,7 +684,6 @@ static int send_packet(struct imon_context *ictx) */ static int send_associate_24g(struct imon_context *ictx) { - int retval; const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20 }; @@ -699,9 +698,8 @@ static int send_associate_24g(struct imon_context *ictx) } memcpy(ictx->usb_tx_buf, packet, sizeof(packet)); - retval = send_packet(ictx); - return retval; + return send_packet(ictx); } /* diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 39d2b03e2631..c76ba24c1f55 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1077,7 +1077,7 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout) struct mceusb_dev *ir = dev->priv; unsigned int units; - units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT); + units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT); cmdbuf[2] = units >> 8; cmdbuf[3] = units; diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c index 6c437802f91f..aa944270e716 100644 --- a/drivers/media/test-drivers/vimc/vimc-capture.c +++ b/drivers/media/test-drivers/vimc/vimc-capture.c @@ -241,13 +241,12 @@ static void vimc_capture_return_all_buffers(struct vimc_capture_device *vcapture static int vimc_capture_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq); - struct media_entity *entity = &vcapture->vdev.entity; int ret; vcapture->sequence = 0; /* Start the media pipeline */ - 
ret = media_pipeline_start(entity, &vcapture->stream.pipe); + ret = video_device_pipeline_start(&vcapture->vdev, &vcapture->stream.pipe); if (ret) { vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED); return ret; @@ -255,7 +254,7 @@ static int vimc_capture_start_streaming(struct vb2_queue *vq, unsigned int count ret = vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 1); if (ret) { - media_pipeline_stop(entity); + video_device_pipeline_stop(&vcapture->vdev); vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED); return ret; } @@ -274,7 +273,7 @@ static void vimc_capture_stop_streaming(struct vb2_queue *vq) vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 0); /* Stop the media pipeline */ - media_pipeline_stop(&vcapture->vdev.entity); + video_device_pipeline_stop(&vcapture->vdev); /* Release all active buffers */ vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_ERROR); diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index a04dfd5799f7..d59b4ab77430 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -282,15 +282,13 @@ static int xc4000_tuner_reset(struct dvb_frontend *fe) static int xc_write_reg(struct xc4000_priv *priv, u16 regAddr, u16 i2cData) { u8 buf[4]; - int result; buf[0] = (regAddr >> 8) & 0xFF; buf[1] = regAddr & 0xFF; buf[2] = (i2cData >> 8) & 0xFF; buf[3] = i2cData & 0xFF; - result = xc_send_i2c_data(priv, buf, 4); - return result; + return xc_send_i2c_data(priv, buf, 4); } static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence) diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c index caefac07af92..877e85a451cb 100644 --- a/drivers/media/usb/au0828/au0828-core.c +++ b/drivers/media/usb/au0828/au0828-core.c @@ -410,7 +410,7 @@ static int au0828_enable_source(struct media_entity *entity, goto end; } - ret = __media_pipeline_start(entity, pipe); + ret = __media_pipeline_start(entity->pads, pipe); if (ret) { pr_err("Start Pipeline: %s->%s Error %d\n", source->name, entity->name, ret); @@ -501,12 +501,12 @@ static void au0828_disable_source(struct media_entity *entity) return; /* stop pipeline */ - __media_pipeline_stop(dev->active_link_owner); + __media_pipeline_stop(dev->active_link_owner->pads); pr_debug("Pipeline stop for %s\n", dev->active_link_owner->name); ret = __media_pipeline_start( - dev->active_link_user, + dev->active_link_user->pads, dev->active_link_user_pipe); if (ret) { pr_err("Start Pipeline: %s->%s %d\n", @@ -532,7 +532,7 @@ static void au0828_disable_source(struct media_entity *entity) return; /* stop pipeline */ - __media_pipeline_stop(dev->active_link_owner); + __media_pipeline_stop(dev->active_link_owner->pads); pr_debug("Pipeline stop for %s\n", dev->active_link_owner->name); diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 5eef37b00a52..1e9c8d01523b 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -1497,7 +1497,7 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap) /* * AF9035 gpiot2 = FC0012 enable * XXX: there seems to be something on gpioh8 too, but on my - * my test I didn't find any difference. + * test I didn't find any difference. 
*/ if (adap->id == 0) { diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c index 5a1f2698efb7..9759996ee6a4 100644 --- a/drivers/media/usb/msi2500/msi2500.c +++ b/drivers/media/usb/msi2500/msi2500.c @@ -209,7 +209,7 @@ leave: * * Control bits for previous samples is 32-bit field, containing 16 x 2-bit * numbers. This results one 2-bit number for 8 samples. It is likely used for - * for bit shifting sample by given bits, increasing actual sampling resolution. + * bit shifting sample by given bits, increasing actual sampling resolution. * Number 2 (0b10) was never seen. * * 6 * 16 * 2 * 4 = 768 samples. 768 * 4 = 3072 bytes diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c index a8c354ad3d23..d0a3aa3806fb 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-api.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c @@ -89,7 +89,7 @@ static int req_to_user(struct v4l2_ext_control *c, /* Helper function: copy the initial control value back to the caller */ static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { - ctrl->type_ops->init(ctrl, 0, ctrl->elems, ctrl->p_new); + ctrl->type_ops->init(ctrl, 0, ctrl->p_new); return ptr_to_user(c, ctrl, ctrl->p_new); } @@ -126,7 +126,7 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) if (ctrl->is_dyn_array) ctrl->new_elems = elems; else if (ctrl->is_array) - ctrl->type_ops->init(ctrl, elems, ctrl->elems, ctrl->p_new); + ctrl->type_ops->init(ctrl, elems, ctrl->p_new); return 0; } @@ -494,7 +494,7 @@ EXPORT_SYMBOL(v4l2_g_ext_ctrls); /* Validate a new control */ static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new) { - return ctrl->type_ops->validate(ctrl, ctrl->new_elems, p_new); + return ctrl->type_ops->validate(ctrl, p_new); } /* Validate controls. 
*/ @@ -1007,7 +1007,7 @@ int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, ctrl->p_cur.p = p_array + elems * ctrl->elem_size; for (i = 0; i < ctrl->nr_of_dims; i++) ctrl->dims[i] = dims[i]; - ctrl->type_ops->init(ctrl, 0, elems, ctrl->p_cur); + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); cur_to_new(ctrl); send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE | V4L2_EVENT_CTRL_CH_DIMENSIONS); diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c index 01f00093f259..0dab1d7b90f0 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-core.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c @@ -65,7 +65,7 @@ void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes) v4l2_event_queue_fh(sev->fh, &ev); } -bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems, +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2) { unsigned int i; @@ -74,7 +74,7 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems, case V4L2_CTRL_TYPE_BUTTON: return false; case V4L2_CTRL_TYPE_STRING: - for (i = 0; i < elems; i++) { + for (i = 0; i < ctrl->elems; i++) { unsigned int idx = i * ctrl->elem_size; /* strings are always 0-terminated */ @@ -84,7 +84,7 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems, return true; default: return !memcmp(ptr1.p_const, ptr2.p_const, - elems * ctrl->elem_size); + ctrl->elems * ctrl->elem_size); } } EXPORT_SYMBOL(v4l2_ctrl_type_op_equal); @@ -178,9 +178,10 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx, } void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, - u32 tot_elems, union v4l2_ctrl_ptr ptr) + union v4l2_ctrl_ptr ptr) { unsigned int i; + u32 tot_elems = ctrl->elems; u32 elems = tot_elems - from_idx; if (from_idx >= tot_elems) @@ -995,7 +996,7 @@ static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx, } } -int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems, +int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr) { unsigned int i; @@ -1017,11 +1018,11 @@ int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems, case V4L2_CTRL_TYPE_BUTTON: case V4L2_CTRL_TYPE_CTRL_CLASS: - memset(ptr.p_s32, 0, elems * sizeof(s32)); + memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32)); return 0; } - for (i = 0; !ret && i < elems; i++) + for (i = 0; !ret && i < ctrl->new_elems; i++) ret = std_validate_elem(ctrl, i, ptr); return ret; } @@ -1724,7 +1725,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, memcpy(ctrl->p_def.p, p_def.p_const, elem_size); } - ctrl->type_ops->init(ctrl, 0, elems, ctrl->p_cur); + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); cur_to_new(ctrl); if (handler_new_ref(hdl, ctrl, NULL, false, false)) { @@ -2069,7 +2070,7 @@ static int cluster_changed(struct v4l2_ctrl *master) ctrl_changed = true; if (!ctrl_changed) ctrl_changed = !ctrl->type_ops->equal(ctrl, - ctrl->elems, ctrl->p_cur, ctrl->p_new); + ctrl->p_cur, ctrl->p_new); ctrl->has_changed = ctrl_changed; changed |= ctrl->has_changed; } diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index d00237ee4cae..397d553177fa 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -1095,6 +1095,78 @@ void video_unregister_device(struct video_device *vdev) } EXPORT_SYMBOL(video_unregister_device); +#if defined(CONFIG_MEDIA_CONTROLLER) + +__must_check int 
video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_start); + +__must_check int __video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return __media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_start); + +void video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_stop); + +void __video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return __media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_stop); + +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_alloc_start(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start); + +struct media_pipeline *video_device_pipeline(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return NULL; + + return media_pad_pipeline(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + /* * Initialise video for linux */ diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 9489e80e905a..bdb2ce7ff03b 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -66,6 +66,14 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) goto err_map; } + /* Parse the device's DT node for an endianness specification */ + if (of_property_read_bool(np, "big-endian")) + syscon_config.val_format_endian = REGMAP_ENDIAN_BIG; + else if (of_property_read_bool(np, "little-endian")) + syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE; + else if (of_property_read_bool(np, "native-endian")) + syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE; + /* * search for reg-io-width property in DT. If it is not provided, * default to 4 bytes. 
regmap_init_mmio will return an error if values diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index 5669c92c93f7..c5c3b4e92f28 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -137,27 +137,42 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) struct qca8k_mgmt_eth_data *mgmt_eth_data; struct qca8k_priv *priv = ds->priv; struct qca_mgmt_ethhdr *mgmt_ethhdr; + u32 command; u8 len, cmd; + int i; mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); mgmt_eth_data = &priv->mgmt_eth_data; - cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); - len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); + command = get_unaligned_le32(&mgmt_ethhdr->command); + cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command); + len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command); /* Make sure the seq match the requested packet */ - if (mgmt_ethhdr->seq == mgmt_eth_data->seq) + if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq) mgmt_eth_data->ack = true; if (cmd == MDIO_READ) { - mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; + u32 *val = mgmt_eth_data->data; + + *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data); /* Get the rest of the 12 byte of data. * The read/write function will extract the requested data. */ - if (len > QCA_HDR_MGMT_DATA1_LEN) - memcpy(mgmt_eth_data->data + 1, skb->data, - QCA_HDR_MGMT_DATA2_LEN); + if (len > QCA_HDR_MGMT_DATA1_LEN) { + __le32 *data2 = (__le32 *)skb->data; + int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN, + len - QCA_HDR_MGMT_DATA1_LEN); + + val++; + + for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) { + *val = get_unaligned_le32(data2); + val++; + data2++; + } + } } complete(&mgmt_eth_data->rw_done); @@ -169,8 +184,10 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * struct qca_mgmt_ethhdr *mgmt_ethhdr; unsigned int real_len; struct sk_buff *skb; - u32 *data2; + __le32 *data2; + u32 command; u16 hdr; + int i; skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); if (!skb) @@ -199,20 +216,32 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); - mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, + command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); + command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); + command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); + command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, QCA_HDR_MGMT_CHECK_CODE_VAL); + put_unaligned_le32(command, &mgmt_ethhdr->command); + if (cmd == MDIO_WRITE) - mgmt_ethhdr->mdio_data = *val; + put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data); mgmt_ethhdr->hdr = htons(hdr); data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); - if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) - memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); + if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) { + int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN, + len - QCA_HDR_MGMT_DATA1_LEN); + + val++; + + for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) { + put_unaligned_le32(*val, data2); + data2++; + val++; + } + } return skb; } @@ -220,9 +249,11 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, 
u32 * static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) { struct qca_mgmt_ethhdr *mgmt_ethhdr; + u32 seq; + seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; - mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); + put_unaligned_le32(seq, &mgmt_ethhdr->seq); } static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) @@ -1487,9 +1518,9 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk struct qca8k_priv *priv = ds->priv; const struct qca8k_mib_desc *mib; struct mib_ethhdr *mib_ethhdr; - int i, mib_len, offset = 0; - u64 *data; + __le32 *data2; u8 port; + int i; mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); mib_eth_data = &priv->mib_eth_data; @@ -1501,28 +1532,24 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk if (port != mib_eth_data->req_port) goto exit; - data = mib_eth_data->data; + data2 = (__le32 *)skb->data; for (i = 0; i < priv->info->mib_count; i++) { mib = &ar8327_mib[i]; /* First 3 mib are present in the skb head */ if (i < 3) { - data[i] = mib_ethhdr->data[i]; + mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i); continue; } - mib_len = sizeof(uint32_t); - /* Some mib are 64 bit wide */ if (mib->size == 2) - mib_len = sizeof(uint64_t); - - /* Copy the mib value from packet to the */ - memcpy(data + i, skb->data + offset, mib_len); + mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2); + else + mib_eth_data->data[i] = get_unaligned_le32(data2); - /* Set the offset for the next mib */ - offset += mib_len; + data2 += mib->size; } exit: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index a36803e79e92..8a6f788f6294 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -613,6 +613,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack) { + bool rc = false; u32 datalen; u16 index; u8 *buf; @@ -632,20 +633,20 @@ static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack) if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) { NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error"); - goto err; + goto done; } if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) { NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error"); - goto err; + goto done; } - return true; + rc = true; -err: +done: kfree(buf); - return false; + return rc; } static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id, diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 00fafc0f8512..430eccea8e5e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner) hdev->cls_dev.release = hnae_release; (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id); ret = device_register(&hdev->cls_dev); - if (ret) + if (ret) { + put_device(&hdev->cls_dev); return ret; + } __module_get(THIS_MODULE); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7e75706f76db..87f36d1ce800 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2183,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev, err = i40e_setup_rx_descriptors(&rx_rings[i]); if (err) goto rx_unwind; - err = i40e_alloc_rx_bi(&rx_rings[i]); - if (err) - goto rx_unwind; /* now allocate the Rx buffers to make sure the OS * has enough memory, any failure here means abort diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2c07fa8ecfc8..b5dcd15ced36 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3566,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) if (ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - kfree(ring->rx_bi); ring->xsk_pool = i40e_xsk_pool(ring); if (ring->xsk_pool) { - ret = i40e_alloc_rx_bi_zc(ring); - if (ret) - return ret; ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); /* For AF_XDP ZC, we disallow packets to span on @@ -3589,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->queue_index); } else { - ret = i40e_alloc_rx_bi(ring); - if (ret) - return ret; ring->rx_buf_len = vsi->rx_buf_len; if (ring->vsi->type == I40E_VSI_MAIN) { ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -13296,6 +13289,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, i40e_reset_and_rebuild(pf, true, true); } + if (!i40e_enabled_xdp_vsi(vsi) && prog) { + if (i40e_realloc_rx_bi_zc(vsi, true)) + return -ENOMEM; + } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { + if (i40e_realloc_rx_bi_zc(vsi, false)) + return -ENOMEM; + } + for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); @@ -13528,6 +13529,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) i40e_queue_pair_disable_irq(vsi, queue_pair); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 69e67eb6aea7..b97c95f89fa0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1457,14 +1457,6 @@ err: return -ENOMEM; } -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; - - rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi ? 
0 : -ENOMEM; -} - static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); @@ -1593,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; + rx_ring->rx_bi = + kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); + if (!rx_ring->rx_bi) + return -ENOMEM; + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 41f86e9535a0..768290dc6f48 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -469,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 6d4009e0cbd6..cd7b52fb6b46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -10,14 +10,6 @@ #include "i40e_txrx_common.h" #include "i40e_xsk.h" -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count; - - rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi_zc ? 0 : -ENOMEM; -} - void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi_zc, 0, @@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) } /** + * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer + * @rx_ring: Current rx ring + * @pool_present: is pool for XSK present + * + * Try to allocate memory; return -ENOMEM if the allocation fails. + * If the allocation succeeds, substitute the buffer with the newly allocated one. + * Returns 0 on success, negative on failure + */ +static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present) +{ + size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) : + sizeof(*rx_ring->rx_bi); + void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); + + if (!sw_ring) + return -ENOMEM; + + if (pool_present) { + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + rx_ring->rx_bi_zc = sw_ring; + } else { + kfree(rx_ring->rx_bi_zc); + rx_ring->rx_bi_zc = NULL; + rx_ring->rx_bi = sw_ring; + } + return 0; +} + +/** + * i40e_realloc_rx_bi_zc - reallocate rx SW rings + * @vsi: Current VSI + * @zc: is zero copy set + * + * Reallocate buffer for rx_rings that might be used by XSK. + * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure + */ +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc) +{ + struct i40e_ring *rx_ring; + unsigned long q; + + for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) { + rx_ring = vsi->rx_rings[q]; + if (i40e_realloc_rx_xdp_bi(rx_ring, zc)) + return -ENOMEM; + } + return 0; +} + +/** * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a * certain ring/qid * @vsi: Current VSI @@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi, if (err) return err; + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); + if (err) + return err; + err = i40e_queue_pair_enable(vsi, qid); if (err) return err; @@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR); if (if_running) { + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false); + if (err) + return err; err = i40e_queue_pair_enable(vsi, qid); if (err) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index bb962987f300..821df248f8be 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring); int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring); +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc); void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring); #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 9809f551fc2e..9ec5f38d38a8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -815,6 +815,7 @@ free_flowid: cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, txsc->hw_flow_id, false); fail: + kfree(txsc); return ERR_PTR(ret); } @@ -870,6 +871,7 @@ free_flowid: cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, rxsc->hw_flow_id, false); fail: + kfree(rxsc); return ERR_PTR(ret); } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 4fba7cb0144b..7cd381530aa4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -4060,19 +4060,23 @@ static int mtk_probe(struct platform_device *pdev) eth->irq[i] = platform_get_irq(pdev, i); if (eth->irq[i] < 0) { dev_err(&pdev->dev, "no IRQ%d resource found\n", i); - return -ENXIO; + err = -ENXIO; + goto err_wed_exit; } } for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { eth->clks[i] = devm_clk_get(eth->dev, mtk_clks_source_name[i]); if (IS_ERR(eth->clks[i])) { - if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_wed_exit; + } if (eth->soc->required_clks & BIT(i)) { dev_err(&pdev->dev, "clock %s not found\n", mtk_clks_source_name[i]); - return -EINVAL; + err = -EINVAL; + goto err_wed_exit; } eth->clks[i] = NULL; } @@ -4083,7 +4087,7 @@ static int mtk_probe(struct platform_device *pdev) err = mtk_hw_init(eth); if (err) - return err; + goto err_wed_exit; eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); @@ -4179,6 +4183,8 @@ err_free_dev: mtk_free_dev(eth); err_deinit_hw: mtk_hw_deinit(eth); +err_wed_exit: + 
mtk_wed_exit(); return err; } @@ -4198,6 +4204,7 @@ static int mtk_remove(struct platform_device *pdev) phylink_disconnect_phy(mac->phylink); } + mtk_wed_exit(); mtk_hw_deinit(eth); netif_napi_del(ð->tx_napi); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index ae00e572390d..2d8ca99f2467 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -397,12 +397,6 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, return 0; } -static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) -{ - return !(entry->ib1 & MTK_FOE_IB1_STATIC) && - FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; -} - static bool mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry, struct mtk_foe_entry *data) diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 099b6e0df619..65e01bf4b4d2 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -1072,16 +1072,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, pdev = of_find_device_by_node(np); if (!pdev) - return; + goto err_of_node_put; get_device(&pdev->dev); irq = platform_get_irq(pdev, 0); if (irq < 0) - return; + goto err_put_device; regs = syscon_regmap_lookup_by_phandle(np, NULL); if (IS_ERR(regs)) - return; + goto err_put_device; rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); @@ -1124,8 +1124,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, hw_list[index] = hw; + mutex_unlock(&hw_lock); + + return; + unlock: mutex_unlock(&hw_lock); +err_put_device: + put_device(&pdev->dev); +err_of_node_put: + of_node_put(np); } void mtk_wed_exit(void) @@ -1146,6 +1154,7 @@ void mtk_wed_exit(void) hw_list[i] = NULL; debugfs_remove(hw->debugfs_dir); put_device(hw->dev); + of_node_put(hw->node); kfree(hw); } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 5d58fd99be3c..19d4848df17d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2817,11 +2817,15 @@ err_out: * than the full array, but leave the qcq shells in place */ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { - lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->txqcqs[i]); + if (lif->txqcqs && lif->txqcqs[i]) { + lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->txqcqs[i]); + } - lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->rxqcqs[i]); + if (lif->rxqcqs && lif->rxqcqs[i]) { + lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->rxqcqs[i]); + } } if (err) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index d1e1aa19a68e..7022fb2005a2 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3277,6 +3277,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) bool was_enabled = efx->port_enabled; int rc; +#ifdef CONFIG_SFC_SRIOV + /* If this function is a VF and we have access to the parent PF, + * then use the PF control path to attempt to change the VF MAC address. 
+ */ + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { + struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac[ETH_ALEN]; + + /* net_dev->dev_addr can be zeroed by efx_net_stop in + * efx_ef10_sriov_set_vf_mac, so pass in a copy. + */ + ether_addr_copy(mac, efx->net_dev->dev_addr); + + rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac); + if (!rc) + return 0; + + netif_dbg(efx, drv, efx->net_dev, + "Updating VF mac via PF failed (%d), setting directly\n", + rc); + } +#endif + efx_device_detach_sync(efx); efx_net_stop(efx->net_dev); @@ -3297,40 +3321,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_net_open(efx->net_dev); efx_device_attach_if_not_resetting(efx); -#ifdef CONFIG_SFC_SRIOV - if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; - - if (rc == -EPERM) { - struct efx_nic *efx_pf; - - /* Switch to PF and change MAC address on vport */ - efx_pf = pci_get_drvdata(pci_dev_pf); - - rc = efx_ef10_sriov_set_vf_mac(efx_pf, - nic_data->vf_index, - efx->net_dev->dev_addr); - } else if (!rc) { - struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); - struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; - unsigned int i; - - /* MAC address successfully changed by VF (with MAC - * spoofing) so update the parent PF if possible. - */ - for (i = 0; i < efx_pf->vf_count; ++i) { - struct ef10_vf *vf = nic_data->vf + i; - - if (vf->efx == efx) { - ether_addr_copy(vf->mac, - efx->net_dev->dev_addr); - return 0; - } - } - } - } else -#endif if (rc == -EPERM) { netif_err(efx, drv, efx->net_dev, "Cannot change MAC address; use sfboot to enable" diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index be72e71da027..5f201a547e5b 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -162,9 +162,9 @@ struct efx_filter_spec { u32 priority:2; u32 flags:6; u32 dmaq_id:12; - u32 vport_id; u32 rss_context; - __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + u32 vport_id; + __be16 outer_vid; __be16 inner_vid; u8 loc_mac[ETH_ALEN]; u8 rem_mac[ETH_ALEN]; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 4826e6a7e4ce..9220afeddee8 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -660,17 +660,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left, (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) return false; - return memcmp(&left->outer_vid, &right->outer_vid, + return memcmp(&left->vport_id, &right->vport_id, sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; + offsetof(struct efx_filter_spec, vport_id)) == 0; } u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) { - BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, + BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3); + return jhash2((const u32 *)&spec->vport_id, (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, + offsetof(struct efx_filter_spec, vport_id)) / 4, 0); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 65c96773c6d2..8273e6a175c8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1214,6 +1214,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) if (priv->plat->tx_queues_to_use > 1) priv->phylink_config.mac_capabilities &= ~(MAC_10HD | MAC_100HD | MAC_1000HD); + priv->phylink_config.mac_managed_pm = true; phylink = phylink_create(&priv->phylink_config, fwnode, mode, &stmmac_phylink_mac_ops); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 91f10f746dff..1c16548415cd 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1328,7 +1328,7 @@ static int happy_meal_init(struct happy_meal *hp) void __iomem *erxregs = hp->erxregs; void __iomem *bregs = hp->bigmacregs; void __iomem *tregs = hp->tcvregs; - const char *bursts; + const char *bursts = "64"; u32 regtmp, rxcfg; /* If auto-negotiation timer is running, kill it. */ diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 11f767a20444..eea777ec2541 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -20,6 +20,7 @@ #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include <linux/ucs2_string.h> +#include <linux/string.h> #include "hyperv_net.h" #include "netvsc_trace.h" @@ -335,9 +336,10 @@ static void rndis_filter_receive_response(struct net_device *ndev, if (resp->msg_len <= sizeof(struct rndis_message) + RNDIS_EXT_LEN) { memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id)); - memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id), + unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id), data + RNDIS_HEADER_SIZE + sizeof(*req_id), - resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id)); + resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id), + "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request"); if (request->request_msg.ndis_msg_type == RNDIS_MSG_QUERY && request->request_msg.msg. query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 8f8f73099de8..c5cfe8555199 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -361,7 +361,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, } spin_unlock(&port->bc_queue.lock); - schedule_work(&port->bc_work); + queue_work(system_unbound_wq, &port->bc_work); if (err) goto free_nskb; diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 8549e0e356c9..b60db8b6f477 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -254,8 +254,7 @@ static int dp83822_config_intr(struct phy_device *phydev) DP83822_EEE_ERROR_CHANGE_INT_EN); if (!dp83822->fx_enabled) - misr_status |= DP83822_MDI_XOVER_INT_EN | - DP83822_ANEG_ERR_INT_EN | + misr_status |= DP83822_ANEG_ERR_INT_EN | DP83822_WOL_PKT_INT_EN; err = phy_write(phydev, MII_DP83822_MISR2, misr_status); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 6939563d3b7c..417527f8bbf5 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -853,6 +853,14 @@ static int dp83867_config_init(struct phy_device *phydev) else val &= ~DP83867_SGMII_TYPE; phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val); + + /* This is a SW workaround for link instability if RX_CTRL is + * not strapped to mode 3 or 4 in HW. This is required for SGMII + * in addition to clearing bit 7, handled above. 
+ */ + if (dp83867->rxctrl_strap_quirk) + phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + BIT(8)); } val = phy_read(phydev, DP83867_CFG3); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 75464df191ef..6547b6cc6cbe 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1661,6 +1661,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, if (phy_interrupt_is_valid(phy)) phy_request_interrupt(phy); + if (pl->config->mac_managed_pm) + phy->mac_managed_pm = true; + return 0; } diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c index ff09a8cedf93..2397a903d8f5 100644 --- a/drivers/net/wwan/wwan_hwsim.c +++ b/drivers/net/wwan/wwan_hwsim.c @@ -311,7 +311,7 @@ err_unreg_dev: return ERR_PTR(err); err_free_dev: - kfree(dev); + put_device(&dev->dev); return ERR_PTR(err); } diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 5fc5ea196b40..ff8b083dc5c6 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -1039,6 +1039,8 @@ static void apple_nvme_reset_work(struct work_struct *work) dma_max_mapping_size(anv->dev) >> 9); anv->ctrl.max_segments = NVME_MAX_SEGS; + dma_set_max_seg_size(anv->dev, 0xffffffff); + /* * Enable NVMMU and linear submission queues. * While we could keep those disabled and pretend this is slightly diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 059737c1a2c1..dc4220600585 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3262,8 +3262,12 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl) return ret; if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { + /* + * Do not return errors unless we are in a controller reset, + * the controller works perfectly fine without hwmon. 
+ */ ret = nvme_hwmon_init(ctrl); - if (ret < 0) + if (ret == -EINTR) return ret; } @@ -4846,7 +4850,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, return 0; out_cleanup_admin_q: - blk_mq_destroy_queue(ctrl->fabrics_q); + blk_mq_destroy_queue(ctrl->admin_q); out_free_tagset: blk_mq_free_tag_set(ctrl->admin_tagset); return ret; diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 0a586d712920..9e6e56c20ec9 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -12,7 +12,7 @@ struct nvme_hwmon_data { struct nvme_ctrl *ctrl; - struct nvme_smart_log log; + struct nvme_smart_log *log; struct mutex read_lock; }; @@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) { return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, - NVME_CSI_NVM, &data->log, sizeof(data->log), 0); + NVME_CSI_NVM, data->log, sizeof(*data->log), 0); } static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct nvme_hwmon_data *data = dev_get_drvdata(dev); - struct nvme_smart_log *log = &data->log; + struct nvme_smart_log *log = data->log; int temp; int err; @@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || - (channel && data->log.temp_sensor[channel - 1])) { + (channel && data->log->temp_sensor[channel - 1])) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; @@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, break; case hwmon_temp_input: case hwmon_temp_label: - if (!channel || data->log.temp_sensor[channel - 1]) + if (!channel || data->log->temp_sensor[channel - 1]) return 0444; break; default: @@ -230,7 +230,13 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) - return 0; + return -ENOMEM; + + data->log = kzalloc(sizeof(*data->log), GFP_KERNEL); + if (!data->log) { + err = -ENOMEM; + goto err_free_data; + } data->ctrl = ctrl; mutex_init(&data->read_lock); @@ -238,8 +244,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) err = nvme_hwmon_get_smart_log(data); if (err) { dev_warn(dev, "Failed to read smart log (error %d)\n", err); - kfree(data); - return err; + goto err_free_log; } hwmon = hwmon_device_register_with_info(dev, "nvme", @@ -247,11 +252,17 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) NULL); if (IS_ERR(hwmon)) { dev_warn(dev, "Failed to instantiate hwmon device\n"); - kfree(data); - return PTR_ERR(hwmon); + err = PTR_ERR(hwmon); + goto err_free_log; } ctrl->hwmon_device = hwmon; return 0; + +err_free_log: + kfree(data->log); +err_free_data: + kfree(data); + return err; } void nvme_hwmon_exit(struct nvme_ctrl *ctrl) @@ -262,6 +273,7 @@ void nvme_hwmon_exit(struct nvme_ctrl *ctrl) hwmon_device_unregister(ctrl->hwmon_device); ctrl->hwmon_device = NULL; + kfree(data->log); kfree(data); } } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bcbef6bc5672..31e577b01257 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3511,6 +3511,16 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ + 
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index e34a2896fedb..9443ee1d4ae3 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1290,12 +1290,8 @@ static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item, static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item, const char *page, size_t cnt) { - struct nvmet_port *port = to_nvmet_port(item); u16 qid_max; - if (nvmet_is_port_enabled(port, __func__)) - return -EACCES; - if (sscanf(page, "%hu\n", &qid_max) != 1) return -EINVAL; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 14677145bbba..aecb5853f8da 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1176,7 +1176,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) * reset the keep alive timer when the controller is enabled. */ if (ctrl->kato) - mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); + mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); } static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 24478ae5a345..8e323e93be91 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -415,6 +415,13 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset) * address (access to which generates correct config transaction) falls in * this 4 KiB region. */ +static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn, + unsigned int where) +{ + return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) | + (PCI_FUNC(devfn) << 8) | (where & 0xff); +} + static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) @@ -436,9 +443,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, unsigned int offset; u32 base; - offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn), where) & - ~PCI_CONF1_ENABLE; + offset = tegra_pcie_conf_offset(bus->number, devfn, where); /* move 4 KiB window to offset within the FPCI region */ base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b49c39569386..b535f1fd3010 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4812,7 +4812,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) rc = lpfc_vmid_res_alloc(phba, vport); if (rc) - goto out; + goto out_put_shost; /* Initialize all internally managed lists. 
*/ INIT_LIST_HEAD(&vport->fc_nodes); @@ -4830,16 +4830,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) - goto out_put_shost; + goto out_free_vmid; spin_lock_irq(&phba->port_list_lock); list_add_tail(&vport->listentry, &phba->port_list); spin_unlock_irq(&phba->port_list_lock); return vport; -out_put_shost: +out_free_vmid: kfree(vport->vmid); bitmap_free(vport->vmid_priority_range); +out_put_shost: scsi_host_put(shost); out: return NULL; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index c95177ca6ed2..cac7c902cf70 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -828,6 +828,14 @@ store_state_field(struct device *dev, struct device_attribute *attr, } mutex_lock(&sdev->state_mutex); + switch (sdev->sdev_state) { + case SDEV_RUNNING: + case SDEV_OFFLINE: + break; + default: + mutex_unlock(&sdev->state_mutex); + return -EINVAL; + } if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { ret = 0; } else { diff --git a/drivers/soc/imx/imx93-pd.c b/drivers/soc/imx/imx93-pd.c index 1f3d7039c1de..4d235c8c4924 100644 --- a/drivers/soc/imx/imx93-pd.c +++ b/drivers/soc/imx/imx93-pd.c @@ -135,11 +135,24 @@ static int imx93_pd_probe(struct platform_device *pdev) ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off); if (ret) - return ret; + goto err_clk_unprepare; platform_set_drvdata(pdev, domain); - return of_genpd_add_provider_simple(np, &domain->genpd); + ret = of_genpd_add_provider_simple(np, &domain->genpd); + if (ret) + goto err_genpd_remove; + + return 0; + +err_genpd_remove: + pm_genpd_remove(&domain->genpd); + +err_clk_unprepare: + if (!domain->init_off) + clk_bulk_disable_unprepare(domain->num_clks, domain->clks); + + return ret; } static const struct of_device_id imx93_pd_ids[] = { diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile index fb7b406f50bf..532e12ed72e6 100644 --- a/drivers/staging/media/atomisp/Makefile +++ b/drivers/staging/media/atomisp/Makefile @@ -17,7 +17,6 @@ atomisp-objs += \ pci/atomisp_compat_css20.o \ pci/atomisp_csi2.o \ pci/atomisp_drvfs.o \ - pci/atomisp_file.o \ pci/atomisp_fops.o \ pci/atomisp_ioctl.o \ pci/atomisp_subdev.o \ diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c index 8f48b23be3aa..fa1de45b7a2d 100644 --- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c +++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c @@ -841,8 +841,6 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd, if (!ov2680_info) return -EINVAL; - mutex_lock(&dev->input_lock); - res = v4l2_find_nearest_size(ov2680_res_preview, ARRAY_SIZE(ov2680_res_preview), width, height, fmt->width, fmt->height); @@ -855,19 +853,22 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd, fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { sd_state->pads->try_fmt = *fmt; - mutex_unlock(&dev->input_lock); return 0; } dev_dbg(&client->dev, "%s: %dx%d\n", __func__, fmt->width, fmt->height); + mutex_lock(&dev->input_lock); + /* s_power has not been called yet for std v4l2 clients (camorama) */ power_up(sd); ret = ov2680_write_reg_array(client, dev->res->regs); - if (ret) + if (ret) { dev_err(&client->dev, "ov2680 write resolution register err: %d\n", ret); + goto err; + } vts = dev->res->lines_per_frame; @@ -876,8 +877,10 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd, vts = dev->exposure + 
OV2680_INTEGRATION_TIME_MARGIN; ret = ov2680_write_reg(client, 2, OV2680_TIMING_VTS_H, vts); - if (ret) + if (ret) { dev_err(&client->dev, "ov2680 write vts err: %d\n", ret); + goto err; + } ret = ov2680_get_intg_factor(client, ov2680_info, res); if (ret) { @@ -894,11 +897,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd, if (v_flag) ov2680_v_flip(sd, v_flag); - /* - * ret = startup(sd); - * if (ret) - * dev_err(&client->dev, "ov2680 startup err\n"); - */ + dev->res = res; err: mutex_unlock(&dev->input_lock); return ret; diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h index 385e22fc4a46..c5cbae1d9cf9 100644 --- a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h +++ b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h @@ -65,9 +65,6 @@ #define check_bo_null_return_void(bo) \ check_null_return_void(bo, "NULL hmm buffer object.\n") -#define HMM_MAX_ORDER 3 -#define HMM_MIN_ORDER 0 - #define ISP_VM_START 0x0 #define ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */ #define ISP_PTR_NULL NULL @@ -89,8 +86,6 @@ enum hmm_bo_type { #define HMM_BO_VMAPED 0x10 #define HMM_BO_VMAPED_CACHED 0x20 #define HMM_BO_ACTIVE 0x1000 -#define HMM_BO_MEM_TYPE_USER 0x1 -#define HMM_BO_MEM_TYPE_PFN 0x2 struct hmm_bo_device { struct isp_mmu mmu; @@ -126,7 +121,6 @@ struct hmm_buffer_object { enum hmm_bo_type type; int mmap_count; int status; - int mem_type; void *vmap_addr; /* kernel virtual address by vmap */ struct rb_node node; diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h index f96f5adbd9de..3f602b5aaff9 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp.h @@ -740,20 +740,6 @@ enum atomisp_frame_status { ATOMISP_FRAME_STATUS_FLASH_FAILED, }; -/* ISP memories, isp2400 */ -enum atomisp_acc_memory { - ATOMISP_ACC_MEMORY_PMEM0 = 0, - ATOMISP_ACC_MEMORY_DMEM0, - /* for backward compatibility */ - ATOMISP_ACC_MEMORY_DMEM = ATOMISP_ACC_MEMORY_DMEM0, - ATOMISP_ACC_MEMORY_VMEM0, - ATOMISP_ACC_MEMORY_VAMEM0, - ATOMISP_ACC_MEMORY_VAMEM1, - ATOMISP_ACC_MEMORY_VAMEM2, - ATOMISP_ACC_MEMORY_HMEM0, - ATOMISP_ACC_NR_MEMORY -}; - enum atomisp_ext_isp_id { EXT_ISP_CID_ISO = 0, EXT_ISP_CID_CAPTURE_HDR, diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h index 58e0ea5355a3..5463d11d4295 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h @@ -26,8 +26,6 @@ struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter, int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd); int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def); -int camera_sensor_csi(struct v4l2_subdev *sd, u32 port, - u32 lanes, u32 format, u32 bayer_order, int flag); struct camera_sensor_platform_data * gmin_camera_platform_data( struct v4l2_subdev *subdev, diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h index 8c65733e0255..0253661d4332 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h @@ -141,23 +141,6 @@ struct atomisp_platform_data { struct intel_v4l2_subdev_table *subdevs; }; -/* Describe the capacities of one single 
sensor. */ -struct atomisp_sensor_caps { - /* The number of streams this sensor can output. */ - int stream_num; - bool is_slave; -}; - -/* Describe the capacities of sensors connected to one camera port. */ -struct atomisp_camera_caps { - /* The number of sensors connected to this camera port. */ - int sensor_num; - /* The capacities of each sensor. */ - struct atomisp_sensor_caps sensor[MAX_SENSORS_PER_PORT]; - /* Define whether stream control is required for multiple streams. */ - bool multi_stream_ctrl; -}; - /* * Sensor of external ISP can send multiple steams with different mipi data * type in the same virtual channel. This information needs to come from the @@ -235,7 +218,6 @@ struct camera_mipi_info { }; const struct atomisp_platform_data *atomisp_get_platform_data(void); -const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void); /* API from old platform_camera.h, new CPUID implementation */ #define __IS_SOC(x) (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && \ diff --git a/drivers/staging/media/atomisp/notes.txt b/drivers/staging/media/atomisp/notes.txt index d128b792e05f..d3cf6ed547ae 100644 --- a/drivers/staging/media/atomisp/notes.txt +++ b/drivers/staging/media/atomisp/notes.txt @@ -28,3 +28,22 @@ Since getting a picture requires multiple processing steps, this means that unlike in fixed pipelines the soft pipelines on the ISP can do multiple processing steps in a single pipeline element (in a single binary). + +### + +The sensor drivers make use of v4l2_get_subdev_hostdata(), which returns +a camera_mipi_info struct. This struct is allocated/managed by +the core atomisp code. The most important parts of the struct +are filled by the atomisp core itself, e.g. the port number. + +The sensor drivers on a set_fmt call fill in camera_mipi_info.data, +which is an atomisp_sensor_mode_data struct. This gets filled from +a function called <sensor_name>_get_intg_factor(). This struct is not +used by the atomisp code at all. It is returned to userspace by +an ATOMISP_IOC_G_SENSOR_MODE_DATA ioctl and the Android userspace does use this. + +Other members of camera_mipi_info which are set by some drivers are: -metadata_width, metadata_height, metadata_effective_width, set by + the ov5693 driver (and used by the atomisp core) -raw_bayer_order, adjusted by the ov2680 driver when flipping since + flipping can change the bayer order diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index c932f340068f..c72d0e344671 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -80,6 +80,8 @@ union host { } ptr; }; +static int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id); + /* * get sensor:dis71430/ov2720 related info from v4l2_subdev->priv data field.
* subdev->priv is set in mrst.c @@ -98,15 +100,6 @@ struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev) container_of(dev, struct atomisp_video_pipe, vdev); } -/* - * get struct atomisp_acc_pipe from v4l2 video_device - */ -struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev) -{ - return (struct atomisp_acc_pipe *) - container_of(dev, struct atomisp_acc_pipe, vdev); -} - static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd) { struct v4l2_subdev_frame_interval fi = { 0 }; @@ -777,24 +770,6 @@ static struct atomisp_video_pipe *__atomisp_get_pipe( enum ia_css_pipe_id css_pipe_id, enum ia_css_buffer_type buf_type) { - struct atomisp_device *isp = asd->isp; - - if (css_pipe_id == IA_CSS_PIPE_ID_COPY && - isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - switch (stream_id) { - case ATOMISP_INPUT_STREAM_PREVIEW: - return &asd->video_out_preview; - case ATOMISP_INPUT_STREAM_POSTVIEW: - return &asd->video_out_vf; - case ATOMISP_INPUT_STREAM_VIDEO: - return &asd->video_out_video_capture; - case ATOMISP_INPUT_STREAM_CAPTURE: - default: - return &asd->video_out_capture; - } - } - /* video is same in online as in continuouscapture mode */ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { /* @@ -906,7 +881,8 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, enum atomisp_metadata_type md_type; struct atomisp_device *isp = asd->isp; struct v4l2_control ctrl; - bool reset_wdt_timer = false; + + lockdep_assert_held(&isp->mutex); if ( buf_type != IA_CSS_BUFFER_TYPE_METADATA && @@ -1013,9 +989,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, break; case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME: case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: - if (IS_ISP2401) - reset_wdt_timer = true; - pipe->buffers_in_css--; frame = buffer.css_buffer.data.frame; if (!frame) { @@ -1068,9 +1041,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, break; case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME: case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME: - if (IS_ISP2401) - reset_wdt_timer = true; - pipe->buffers_in_css--; frame = buffer.css_buffer.data.frame; if (!frame) { @@ -1238,8 +1208,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, */ wake_up(&vb->done); } - if (IS_ISP2401) - atomic_set(&pipe->wdt_count, 0); /* * Requeue should only be done for 3a and dis buffers. @@ -1256,19 +1224,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, } if (!error && q_buffers) atomisp_qbuffers_to_css(asd); - - if (IS_ISP2401) { - /* If there are no buffers queued then - * delete wdt timer. */ - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return; - if (!atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_stop_pipe(pipe, false); - else if (reset_wdt_timer) - /* SOF irq should not reset wdt timer. 
*/ - atomisp_wdt_refresh_pipe(pipe, - ATOMISP_WDT_KEEP_CURRENT_DELAY); - } } void atomisp_delayed_init_work(struct work_struct *work) @@ -1307,10 +1262,14 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) bool stream_restart[MAX_STREAM_NUM] = {0}; bool depth_mode = false; int i, ret, depth_cnt = 0; + unsigned long flags; - if (!isp->sw_contex.file_input) - atomisp_css_irq_enable(isp, - IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false); + lockdep_assert_held(&isp->mutex); + + if (!atomisp_streaming_count(isp)) + return; + + atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false); BUG_ON(isp->num_of_streams > MAX_STREAM_NUM); @@ -1331,7 +1290,9 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) stream_restart[asd->index] = true; + spin_lock_irqsave(&isp->lock, flags); asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING; + spin_unlock_irqrestore(&isp->lock, flags); /* stream off sensor */ ret = v4l2_subdev_call( @@ -1346,7 +1307,9 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) css_pipe_id = atomisp_get_css_pipe_id(asd); atomisp_css_stop(asd, css_pipe_id, true); + spin_lock_irqsave(&isp->lock, flags); asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; + spin_unlock_irqrestore(&isp->lock, flags); asd->preview_exp_id = 1; asd->postview_exp_id = 1; @@ -1387,25 +1350,23 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) IA_CSS_INPUT_MODE_BUFFERED_SENSOR); css_pipe_id = atomisp_get_css_pipe_id(asd); - if (atomisp_css_start(asd, css_pipe_id, true)) + if (atomisp_css_start(asd, css_pipe_id, true)) { dev_warn(isp->dev, "start SP failed, so do not set streaming to be enable!\n"); - else + } else { + spin_lock_irqsave(&isp->lock, flags); asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED; + spin_unlock_irqrestore(&isp->lock, flags); + } atomisp_csi2_configure(asd); } - if (!isp->sw_contex.file_input) { - atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, - atomisp_css_valid_sof(isp)); + atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, + atomisp_css_valid_sof(isp)); - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0) - dev_dbg(isp->dev, "DFS auto failed while recovering!\n"); - } else { - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, true) < 0) - dev_dbg(isp->dev, "DFS max failed while recovering!\n"); - } + if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0) + dev_dbg(isp->dev, "DFS auto failed while recovering!\n"); for (i = 0; i < isp->num_of_streams; i++) { struct atomisp_sub_device *asd; @@ -1454,361 +1415,24 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) } } -void atomisp_wdt_work(struct work_struct *work) +void atomisp_assert_recovery_work(struct work_struct *work) { struct atomisp_device *isp = container_of(work, struct atomisp_device, - wdt_work); - int i; - unsigned int pipe_wdt_cnt[MAX_STREAM_NUM][4] = { {0} }; - bool css_recover = true; - - rt_mutex_lock(&isp->mutex); - if (!atomisp_streaming_count(isp)) { - atomic_set(&isp->wdt_work_queued, 0); - rt_mutex_unlock(&isp->mutex); - return; - } - - if (!IS_ISP2401) { - dev_err(isp->dev, "timeout %d of %d\n", - atomic_read(&isp->wdt_count) + 1, - ATOMISP_ISP_MAX_TIMEOUT_COUNT); - } else { - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - pipe_wdt_cnt[i][0] += - atomic_read(&asd->video_out_capture.wdt_count); - pipe_wdt_cnt[i][1] += - atomic_read(&asd->video_out_vf.wdt_count); 
- pipe_wdt_cnt[i][2] += - atomic_read(&asd->video_out_preview.wdt_count); - pipe_wdt_cnt[i][3] += - atomic_read(&asd->video_out_video_capture.wdt_count); - css_recover = - (pipe_wdt_cnt[i][0] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][1] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][2] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][3] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT) - ? true : false; - dev_err(isp->dev, - "pipe on asd%d timeout cnt: (%d, %d, %d, %d) of %d, recover = %d\n", - asd->index, pipe_wdt_cnt[i][0], pipe_wdt_cnt[i][1], - pipe_wdt_cnt[i][2], pipe_wdt_cnt[i][3], - ATOMISP_ISP_MAX_TIMEOUT_COUNT, css_recover); - } - } - - if (css_recover) { - ia_css_debug_dump_sp_sw_debug_info(); - ia_css_debug_dump_debug_info(__func__); - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - dev_err(isp->dev, "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_capture.vdev.name, - asd->video_out_capture. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_vf.vdev.name, - asd->video_out_vf. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_preview.vdev.name, - asd->video_out_preview. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_video_capture.vdev.name, - asd->video_out_video_capture. - buffers_in_css); - dev_err(isp->dev, - "%s, s3a buffers in css preview pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_PREVIEW]); - dev_err(isp->dev, - "%s, s3a buffers in css capture pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_CAPTURE]); - dev_err(isp->dev, - "%s, s3a buffers in css video pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_VIDEO]); - dev_err(isp->dev, - "%s, dis buffers in css: %d\n", - __func__, asd->dis_bufs_in_css); - dev_err(isp->dev, - "%s, metadata buffers in css preview pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [IA_CSS_PIPE_ID_PREVIEW]); - dev_err(isp->dev, - "%s, metadata buffers in css capture pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [IA_CSS_PIPE_ID_CAPTURE]); - dev_err(isp->dev, - "%s, metadata buffers in css video pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [IA_CSS_PIPE_ID_VIDEO]); - if (asd->enable_raw_buffer_lock->val) { - unsigned int j; - - dev_err(isp->dev, "%s, raw_buffer_locked_count %d\n", - __func__, asd->raw_buffer_locked_count); - for (j = 0; j <= ATOMISP_MAX_EXP_ID / 32; j++) - dev_err(isp->dev, "%s, raw_buffer_bitmap[%d]: 0x%x\n", - __func__, j, - asd->raw_buffer_bitmap[j]); - } - } - - /*sh_css_dump_sp_state();*/ - /*sh_css_dump_isp_state();*/ - } else { - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming == - ATOMISP_DEVICE_STREAMING_ENABLED) { - atomisp_clear_css_buffer_counters(asd); - atomisp_flush_bufs_and_wakeup(asd); - complete(&asd->init_done); - } - if (IS_ISP2401) - atomisp_wdt_stop(asd, false); - } - - if (!IS_ISP2401) { - atomic_set(&isp->wdt_count, 0); - } else { - isp->isp_fatal_error = true; - atomic_set(&isp->wdt_work_queued, 0); - - rt_mutex_unlock(&isp->mutex); - return; - } - } + assert_recovery_work); + mutex_lock(&isp->mutex); __atomisp_css_recover(isp, true); - if (IS_ISP2401) { - for (i = 0; i < 
isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - - atomisp_wdt_refresh(asd, - isp->sw_contex.file_input ? - ATOMISP_ISP_FILE_TIMEOUT_DURATION : - ATOMISP_ISP_TIMEOUT_DURATION); - } - } - - dev_err(isp->dev, "timeout recovery handling done\n"); - atomic_set(&isp->wdt_work_queued, 0); - - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); } void atomisp_css_flush(struct atomisp_device *isp) { - int i; - - if (!atomisp_streaming_count(isp)) - return; - - /* Disable wdt */ - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - atomisp_wdt_stop(asd, true); - } - /* Start recover */ __atomisp_css_recover(isp, false); - /* Restore wdt */ - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming != - ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - atomisp_wdt_refresh(asd, - isp->sw_contex.file_input ? - ATOMISP_ISP_FILE_TIMEOUT_DURATION : - ATOMISP_ISP_TIMEOUT_DURATION); - } dev_dbg(isp->dev, "atomisp css flush done\n"); } -void atomisp_wdt(struct timer_list *t) -{ - struct atomisp_sub_device *asd; - struct atomisp_device *isp; - - if (!IS_ISP2401) { - asd = from_timer(asd, t, wdt); - isp = asd->isp; - } else { - struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt); - - asd = pipe->asd; - isp = asd->isp; - - atomic_inc(&pipe->wdt_count); - dev_warn(isp->dev, - "[WARNING]asd %d pipe %s ISP timeout %d!\n", - asd->index, pipe->vdev.name, - atomic_read(&pipe->wdt_count)); - } - - if (atomic_read(&isp->wdt_work_queued)) { - dev_dbg(isp->dev, "ISP watchdog was put into workqueue\n"); - return; - } - atomic_set(&isp->wdt_work_queued, 1); - queue_work(isp->wdt_work_queue, &isp->wdt_work); -} - -/* ISP2400 */ -void atomisp_wdt_start(struct atomisp_sub_device *asd) -{ - atomisp_wdt_refresh(asd, ATOMISP_ISP_TIMEOUT_DURATION); -} - -/* ISP2401 */ -void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, - unsigned int delay) -{ - unsigned long next; - - if (!pipe->asd) { - dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, pipe->vdev.name); - return; - } - - if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY) - pipe->wdt_duration = delay; - - next = jiffies + pipe->wdt_duration; - - /* Override next if it has been pushed beyon the "next" time */ - if (atomisp_is_wdt_running(pipe) && time_after(pipe->wdt_expires, next)) - next = pipe->wdt_expires; - - pipe->wdt_expires = next; - - if (atomisp_is_wdt_running(pipe)) - dev_dbg(pipe->asd->isp->dev, "WDT will hit after %d ms (%s)\n", - ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name); - else - dev_dbg(pipe->asd->isp->dev, "WDT starts with %d ms period (%s)\n", - ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name); - - mod_timer(&pipe->wdt, next); -} - -void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay) -{ - if (!IS_ISP2401) { - unsigned long next; - - if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY) - asd->wdt_duration = delay; - - next = jiffies + asd->wdt_duration; - - /* Override next if it has been pushed beyon the "next" time */ - if (atomisp_is_wdt_running(asd) && time_after(asd->wdt_expires, next)) - next = asd->wdt_expires; - - asd->wdt_expires = next; - - if (atomisp_is_wdt_running(asd)) - dev_dbg(asd->isp->dev, "WDT will hit after %d ms\n", - ((int)(next - jiffies) * 1000 / HZ)); - else - dev_dbg(asd->isp->dev, "WDT starts with %d ms period\n", - ((int)(next - jiffies) * 1000 / HZ)); - - 
mod_timer(&asd->wdt, next); - atomic_set(&asd->isp->wdt_count, 0); - } else { - dev_dbg(asd->isp->dev, "WDT refresh all:\n"); - if (atomisp_is_wdt_running(&asd->video_out_capture)) - atomisp_wdt_refresh_pipe(&asd->video_out_capture, delay); - if (atomisp_is_wdt_running(&asd->video_out_preview)) - atomisp_wdt_refresh_pipe(&asd->video_out_preview, delay); - if (atomisp_is_wdt_running(&asd->video_out_vf)) - atomisp_wdt_refresh_pipe(&asd->video_out_vf, delay); - if (atomisp_is_wdt_running(&asd->video_out_video_capture)) - atomisp_wdt_refresh_pipe(&asd->video_out_video_capture, delay); - } -} - -/* ISP2401 */ -void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync) -{ - if (!pipe->asd) { - dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, pipe->vdev.name); - return; - } - - if (!atomisp_is_wdt_running(pipe)) - return; - - dev_dbg(pipe->asd->isp->dev, - "WDT stop asd %d (%s)\n", pipe->asd->index, pipe->vdev.name); - - if (sync) { - del_timer_sync(&pipe->wdt); - cancel_work_sync(&pipe->asd->isp->wdt_work); - } else { - del_timer(&pipe->wdt); - } -} - -/* ISP 2401 */ -void atomisp_wdt_start_pipe(struct atomisp_video_pipe *pipe) -{ - atomisp_wdt_refresh_pipe(pipe, ATOMISP_ISP_TIMEOUT_DURATION); -} - -void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync) -{ - dev_dbg(asd->isp->dev, "WDT stop:\n"); - - if (!IS_ISP2401) { - if (sync) { - del_timer_sync(&asd->wdt); - cancel_work_sync(&asd->isp->wdt_work); - } else { - del_timer(&asd->wdt); - } - } else { - atomisp_wdt_stop_pipe(&asd->video_out_capture, sync); - atomisp_wdt_stop_pipe(&asd->video_out_preview, sync); - atomisp_wdt_stop_pipe(&asd->video_out_vf, sync); - atomisp_wdt_stop_pipe(&asd->video_out_video_capture, sync); - } -} - void atomisp_setup_flash(struct atomisp_sub_device *asd) { struct atomisp_device *isp = asd->isp; @@ -1884,7 +1508,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr) * For CSS2.0: we change the way to not dequeue all the event at one * time, instead, dequue one and process one, then another */ - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); if (atomisp_css_isr_thread(isp, frame_done_found, css_pipe_done)) goto out; @@ -1895,15 +1519,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr) atomisp_setup_flash(asd); } out: - rt_mutex_unlock(&isp->mutex); - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED - && css_pipe_done[asd->index] - && isp->sw_contex.file_input) - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 1); - } + mutex_unlock(&isp->mutex); dev_dbg(isp->dev, "<%s\n", __func__); return IRQ_HANDLED; @@ -2322,7 +1938,6 @@ static void atomisp_update_grid_info(struct atomisp_sub_device *asd, { struct atomisp_device *isp = asd->isp; int err; - u16 stream_id = atomisp_source_pad_to_stream_id(asd, source_pad); if (atomisp_css_get_grid_info(asd, pipe_id, source_pad)) return; @@ -2331,7 +1946,7 @@ static void atomisp_update_grid_info(struct atomisp_sub_device *asd, the grid size. 
*/ atomisp_css_free_stat_buffers(asd); - err = atomisp_alloc_css_stat_bufs(asd, stream_id); + err = atomisp_alloc_css_stat_bufs(asd, ATOMISP_INPUT_STREAM_GENERAL); if (err) { dev_err(isp->dev, "stat_buf allocate error\n"); goto err; @@ -4077,6 +3692,8 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe) unsigned long irqflags; bool need_to_enqueue_buffer = false; + lockdep_assert_held(&asd->isp->mutex); + if (!asd) { dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", __func__, pipe->vdev.name); @@ -4143,19 +3760,6 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe) return; atomisp_qbuffers_to_css(asd); - - if (!IS_ISP2401) { - if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd)) - atomisp_wdt_start(asd); - } else { - if (atomisp_buffers_queued_pipe(pipe)) { - if (!atomisp_is_wdt_running(pipe)) - atomisp_wdt_start_pipe(pipe); - else - atomisp_wdt_refresh_pipe(pipe, - ATOMISP_WDT_KEEP_CURRENT_DELAY); - } - } } /* @@ -4170,6 +3774,8 @@ int atomisp_set_parameters(struct video_device *vdev, struct atomisp_css_params *css_param = &asd->params.css_param; int ret; + lockdep_assert_held(&asd->isp->mutex); + if (!asd) { dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", __func__, vdev->name); @@ -4824,8 +4430,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, const struct atomisp_format_bridge *fmt; struct atomisp_input_stream_info *stream_info = (struct atomisp_input_stream_info *)snr_mbus_fmt->reserved; - u16 stream_index; - int source_pad = atomisp_subdev_source_pad(vdev); int ret; if (!asd) { @@ -4837,7 +4441,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, if (!isp->inputs[asd->input_curr].camera) return -EINVAL; - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); fmt = atomisp_get_format_bridge(f->pixelformat); if (!fmt) { dev_err(isp->dev, "unsupported pixelformat!\n"); @@ -4851,7 +4454,7 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, snr_mbus_fmt->width = f->width; snr_mbus_fmt->height = f->height; - __atomisp_init_stream_info(stream_index, stream_info); + __atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info); dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n", snr_mbus_fmt->width, snr_mbus_fmt->height); @@ -4886,8 +4489,8 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, return 0; } - if (snr_mbus_fmt->width < f->width - && snr_mbus_fmt->height < f->height) { + if (!res_overflow || (snr_mbus_fmt->width < f->width && + snr_mbus_fmt->height < f->height)) { f->width = snr_mbus_fmt->width; f->height = snr_mbus_fmt->height; /* Set the flag when resolution requested is @@ -4906,41 +4509,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, return 0; } -static int -atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f) -{ - u32 width = f->fmt.pix.width; - u32 height = f->fmt.pix.height; - u32 pixelformat = f->fmt.pix.pixelformat; - enum v4l2_field field = f->fmt.pix.field; - u32 depth; - - if (!atomisp_get_format_bridge(pixelformat)) { - dev_err(isp->dev, "Wrong output pixelformat\n"); - return -EINVAL; - } - - depth = atomisp_get_pixel_depth(pixelformat); - - if (field == V4L2_FIELD_ANY) { - field = V4L2_FIELD_NONE; - } else if (field != V4L2_FIELD_NONE) { - dev_err(isp->dev, "Wrong output field\n"); - return -EINVAL; - } - - f->fmt.pix.field = field; - f->fmt.pix.width = clamp_t(u32, - rounddown(width, (u32)ATOM_ISP_STEP_WIDTH), 
- ATOM_ISP_MIN_WIDTH, ATOM_ISP_MAX_WIDTH); - f->fmt.pix.height = clamp_t(u32, rounddown(height, - (u32)ATOM_ISP_STEP_HEIGHT), - ATOM_ISP_MIN_HEIGHT, ATOM_ISP_MAX_HEIGHT); - f->fmt.pix.bytesperline = (width * depth) >> 3; - - return 0; -} - enum mipi_port_id __get_mipi_port(struct atomisp_device *isp, enum atomisp_camera_port port) { @@ -5171,7 +4739,6 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, int (*configure_pp_input)(struct atomisp_sub_device *asd, unsigned int width, unsigned int height) = configure_pp_input_nop; - u16 stream_index; const struct atomisp_in_fmt_conv *fc; int ret, i; @@ -5180,7 +4747,6 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, __func__, vdev->name); return -EINVAL; } - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); v4l2_fh_init(&fh.vfh, vdev); @@ -5200,7 +4766,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, dev_err(isp->dev, "mipi_info is NULL\n"); return -EINVAL; } - if (atomisp_set_sensor_mipi_to_isp(asd, stream_index, + if (atomisp_set_sensor_mipi_to_isp(asd, ATOMISP_INPUT_STREAM_GENERAL, mipi_info)) return -EINVAL; fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt( @@ -5284,7 +4850,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, /* ISP2401 new input system need to use copy pipe */ if (asd->copy_mode) { pipe_id = IA_CSS_PIPE_ID_COPY; - atomisp_css_capture_enable_online(asd, stream_index, false); + atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, false); } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { /* video same in continuouscapture and online modes */ configure_output = atomisp_css_video_configure_output; @@ -5316,7 +4882,9 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, pipe_id = IA_CSS_PIPE_ID_CAPTURE; atomisp_update_capture_mode(asd); - atomisp_css_capture_enable_online(asd, stream_index, false); + atomisp_css_capture_enable_online(asd, + ATOMISP_INPUT_STREAM_GENERAL, + false); } } } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) { @@ -5341,7 +4909,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, if (!asd->continuous_mode->val) /* in case of ANR, force capture pipe to offline mode */ - atomisp_css_capture_enable_online(asd, stream_index, + atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, asd->params.low_light ? false : asd->params.online_process); @@ -5372,7 +4940,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, pipe_id = IA_CSS_PIPE_ID_YUVPP; if (asd->copy_mode) - ret = atomisp_css_copy_configure_output(asd, stream_index, + ret = atomisp_css_copy_configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, pix->width, pix->height, format->planar ? 
pix->bytesperline : pix->bytesperline * 8 / format->depth, @@ -5396,8 +4964,9 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, return -EINVAL; } if (asd->copy_mode) - ret = atomisp_css_copy_get_output_frame_info(asd, stream_index, - output_info); + ret = atomisp_css_copy_get_output_frame_info(asd, + ATOMISP_INPUT_STREAM_GENERAL, + output_info); else ret = get_frame_info(asd, output_info); if (ret) { @@ -5412,8 +4981,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, ia_css_frame_free(asd->raw_output_frame); asd->raw_output_frame = NULL; - if (!asd->continuous_mode->val && - !asd->params.online_process && !isp->sw_contex.file_input && + if (!asd->continuous_mode->val && !asd->params.online_process && ia_css_frame_allocate_from_info(&asd->raw_output_frame, raw_output_info)) return -ENOMEM; @@ -5462,12 +5030,7 @@ static void atomisp_check_copy_mode(struct atomisp_sub_device *asd, src = atomisp_subdev_get_ffmt(&asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE, source_pad); - if ((sink->code == src->code && - sink->width == f->width && - sink->height == f->height) || - ((asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) && - (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1))) + if (sink->code == src->code && sink->width == f->width && sink->height == f->height) asd->copy_mode = true; else asd->copy_mode = false; @@ -5495,7 +5058,6 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, struct atomisp_device *isp; struct atomisp_input_stream_info *stream_info = (struct atomisp_input_stream_info *)ffmt->reserved; - u16 stream_index = ATOMISP_INPUT_STREAM_GENERAL; int source_pad = atomisp_subdev_source_pad(vdev); struct v4l2_subdev_fh fh; int ret; @@ -5510,8 +5072,6 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, v4l2_fh_init(&fh.vfh, vdev); - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); - format = atomisp_get_format_bridge(pixelformat); if (!format) return -EINVAL; @@ -5524,7 +5084,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, ffmt->width, ffmt->height, padding_w, padding_h, dvs_env_w, dvs_env_h); - __atomisp_init_stream_info(stream_index, stream_info); + __atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info); req_ffmt = ffmt; @@ -5556,7 +5116,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, if (ret) return ret; - __atomisp_update_stream_env(asd, stream_index, stream_info); + __atomisp_update_stream_env(asd, ATOMISP_INPUT_STREAM_GENERAL, stream_info); dev_dbg(isp->dev, "sensor width: %d, height: %d\n", ffmt->width, ffmt->height); @@ -5580,8 +5140,9 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, return css_input_resolution_changed(asd, ffmt); } -int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) +int atomisp_set_fmt(struct file *file, void *unused, struct v4l2_format *f) { + struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); struct atomisp_sub_device *asd = pipe->asd; @@ -5604,20 +5165,13 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) struct v4l2_subdev_fh fh; int ret; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } + ret = atomisp_pipe_check(pipe, true); + if (ret) + return ret; if (source_pad >= ATOMISP_SUBDEV_PADS_NUM) return -EINVAL; - if (asd->streaming == 
ATOMISP_DEVICE_STREAMING_ENABLED) { - dev_warn(isp->dev, "ISP does not support set format while at streaming!\n"); - return -EBUSY; - } - dev_dbg(isp->dev, "setting resolution %ux%u on pad %u for asd%d, bytesperline %u\n", f->fmt.pix.width, f->fmt.pix.height, source_pad, @@ -5699,58 +5253,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) f->fmt.pix.height = r.height; } - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && - (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) && - (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1)) { - /* For M10MO outputing YUV preview images. */ - u16 video_index = - atomisp_source_pad_to_stream_id(asd, - ATOMISP_SUBDEV_PAD_SOURCE_VIDEO); - - ret = atomisp_css_copy_get_output_frame_info(asd, - video_index, &output_info); - if (ret) { - dev_err(isp->dev, - "copy_get_output_frame_info ret %i", ret); - return -EINVAL; - } - if (!asd->yuvpp_mode) { - /* - * If viewfinder was configured into copy_mode, - * we switch to using yuvpp pipe instead. - */ - asd->yuvpp_mode = true; - ret = atomisp_css_copy_configure_output( - asd, video_index, 0, 0, 0, 0); - if (ret) { - dev_err(isp->dev, - "failed to disable copy pipe"); - return -EINVAL; - } - ret = atomisp_css_yuvpp_configure_output( - asd, video_index, - output_info.res.width, - output_info.res.height, - output_info.padded_width, - output_info.format); - if (ret) { - dev_err(isp->dev, - "failed to set up yuvpp pipe\n"); - return -EINVAL; - } - atomisp_css_video_enable_online(asd, false); - atomisp_css_preview_enable_online(asd, - ATOMISP_INPUT_STREAM_GENERAL, false); - } - atomisp_css_yuvpp_configure_viewfinder(asd, video_index, - f->fmt.pix.width, f->fmt.pix.height, - format_bridge->planar ? f->fmt.pix.bytesperline - : f->fmt.pix.bytesperline * 8 - / format_bridge->depth, format_bridge->sh_fmt); - atomisp_css_yuvpp_get_viewfinder_frame_info( - asd, video_index, &output_info); - } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) { + if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) { atomisp_css_video_configure_viewfinder(asd, f->fmt.pix.width, f->fmt.pix.height, format_bridge->planar ? f->fmt.pix.bytesperline @@ -6078,55 +5581,6 @@ done: return 0; } -int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f) -{ - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct v4l2_mbus_framefmt ffmt = {0}; - const struct atomisp_format_bridge *format_bridge; - struct v4l2_subdev_fh fh; - int ret; - - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - v4l2_fh_init(&fh.vfh, vdev); - - dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n", - f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat); - ret = atomisp_try_fmt_file(isp, f); - if (ret) { - dev_err(isp->dev, "atomisp_try_fmt_file err: %d\n", ret); - return ret; - } - - format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); - if (!format_bridge) { - dev_dbg(isp->dev, "atomisp_get_format_bridge err! 
fmt:0x%x\n", - f->fmt.pix.pixelformat); - return -EINVAL; - } - - pipe->pix = f->fmt.pix; - atomisp_css_input_set_mode(asd, IA_CSS_INPUT_MODE_FIFO); - atomisp_css_input_configure_port(asd, - __get_mipi_port(isp, ATOMISP_CAMERA_PORT_PRIMARY), 2, 0xffff4, - 0, 0, 0, 0); - ffmt.width = f->fmt.pix.width; - ffmt.height = f->fmt.pix.height; - ffmt.code = format_bridge->mbus_code; - - atomisp_subdev_set_ffmt(&asd->subdev, fh.state, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, &ffmt); - - return 0; -} - int atomisp_set_shading_table(struct atomisp_sub_device *asd, struct atomisp_shading_table *user_shading_table) { @@ -6275,6 +5729,8 @@ int atomisp_offline_capture_configure(struct atomisp_sub_device *asd, { struct v4l2_ctrl *c; + lockdep_assert_held(&asd->isp->mutex); + /* * In case of M10MO ZSL capture case, we need to issue a separate * capture request to M10MO which will output captured jpeg image @@ -6379,36 +5835,6 @@ int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames) return 0; } -int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd, - uint16_t source_pad) -{ - int stream_id; - struct atomisp_device *isp = asd->isp; - - if (isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num == 1) - return ATOMISP_INPUT_STREAM_GENERAL; - - switch (source_pad) { - case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: - stream_id = ATOMISP_INPUT_STREAM_CAPTURE; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_VF: - stream_id = ATOMISP_INPUT_STREAM_POSTVIEW; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: - stream_id = ATOMISP_INPUT_STREAM_PREVIEW; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: - stream_id = ATOMISP_INPUT_STREAM_VIDEO; - break; - default: - stream_id = ATOMISP_INPUT_STREAM_GENERAL; - } - - return stream_id; -} - bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe) { struct atomisp_sub_device *asd = pipe->asd; @@ -6459,7 +5885,7 @@ void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd) spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags); } -int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id) +static int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id) { int *bitmap, bit; unsigned long flags; @@ -6549,6 +5975,8 @@ int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id) int value = *exp_id; int ret; + lockdep_assert_held(&isp->mutex); + ret = __is_raw_buffer_locked(asd, value); if (ret) { dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret); @@ -6570,6 +5998,8 @@ int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id) int value = *exp_id; int ret; + lockdep_assert_held(&isp->mutex); + ret = __clear_raw_buffer_bitmap(asd, value); if (ret) { dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret); @@ -6605,6 +6035,8 @@ int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event) if (!event || asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) return -EINVAL; + lockdep_assert_held(&asd->isp->mutex); + dev_dbg(asd->isp->dev, "%s: trying to inject a fake event 0x%x\n", __func__, *event); @@ -6675,19 +6107,6 @@ int atomisp_get_invalid_frame_num(struct video_device *vdev, struct ia_css_pipe_info p_info; int ret; - if (!asd) { - dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - if (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - /* External ISP */ - *invalid_frame_num = 0; - return 0; - } 
- pipe_id = atomisp_get_pipe_id(pipe); if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id]) { dev_warn(asd->isp->dev, diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h index ebc729468f87..c9f92f1326b6 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.h +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h @@ -54,7 +54,6 @@ void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr, unsigned int size); struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd); struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev); -struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev); int atomisp_reset(struct atomisp_device *isp); void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd); void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd); @@ -66,8 +65,7 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe); /* Interrupt functions */ void atomisp_msi_irq_init(struct atomisp_device *isp); void atomisp_msi_irq_uninit(struct atomisp_device *isp); -void atomisp_wdt_work(struct work_struct *work); -void atomisp_wdt(struct timer_list *t); +void atomisp_assert_recovery_work(struct work_struct *work); void atomisp_setup_flash(struct atomisp_sub_device *asd); irqreturn_t atomisp_isr(int irq, void *dev); irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr); @@ -268,8 +266,7 @@ int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd, int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, bool *res_overflow); -int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f); -int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f); +int atomisp_set_fmt(struct file *file, void *fh, struct v4l2_format *f); int atomisp_set_shading_table(struct atomisp_sub_device *asd, struct atomisp_shading_table *shading_table); @@ -300,8 +297,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, bool q_buffers, enum atomisp_input_stream_id stream_id); void atomisp_css_flush(struct atomisp_device *isp); -int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd, - uint16_t source_pad); /* Events. Only one event has to be exported for now. 
*/ void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id); @@ -324,8 +319,6 @@ void atomisp_flush_params_queue(struct atomisp_video_pipe *asd); int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id); int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id); -/* Function to update Raw Buffer bitmap */ -int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id); void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd); /* Function to enable/disable zoom for capture pipe */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h index 3393ae6824f0..a6d85d0f9ae5 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat.h +++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h @@ -129,10 +129,6 @@ int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd); void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd); -void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd, - struct atomisp_css_buffer *isp_css_buffer, - struct ia_css_isp_dvs_statistics_map *dvs_map); - void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd, struct atomisp_css_event *current_event); @@ -434,17 +430,11 @@ void atomisp_css_get_morph_table(struct atomisp_sub_device *asd, void atomisp_css_morph_table_free(struct ia_css_morph_table *table); -void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp, - unsigned int overlap); - int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, struct atomisp_dis_statistics *stats); int atomisp_css_update_stream(struct atomisp_sub_device *asd); -struct atomisp_acc_fw; -int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw); - int atomisp_css_isr_thread(struct atomisp_device *isp, bool *frame_done_found, bool *css_pipe_done); diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c index 5aa108a1724c..fdc05548d972 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c +++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c @@ -1427,7 +1427,6 @@ int atomisp_css_get_grid_info(struct atomisp_sub_device *asd, struct ia_css_pipe_info p_info; struct ia_css_grid_info old_info; struct atomisp_device *isp = asd->isp; - int stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); int md_width = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. 
stream_config.metadata_config.resolution.width; @@ -1435,7 +1434,7 @@ int atomisp_css_get_grid_info(struct atomisp_sub_device *asd, memset(&old_info, 0, sizeof(struct ia_css_grid_info)); if (ia_css_pipe_get_info( - asd->stream_env[stream_index].pipes[pipe_id], + asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id], &p_info) != 0) { dev_err(isp->dev, "ia_css_pipe_get_info failed\n"); return -EINVAL; @@ -1574,20 +1573,6 @@ void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd) } } -void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd, - struct atomisp_css_buffer *isp_css_buffer, - struct ia_css_isp_dvs_statistics_map *dvs_map) -{ - if (asd->params.dvs_stat) { - if (dvs_map) - ia_css_translate_dvs2_statistics( - asd->params.dvs_stat, dvs_map); - else - ia_css_get_dvs2_statistics(asd->params.dvs_stat, - isp_css_buffer->css_buffer.data.stats_dvs); - } -} - void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd, struct atomisp_css_event *current_event) { @@ -2694,11 +2679,11 @@ int atomisp_get_css_frame_info(struct atomisp_sub_device *asd, struct atomisp_device *isp = asd->isp; if (ATOMISP_SOC_CAMERA(asd)) { - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); + stream_index = ATOMISP_INPUT_STREAM_GENERAL; } else { stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ? ATOMISP_INPUT_STREAM_VIDEO : - atomisp_source_pad_to_stream_id(asd, source_pad); + ATOMISP_INPUT_STREAM_GENERAL; } if (0 != ia_css_pipe_get_info(asd->stream_env[stream_index] @@ -3626,6 +3611,8 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, struct atomisp_dis_buf *dis_buf; unsigned long flags; + lockdep_assert_held(&isp->mutex); + if (!asd->params.dvs_stat->hor_prod.odd_real || !asd->params.dvs_stat->hor_prod.odd_imag || !asd->params.dvs_stat->hor_prod.even_real || @@ -3637,12 +3624,8 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, return -EINVAL; /* isp needs to be streaming to get DIS statistics */ - spin_lock_irqsave(&isp->lock, flags); - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) { - spin_unlock_irqrestore(&isp->lock, flags); + if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) return -EINVAL; - } - spin_unlock_irqrestore(&isp->lock, flags); if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0) /* If the grid info in the argument differs from the current @@ -3763,32 +3746,6 @@ void atomisp_css_morph_table_free(struct ia_css_morph_table *table) ia_css_morph_table_free(table); } -void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp, - unsigned int overlap) -{ - /* CSS 2.0 doesn't support this API. 
*/ - dev_dbg(isp->dev, "set cont prev start time is not supported.\n"); - return; -} - -/* Set the ACC binary arguments */ -int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw) -{ - unsigned int mem; - - for (mem = 0; mem < ATOMISP_ACC_NR_MEMORY; mem++) { - if (acc_fw->args[mem].length == 0) - continue; - - ia_css_isp_param_set_css_mem_init(&acc_fw->fw->mem_initializers, - IA_CSS_PARAM_CLASS_PARAM, mem, - acc_fw->args[mem].css_ptr, - acc_fw->args[mem].length); - } - - return 0; -} - static struct atomisp_sub_device *__get_atomisp_subdev( struct ia_css_pipe *css_pipe, struct atomisp_device *isp, @@ -3824,8 +3781,8 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, enum atomisp_input_stream_id stream_id = 0; struct atomisp_css_event current_event; struct atomisp_sub_device *asd; - bool reset_wdt_timer[MAX_STREAM_NUM] = {false}; - int i; + + lockdep_assert_held(&isp->mutex); while (!ia_css_dequeue_psys_event(¤t_event.event)) { if (current_event.event.type == @@ -3839,14 +3796,8 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, __func__, current_event.event.fw_assert_module_id, current_event.event.fw_assert_line_no); - for (i = 0; i < isp->num_of_streams; i++) - atomisp_wdt_stop(&isp->asd[i], 0); - - if (!IS_ISP2401) - atomisp_wdt(&isp->asd[0].wdt); - else - queue_work(isp->wdt_work_queue, &isp->wdt_work); + queue_work(system_long_wq, &isp->assert_recovery_work); return -EINVAL; } else if (current_event.event.type == IA_CSS_EVENT_TYPE_FW_WARNING) { dev_warn(isp->dev, "%s: ISP reports warning, code is %d, exp_id %d\n", @@ -3875,20 +3826,12 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, frame_done_found[asd->index] = true; atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, current_event.pipe, true, stream_id); - - if (!IS_ISP2401) - reset_wdt_timer[asd->index] = true; /* ISP running */ - break; case IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE: dev_dbg(isp->dev, "event: Second output frame done"); frame_done_found[asd->index] = true; atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME, current_event.pipe, true, stream_id); - - if (!IS_ISP2401) - reset_wdt_timer[asd->index] = true; /* ISP running */ - break; case IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE: dev_dbg(isp->dev, "event: 3A stats frame done"); @@ -3909,19 +3852,12 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, current_event.pipe, true, stream_id); - - if (!IS_ISP2401) - reset_wdt_timer[asd->index] = true; /* ISP running */ - break; case IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE: dev_dbg(isp->dev, "event: second VF output frame done"); atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME, current_event.pipe, true, stream_id); - if (!IS_ISP2401) - reset_wdt_timer[asd->index] = true; /* ISP running */ - break; case IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE: dev_dbg(isp->dev, "event: dis stats frame done"); @@ -3944,24 +3880,6 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, } } - if (IS_ISP2401) - return 0; - - /* ISP2400: If there are no buffers queued then delete wdt timer. */ - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - if (!asd) - continue; - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - if (!atomisp_buffers_queued(asd)) - atomisp_wdt_stop(asd, false); - else if (reset_wdt_timer[i]) - /* SOF irq should not reset wdt timer. 
*/ - atomisp_wdt_refresh(asd, - ATOMISP_WDT_KEEP_CURRENT_DELAY); - } - return 0; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp_file.c deleted file mode 100644 index 4570a9ab100b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp_file.c +++ /dev/null @@ -1,229 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#include <media/v4l2-event.h> -#include <media/v4l2-mediabus.h> - -#include <media/videobuf-vmalloc.h> -#include <linux/delay.h> - -#include "ia_css.h" - -#include "atomisp_cmd.h" -#include "atomisp_common.h" -#include "atomisp_file.h" -#include "atomisp_internal.h" -#include "atomisp_ioctl.h" - -static void file_work(struct work_struct *work) -{ - struct atomisp_file_device *file_dev = - container_of(work, struct atomisp_file_device, work); - struct atomisp_device *isp = file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - struct atomisp_video_pipe *out_pipe = &asd->video_in; - unsigned short *buf = videobuf_to_vmalloc(out_pipe->outq.bufs[0]); - struct v4l2_mbus_framefmt isp_sink_fmt; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return; - - dev_dbg(isp->dev, ">%s: ready to start streaming\n", __func__); - isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - while (!ia_css_isp_has_started()) - usleep_range(1000, 1500); - - ia_css_stream_send_input_frame(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - buf, isp_sink_fmt.width, - isp_sink_fmt.height); - dev_dbg(isp->dev, "<%s: streaming done\n", __func__); -} - -static int file_input_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - - dev_dbg(isp->dev, "%s: enable %d\n", __func__, enable); - if (enable) { - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return 0; - - queue_work(file_dev->work_queue, &file_dev->work); - return 0; - } - cancel_work_sync(&file_dev->work); - return 0; -} - -static int file_input_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - struct v4l2_mbus_framefmt *isp_sink_fmt; - - if (format->pad) - return -EINVAL; - isp_sink_fmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - fmt->width = isp_sink_fmt->width; - fmt->height = isp_sink_fmt->height; - fmt->code = 
isp_sink_fmt->code; - - return 0; -} - -static int file_input_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - - if (format->pad) - return -EINVAL; - file_input_get_fmt(sd, sd_state, format); - if (format->which == V4L2_SUBDEV_FORMAT_TRY) - sd_state->pads->try_fmt = *fmt; - return 0; -} - -static int file_input_log_status(struct v4l2_subdev *sd) -{ - /*to fake*/ - return 0; -} - -static int file_input_s_power(struct v4l2_subdev *sd, int on) -{ - /* to fake */ - return 0; -} - -static int file_input_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_mbus_code_enum *code) -{ - /*to fake*/ - return 0; -} - -static int file_input_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_frame_size_enum *fse) -{ - /*to fake*/ - return 0; -} - -static int file_input_enum_frame_ival(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_frame_interval_enum - *fie) -{ - /*to fake*/ - return 0; -} - -static const struct v4l2_subdev_video_ops file_input_video_ops = { - .s_stream = file_input_s_stream, -}; - -static const struct v4l2_subdev_core_ops file_input_core_ops = { - .log_status = file_input_log_status, - .s_power = file_input_s_power, -}; - -static const struct v4l2_subdev_pad_ops file_input_pad_ops = { - .enum_mbus_code = file_input_enum_mbus_code, - .enum_frame_size = file_input_enum_frame_size, - .enum_frame_interval = file_input_enum_frame_ival, - .get_fmt = file_input_get_fmt, - .set_fmt = file_input_set_fmt, -}; - -static const struct v4l2_subdev_ops file_input_ops = { - .core = &file_input_core_ops, - .video = &file_input_video_ops, - .pad = &file_input_pad_ops, -}; - -void -atomisp_file_input_unregister_entities(struct atomisp_file_device *file_dev) -{ - media_entity_cleanup(&file_dev->sd.entity); - v4l2_device_unregister_subdev(&file_dev->sd); -} - -int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev, - struct v4l2_device *vdev) -{ - /* Register the subdev and video nodes. 
*/ - return v4l2_device_register_subdev(vdev, &file_dev->sd); -} - -void atomisp_file_input_cleanup(struct atomisp_device *isp) -{ - struct atomisp_file_device *file_dev = &isp->file_dev; - - if (file_dev->work_queue) { - destroy_workqueue(file_dev->work_queue); - file_dev->work_queue = NULL; - } -} - -int atomisp_file_input_init(struct atomisp_device *isp) -{ - struct atomisp_file_device *file_dev = &isp->file_dev; - struct v4l2_subdev *sd = &file_dev->sd; - struct media_pad *pads = file_dev->pads; - struct media_entity *me = &sd->entity; - - file_dev->isp = isp; - file_dev->work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1); - if (!file_dev->work_queue) { - dev_err(isp->dev, "Failed to initialize file inject workq\n"); - return -ENOMEM; - } - - INIT_WORK(&file_dev->work, file_work); - - v4l2_subdev_init(sd, &file_input_ops); - sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - strscpy(sd->name, "file_input_subdev", sizeof(sd->name)); - v4l2_set_subdevdata(sd, file_dev); - - pads[0].flags = MEDIA_PAD_FL_SINK; - me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; - - return media_entity_pads_init(me, 1, pads); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp_file.h deleted file mode 100644 index f166a2aefff1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp_file.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __ATOMISP_FILE_H__ -#define __ATOMISP_FILE_H__ - -#include <media/media-entity.h> -#include <media/v4l2-subdev.h> - -struct atomisp_device; - -struct atomisp_file_device { - struct v4l2_subdev sd; - struct atomisp_device *isp; - struct media_pad pads[1]; - - struct workqueue_struct *work_queue; - struct work_struct work; -}; - -void atomisp_file_input_cleanup(struct atomisp_device *isp); -int atomisp_file_input_init(struct atomisp_device *isp); -void atomisp_file_input_unregister_entities( - struct atomisp_file_device *file_dev); -int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev, - struct v4l2_device *vdev); -#endif /* __ATOMISP_FILE_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c index 77150e4ae144..84a84e0cdeef 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c @@ -369,45 +369,6 @@ static int atomisp_get_css_buf_type(struct atomisp_sub_device *asd, return IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME; } -static int atomisp_qbuffers_to_css_for_all_pipes(struct atomisp_sub_device *asd) -{ - enum ia_css_buffer_type buf_type; - enum ia_css_pipe_id css_capture_pipe_id = IA_CSS_PIPE_ID_COPY; - enum ia_css_pipe_id css_preview_pipe_id = IA_CSS_PIPE_ID_COPY; - enum ia_css_pipe_id css_video_pipe_id = IA_CSS_PIPE_ID_COPY; - enum atomisp_input_stream_id input_stream_id; - struct atomisp_video_pipe *capture_pipe; - struct atomisp_video_pipe *preview_pipe; - struct atomisp_video_pipe *video_pipe; - - capture_pipe = &asd->video_out_capture; - preview_pipe = &asd->video_out_preview; - video_pipe = &asd->video_out_video_capture; - - buf_type = atomisp_get_css_buf_type( - asd, css_preview_pipe_id, - atomisp_subdev_source_pad(&preview_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW; - atomisp_q_video_buffers_to_css(asd, preview_pipe, - input_stream_id, - buf_type, css_preview_pipe_id); - - buf_type = atomisp_get_css_buf_type(asd, css_capture_pipe_id, - atomisp_subdev_source_pad(&capture_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_GENERAL; - atomisp_q_video_buffers_to_css(asd, capture_pipe, - input_stream_id, - buf_type, css_capture_pipe_id); - - buf_type = atomisp_get_css_buf_type(asd, css_video_pipe_id, - atomisp_subdev_source_pad(&video_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_VIDEO; - atomisp_q_video_buffers_to_css(asd, video_pipe, - input_stream_id, - buf_type, css_video_pipe_id); - return 0; -} - /* queue all available buffers to css */ int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd) { @@ -423,11 +384,6 @@ int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd) bool raw_mode = atomisp_is_mbuscode_raw( asd->fmt[asd->capture_pad].fmt.code); - if (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num == 2 && - !asd->yuvpp_mode) - return atomisp_qbuffers_to_css_for_all_pipes(asd); - if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { video_pipe = &asd->video_out_video_capture; css_video_pipe_id = IA_CSS_PIPE_ID_VIDEO; @@ -593,47 +549,6 @@ static void atomisp_buf_release(struct videobuf_queue *vq, atomisp_videobuf_free_buf(vb); } -static int atomisp_buf_setup_output(struct videobuf_queue *vq, - unsigned int *count, unsigned int *size) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - *size = pipe->pix.sizeimage; - - return 0; -} - -static int atomisp_buf_prepare_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb, - enum 
v4l2_field field) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - vb->size = pipe->pix.sizeimage; - vb->width = pipe->pix.width; - vb->height = pipe->pix.height; - vb->field = field; - vb->state = VIDEOBUF_PREPARED; - - return 0; -} - -static void atomisp_buf_queue_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - list_add_tail(&vb->queue, &pipe->activeq_out); - vb->state = VIDEOBUF_QUEUED; -} - -static void atomisp_buf_release_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb) -{ - videobuf_vmalloc_free(vb); - vb->state = VIDEOBUF_NEEDS_INIT; -} - static const struct videobuf_queue_ops videobuf_qops = { .buf_setup = atomisp_buf_setup, .buf_prepare = atomisp_buf_prepare, @@ -641,13 +556,6 @@ static const struct videobuf_queue_ops videobuf_qops = { .buf_release = atomisp_buf_release, }; -static const struct videobuf_queue_ops videobuf_qops_output = { - .buf_setup = atomisp_buf_setup_output, - .buf_prepare = atomisp_buf_prepare_output, - .buf_queue = atomisp_buf_queue_output, - .buf_release = atomisp_buf_release_output, -}; - static int atomisp_init_pipe(struct atomisp_video_pipe *pipe) { /* init locks */ @@ -660,15 +568,7 @@ static int atomisp_init_pipe(struct atomisp_video_pipe *pipe) sizeof(struct atomisp_buffer), pipe, NULL); /* ext_lock: NULL */ - videobuf_queue_vmalloc_init(&pipe->outq, &videobuf_qops_output, NULL, - &pipe->irq_lock, - V4L2_BUF_TYPE_VIDEO_OUTPUT, - V4L2_FIELD_NONE, - sizeof(struct atomisp_buffer), pipe, - NULL); /* ext_lock: NULL */ - INIT_LIST_HEAD(&pipe->activeq); - INIT_LIST_HEAD(&pipe->activeq_out); INIT_LIST_HEAD(&pipe->buffers_waiting_for_param); INIT_LIST_HEAD(&pipe->per_frame_params); memset(pipe->frame_request_config_id, 0, @@ -684,7 +584,6 @@ static void atomisp_dev_init_struct(struct atomisp_device *isp) { unsigned int i; - isp->sw_contex.file_input = false; isp->need_gfx_throttle = true; isp->isp_fatal_error = false; isp->mipi_frame_size = 0; @@ -741,9 +640,7 @@ static unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd) return asd->video_out_preview.users + asd->video_out_vf.users + asd->video_out_capture.users + - asd->video_out_video_capture.users + - asd->video_acc.users + - asd->video_in.users; + asd->video_out_video_capture.users; } unsigned int atomisp_dev_users(struct atomisp_device *isp) @@ -760,48 +657,18 @@ static int atomisp_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = NULL; - struct atomisp_acc_pipe *acc_pipe = NULL; - struct atomisp_sub_device *asd; - bool acc_node = false; + struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); + struct atomisp_sub_device *asd = pipe->asd; int ret; dev_dbg(isp->dev, "open device %s\n", vdev->name); - /* - * Ensure that if we are still loading we block. Once the loading - * is over we can proceed. 
We can't blindly hold the lock until - * that occurs as if the load fails we'll deadlock the unload - */ - rt_mutex_lock(&isp->loading); - /* - * FIXME: revisit this with a better check once the code structure - * is cleaned up a bit more - */ ret = v4l2_fh_open(file); - if (ret) { - dev_err(isp->dev, - "%s: v4l2_fh_open() returned error %d\n", - __func__, ret); - rt_mutex_unlock(&isp->loading); + if (ret) return ret; - } - if (!isp->ready) { - rt_mutex_unlock(&isp->loading); - return -ENXIO; - } - rt_mutex_unlock(&isp->loading); - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) { - acc_pipe = atomisp_to_acc_pipe(vdev); - asd = acc_pipe->asd; - } else { - pipe = atomisp_to_video_pipe(vdev); - asd = pipe->asd; - } asd->subdev.devnode = vdev; /* Deferred firmware loading case. */ if (isp->css_env.isp_css_fw.bytes == 0) { @@ -823,14 +690,6 @@ static int atomisp_open(struct file *file) isp->css_env.isp_css_fw.data = NULL; } - if (acc_node && acc_pipe->users) { - dev_dbg(isp->dev, "acc node already opened\n"); - rt_mutex_unlock(&isp->mutex); - return -EBUSY; - } else if (acc_node) { - goto dev_init; - } - if (!isp->input_cnt) { dev_err(isp->dev, "no camera attached\n"); ret = -EINVAL; @@ -842,7 +701,7 @@ static int atomisp_open(struct file *file) */ if (pipe->users) { dev_dbg(isp->dev, "video node already opened\n"); - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return -EBUSY; } @@ -850,7 +709,6 @@ static int atomisp_open(struct file *file) if (ret) goto error; -dev_init: if (atomisp_dev_users(isp)) { dev_dbg(isp->dev, "skip init isp in open\n"); goto init_subdev; @@ -885,16 +743,11 @@ init_subdev: atomisp_subdev_init_struct(asd); done: - - if (acc_node) - acc_pipe->users++; - else - pipe->users++; - rt_mutex_unlock(&isp->mutex); + pipe->users++; + mutex_unlock(&isp->mutex); /* Ensure that a mode is set */ - if (!acc_node) - v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode); + v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode); return 0; @@ -902,7 +755,8 @@ css_error: atomisp_css_uninit(isp); pm_runtime_put(vdev->v4l2_dev->dev); error: - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); + v4l2_fh_release(file); return ret; } @@ -910,13 +764,12 @@ static int atomisp_release(struct file *file) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe; - struct atomisp_acc_pipe *acc_pipe; - struct atomisp_sub_device *asd; - bool acc_node; + struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); + struct atomisp_sub_device *asd = pipe->asd; struct v4l2_requestbuffers req; struct v4l2_subdev_fh fh; struct v4l2_rect clear_compose = {0}; + unsigned long flags; int ret = 0; v4l2_fh_init(&fh.vfh, vdev); @@ -925,23 +778,12 @@ static int atomisp_release(struct file *file) if (!isp) return -EBADF; - mutex_lock(&isp->streamoff_mutex); - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); dev_dbg(isp->dev, "release device %s\n", vdev->name); - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) { - acc_pipe = atomisp_to_acc_pipe(vdev); - asd = acc_pipe->asd; - } else { - pipe = atomisp_to_video_pipe(vdev); - asd = pipe->asd; - } + asd->subdev.devnode = vdev; - if (acc_node) { - acc_pipe->users--; - goto subdev_uninit; - } + pipe->users--; if (pipe->capq.streaming) @@ -950,27 +792,19 @@ static int atomisp_release(struct file *file) __func__); if (pipe->capq.streaming && - 
__atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) { - dev_err(isp->dev, - "atomisp_streamoff failed on release, driver bug"); + atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) { + dev_err(isp->dev, "atomisp_streamoff failed on release, driver bug"); goto done; } if (pipe->users) goto done; - if (__atomisp_reqbufs(file, NULL, &req)) { - dev_err(isp->dev, - "atomisp_reqbufs failed on release, driver bug"); + if (atomisp_reqbufs(file, NULL, &req)) { + dev_err(isp->dev, "atomisp_reqbufs failed on release, driver bug"); goto done; } - if (pipe->outq.bufs[0]) { - mutex_lock(&pipe->outq.vb_lock); - videobuf_queue_cancel(&pipe->outq); - mutex_unlock(&pipe->outq.vb_lock); - } - /* * A little trick here: * file injection input resolution is recorded in the sink pad, @@ -978,26 +812,17 @@ static int atomisp_release(struct file *file) * The sink pad setting can only be cleared when all device nodes * get released. */ - if (!isp->sw_contex.file_input && asd->fmt_auto->val) { + if (asd->fmt_auto->val) { struct v4l2_mbus_framefmt isp_sink_fmt = { 0 }; atomisp_subdev_set_ffmt(&asd->subdev, fh.state, V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt); } -subdev_uninit: + if (atomisp_subdev_users(asd)) goto done; - /* clear the sink pad for file input */ - if (isp->sw_contex.file_input && asd->fmt_auto->val) { - struct v4l2_mbus_framefmt isp_sink_fmt = { 0 }; - - atomisp_subdev_set_ffmt(&asd->subdev, fh.state, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt); - } - atomisp_css_free_stat_buffers(asd); atomisp_free_internal_buffers(asd); ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, @@ -1007,7 +832,9 @@ subdev_uninit: /* clear the asd field to show this camera is not used */ isp->inputs[asd->input_curr].asd = NULL; + spin_lock_irqsave(&isp->lock, flags); asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; + spin_unlock_irqrestore(&isp->lock, flags); if (atomisp_dev_users(isp)) goto done; @@ -1029,15 +856,12 @@ subdev_uninit: dev_err(isp->dev, "Failed to power off device\n"); done: - if (!acc_node) { - atomisp_subdev_set_selection(&asd->subdev, fh.state, - V4L2_SUBDEV_FORMAT_ACTIVE, - atomisp_subdev_source_pad(vdev), - V4L2_SEL_TGT_COMPOSE, 0, - &clear_compose); - } - rt_mutex_unlock(&isp->mutex); - mutex_unlock(&isp->streamoff_mutex); + atomisp_subdev_set_selection(&asd->subdev, fh.state, + V4L2_SUBDEV_FORMAT_ACTIVE, + atomisp_subdev_source_pad(vdev), + V4L2_SEL_TGT_COMPOSE, 0, + &clear_compose); + mutex_unlock(&isp->mutex); return v4l2_fh_release(file); } @@ -1194,7 +1018,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & (VM_WRITE | VM_READ))) return -EACCES; - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); if (!(vma->vm_flags & VM_SHARED)) { /* Map private buffer. 
@@ -1205,7 +1029,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) */ vma->vm_flags |= VM_SHARED; ret = hmm_mmap(vma, vma->vm_pgoff << PAGE_SHIFT); - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return ret; } @@ -1248,7 +1072,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) } raw_virt_addr->data_bytes = origin_size; vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return 0; } @@ -1260,24 +1084,16 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) ret = -EINVAL; goto error; } - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return atomisp_videobuf_mmap_mapper(&pipe->capq, vma); error: - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return ret; } -static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - return videobuf_mmap_mapper(&pipe->outq, vma); -} - static __poll_t atomisp_poll(struct file *file, struct poll_table_struct *pt) { @@ -1285,12 +1101,12 @@ static __poll_t atomisp_poll(struct file *file, struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); if (pipe->capq.streaming != 1) { - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return EPOLLERR; } - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return videobuf_poll_stream(file, &pipe->capq, pt); } @@ -1310,15 +1126,3 @@ const struct v4l2_file_operations atomisp_fops = { #endif .poll = atomisp_poll, }; - -const struct v4l2_file_operations atomisp_file_fops = { - .owner = THIS_MODULE, - .open = atomisp_open, - .release = atomisp_release, - .mmap = atomisp_file_mmap, - .unlocked_ioctl = video_ioctl2, -#ifdef CONFIG_COMPAT - /* .compat_ioctl32 = atomisp_compat_ioctl32, */ -#endif - .poll = atomisp_poll, -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index bf527b366ab3..3d41fab661cf 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -134,24 +134,6 @@ static DEFINE_MUTEX(vcm_lock); static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev); -/* - * Legacy/stub behavior copied from upstream platform_camera.c. The - * atomisp driver relies on these values being non-NULL in a few - * places, even though they are hard-coded in all current - * implementations. 
- */ -const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void) -{ - static const struct atomisp_camera_caps caps = { - .sensor_num = 1, - .sensor = { - { .stream_num = 1, }, - }, - }; - return &caps; -} -EXPORT_SYMBOL_GPL(atomisp_get_default_camera_caps); - const struct atomisp_platform_data *atomisp_get_platform_data(void) { return &pdata; @@ -1066,6 +1048,38 @@ static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on) return ret; } +static int camera_sensor_csi_alloc(struct v4l2_subdev *sd, u32 port, u32 lanes, + u32 format, u32 bayer_order) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct camera_mipi_info *csi; + + csi = kzalloc(sizeof(*csi), GFP_KERNEL); + if (!csi) + return -ENOMEM; + + csi->port = port; + csi->num_lanes = lanes; + csi->input_format = format; + csi->raw_bayer_order = bayer_order; + v4l2_set_subdev_hostdata(sd, csi); + csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED; + csi->metadata_effective_width = NULL; + dev_info(&client->dev, + "camera pdata: port: %d lanes: %d order: %8.8x\n", + port, lanes, bayer_order); + + return 0; +} + +static void camera_sensor_csi_free(struct v4l2_subdev *sd) +{ + struct camera_mipi_info *csi; + + csi = v4l2_get_subdev_hostdata(sd); + kfree(csi); +} + static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag) { struct i2c_client *client = v4l2_get_subdevdata(sd); @@ -1074,8 +1088,11 @@ static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag) if (!client || !gs) return -ENODEV; - return camera_sensor_csi(sd, gs->csi_port, gs->csi_lanes, - gs->csi_fmt, gs->csi_bayer, flag); + if (flag) + return camera_sensor_csi_alloc(sd, gs->csi_port, gs->csi_lanes, + gs->csi_fmt, gs->csi_bayer); + camera_sensor_csi_free(sd); + return 0; } static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev, @@ -1207,16 +1224,14 @@ static int gmin_get_config_dsm_var(struct device *dev, if (!strcmp(var, "CamClk")) return -EINVAL; - obj = acpi_evaluate_dsm(handle, &atomisp_dsm_guid, 0, 0, NULL); + /* Return on unexpected object type */ + obj = acpi_evaluate_dsm_typed(handle, &atomisp_dsm_guid, 0, 0, NULL, + ACPI_TYPE_PACKAGE); if (!obj) { dev_info_once(dev, "Didn't find ACPI _DSM table.\n"); return -EINVAL; } - /* Return on unexpected object type */ - if (obj->type != ACPI_TYPE_PACKAGE) - return -EINVAL; - #if 0 /* Just for debugging purposes */ for (i = 0; i < obj->package.count; i++) { union acpi_object *cur = &obj->package.elements[i]; @@ -1360,35 +1375,6 @@ int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def) } EXPORT_SYMBOL_GPL(gmin_get_var_int); -int camera_sensor_csi(struct v4l2_subdev *sd, u32 port, - u32 lanes, u32 format, u32 bayer_order, int flag) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct camera_mipi_info *csi = NULL; - - if (flag) { - csi = kzalloc(sizeof(*csi), GFP_KERNEL); - if (!csi) - return -ENOMEM; - csi->port = port; - csi->num_lanes = lanes; - csi->input_format = format; - csi->raw_bayer_order = bayer_order; - v4l2_set_subdev_hostdata(sd, (void *)csi); - csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED; - csi->metadata_effective_width = NULL; - dev_info(&client->dev, - "camera pdata: port: %d lanes: %d order: %8.8x\n", - port, lanes, bayer_order); - } else { - csi = v4l2_get_subdev_hostdata(sd); - kfree(csi); - } - - return 0; -} -EXPORT_SYMBOL_GPL(camera_sensor_csi); - /* PCI quirk: The BYT ISP advertises PCI runtime PM but it doesn't * work. Disable so the kernel framework doesn't hang the device * trying.
The driver itself does direct calls to the PUNIT to manage diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h index f71ab1ee6e19..d9d158cdf09e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_internal.h +++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h @@ -34,7 +34,6 @@ #include "sh_css_legacy.h" #include "atomisp_csi2.h" -#include "atomisp_file.h" #include "atomisp_subdev.h" #include "atomisp_tpg.h" #include "atomisp_compat.h" @@ -86,13 +85,12 @@ #define ATOM_ISP_POWER_DOWN 0 #define ATOM_ISP_POWER_UP 1 -#define ATOM_ISP_MAX_INPUTS 4 +#define ATOM_ISP_MAX_INPUTS 3 #define ATOMISP_SC_TYPE_SIZE 2 #define ATOMISP_ISP_TIMEOUT_DURATION (2 * HZ) #define ATOMISP_EXT_ISP_TIMEOUT_DURATION (6 * HZ) -#define ATOMISP_ISP_FILE_TIMEOUT_DURATION (60 * HZ) #define ATOMISP_WDT_KEEP_CURRENT_DELAY 0 #define ATOMISP_ISP_MAX_TIMEOUT_COUNT 2 #define ATOMISP_CSS_STOP_TIMEOUT_US 200000 @@ -107,9 +105,6 @@ #define ATOMISP_DELAYED_INIT_QUEUED 1 #define ATOMISP_DELAYED_INIT_DONE 2 -#define ATOMISP_CALC_CSS_PREV_OVERLAP(lines) \ - ((lines) * 38 / 100 & 0xfffffe) - /* * Define how fast CPU should be able to serve ISP interrupts. * The bigger the value, the higher risk that the ISP is not @@ -132,9 +127,7 @@ * Moorefield/Baytrail platform. */ #define ATOMISP_SOC_CAMERA(asd) \ - (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA \ - && asd->isp->inputs[asd->input_curr].camera_caps-> \ - sensor[asd->sensor_curr].stream_num == 1) + (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) #define ATOMISP_USE_YUVPP(asd) \ (ATOMISP_SOC_CAMERA(asd) && ATOMISP_CSS_SUPPORT_YUVPP && \ @@ -167,7 +160,6 @@ struct atomisp_input_subdev { */ struct atomisp_sub_device *asd; - const struct atomisp_camera_caps *camera_caps; int sensor_index; }; @@ -203,7 +195,6 @@ struct atomisp_regs { }; struct atomisp_sw_contex { - bool file_input; int power_state; int running_freq; }; @@ -241,24 +232,10 @@ struct atomisp_device { struct atomisp_mipi_csi2_device csi2_port[ATOMISP_CAMERA_NR_PORTS]; struct atomisp_tpg_device tpg; - struct atomisp_file_device file_dev; /* Purpose of mutex is to protect and serialize use of isp data * structures and css API calls. */ - struct rt_mutex mutex; - /* - * This mutex ensures that we don't allow an open to succeed while - * the initialization process is incomplete - */ - struct rt_mutex loading; - /* Set once the ISP is ready to allow opens */ - bool ready; - /* - * Serialise streamoff: mutex is dropped during streamoff to - * cancel the watchdog queue. MUST be acquired BEFORE - * "mutex". 
- */ - struct mutex streamoff_mutex; + struct mutex mutex; unsigned int input_cnt; struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS]; @@ -272,15 +249,9 @@ struct atomisp_device { /* isp timeout status flag */ bool isp_timeout; bool isp_fatal_error; - struct workqueue_struct *wdt_work_queue; - struct work_struct wdt_work; - - /* ISP2400 */ - atomic_t wdt_count; - - atomic_t wdt_work_queued; + struct work_struct assert_recovery_work; - spinlock_t lock; /* Just for streaming below */ + spinlock_t lock; /* Protects asd[i].streaming */ bool need_gfx_throttle; @@ -296,20 +267,4 @@ struct atomisp_device { extern struct device *atomisp_dev; -#define atomisp_is_wdt_running(a) timer_pending(&(a)->wdt) - -/* ISP2401 */ -void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, - unsigned int delay); -void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay); - -/* ISP2400 */ -void atomisp_wdt_start(struct atomisp_sub_device *asd); - -/* ISP2401 */ -void atomisp_wdt_start_pipe(struct atomisp_video_pipe *pipe); -void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync); - -void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync); - #endif /* __ATOMISP_INTERNAL_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index 459645c2e2a7..0ddb0ed42dd9 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -535,6 +535,32 @@ atomisp_get_format_bridge_from_mbus(u32 mbus_code) return NULL; } +int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool settings_change) +{ + lockdep_assert_held(&pipe->isp->mutex); + + if (pipe->isp->isp_fatal_error) + return -EIO; + + switch (pipe->asd->streaming) { + case ATOMISP_DEVICE_STREAMING_DISABLED: + break; + case ATOMISP_DEVICE_STREAMING_ENABLED: + if (settings_change) { + dev_err(pipe->isp->dev, "Set fmt/input IOCTL while streaming\n"); + return -EBUSY; + } + break; + case ATOMISP_DEVICE_STREAMING_STOPPING: + dev_err(pipe->isp->dev, "IOCTL issued while stopping\n"); + return -EBUSY; + default: + return -EINVAL; + } + + return 0; +} + /* * v4l2 ioctls * return ISP capabilities @@ -609,8 +635,7 @@ atomisp_subdev_streaming_count(struct atomisp_sub_device *asd) return asd->video_out_preview.capq.streaming + asd->video_out_capture.capq.streaming + asd->video_out_video_capture.capq.streaming - + asd->video_out_vf.capq.streaming - + asd->video_in.capq.streaming; + + asd->video_out_vf.capq.streaming; } unsigned int atomisp_streaming_count(struct atomisp_device *isp) @@ -630,19 +655,9 @@ unsigned int atomisp_streaming_count(struct atomisp_device *isp) static int atomisp_g_input(struct file *file, void *fh, unsigned int *input) { struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); *input = asd->input_curr; - rt_mutex_unlock(&isp->mutex); - return 0; } @@ -653,22 +668,19 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); + struct atomisp_sub_device *asd = pipe->asd; 
struct v4l2_subdev *camera = NULL; struct v4l2_subdev *motor; int ret; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } + ret = atomisp_pipe_check(pipe, true); + if (ret) + return ret; - rt_mutex_lock(&isp->mutex); if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) { dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt); - ret = -EINVAL; - goto error; + return -EINVAL; } /* @@ -680,22 +692,13 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) dev_err(isp->dev, "%s, camera is already used by stream: %d\n", __func__, isp->inputs[input].asd->index); - ret = -EBUSY; - goto error; + return -EBUSY; } camera = isp->inputs[input].camera; if (!camera) { dev_err(isp->dev, "%s, no camera\n", __func__); - ret = -EINVAL; - goto error; - } - - if (atomisp_subdev_streaming_count(asd)) { - dev_err(isp->dev, - "ISP is still streaming, stop first\n"); - ret = -EINVAL; - goto error; + return -EINVAL; } /* power off the current owned sensor, as it is not used this time */ @@ -714,7 +717,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) ret = v4l2_subdev_call(isp->inputs[input].camera, core, s_power, 1); if (ret) { dev_err(isp->dev, "Failed to power-on sensor\n"); - goto error; + return ret; } /* * Some sensor driver resets the run mode during power-on, thus force @@ -727,7 +730,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) 0, isp->inputs[input].sensor_index, 0); if (ret && (ret != -ENOIOCTLCMD)) { dev_err(isp->dev, "Failed to select sensor\n"); - goto error; + return ret; } if (!IS_ISP2401) { @@ -738,20 +741,14 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) ret = v4l2_subdev_call(motor, core, s_power, 1); } - if (!isp->sw_contex.file_input && motor) + if (motor) ret = v4l2_subdev_call(motor, core, init, 1); asd->input_curr = input; /* mark this camera is used by the current stream */ isp->inputs[input].asd = asd; - rt_mutex_unlock(&isp->mutex); return 0; - -error: - rt_mutex_unlock(&isp->mutex); - - return ret; } static int atomisp_enum_framesizes(struct file *file, void *priv, @@ -819,12 +816,6 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, unsigned int i, fi = 0; int rval; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - camera = isp->inputs[asd->input_curr].camera; if(!camera) { dev_err(isp->dev, "%s(): camera is NULL, device is %s\n", @@ -832,15 +823,12 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, return -EINVAL; } - rt_mutex_lock(&isp->mutex); - rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code); if (rval == -ENOIOCTLCMD) { dev_warn(isp->dev, "enum_mbus_code pad op not supported by %s. 
Please fix your sensor driver!\n", camera->name); } - rt_mutex_unlock(&isp->mutex); if (rval) return rval; @@ -872,20 +860,6 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, return -EINVAL; } -static int atomisp_g_fmt_file(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - rt_mutex_lock(&isp->mutex); - f->fmt.pix = pipe->pix; - rt_mutex_unlock(&isp->mutex); - - return 0; -} - static int atomisp_adjust_fmt(struct v4l2_format *f) { const struct atomisp_format_bridge *format_bridge; @@ -957,13 +931,16 @@ static int atomisp_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); int ret; - rt_mutex_lock(&isp->mutex); - ret = atomisp_try_fmt(vdev, &f->fmt.pix, NULL); - rt_mutex_unlock(&isp->mutex); + /* + * atomisp_try_fmt() gived results with padding included, note + * (this gets removed again by the atomisp_adjust_fmt() call below. + */ + f->fmt.pix.width += pad_w; + f->fmt.pix.height += pad_h; + ret = atomisp_try_fmt(vdev, &f->fmt.pix, NULL); if (ret) return ret; @@ -974,12 +951,9 @@ static int atomisp_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_video_pipe *pipe; - rt_mutex_lock(&isp->mutex); pipe = atomisp_to_video_pipe(vdev); - rt_mutex_unlock(&isp->mutex); f->fmt.pix = pipe->pix; @@ -994,37 +968,6 @@ static int atomisp_g_fmt_cap(struct file *file, void *fh, return atomisp_try_fmt_cap(file, fh, f); } -static int atomisp_s_fmt_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - rt_mutex_unlock(&isp->mutex); - return ret; - } - ret = atomisp_set_fmt(vdev, f); - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_s_fmt_file(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - ret = atomisp_set_fmt_file(vdev, f); - rt_mutex_unlock(&isp->mutex); - return ret; -} - /* * Free videobuffer buffer priv data */ @@ -1160,8 +1103,7 @@ error: /* * Initiate Memory Mapping or User Pointer I/O */ -int __atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req) +int atomisp_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req) { struct video_device *vdev = video_devdata(file); struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); @@ -1170,16 +1112,8 @@ int __atomisp_reqbufs(struct file *file, void *fh, struct ia_css_frame *frame; struct videobuf_vmalloc_memory *vm_mem; u16 source_pad = atomisp_subdev_source_pad(vdev); - u16 stream_id; int ret = 0, i = 0; - if (!asd) { - dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - stream_id = atomisp_source_pad_to_stream_id(asd, source_pad); - if (req->count == 0) { mutex_lock(&pipe->capq.vb_lock); if (!list_empty(&pipe->capq.stream)) @@ -1200,7 +1134,7 @@ int __atomisp_reqbufs(struct file *file, void *fh, if (ret) return ret; - 
atomisp_alloc_css_stat_bufs(asd, stream_id); + atomisp_alloc_css_stat_bufs(asd, ATOMISP_INPUT_STREAM_GENERAL); /* * for user pointer type, buffers are not really allocated here, @@ -1238,36 +1172,6 @@ error: return -ENOMEM; } -int atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - ret = __atomisp_reqbufs(file, fh, req); - rt_mutex_unlock(&isp->mutex); - - return ret; -} - -static int atomisp_reqbufs_file(struct file *file, void *fh, - struct v4l2_requestbuffers *req) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - if (req->count == 0) { - mutex_lock(&pipe->outq.vb_lock); - atomisp_videobuf_free_queue(&pipe->outq); - mutex_unlock(&pipe->outq.vb_lock); - return 0; - } - - return videobuf_reqbufs(&pipe->outq, req); -} - /* application query the status of a buffer */ static int atomisp_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) @@ -1278,15 +1182,6 @@ static int atomisp_querybuf(struct file *file, void *fh, return videobuf_querybuf(&pipe->capq, buf); } -static int atomisp_querybuf_file(struct file *file, void *fh, - struct v4l2_buffer *buf) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - return videobuf_querybuf(&pipe->outq, buf); -} - /* * Applications call the VIDIOC_QBUF ioctl to enqueue an empty (capturing) or * filled (output) buffer in the drivers incoming queue. @@ -1305,32 +1200,16 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct ia_css_frame *handle = NULL; u32 length; u32 pgnr; - int ret = 0; - - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto error; - } + int ret; - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - dev_err(isp->dev, "%s: reject, as ISP at stopping.\n", - __func__); - ret = -EIO; - goto error; - } + ret = atomisp_pipe_check(pipe, false); + if (ret) + return ret; if (!buf || buf->index >= VIDEO_MAX_FRAME || !pipe->capq.bufs[buf->index]) { dev_err(isp->dev, "Invalid index for qbuf.\n"); - ret = -EINVAL; - goto error; + return -EINVAL; } /* @@ -1338,12 +1217,15 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) * address and reprograme out page table properly */ if (buf->memory == V4L2_MEMORY_USERPTR) { + if (offset_in_page(buf->m.userptr)) { + dev_err(isp->dev, "Error userptr is not page aligned.\n"); + return -EINVAL; + } + vb = pipe->capq.bufs[buf->index]; vm_mem = vb->priv; - if (!vm_mem) { - ret = -EINVAL; - goto error; - } + if (!vm_mem) + return -EINVAL; length = vb->bsize; pgnr = (length + (PAGE_SIZE - 1)) >> PAGE_SHIFT; @@ -1352,17 +1234,15 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) goto done; if (atomisp_get_css_frame_info(asd, - atomisp_subdev_source_pad(vdev), &frame_info)) { - ret = -EIO; - goto error; - } + atomisp_subdev_source_pad(vdev), &frame_info)) + return -EIO; ret = ia_css_frame_map(&handle, &frame_info, (void __user *)buf->m.userptr, pgnr); if (ret) { dev_err(isp->dev, "Failed to map user buffer\n"); - goto error; + return ret; } if (vm_mem->vaddr) { @@ -1406,12 +1286,11 @@ done: pipe->frame_params[buf->index] = NULL; - 
rt_mutex_unlock(&isp->mutex); - + mutex_unlock(&isp->mutex); ret = videobuf_qbuf(&pipe->capq, buf); - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); if (ret) - goto error; + return ret; /* TODO: do this better, not best way to queue to css */ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { @@ -1419,15 +1298,6 @@ done: atomisp_handle_parameter_and_buffer(pipe); } else { atomisp_qbuffers_to_css(asd); - - if (!IS_ISP2401) { - if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd)) - atomisp_wdt_start(asd); - } else { - if (!atomisp_is_wdt_running(pipe) && - atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_start_pipe(pipe); - } } } @@ -1449,58 +1319,11 @@ done: asd->pending_capture_request++; dev_dbg(isp->dev, "Add one pending capture request.\n"); } - rt_mutex_unlock(&isp->mutex); dev_dbg(isp->dev, "qbuf buffer %d (%s) for asd%d\n", buf->index, vdev->name, asd->index); - return ret; - -error: - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_qbuf_file(struct file *file, void *fh, - struct v4l2_buffer *buf) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto error; - } - - if (!buf || buf->index >= VIDEO_MAX_FRAME || - !pipe->outq.bufs[buf->index]) { - dev_err(isp->dev, "Invalid index for qbuf.\n"); - ret = -EINVAL; - goto error; - } - - if (buf->memory != V4L2_MEMORY_MMAP) { - dev_err(isp->dev, "Unsupported memory method\n"); - ret = -EINVAL; - goto error; - } - - if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { - dev_err(isp->dev, "Unsupported buffer type\n"); - ret = -EINVAL; - goto error; - } - rt_mutex_unlock(&isp->mutex); - - return videobuf_qbuf(&pipe->outq, buf); - -error: - rt_mutex_unlock(&isp->mutex); - - return ret; + return 0; } static int __get_frame_exp_id(struct atomisp_video_pipe *pipe, @@ -1529,37 +1352,21 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); struct atomisp_sub_device *asd = pipe->asd; struct atomisp_device *isp = video_get_drvdata(vdev); - int ret = 0; - - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - - if (isp->isp_fatal_error) { - rt_mutex_unlock(&isp->mutex); - return -EIO; - } - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - rt_mutex_unlock(&isp->mutex); - dev_err(isp->dev, "%s: reject, as ISP at stopping.\n", - __func__); - return -EIO; - } + int ret; - rt_mutex_unlock(&isp->mutex); + ret = atomisp_pipe_check(pipe, false); + if (ret) + return ret; + mutex_unlock(&isp->mutex); ret = videobuf_dqbuf(&pipe->capq, buf, file->f_flags & O_NONBLOCK); + mutex_lock(&isp->mutex); if (ret) { if (ret != -EAGAIN) dev_dbg(isp->dev, "<%s: %d\n", __func__, ret); return ret; } - rt_mutex_lock(&isp->mutex); + buf->bytesused = pipe->pix.sizeimage; buf->reserved = asd->frame_status[buf->index]; @@ -1573,7 +1380,6 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) if (!(buf->flags & V4L2_BUF_FLAG_ERROR)) buf->reserved |= __get_frame_exp_id(pipe, buf) << 16; buf->reserved2 = pipe->frame_config_id[buf->index]; - rt_mutex_unlock(&isp->mutex); dev_dbg(isp->dev, "dqbuf buffer %d (%s) for asd%d with exp_id %d, isp_config_id %d\n", @@ -1622,16 +1428,6 @@ enum ia_css_pipe_id 
atomisp_get_css_pipe_id(struct atomisp_sub_device *asd) static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd) { - struct atomisp_device *isp = asd->isp; - - if (isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - if (asd->high_speed_mode) - return 1; - else - return 2; - } - if (asd->vfpp->val != ATOMISP_VFPP_ENABLE || asd->copy_mode) return 1; @@ -1650,31 +1446,15 @@ static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd) int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp, bool isp_timeout) { - unsigned int master = -1, slave = -1, delay_slave = 0; - int i, ret; - - /* - * ISP only support 2 streams now so ignore multiple master/slave - * case to reduce the delay between 2 stream_on calls. - */ - for (i = 0; i < isp->num_of_streams; i++) { - int sensor_index = isp->asd[i].input_curr; - - if (isp->inputs[sensor_index].camera_caps-> - sensor[isp->asd[i].sensor_curr].is_slave) - slave = sensor_index; - else - master = sensor_index; - } + unsigned int master, slave, delay_slave = 0; + int ret; - if (master == -1 || slave == -1) { - master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR; - slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR; - dev_warn(isp->dev, - "depth mode use default master=%s.slave=%s.\n", - isp->inputs[master].camera->name, - isp->inputs[slave].camera->name); - } + master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR; + slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR; + dev_warn(isp->dev, + "depth mode use default master=%s.slave=%s.\n", + isp->inputs[master].camera->name, + isp->inputs[slave].camera->name); ret = v4l2_subdev_call(isp->inputs[master].camera, core, ioctl, ATOMISP_IOC_G_DEPTH_SYNC_COMP, @@ -1708,51 +1488,6 @@ int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp, return 0; } -/* FIXME! ISP2400 */ -static void __wdt_on_master_slave_sensor(struct atomisp_device *isp, - unsigned int wdt_duration) -{ - if (atomisp_buffers_queued(&isp->asd[0])) - atomisp_wdt_refresh(&isp->asd[0], wdt_duration); - if (atomisp_buffers_queued(&isp->asd[1])) - atomisp_wdt_refresh(&isp->asd[1], wdt_duration); -} - -/* FIXME! ISP2401 */ -static void __wdt_on_master_slave_sensor_pipe(struct atomisp_video_pipe *pipe, - unsigned int wdt_duration, - bool enable) -{ - static struct atomisp_video_pipe *pipe0; - - if (enable) { - if (atomisp_buffers_queued_pipe(pipe0)) - atomisp_wdt_refresh_pipe(pipe0, wdt_duration); - if (atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_refresh_pipe(pipe, wdt_duration); - } else { - pipe0 = pipe; - } -} - -static void atomisp_pause_buffer_event(struct atomisp_device *isp) -{ - struct v4l2_event event = {0}; - int i; - - event.type = V4L2_EVENT_ATOMISP_PAUSE_BUFFER; - - for (i = 0; i < isp->num_of_streams; i++) { - int sensor_index = isp->asd[i].input_curr; - - if (isp->inputs[sensor_index].camera_caps-> - sensor[isp->asd[i].sensor_curr].is_slave) { - v4l2_event_queue(isp->asd[i].subdev.devnode, &event); - break; - } - } -} - /* Input system HW workaround */ /* Input system address translation corrupts burst during */ /* invalidate. 
SW workaround for this is to set burst length */ @@ -1784,15 +1519,8 @@ static int atomisp_streamon(struct file *file, void *fh, struct pci_dev *pdev = to_pci_dev(isp->dev); enum ia_css_pipe_id css_pipe_id; unsigned int sensor_start_stream; - unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION; - int ret = 0; unsigned long irqflags; - - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } + int ret; dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n", atomisp_subdev_source_pad(vdev), asd->index); @@ -1802,19 +1530,12 @@ static int atomisp_streamon(struct file *file, void *fh, return -EINVAL; } - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto out; - } - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - ret = -EBUSY; - goto out; - } + ret = atomisp_pipe_check(pipe, false); + if (ret) + return ret; if (pipe->capq.streaming) - goto out; + return 0; /* Input system HW workaround */ atomisp_dma_burst_len_cfg(asd); @@ -1829,20 +1550,18 @@ static int atomisp_streamon(struct file *file, void *fh, if (list_empty(&pipe->capq.stream)) { spin_unlock_irqrestore(&pipe->irq_lock, irqflags); dev_dbg(isp->dev, "no buffer in the queue\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } spin_unlock_irqrestore(&pipe->irq_lock, irqflags); ret = videobuf_streamon(&pipe->capq); if (ret) - goto out; + return ret; /* Reset pending capture request count. */ asd->pending_capture_request = 0; - if ((atomisp_subdev_streaming_count(asd) > sensor_start_stream) && - (!isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl)) { + if (atomisp_subdev_streaming_count(asd) > sensor_start_stream) { /* trigger still capture */ if (asd->continuous_mode->val && atomisp_subdev_source_pad(vdev) @@ -1856,11 +1575,11 @@ static int atomisp_streamon(struct file *file, void *fh, if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) { flush_work(&asd->delayed_init_work); - rt_mutex_unlock(&isp->mutex); - if (wait_for_completion_interruptible( - &asd->init_done) != 0) + mutex_unlock(&isp->mutex); + ret = wait_for_completion_interruptible(&asd->init_done); + mutex_lock(&isp->mutex); + if (ret != 0) return -ERESTARTSYS; - rt_mutex_lock(&isp->mutex); } /* handle per_frame_setting parameter and buffers */ @@ -1882,16 +1601,12 @@ static int atomisp_streamon(struct file *file, void *fh, asd->params.offline_parm.num_captures, asd->params.offline_parm.skip_frames, asd->params.offline_parm.offset); - if (ret) { - ret = -EINVAL; - goto out; - } - if (asd->depth_mode->val) - atomisp_pause_buffer_event(isp); + if (ret) + return -EINVAL; } } atomisp_qbuffers_to_css(asd); - goto out; + return 0; } if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { @@ -1917,14 +1632,14 @@ static int atomisp_streamon(struct file *file, void *fh, ret = atomisp_css_start(asd, css_pipe_id, false); if (ret) - goto out; + return ret; + spin_lock_irqsave(&isp->lock, irqflags); asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED; + spin_unlock_irqrestore(&isp->lock, irqflags); atomic_set(&asd->sof_count, -1); atomic_set(&asd->sequence, -1); atomic_set(&asd->sequence_temp, -1); - if (isp->sw_contex.file_input) - wdt_duration = ATOMISP_ISP_FILE_TIMEOUT_DURATION; asd->params.dis_proj_data_valid = false; asd->latest_preview_exp_id = 0; @@ -1938,7 +1653,7 @@ static int atomisp_streamon(struct file *file, void *fh, /* Only start sensor when the last streaming instance started */ if (atomisp_subdev_streaming_count(asd) < sensor_start_stream) - goto out; + 
return 0; start_sensor: if (isp->flash) { @@ -1947,26 +1662,21 @@ start_sensor: atomisp_setup_flash(asd); } - if (!isp->sw_contex.file_input) { - atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, - atomisp_css_valid_sof(isp)); - atomisp_csi2_configure(asd); - /* - * set freq to max when streaming count > 1 which indicate - * dual camera would run - */ - if (atomisp_streaming_count(isp) > 1) { - if (atomisp_freq_scaling(isp, - ATOMISP_DFS_MODE_MAX, false) < 0) - dev_dbg(isp->dev, "DFS max mode failed!\n"); - } else { - if (atomisp_freq_scaling(isp, - ATOMISP_DFS_MODE_AUTO, false) < 0) - dev_dbg(isp->dev, "DFS auto mode failed!\n"); - } - } else { - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false) < 0) + atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, + atomisp_css_valid_sof(isp)); + atomisp_csi2_configure(asd); + /* + * set freq to max when streaming count > 1 which indicate + * dual camera would run + */ + if (atomisp_streaming_count(isp) > 1) { + if (atomisp_freq_scaling(isp, + ATOMISP_DFS_MODE_MAX, false) < 0) dev_dbg(isp->dev, "DFS max mode failed!\n"); + } else { + if (atomisp_freq_scaling(isp, + ATOMISP_DFS_MODE_AUTO, false) < 0) + dev_dbg(isp->dev, "DFS auto mode failed!\n"); } if (asd->depth_mode->val && atomisp_streaming_count(isp) == @@ -1974,17 +1684,11 @@ start_sensor: ret = atomisp_stream_on_master_slave_sensor(isp, false); if (ret) { dev_err(isp->dev, "master slave sensor stream on failed!\n"); - goto out; + return ret; } - if (!IS_ISP2401) - __wdt_on_master_slave_sensor(isp, wdt_duration); - else - __wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, true); goto start_delay_wq; } else if (asd->depth_mode->val && (atomisp_streaming_count(isp) < ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) { - if (IS_ISP2401) - __wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, false); goto start_delay_wq; } @@ -1999,41 +1703,29 @@ start_sensor: ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, video, s_stream, 1); if (ret) { + spin_lock_irqsave(&isp->lock, irqflags); asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; - ret = -EINVAL; - goto out; - } - - if (!IS_ISP2401) { - if (atomisp_buffers_queued(asd)) - atomisp_wdt_refresh(asd, wdt_duration); - } else { - if (atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_refresh_pipe(pipe, wdt_duration); + spin_unlock_irqrestore(&isp->lock, irqflags); + return -EINVAL; } start_delay_wq: if (asd->continuous_mode->val) { - struct v4l2_mbus_framefmt *sink; - - sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); + atomisp_subdev_get_ffmt(&asd->subdev, NULL, + V4L2_SUBDEV_FORMAT_ACTIVE, + ATOMISP_SUBDEV_PAD_SINK); reinit_completion(&asd->init_done); asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED; queue_work(asd->delayed_init_workq, &asd->delayed_init_work); - atomisp_css_set_cont_prev_start_time(isp, - ATOMISP_CALC_CSS_PREV_OVERLAP(sink->height)); } else { asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED; } -out: - rt_mutex_unlock(&isp->mutex); - return ret; + + return 0; } -int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) +int atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); @@ -2050,17 +1742,10 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) unsigned long flags; bool first_streamoff = false; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - 
__func__, vdev->name); - return -EINVAL; - } - dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n", atomisp_subdev_source_pad(vdev), asd->index); lockdep_assert_held(&isp->mutex); - lockdep_assert_held(&isp->streamoff_mutex); if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_dbg(isp->dev, "unsupported v4l2 buf type\n"); @@ -2071,17 +1756,10 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) * do only videobuf_streamoff for capture & vf pipes in * case of continuous capture */ - if ((asd->continuous_mode->val || - isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) && - atomisp_subdev_source_pad(vdev) != - ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && - atomisp_subdev_source_pad(vdev) != - ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) { - if (isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) { - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); - } else if (atomisp_subdev_source_pad(vdev) - == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) { + if (asd->continuous_mode->val && + atomisp_subdev_source_pad(vdev) != ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && + atomisp_subdev_source_pad(vdev) != ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) { + if (atomisp_subdev_source_pad(vdev) == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) { /* stop continuous still capture if needed */ if (asd->params.offline_parm.num_captures == -1) atomisp_css_offline_capture_configure(asd, @@ -2118,32 +1796,14 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) if (!pipe->capq.streaming) return 0; - spin_lock_irqsave(&isp->lock, flags); - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { - asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING; + if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) first_streamoff = true; - } - spin_unlock_irqrestore(&isp->lock, flags); - - if (first_streamoff) { - /* if other streams are running, should not disable watch dog */ - rt_mutex_unlock(&isp->mutex); - atomisp_wdt_stop(asd, true); - - /* - * must stop sending pixels into GP_FIFO before stop - * the pipeline. 
- */ - if (isp->sw_contex.file_input) - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); - - rt_mutex_lock(&isp->mutex); - } spin_lock_irqsave(&isp->lock, flags); if (atomisp_subdev_streaming_count(asd) == 1) asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; + else + asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING; spin_unlock_irqrestore(&isp->lock, flags); if (!first_streamoff) { @@ -2154,19 +1814,16 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) } atomisp_clear_css_buffer_counters(asd); - - if (!isp->sw_contex.file_input) - atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, - false); + atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false); if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) { cancel_work_sync(&asd->delayed_init_work); asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED; } - if (first_streamoff) { - css_pipe_id = atomisp_get_css_pipe_id(asd); - atomisp_css_stop(asd, css_pipe_id, false); - } + + css_pipe_id = atomisp_get_css_pipe_id(asd); + atomisp_css_stop(asd, css_pipe_id, false); + /* cancel work queue*/ if (asd->video_out_capture.users) { capture_pipe = &asd->video_out_capture; @@ -2210,9 +1867,8 @@ stopsensor: != atomisp_sensor_start_stream(asd)) return 0; - if (!isp->sw_contex.file_input) - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); + ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, + video, s_stream, 0); if (isp->flash) { asd->params.num_flash_frames = 0; @@ -2284,22 +1940,6 @@ stopsensor: return ret; } -static int atomisp_streamoff(struct file *file, void *fh, - enum v4l2_buf_type type) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int rval; - - mutex_lock(&isp->streamoff_mutex); - rt_mutex_lock(&isp->mutex); - rval = __atomisp_streamoff(file, fh, type); - rt_mutex_unlock(&isp->mutex); - mutex_unlock(&isp->streamoff_mutex); - - return rval; -} - /* * To get the current value of a control. 
* applications initialize the id field of a struct v4l2_control and @@ -2313,12 +1953,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh, struct atomisp_device *isp = video_get_drvdata(vdev); int i, ret = -EINVAL; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - for (i = 0; i < ctrls_num; i++) { if (ci_v4l2_controls[i].id == control->id) { ret = 0; @@ -2329,8 +1963,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh, if (ret) return ret; - rt_mutex_lock(&isp->mutex); - switch (control->id) { case V4L2_CID_IRIS_ABSOLUTE: case V4L2_CID_EXPOSURE_ABSOLUTE: @@ -2352,7 +1984,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh, case V4L2_CID_TEST_PATTERN_COLOR_GR: case V4L2_CID_TEST_PATTERN_COLOR_GB: case V4L2_CID_TEST_PATTERN_COLOR_B: - rt_mutex_unlock(&isp->mutex); return v4l2_g_ctrl(isp->inputs[asd->input_curr].camera-> ctrl_handler, control); case V4L2_CID_COLORFX: @@ -2381,7 +2012,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh, break; } - rt_mutex_unlock(&isp->mutex); return ret; } @@ -2398,12 +2028,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh, struct atomisp_device *isp = video_get_drvdata(vdev); int i, ret = -EINVAL; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - for (i = 0; i < ctrls_num; i++) { if (ci_v4l2_controls[i].id == control->id) { ret = 0; @@ -2414,7 +2038,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh, if (ret) return ret; - rt_mutex_lock(&isp->mutex); switch (control->id) { case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: case V4L2_CID_EXPOSURE: @@ -2435,7 +2058,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh, case V4L2_CID_TEST_PATTERN_COLOR_GR: case V4L2_CID_TEST_PATTERN_COLOR_GB: case V4L2_CID_TEST_PATTERN_COLOR_B: - rt_mutex_unlock(&isp->mutex); return v4l2_s_ctrl(NULL, isp->inputs[asd->input_curr].camera-> ctrl_handler, control); @@ -2467,7 +2089,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh, ret = -EINVAL; break; } - rt_mutex_unlock(&isp->mutex); return ret; } @@ -2485,12 +2106,6 @@ static int atomisp_queryctl(struct file *file, void *fh, struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct atomisp_device *isp = video_get_drvdata(vdev); - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - switch (qc->id) { case V4L2_CID_FOCUS_ABSOLUTE: case V4L2_CID_FOCUS_RELATIVE: @@ -2536,12 +2151,6 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh, int i; int ret = 0; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else @@ -2592,9 +2201,7 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh, &ctrl); break; case V4L2_CID_ZOOM_ABSOLUTE: - rt_mutex_lock(&isp->mutex); ret = atomisp_digital_zoom(asd, 0, &ctrl.value); - rt_mutex_unlock(&isp->mutex); break; case V4L2_CID_G_SKIP_FRAMES: ret = v4l2_subdev_call( @@ -2653,12 +2260,6 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, int i; int ret = 0; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else @@ -2707,7 +2308,6 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, case V4L2_CID_FLASH_STROBE: case 
V4L2_CID_FLASH_MODE: case V4L2_CID_FLASH_STATUS_REGISTER: - rt_mutex_lock(&isp->mutex); if (isp->flash) { ret = v4l2_s_ctrl(NULL, isp->flash->ctrl_handler, @@ -2722,12 +2322,9 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, asd->params.num_flash_frames = 0; } } - rt_mutex_unlock(&isp->mutex); break; case V4L2_CID_ZOOM_ABSOLUTE: - rt_mutex_lock(&isp->mutex); ret = atomisp_digital_zoom(asd, 1, &ctrl.value); - rt_mutex_unlock(&isp->mutex); break; default: ctr = v4l2_ctrl_find(&asd->ctrl_handler, ctrl.id); @@ -2784,20 +2381,12 @@ static int atomisp_g_parm(struct file *file, void *fh, struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct atomisp_device *isp = video_get_drvdata(vdev); - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_err(isp->dev, "unsupported v4l2 buf type\n"); return -EINVAL; } - rt_mutex_lock(&isp->mutex); parm->parm.capture.capturemode = asd->run_mode->val; - rt_mutex_unlock(&isp->mutex); return 0; } @@ -2812,19 +2401,11 @@ static int atomisp_s_parm(struct file *file, void *fh, int rval; int fps; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_err(isp->dev, "unsupported v4l2 buf type\n"); return -EINVAL; } - rt_mutex_lock(&isp->mutex); - asd->high_speed_mode = false; switch (parm->parm.capture.capturemode) { case CI_MODE_NONE: { @@ -2843,7 +2424,7 @@ static int atomisp_s_parm(struct file *file, void *fh, asd->high_speed_mode = true; } - goto out; + return rval == -ENOIOCTLCMD ? 0 : rval; } case CI_MODE_VIDEO: mode = ATOMISP_RUN_MODE_VIDEO; @@ -2858,76 +2439,29 @@ static int atomisp_s_parm(struct file *file, void *fh, mode = ATOMISP_RUN_MODE_PREVIEW; break; default: - rval = -EINVAL; - goto out; + return -EINVAL; } rval = v4l2_ctrl_s_ctrl(asd->run_mode, mode); -out: - rt_mutex_unlock(&isp->mutex); - return rval == -ENOIOCTLCMD ? 
0 : rval; } -static int atomisp_s_parm_file(struct file *file, void *fh, - struct v4l2_streamparm *parm) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - - if (parm->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { - dev_err(isp->dev, "unsupported v4l2 buf type for output\n"); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - isp->sw_contex.file_input = true; - rt_mutex_unlock(&isp->mutex); - - return 0; -} - static long atomisp_vidioc_default(struct file *file, void *fh, bool valid_prio, unsigned int cmd, void *arg) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd; + struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct v4l2_subdev *motor; - bool acc_node; int err; - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) - asd = atomisp_to_acc_pipe(vdev)->asd; - else - asd = atomisp_to_video_pipe(vdev)->asd; - if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else motor = isp->motor; switch (cmd) { - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: - case ATOMISP_IOC_S_EXPOSURE: - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_EXT_ISP_CTRL: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT: - case ATOMISP_IOC_S_SENSOR_EE_CONFIG: - case ATOMISP_IOC_G_UPDATE_EXPOSURE: - /* we do not need take isp->mutex for these IOCTLs */ - break; - default: - rt_mutex_lock(&isp->mutex); - break; - } - switch (cmd) { case ATOMISP_IOC_S_SENSOR_RUNMODE: if (IS_ISP2401) err = atomisp_set_sensor_runmode(asd, arg); @@ -3173,22 +2707,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh, break; } - switch (cmd) { - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: - case ATOMISP_IOC_S_EXPOSURE: - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_EXT_ISP_CTRL: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT: - case ATOMISP_IOC_G_UPDATE_EXPOSURE: - break; - default: - rt_mutex_unlock(&isp->mutex); - break; - } return err; } @@ -3207,7 +2725,7 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = { .vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap, .vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap, .vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap, - .vidioc_s_fmt_vid_cap = atomisp_s_fmt_cap, + .vidioc_s_fmt_vid_cap = atomisp_set_fmt, .vidioc_reqbufs = atomisp_reqbufs, .vidioc_querybuf = atomisp_querybuf, .vidioc_qbuf = atomisp_qbuf, @@ -3218,13 +2736,3 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = { .vidioc_s_parm = atomisp_s_parm, .vidioc_g_parm = atomisp_g_parm, }; - -const struct v4l2_ioctl_ops atomisp_file_ioctl_ops = { - .vidioc_querycap = atomisp_querycap, - .vidioc_g_fmt_vid_out = atomisp_g_fmt_file, - .vidioc_s_fmt_vid_out = atomisp_s_fmt_file, - .vidioc_s_parm = atomisp_s_parm_file, - .vidioc_reqbufs = atomisp_reqbufs_file, - .vidioc_querybuf = atomisp_querybuf_file, - .vidioc_qbuf = atomisp_qbuf_file, -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h index d85e0d697a4e..c660f631d371 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h +++ 
b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h @@ -34,27 +34,21 @@ atomisp_format_bridge *atomisp_get_format_bridge(unsigned int pixelformat); const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(u32 mbus_code); +int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool streaming_ok); + int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd, uint16_t stream_id); -int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type); -int __atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req); - -int atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req); +int atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type); +int atomisp_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req); enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd); void atomisp_videobuf_free_buf(struct videobuf_buffer *vb); -extern const struct v4l2_file_operations atomisp_file_fops; - extern const struct v4l2_ioctl_ops atomisp_ioctl_ops; -extern const struct v4l2_ioctl_ops atomisp_file_ioctl_ops; - unsigned int atomisp_streaming_count(struct atomisp_device *isp); /* compat_ioctl for 32bit userland app and 64bit kernel */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c index 394fe6959033..847dfee6ad78 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c @@ -373,16 +373,12 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd, struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd); struct atomisp_device *isp = isp_sd->isp; struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM]; - u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode); struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM], *comp[ATOMISP_SUBDEV_PADS_NUM]; - enum atomisp_input_stream_id stream_id; unsigned int i; unsigned int padding_w = pad_w; unsigned int padding_h = pad_h; - stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad); - isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp); dev_dbg(isp->dev, @@ -478,9 +474,10 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd, dvs_w = dvs_h = 0; } atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h); - atomisp_css_input_set_effective_resolution(isp_sd, stream_id, - crop[pad]->width, crop[pad]->height); - + atomisp_css_input_set_effective_resolution(isp_sd, + ATOMISP_INPUT_STREAM_GENERAL, + crop[pad]->width, + crop[pad]->height); break; } case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: @@ -523,14 +520,14 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd, if (r->width * crop[ATOMISP_SUBDEV_PAD_SINK]->height < crop[ATOMISP_SUBDEV_PAD_SINK]->width * r->height) atomisp_css_input_set_effective_resolution(isp_sd, - stream_id, + ATOMISP_INPUT_STREAM_GENERAL, rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]-> height * r->width / r->height, ATOM_ISP_STEP_WIDTH), crop[ATOMISP_SUBDEV_PAD_SINK]->height); else atomisp_css_input_set_effective_resolution(isp_sd, - stream_id, + ATOMISP_INPUT_STREAM_GENERAL, crop[ATOMISP_SUBDEV_PAD_SINK]->width, rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]-> width * r->height / r->width, @@ -620,16 +617,12 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd, struct atomisp_device *isp = isp_sd->isp; struct v4l2_mbus_framefmt *__ffmt = atomisp_subdev_get_ffmt(sd, sd_state, which, pad); - u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode); - enum atomisp_input_stream_id stream_id; 
dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n", atomisp_pad_str(pad), ffmt->width, ffmt->height, ffmt->code, which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY" : "V4L2_SUBDEV_FORMAT_ACTIVE"); - stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad); - switch (pad) { case ATOMISP_SUBDEV_PAD_SINK: { const struct atomisp_in_fmt_conv *fc = @@ -649,15 +642,15 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd, if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { atomisp_css_input_set_resolution(isp_sd, - stream_id, ffmt); + ATOMISP_INPUT_STREAM_GENERAL, ffmt); atomisp_css_input_set_binning_factor(isp_sd, - stream_id, + ATOMISP_INPUT_STREAM_GENERAL, atomisp_get_sensor_bin_factor(isp_sd)); - atomisp_css_input_set_bayer_order(isp_sd, stream_id, + atomisp_css_input_set_bayer_order(isp_sd, ATOMISP_INPUT_STREAM_GENERAL, fc->bayer_order); - atomisp_css_input_set_format(isp_sd, stream_id, + atomisp_css_input_set_format(isp_sd, ATOMISP_INPUT_STREAM_GENERAL, fc->atomisp_in_fmt); - atomisp_css_set_default_isys_config(isp_sd, stream_id, + atomisp_css_set_default_isys_config(isp_sd, ATOMISP_INPUT_STREAM_GENERAL, ffmt); } @@ -874,12 +867,18 @@ static int s_ctrl(struct v4l2_ctrl *ctrl) { struct atomisp_sub_device *asd = container_of( ctrl->handler, struct atomisp_sub_device, ctrl_handler); + unsigned int streaming; + unsigned long flags; switch (ctrl->id) { case V4L2_CID_RUN_MODE: return __atomisp_update_run_mode(asd); case V4L2_CID_DEPTH_MODE: - if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) { + /* Use spinlock instead of mutex to avoid possible locking issues */ + spin_lock_irqsave(&asd->isp->lock, flags); + streaming = asd->streaming; + spin_unlock_irqrestore(&asd->isp->lock, flags); + if (streaming != ATOMISP_DEVICE_STREAMING_DISABLED) { dev_err(asd->isp->dev, "ISP is streaming, it is not supported to change the depth mode\n"); return -EINVAL; @@ -1066,7 +1065,6 @@ static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd, pipe->isp = asd->isp; spin_lock_init(&pipe->irq_lock); INIT_LIST_HEAD(&pipe->activeq); - INIT_LIST_HEAD(&pipe->activeq_out); INIT_LIST_HEAD(&pipe->buffers_waiting_for_param); INIT_LIST_HEAD(&pipe->per_frame_params); memset(pipe->frame_request_config_id, @@ -1076,13 +1074,6 @@ static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd, sizeof(struct atomisp_css_params_with_list *)); } -static void atomisp_init_acc_pipe(struct atomisp_sub_device *asd, - struct atomisp_acc_pipe *pipe) -{ - pipe->asd = asd; - pipe->isp = asd->isp; -} - /* * isp_subdev_init_entities - Initialize V4L2 subdev and media entity * @asd: ISP CCDC module @@ -1126,9 +1117,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd) if (ret < 0) return ret; - atomisp_init_subdev_pipe(asd, &asd->video_in, - V4L2_BUF_TYPE_VIDEO_OUTPUT); - atomisp_init_subdev_pipe(asd, &asd->video_out_preview, V4L2_BUF_TYPE_VIDEO_CAPTURE); @@ -1141,13 +1129,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd) atomisp_init_subdev_pipe(asd, &asd->video_out_video_capture, V4L2_BUF_TYPE_VIDEO_CAPTURE); - atomisp_init_acc_pipe(asd, &asd->video_acc); - - ret = atomisp_video_init(&asd->video_in, "MEMORY", - ATOMISP_RUN_MODE_SDV); - if (ret < 0) - return ret; - ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE", ATOMISP_RUN_MODE_STILL_CAPTURE); if (ret < 0) @@ -1168,8 +1149,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd) if (ret < 0) return ret; - atomisp_acc_init(&asd->video_acc, "ACC"); - ret = 
v4l2_ctrl_handler_init(&asd->ctrl_handler, 1); if (ret) return ret; @@ -1226,7 +1205,11 @@ int atomisp_create_pads_links(struct atomisp_device *isp) return ret; } } - for (i = 0; i < isp->input_cnt - 2; i++) { + for (i = 0; i < isp->input_cnt; i++) { + /* Don't create links for the test-pattern-generator */ + if (isp->inputs[i].type == TEST_PATTERN) + continue; + ret = media_create_pad_link(&isp->inputs[i].camera->entity, 0, &isp->csi2_port[isp->inputs[i]. port].subdev.entity, @@ -1262,17 +1245,6 @@ int atomisp_create_pads_links(struct atomisp_device *isp) entity, 0, 0); if (ret < 0) return ret; - /* - * file input only supported on subdev0 - * so do not create pad link for subdevs other then subdev0 - */ - if (asd->index) - return 0; - ret = media_create_pad_link(&asd->video_in.vdev.entity, - 0, &asd->subdev.entity, - ATOMISP_SUBDEV_PAD_SINK, 0); - if (ret < 0) - return ret; } return 0; } @@ -1302,87 +1274,55 @@ void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd) { atomisp_subdev_cleanup_entities(asd); v4l2_device_unregister_subdev(&asd->subdev); - atomisp_video_unregister(&asd->video_in); atomisp_video_unregister(&asd->video_out_preview); atomisp_video_unregister(&asd->video_out_vf); atomisp_video_unregister(&asd->video_out_capture); atomisp_video_unregister(&asd->video_out_video_capture); - atomisp_acc_unregister(&asd->video_acc); } -int atomisp_subdev_register_entities(struct atomisp_sub_device *asd, - struct v4l2_device *vdev) +int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd, + struct v4l2_device *vdev) +{ + return v4l2_device_register_subdev(vdev, &asd->subdev); +} + +int atomisp_subdev_register_video_nodes(struct atomisp_sub_device *asd, + struct v4l2_device *vdev) { int ret; - u32 device_caps; /* * FIXME: check if all device caps are properly initialized. - * Should any of those use V4L2_CAP_META_OUTPUT? Probably yes. + * Should any of those use V4L2_CAP_META_CAPTURE? Probably yes. */ - device_caps = V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_STREAMING; - - /* Register the subdev and video node. 
*/ - - ret = v4l2_device_register_subdev(vdev, &asd->subdev); - if (ret < 0) - goto error; - asd->video_out_preview.vdev.v4l2_dev = vdev; - asd->video_out_preview.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; + asd->video_out_preview.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; ret = video_register_device(&asd->video_out_preview.vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error; asd->video_out_capture.vdev.v4l2_dev = vdev; - asd->video_out_capture.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; + asd->video_out_capture.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; ret = video_register_device(&asd->video_out_capture.vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error; asd->video_out_vf.vdev.v4l2_dev = vdev; - asd->video_out_vf.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; + asd->video_out_vf.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; ret = video_register_device(&asd->video_out_vf.vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error; asd->video_out_video_capture.vdev.v4l2_dev = vdev; - asd->video_out_video_capture.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; + asd->video_out_video_capture.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; ret = video_register_device(&asd->video_out_video_capture.vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error; - asd->video_acc.vdev.v4l2_dev = vdev; - asd->video_acc.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; - ret = video_register_device(&asd->video_acc.vdev, - VFL_TYPE_VIDEO, -1); - if (ret < 0) - goto error; - - /* - * file input only supported on subdev0 - * so do not create video node for subdevs other then subdev0 - */ - if (asd->index) - return 0; - - asd->video_in.vdev.v4l2_dev = vdev; - asd->video_in.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_CAPTURE; - ret = video_register_device(&asd->video_in.vdev, - VFL_TYPE_VIDEO, -1); - if (ret < 0) - goto error; return 0; @@ -1415,7 +1355,6 @@ int atomisp_subdev_init(struct atomisp_device *isp) return -ENOMEM; for (i = 0; i < isp->num_of_streams; i++) { asd = &isp->asd[i]; - spin_lock_init(&asd->lock); asd->isp = isp; isp_subdev_init_params(asd); asd->index = i; diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h index 798a93793a9a..a1f4da35235d 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h @@ -70,9 +70,7 @@ struct atomisp_video_pipe { enum v4l2_buf_type type; struct media_pad pad; struct videobuf_queue capq; - struct videobuf_queue outq; struct list_head activeq; - struct list_head activeq_out; /* * the buffers waiting for per-frame parameters, this is only valid * in per-frame setting mode. @@ -86,9 +84,10 @@ struct atomisp_video_pipe { unsigned int buffers_in_css; - /* irq_lock is used to protect video buffer state change operations and - * also to make activeq, activeq_out, capq and outq list - * operations atomic. */ + /* + * irq_lock is used to protect video buffer state change operations and + * also to make activeq and capq operations atomic. 
+ */ spinlock_t irq_lock; unsigned int users; @@ -109,23 +108,6 @@ struct atomisp_video_pipe { */ unsigned int frame_request_config_id[VIDEO_MAX_FRAME]; struct atomisp_css_params_with_list *frame_params[VIDEO_MAX_FRAME]; - - /* - * move wdt from asd struct to create wdt for each pipe - */ - /* ISP2401 */ - struct timer_list wdt; - unsigned int wdt_duration; /* in jiffies */ - unsigned long wdt_expires; - atomic_t wdt_count; -}; - -struct atomisp_acc_pipe { - struct video_device vdev; - unsigned int users; - bool running; - struct atomisp_sub_device *asd; - struct atomisp_device *isp; }; struct atomisp_pad_format { @@ -267,28 +249,6 @@ struct atomisp_css_params_with_list { struct list_head list; }; -struct atomisp_acc_fw { - struct ia_css_fw_info *fw; - unsigned int handle; - unsigned int flags; - unsigned int type; - struct { - size_t length; - unsigned long css_ptr; - } args[ATOMISP_ACC_NR_MEMORY]; - struct list_head list; -}; - -struct atomisp_map { - ia_css_ptr ptr; - size_t length; - struct list_head list; - /* FIXME: should keep book which maps are currently used - * by binaries and not allow releasing those - * which are in use. Implement by reference counting. - */ -}; - struct atomisp_sub_device { struct v4l2_subdev subdev; struct media_pad pads[ATOMISP_SUBDEV_PADS_NUM]; @@ -297,15 +257,12 @@ struct atomisp_sub_device { enum atomisp_subdev_input_entity input; unsigned int output; - struct atomisp_video_pipe video_in; struct atomisp_video_pipe video_out_capture; /* capture output */ struct atomisp_video_pipe video_out_vf; /* viewfinder output */ struct atomisp_video_pipe video_out_preview; /* preview output */ - struct atomisp_acc_pipe video_acc; /* video pipe main output */ struct atomisp_video_pipe video_out_video_capture; /* struct isp_subdev_params params; */ - spinlock_t lock; struct atomisp_device *isp; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *fmt_auto; @@ -356,15 +313,16 @@ struct atomisp_sub_device { /* This field specifies which camera (v4l2 input) is selected. */ int input_curr; - /* This field specifies which sensor is being selected when there - are multiple sensors connected to the same MIPI port. */ - int sensor_curr; atomic_t sof_count; atomic_t sequence; /* Sequence value that is assigned to buffer. */ atomic_t sequence_temp; - unsigned int streaming; /* Hold both mutex and lock to change this */ + /* + * Writers of streaming must hold both isp->mutex and isp->lock. + * Readers of streaming need to hold only one of the two locks. 
+ */ + unsigned int streaming; bool stream_prepared; /* whether css stream is created */ /* subdev index: will be used to show which subdev is holding the @@ -390,11 +348,6 @@ struct atomisp_sub_device { int raw_buffer_locked_count; spinlock_t raw_buffer_bitmap_lock; - /* ISP 2400 */ - struct timer_list wdt; - unsigned int wdt_duration; /* in jiffies */ - unsigned long wdt_expires; - /* ISP2401 */ bool re_trigger_capture; @@ -450,8 +403,10 @@ int atomisp_update_run_mode(struct atomisp_sub_device *asd); void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd); void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd); -int atomisp_subdev_register_entities(struct atomisp_sub_device *asd, - struct v4l2_device *vdev); +int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd, + struct v4l2_device *vdev); +int atomisp_subdev_register_video_nodes(struct atomisp_sub_device *asd, + struct v4l2_device *vdev); int atomisp_subdev_init(struct atomisp_device *isp); void atomisp_subdev_cleanup(struct atomisp_device *isp); int atomisp_create_pads_links(struct atomisp_device *isp); diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index 643ba981601b..d5bb9906ca6f 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -34,7 +34,6 @@ #include "atomisp_cmd.h" #include "atomisp_common.h" #include "atomisp_fops.h" -#include "atomisp_file.h" #include "atomisp_ioctl.h" #include "atomisp_internal.h" #include "atomisp-regs.h" @@ -442,12 +441,7 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, video->pad.flags = MEDIA_PAD_FL_SINK; video->vdev.fops = &atomisp_fops; video->vdev.ioctl_ops = &atomisp_ioctl_ops; - break; - case V4L2_BUF_TYPE_VIDEO_OUTPUT: - direction = "input"; - video->pad.flags = MEDIA_PAD_FL_SOURCE; - video->vdev.fops = &atomisp_file_fops; - video->vdev.ioctl_ops = &atomisp_file_ioctl_ops; + video->vdev.lock = &video->isp->mutex; break; default: return -EINVAL; @@ -467,18 +461,6 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, return 0; } -void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name) -{ - video->vdev.fops = &atomisp_fops; - video->vdev.ioctl_ops = &atomisp_ioctl_ops; - - /* Initialize the video device. */ - snprintf(video->vdev.name, sizeof(video->vdev.name), - "ATOMISP ISP %s", name); - video->vdev.release = video_device_release_empty; - video_set_drvdata(&video->vdev, video->isp); -} - void atomisp_video_unregister(struct atomisp_video_pipe *video) { if (video_is_registered(&video->vdev)) { @@ -487,12 +469,6 @@ void atomisp_video_unregister(struct atomisp_video_pipe *video) } } -void atomisp_acc_unregister(struct atomisp_acc_pipe *video) -{ - if (video_is_registered(&video->vdev)) - video_unregister_device(&video->vdev); -} - static int atomisp_save_iunit_reg(struct atomisp_device *isp) { struct pci_dev *pdev = to_pci_dev(isp->dev); @@ -1031,7 +1007,6 @@ static int atomisp_subdev_probe(struct atomisp_device *isp) &subdevs->v4l2_subdev.board_info; struct i2c_adapter *adapter = i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id); - int sensor_num, i; dev_info(isp->dev, "Probing Subdev %s\n", board_info->type); @@ -1090,22 +1065,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp) * pixel_format. 
*/ isp->inputs[isp->input_cnt].frame_size.pixel_format = 0; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); - sensor_num = isp->inputs[isp->input_cnt] - .camera_caps->sensor_num; isp->input_cnt++; - for (i = 1; i < sensor_num; i++) { - if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) { - dev_warn(isp->dev, - "atomisp inputs out of range\n"); - break; - } - isp->inputs[isp->input_cnt] = - isp->inputs[isp->input_cnt - 1]; - isp->inputs[isp->input_cnt].sensor_index = i; - isp->input_cnt++; - } break; case CAMERA_MOTOR: if (isp->motor) { @@ -1158,7 +1118,6 @@ static void atomisp_unregister_entities(struct atomisp_device *isp) for (i = 0; i < isp->num_of_streams; i++) atomisp_subdev_unregister_entities(&isp->asd[i]); atomisp_tpg_unregister_entities(&isp->tpg); - atomisp_file_input_unregister_entities(&isp->file_dev); for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]); @@ -1210,13 +1169,6 @@ static int atomisp_register_entities(struct atomisp_device *isp) goto csi_and_subdev_probe_failed; } - ret = - atomisp_file_input_register_entities(&isp->file_dev, &isp->v4l2_dev); - if (ret < 0) { - dev_err(isp->dev, "atomisp_file_input_register_entities\n"); - goto file_input_register_failed; - } - ret = atomisp_tpg_register_entities(&isp->tpg, &isp->v4l2_dev); if (ret < 0) { dev_err(isp->dev, "atomisp_tpg_register_entities\n"); @@ -1226,10 +1178,9 @@ static int atomisp_register_entities(struct atomisp_device *isp) for (i = 0; i < isp->num_of_streams; i++) { struct atomisp_sub_device *asd = &isp->asd[i]; - ret = atomisp_subdev_register_entities(asd, &isp->v4l2_dev); + ret = atomisp_subdev_register_subdev(asd, &isp->v4l2_dev); if (ret < 0) { - dev_err(isp->dev, - "atomisp_subdev_register_entities fail\n"); + dev_err(isp->dev, "atomisp_subdev_register_subdev fail\n"); for (; i > 0; i--) atomisp_subdev_unregister_entities( &isp->asd[i - 1]); @@ -1267,31 +1218,17 @@ static int atomisp_register_entities(struct atomisp_device *isp) } } - dev_dbg(isp->dev, - "FILE_INPUT enable, camera_cnt: %d\n", isp->input_cnt); - isp->inputs[isp->input_cnt].type = FILE_INPUT; - isp->inputs[isp->input_cnt].port = -1; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); - isp->inputs[isp->input_cnt++].camera = &isp->file_dev.sd; - if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) { dev_dbg(isp->dev, "TPG detected, camera_cnt: %d\n", isp->input_cnt); isp->inputs[isp->input_cnt].type = TEST_PATTERN; isp->inputs[isp->input_cnt].port = -1; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd; } else { dev_warn(isp->dev, "too many atomisp inputs, TPG ignored.\n"); } - ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev); - if (ret < 0) - goto link_failed; - - return media_device_register(&isp->media_dev); + return 0; link_failed: for (i = 0; i < isp->num_of_streams; i++) @@ -1304,8 +1241,6 @@ wq_alloc_failed: subdev_register_failed: atomisp_tpg_unregister_entities(&isp->tpg); tpg_register_failed: - atomisp_file_input_unregister_entities(&isp->file_dev); -file_input_register_failed: for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]); csi_and_subdev_probe_failed: @@ -1316,6 +1251,27 @@ v4l2_device_failed: return ret; } +static int atomisp_register_device_nodes(struct atomisp_device *isp) +{ + int i, err; + + for (i = 0; i < isp->num_of_streams; i++) { + err = 
atomisp_subdev_register_video_nodes(&isp->asd[i], &isp->v4l2_dev); + if (err) + return err; + } + + err = atomisp_create_pads_links(isp); + if (err) + return err; + + err = v4l2_device_register_subdev_nodes(&isp->v4l2_dev); + if (err) + return err; + + return media_device_register(&isp->media_dev); +} + static int atomisp_initialize_modules(struct atomisp_device *isp) { int ret; @@ -1326,13 +1282,6 @@ static int atomisp_initialize_modules(struct atomisp_device *isp) goto error_mipi_csi2; } - ret = atomisp_file_input_init(isp); - if (ret < 0) { - dev_err(isp->dev, - "file input device initialization failed\n"); - goto error_file_input; - } - ret = atomisp_tpg_init(isp); if (ret < 0) { dev_err(isp->dev, "tpg initialization failed\n"); @@ -1350,8 +1299,6 @@ static int atomisp_initialize_modules(struct atomisp_device *isp) error_isp_subdev: error_tpg: atomisp_tpg_cleanup(isp); -error_file_input: - atomisp_file_input_cleanup(isp); error_mipi_csi2: atomisp_mipi_csi2_cleanup(isp); return ret; @@ -1360,7 +1307,6 @@ error_mipi_csi2: static void atomisp_uninitialize_modules(struct atomisp_device *isp) { atomisp_tpg_cleanup(isp); - atomisp_file_input_cleanup(isp); atomisp_mipi_csi2_cleanup(isp); } @@ -1470,39 +1416,6 @@ static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id return true; } -static int init_atomisp_wdts(struct atomisp_device *isp) -{ - int i, err; - - atomic_set(&isp->wdt_work_queued, 0); - isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1); - if (!isp->wdt_work_queue) { - dev_err(isp->dev, "Failed to initialize wdt work queue\n"); - err = -ENOMEM; - goto alloc_fail; - } - INIT_WORK(&isp->wdt_work, atomisp_wdt_work); - - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (!IS_ISP2401) { - timer_setup(&asd->wdt, atomisp_wdt, 0); - } else { - timer_setup(&asd->video_out_capture.wdt, - atomisp_wdt, 0); - timer_setup(&asd->video_out_preview.wdt, - atomisp_wdt, 0); - timer_setup(&asd->video_out_vf.wdt, atomisp_wdt, 0); - timer_setup(&asd->video_out_video_capture.wdt, - atomisp_wdt, 0); - } - } - return 0; -alloc_fail: - return err; -} - #define ATOM_ISP_PCI_BAR 0 static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -1551,9 +1464,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base); - rt_mutex_init(&isp->mutex); - rt_mutex_init(&isp->loading); - mutex_init(&isp->streamoff_mutex); + mutex_init(&isp->mutex); spin_lock_init(&isp->lock); /* This is not a true PCI device on SoC, so the delay is not needed. */ @@ -1725,8 +1636,6 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim); } - rt_mutex_lock(&isp->loading); - err = atomisp_initialize_modules(isp); if (err < 0) { dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err); @@ -1738,13 +1647,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err); goto register_entities_fail; } - err = atomisp_create_pads_links(isp); - if (err < 0) - goto register_entities_fail; - /* init atomisp wdts */ - err = init_atomisp_wdts(isp); - if (err != 0) - goto wdt_work_queue_fail; + + INIT_WORK(&isp->assert_recovery_work, atomisp_assert_recovery_work); /* save the iunit context only once after all the values are init'ed. 
*/ atomisp_save_iunit_reg(isp); @@ -1777,8 +1681,10 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i release_firmware(isp->firmware); isp->firmware = NULL; isp->css_env.isp_css_fw.data = NULL; - isp->ready = true; - rt_mutex_unlock(&isp->loading); + + err = atomisp_register_device_nodes(isp); + if (err) + goto css_init_fail; atomisp_drvfs_init(isp); @@ -1789,13 +1695,10 @@ css_init_fail: request_irq_fail: hmm_cleanup(); pm_runtime_get_noresume(&pdev->dev); - destroy_workqueue(isp->wdt_work_queue); -wdt_work_queue_fail: atomisp_unregister_entities(isp); register_entities_fail: atomisp_uninitialize_modules(isp); initialize_modules_fail: - rt_mutex_unlock(&isp->loading); cpu_latency_qos_remove_request(&isp->pm_qos); atomisp_msi_irq_uninit(isp); pci_free_irq_vectors(pdev); @@ -1851,9 +1754,6 @@ static void atomisp_pci_remove(struct pci_dev *pdev) atomisp_msi_irq_uninit(isp); atomisp_unregister_entities(isp); - destroy_workqueue(isp->wdt_work_queue); - atomisp_file_input_cleanup(isp); - release_firmware(isp->firmware); } diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h index 72611b8286a4..ccf1c0ac17b2 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h @@ -22,16 +22,13 @@ #define __ATOMISP_V4L2_H__ struct atomisp_video_pipe; -struct atomisp_acc_pipe; struct v4l2_device; struct atomisp_device; struct firmware; int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, unsigned int run_mode); -void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name); void atomisp_video_unregister(struct atomisp_video_pipe *video); -void atomisp_acc_unregister(struct atomisp_acc_pipe *video); const struct firmware *atomisp_load_firmware(struct atomisp_device *isp); int atomisp_csi_lane_config(struct atomisp_device *isp); diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c index f50494123f03..a5fd6d38d3c4 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c @@ -44,16 +44,6 @@ #include "hmm/hmm_common.h" #include "hmm/hmm_bo.h" -static unsigned int order_to_nr(unsigned int order) -{ - return 1U << order; -} - -static unsigned int nr_to_order_bottom(unsigned int nr) -{ - return fls(nr) - 1; -} - static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo, unsigned int pgnr) { @@ -625,136 +615,40 @@ found: return bo; } -static void free_private_bo_pages(struct hmm_buffer_object *bo, - int free_pgnr) +static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array) { - int i, ret; + unsigned long i; - for (i = 0; i < free_pgnr; i++) { - ret = set_pages_wb(bo->pages[i], 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err ...ret = %d\n", - ret); - /* - W/A: set_pages_wb seldom return value = -EFAULT - indicate that address of page is not in valid - range(0xffff880000000000~0xffffc7ffffffffff) - then, _free_pages would panic; Do not know why page - address be valid,it maybe memory corruption by lowmemory - */ - if (!ret) { - __free_pages(bo->pages[i], 0); - } - } + for (i = 0; i < nr_pages; i++) + __free_pages(page_array[i], 0); +} + +static void free_private_bo_pages(struct hmm_buffer_object *bo) +{ + set_pages_array_wb(bo->pages, bo->pgnr); + free_pages_bulk_array(bo->pgnr, bo->pages); } /*Allocate pages which will be used only by ISP*/ static int 
alloc_private_pages(struct hmm_buffer_object *bo) { + const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS; int ret; - unsigned int pgnr, order, blk_pgnr, alloc_pgnr; - struct page *pages; - gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */ - int i, j; - int failure_number = 0; - bool reduce_order = false; - bool lack_mem = true; - - pgnr = bo->pgnr; - - i = 0; - alloc_pgnr = 0; - - while (pgnr) { - order = nr_to_order_bottom(pgnr); - /* - * if be short of memory, we will set order to 0 - * everytime. - */ - if (lack_mem) - order = HMM_MIN_ORDER; - else if (order > HMM_MAX_ORDER) - order = HMM_MAX_ORDER; -retry: - /* - * When order > HMM_MIN_ORDER, for performance reasons we don't - * want alloc_pages() to sleep. In case it fails and fallbacks - * to HMM_MIN_ORDER or in case the requested order is originally - * the minimum value, we can allow alloc_pages() to sleep for - * robustness purpose. - * - * REVISIT: why __GFP_FS is necessary? - */ - if (order == HMM_MIN_ORDER) { - gfp &= ~GFP_NOWAIT; - gfp |= __GFP_RECLAIM | __GFP_FS; - } - - pages = alloc_pages(gfp, order); - if (unlikely(!pages)) { - /* - * in low memory case, if allocation page fails, - * we turn to try if order=0 allocation could - * succeed. if order=0 fails too, that means there is - * no memory left. - */ - if (order == HMM_MIN_ORDER) { - dev_err(atomisp_dev, - "%s: cannot allocate pages\n", - __func__); - goto cleanup; - } - order = HMM_MIN_ORDER; - failure_number++; - reduce_order = true; - /* - * if fail two times continuously, we think be short - * of memory now. - */ - if (failure_number == 2) { - lack_mem = true; - failure_number = 0; - } - goto retry; - } else { - blk_pgnr = order_to_nr(order); - - /* - * set memory to uncacheable -- UC_MINUS - */ - ret = set_pages_uc(pages, blk_pgnr); - if (ret) { - dev_err(atomisp_dev, - "set page uncacheablefailed.\n"); - - __free_pages(pages, order); - goto cleanup; - } - - for (j = 0; j < blk_pgnr; j++, i++) { - bo->pages[i] = pages + j; - } - - pgnr -= blk_pgnr; + ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages); + if (ret != bo->pgnr) { + free_pages_bulk_array(ret, bo->pages); + return -ENOMEM; + } - /* - * if order is not reduced this time, clear - * failure_number. 
- */ - if (reduce_order) - reduce_order = false; - else - failure_number = 0; - } + ret = set_pages_array_uc(bo->pages, bo->pgnr); + if (ret) { + dev_err(atomisp_dev, "set pages uncacheable failed.\n"); + free_pages_bulk_array(bo->pgnr, bo->pages); + return ret; } return 0; -cleanup: - alloc_pgnr = i; - free_private_bo_pages(bo, alloc_pgnr); - return -ENOMEM; } static void free_user_pages(struct hmm_buffer_object *bo, @@ -762,12 +656,8 @@ static void free_user_pages(struct hmm_buffer_object *bo, { int i; - if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) { - unpin_user_pages(bo->pages, page_nr); - } else { - for (i = 0; i < page_nr; i++) - put_page(bo->pages[i]); - } + for (i = 0; i < page_nr; i++) + put_page(bo->pages[i]); } /* @@ -777,43 +667,13 @@ static int alloc_user_pages(struct hmm_buffer_object *bo, const void __user *userptr) { int page_nr; - struct vm_area_struct *vma; - - mutex_unlock(&bo->mutex); - mmap_read_lock(current->mm); - vma = find_vma(current->mm, (unsigned long)userptr); - mmap_read_unlock(current->mm); - if (!vma) { - dev_err(atomisp_dev, "find_vma failed\n"); - mutex_lock(&bo->mutex); - return -EFAULT; - } - mutex_lock(&bo->mutex); - /* - * Handle frame buffer allocated in other kerenl space driver - * and map to user space - */ userptr = untagged_addr(userptr); - if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { - page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr, - FOLL_LONGTERM | FOLL_WRITE, - bo->pages, NULL); - bo->mem_type = HMM_BO_MEM_TYPE_PFN; - } else { - /*Handle frame buffer allocated in user space*/ - mutex_unlock(&bo->mutex); - page_nr = get_user_pages_fast((unsigned long)userptr, - (int)(bo->pgnr), 1, bo->pages); - mutex_lock(&bo->mutex); - bo->mem_type = HMM_BO_MEM_TYPE_USER; - } - - dev_dbg(atomisp_dev, "%s: %d %s pages were allocated as 0x%08x\n", - __func__, - bo->pgnr, - bo->mem_type == HMM_BO_MEM_TYPE_USER ? 
"user" : "pfn", page_nr); + /* Handle frame buffer allocated in user space */ + mutex_unlock(&bo->mutex); + page_nr = get_user_pages_fast((unsigned long)userptr, bo->pgnr, 1, bo->pages); + mutex_lock(&bo->mutex); /* can be written by caller, not forced */ if (page_nr != bo->pgnr) { @@ -854,7 +714,7 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, mutex_lock(&bo->mutex); check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); - bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL); + bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL); if (unlikely(!bo->pages)) { ret = -ENOMEM; goto alloc_err; @@ -910,7 +770,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo) bo->status &= (~HMM_BO_PAGE_ALLOCED); if (bo->type == HMM_BO_PRIVATE) - free_private_bo_pages(bo, bo->pgnr); + free_private_bo_pages(bo); else if (bo->type == HMM_BO_USER) free_user_pages(bo, bo->pgnr); else diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c index 0e7c38b2bfe3..67915d76a87f 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_params.c +++ b/drivers/staging/media/atomisp/pci/sh_css_params.c @@ -950,8 +950,8 @@ sh_css_set_black_frame(struct ia_css_stream *stream, params->fpn_config.data = NULL; } if (!params->fpn_config.data) { - params->fpn_config.data = kvmalloc(height * width * - sizeof(short), GFP_KERNEL); + params->fpn_config.data = kvmalloc(array3_size(height, width, sizeof(short)), + GFP_KERNEL); if (!params->fpn_config.data) { IA_CSS_ERROR("out of memory"); IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM); diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c index 294c808b2ebe..3e7462112649 100644 --- a/drivers/staging/media/imx/imx-media-utils.c +++ b/drivers/staging/media/imx/imx-media-utils.c @@ -863,16 +863,16 @@ int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd, mutex_lock(&imxmd->md.graph_mutex); if (on) { - ret = __media_pipeline_start(entity, &imxmd->pipe); + ret = __media_pipeline_start(entity->pads, &imxmd->pipe); if (ret) goto out; ret = v4l2_subdev_call(sd, video, s_stream, 1); if (ret) - __media_pipeline_stop(entity); + __media_pipeline_stop(entity->pads); } else { v4l2_subdev_call(sd, video, s_stream, 0); - if (entity->pipe) - __media_pipeline_stop(entity); + if (media_pad_pipeline(entity->pads)) + __media_pipeline_stop(entity->pads); } out: diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c index cbc66ef0eda8..e5b550ccfa22 100644 --- a/drivers/staging/media/imx/imx7-media-csi.c +++ b/drivers/staging/media/imx/imx7-media-csi.c @@ -1360,7 +1360,7 @@ static int imx7_csi_video_start_streaming(struct vb2_queue *vq, mutex_lock(&csi->mdev.graph_mutex); - ret = __media_pipeline_start(&csi->sd.entity, &csi->pipe); + ret = __video_device_pipeline_start(csi->vdev, &csi->pipe); if (ret) goto err_unlock; @@ -1373,7 +1373,7 @@ static int imx7_csi_video_start_streaming(struct vb2_queue *vq, return 0; err_stop: - __media_pipeline_stop(&csi->sd.entity); + __video_device_pipeline_stop(csi->vdev); err_unlock: mutex_unlock(&csi->mdev.graph_mutex); dev_err(csi->dev, "pipeline start failed with %d\n", ret); @@ -1396,7 +1396,7 @@ static void imx7_csi_video_stop_streaming(struct vb2_queue *vq) mutex_lock(&csi->mdev.graph_mutex); v4l2_subdev_call(&csi->sd, video, s_stream, 0); - __media_pipeline_stop(&csi->sd.entity); + __video_device_pipeline_stop(csi->vdev); mutex_unlock(&csi->mdev.graph_mutex); /* release all 
active buffers */ diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h index dbdd015ce220..caa358e0bae4 100644 --- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h +++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h @@ -626,8 +626,11 @@ struct ipu3_uapi_stats_3a { * @b: white balance gain for B channel. * @gb: white balance gain for Gb channel. * - * Precision u3.13, range [0, 8). White balance correction is done by applying - * a multiplicative gain to each color channels prior to BNR. + * For BNR parameters WB gain factor for the three channels [Ggr, Ggb, Gb, Gr]. + * Their precision is U3.13 and the range is (0, 8) and the actual gain is + * Gx + 1, it is typically Gx = 1. + * + * Pout = {Pin * (1 + Gx)}. */ struct ipu3_uapi_bnr_static_config_wb_gains_config { __u16 gr; diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c index d1c539cefba8..ce13e746c15f 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -192,33 +192,30 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { - struct v4l2_rect *try_sel, *r; - struct imgu_v4l2_subdev *imgu_sd = container_of(sd, - struct imgu_v4l2_subdev, - subdev); + struct imgu_v4l2_subdev *imgu_sd = + container_of(sd, struct imgu_v4l2_subdev, subdev); if (sel->pad != IMGU_NODE_IN) return -EINVAL; switch (sel->target) { case V4L2_SEL_TGT_CROP: - try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad); - r = &imgu_sd->rect.eff; - break; + if (sel->which == V4L2_SUBDEV_FORMAT_TRY) + sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, + sel->pad); + else + sel->r = imgu_sd->rect.eff; + return 0; case V4L2_SEL_TGT_COMPOSE: - try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad); - r = &imgu_sd->rect.bds; - break; + if (sel->which == V4L2_SUBDEV_FORMAT_TRY) + sel->r = *v4l2_subdev_get_try_compose(sd, sd_state, + sel->pad); + else + sel->r = imgu_sd->rect.bds; + return 0; default: return -EINVAL; } - - if (sel->which == V4L2_SUBDEV_FORMAT_TRY) - sel->r = *try_sel; - else - sel->r = *r; - - return 0; } static int imgu_subdev_set_selection(struct v4l2_subdev *sd, @@ -486,7 +483,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) pipe = node->pipe; imgu_pipe = &imgu->imgu_pipe[pipe]; atomic_set(&node->sequence, 0); - r = media_pipeline_start(&node->vdev.entity, &imgu_pipe->pipeline); + r = video_device_pipeline_start(&node->vdev, &imgu_pipe->pipeline); if (r < 0) goto fail_return_bufs; @@ -511,7 +508,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) return 0; fail_stop_pipeline: - media_pipeline_stop(&node->vdev.entity); + video_device_pipeline_stop(&node->vdev); fail_return_bufs: imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED); @@ -551,7 +548,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR); mutex_unlock(&imgu->streaming_lock); - media_pipeline_stop(&node->vdev.entity); + video_device_pipeline_stop(&node->vdev); } /******************** v4l2_ioctl_ops ********************/ diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c index 8549d95be0f2..52f224d8def1 100644 --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c @@ -1102,6 +1102,7 @@ static int vdec_probe(struct platform_device 
*pdev) err_vdev_release: video_device_release(vdev); + v4l2_device_unregister(&core->v4l2_dev); return ret; } @@ -1110,6 +1111,7 @@ static int vdec_remove(struct platform_device *pdev) struct amvdec_core *core = platform_get_drvdata(pdev); video_unregister_device(core->vdev_dec); + v4l2_device_unregister(&core->v4l2_dev); return 0; } diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index 28aacda0f5a7..fa2a36d829d3 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c @@ -548,10 +548,8 @@ static int iss_pipeline_is_last(struct media_entity *me) struct iss_pipeline *pipe; struct media_pad *pad; - if (!me->pipe) - return 0; pipe = to_iss_pipeline(me); - if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED) + if (!pipe || pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED) return 0; pad = media_pad_remote_pad_first(&pipe->output->pad); return pad->entity == me; diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c index 842509dcfedf..60f3d84be828 100644 --- a/drivers/staging/media/omap4iss/iss_video.c +++ b/drivers/staging/media/omap4iss/iss_video.c @@ -870,8 +870,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) * Start streaming on the pipeline. No link touching an entity in the * pipeline can be activated or deactivated once streaming is started. */ - pipe = entity->pipe - ? to_iss_pipeline(entity) : &video->pipe; + pipe = to_iss_pipeline(&video->video.entity) ? : &video->pipe; pipe->external = NULL; pipe->external_rate = 0; pipe->external_bpp = 0; @@ -887,7 +886,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) if (video->iss->pdata->set_constraints) video->iss->pdata->set_constraints(video->iss, true); - ret = media_pipeline_start(entity, &pipe->pipe); + ret = video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) goto err_media_pipeline_start; @@ -978,7 +977,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) err_omap4iss_set_stream: vb2_streamoff(&vfh->queue, type); err_iss_video_check_format: - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); err_media_pipeline_start: if (video->iss->pdata->set_constraints) video->iss->pdata->set_constraints(video->iss, false); @@ -1032,7 +1031,7 @@ iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) if (video->iss->pdata->set_constraints) video->iss->pdata->set_constraints(video->iss, false); - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); done: mutex_unlock(&video->stream_lock); diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h index 526281bf0051..ca2d5edb6261 100644 --- a/drivers/staging/media/omap4iss/iss_video.h +++ b/drivers/staging/media/omap4iss/iss_video.h @@ -90,8 +90,15 @@ struct iss_pipeline { int external_bpp; }; -#define to_iss_pipeline(__e) \ - container_of((__e)->pipe, struct iss_pipeline, pipe) +static inline struct iss_pipeline *to_iss_pipeline(struct media_entity *entity) +{ + struct media_pipeline *pipe = media_entity_pipeline(entity); + + if (!pipe) + return NULL; + + return container_of(pipe, struct iss_pipeline, pipe); +} static inline int iss_pipeline_ready(struct iss_pipeline *pipe) { diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig index 21c13f9b6e33..621944f9907a 100644 --- 
a/drivers/staging/media/sunxi/cedrus/Kconfig +++ b/drivers/staging/media/sunxi/cedrus/Kconfig @@ -2,6 +2,7 @@ config VIDEO_SUNXI_CEDRUS tristate "Allwinner Cedrus VPU driver" depends on VIDEO_DEV + depends on RESET_CONTROLLER depends on HAS_DMA depends on OF select MEDIA_CONTROLLER diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c index f10a041e3e6c..d58370a84737 100644 --- a/drivers/staging/media/tegra-video/tegra210.c +++ b/drivers/staging/media/tegra-video/tegra210.c @@ -547,7 +547,7 @@ static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count) VI_INCR_SYNCPT_NO_STALL); /* start the pipeline */ - ret = media_pipeline_start(&chan->video.entity, pipe); + ret = video_device_pipeline_start(&chan->video, pipe); if (ret < 0) goto error_pipeline_start; @@ -595,7 +595,7 @@ error_kthread_done: error_kthread_start: tegra_channel_set_stream(chan, false); error_set_stream: - media_pipeline_stop(&chan->video.entity); + video_device_pipeline_stop(&chan->video); error_pipeline_start: tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED); return ret; @@ -617,7 +617,7 @@ static void tegra210_vi_stop_streaming(struct vb2_queue *vq) tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR); tegra_channel_set_stream(chan, false); - media_pipeline_stop(&chan->video.entity); + video_device_pipeline_stop(&chan->video); } /* diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index 2a5570b9799a..b80e25ec1261 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -516,11 +516,7 @@ static int start_power_clamp(void) cpus_read_lock(); /* prefer BSP */ - control_cpu = 0; - if (!cpu_online(control_cpu)) { - control_cpu = get_cpu(); - put_cpu(); - } + control_cpu = cpumask_first(cpu_online_mask); clamping = true; schedule_delayed_work(&poll_pkg_cstate_work, 0); diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 3fe8a7edc252..c777a612d932 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -38,6 +38,9 @@ #include "watchdog_core.h" /* For watchdog_dev_register/... 
*/ +#define CREATE_TRACE_POINTS +#include <trace/events/watchdog.h> + static DEFINE_IDA(watchdog_ida); static int stop_on_reboot = -1; @@ -163,6 +166,7 @@ static int watchdog_reboot_notifier(struct notifier_block *nb, int ret; ret = wdd->ops->stop(wdd); + trace_watchdog_stop(wdd, ret); if (ret) return NOTIFY_BAD; } diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 744b2ab75288..55574ed42504 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -47,6 +47,8 @@ #include "watchdog_core.h" #include "watchdog_pretimeout.h" +#include <trace/events/watchdog.h> + /* the dev_t structure to store the dynamically allocated watchdog devices */ static dev_t watchdog_devt; /* Reference to watchdog device behind /dev/watchdog */ @@ -157,10 +159,13 @@ static int __watchdog_ping(struct watchdog_device *wdd) wd_data->last_hw_keepalive = now; - if (wdd->ops->ping) + if (wdd->ops->ping) { err = wdd->ops->ping(wdd); /* ping the watchdog */ - else + trace_watchdog_ping(wdd, err); + } else { err = wdd->ops->start(wdd); /* restart watchdog */ + trace_watchdog_start(wdd, err); + } if (err == 0) watchdog_hrtimer_pretimeout_start(wdd); @@ -259,6 +264,7 @@ static int watchdog_start(struct watchdog_device *wdd) } } else { err = wdd->ops->start(wdd); + trace_watchdog_start(wdd, err); if (err == 0) { set_bit(WDOG_ACTIVE, &wdd->status); wd_data->last_keepalive = started_at; @@ -297,6 +303,7 @@ static int watchdog_stop(struct watchdog_device *wdd) if (wdd->ops->stop) { clear_bit(WDOG_HW_RUNNING, &wdd->status); err = wdd->ops->stop(wdd); + trace_watchdog_stop(wdd, err); } else { set_bit(WDOG_HW_RUNNING, &wdd->status); } @@ -369,6 +376,7 @@ static int watchdog_set_timeout(struct watchdog_device *wdd, if (wdd->ops->set_timeout) { err = wdd->ops->set_timeout(wdd, timeout); + trace_watchdog_set_timeout(wdd, timeout, err); } else { wdd->timeout = timeout; /* Disable pretimeout if it doesn't fit the new timeout */ diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c index 860f37c93af4..daa525df7bdc 100644 --- a/drivers/xen/grant-dma-ops.c +++ b/drivers/xen/grant-dma-ops.c @@ -31,12 +31,12 @@ static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ); static inline dma_addr_t grant_to_dma(grant_ref_t grant) { - return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT); + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT); } static inline grant_ref_t dma_to_grant(dma_addr_t dma) { - return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT); + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT); } static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) @@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(size); + unsigned int i, n_pages = XEN_PFN_UP(size); unsigned long pfn; grant_ref_t grant; void *ret; @@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size, if (unlikely(data->broken)) return NULL; - ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp); + ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp); if (!ret) return NULL; pfn = virt_to_pfn(ret); if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) { - free_pages_exact(ret, n_pages * PAGE_SIZE); + free_pages_exact(ret, n_pages * XEN_PAGE_SIZE); return NULL; } @@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, 
dma_addr_t dma_handle, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(size); + unsigned int i, n_pages = XEN_PFN_UP(size); grant_ref_t grant; data = find_xen_grant_dma_data(dev); @@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, gnttab_free_grant_reference_seq(grant, n_pages); - free_pages_exact(vaddr, n_pages * PAGE_SIZE); + free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE); } static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size, @@ -168,7 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(offset + size); + unsigned long dma_offset = xen_offset_in_page(offset), + pfn_offset = XEN_PFN_DOWN(offset); + unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size); grant_ref_t grant; dma_addr_t dma_handle; @@ -187,10 +189,11 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, for (i = 0; i < n_pages; i++) { gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, - xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE); + pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset), + dir == DMA_TO_DEVICE); } - dma_handle = grant_to_dma(grant) + offset; + dma_handle = grant_to_dma(grant) + dma_offset; return dma_handle; } @@ -200,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned long offset = dma_handle & (PAGE_SIZE - 1); - unsigned int i, n_pages = PFN_UP(offset + size); + unsigned long dma_offset = xen_offset_in_page(dma_handle); + unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size); grant_ref_t grant; if (WARN_ON(dir == DMA_NONE)) diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index dce3a16996b9..4ec18ceb2f21 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -138,6 +138,7 @@ struct share_check { u64 root_objectid; u64 inum; int share_count; + bool have_delayed_delete_refs; }; static inline int extent_is_shared(struct share_check *sc) @@ -820,16 +821,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, struct preftrees *preftrees, struct share_check *sc) { struct btrfs_delayed_ref_node *node; - struct btrfs_delayed_extent_op *extent_op = head->extent_op; struct btrfs_key key; - struct btrfs_key tmp_op_key; struct rb_node *n; int count; int ret = 0; - if (extent_op && extent_op->update_key) - btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key); - spin_lock(&head->lock); for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { node = rb_entry(n, struct btrfs_delayed_ref_node, @@ -855,10 +851,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, case BTRFS_TREE_BLOCK_REF_KEY: { /* NORMAL INDIRECT METADATA backref */ struct btrfs_delayed_tree_ref *ref; + struct btrfs_key *key_ptr = NULL; + + if (head->extent_op && head->extent_op->update_key) { + btrfs_disk_key_to_cpu(&key, &head->extent_op->key); + key_ptr = &key; + } ref = btrfs_delayed_node_to_tree_ref(node); ret = add_indirect_ref(fs_info, preftrees, ref->root, - &tmp_op_key, ref->level + 1, + key_ptr, ref->level + 1, node->bytenr, count, sc, GFP_ATOMIC); break; @@ -884,13 +886,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, key.offset = ref->offset; /* - * Found a inum that doesn't match our known inum, we - * know it's shared. 
+ * If we have a share check context and a reference for + * another inode, we can't exit immediately. This is + * because even if this is a BTRFS_ADD_DELAYED_REF + * reference we may find next a BTRFS_DROP_DELAYED_REF + * which cancels out this ADD reference. + * + * If this is a DROP reference and there was no previous + * ADD reference, then we need to signal that when we + * process references from the extent tree (through + * add_inline_refs() and add_keyed_refs()), we should + * not exit early if we find a reference for another + * inode, because one of the delayed DROP references + * may cancel that reference in the extent tree. */ - if (sc && sc->inum && ref->objectid != sc->inum) { - ret = BACKREF_FOUND_SHARED; - goto out; - } + if (sc && count < 0) + sc->have_delayed_delete_refs = true; ret = add_indirect_ref(fs_info, preftrees, ref->root, &key, 0, node->bytenr, count, sc, @@ -920,7 +931,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, } if (!ret) ret = extent_is_shared(sc); -out: + spin_unlock(&head->lock); return ret; } @@ -1023,7 +1034,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); - if (sc && sc->inum && key.objectid != sc->inum) { + if (sc && sc->inum && key.objectid != sc->inum && + !sc->have_delayed_delete_refs) { ret = BACKREF_FOUND_SHARED; break; } @@ -1033,6 +1045,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info, ret = add_indirect_ref(fs_info, preftrees, root, &key, 0, bytenr, count, sc, GFP_NOFS); + break; } default: @@ -1122,7 +1135,8 @@ static int add_keyed_refs(struct btrfs_root *extent_root, key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); - if (sc && sc->inum && key.objectid != sc->inum) { + if (sc && sc->inum && key.objectid != sc->inum && + !sc->have_delayed_delete_refs) { ret = BACKREF_FOUND_SHARED; break; } @@ -1522,6 +1536,9 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache { struct btrfs_backref_shared_cache_entry *entry; + if (!cache->use_cache) + return false; + if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL)) return false; @@ -1557,6 +1574,19 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache return false; *is_shared = entry->is_shared; + /* + * If the node at this level is shared, than all nodes below are also + * shared. Currently some of the nodes below may be marked as not shared + * because we have just switched from one leaf to another, and switched + * also other nodes above the leaf and below the current level, so mark + * them as shared. + */ + if (*is_shared) { + for (int i = 0; i < level; i++) { + cache->entries[i].is_shared = true; + cache->entries[i].gen = entry->gen; + } + } return true; } @@ -1573,6 +1603,9 @@ static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache, struct btrfs_backref_shared_cache_entry *entry; u64 gen; + if (!cache->use_cache) + return; + if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL)) return; @@ -1648,6 +1681,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, .root_objectid = root->root_key.objectid, .inum = inum, .share_count = 0, + .have_delayed_delete_refs = false, }; int level; @@ -1669,6 +1703,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, /* -1 means we are in the bytenr of the data extent. 
*/ level = -1; ULIST_ITER_INIT(&uiter); + cache->use_cache = true; while (1) { bool is_shared; bool cached; @@ -1698,6 +1733,24 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, extent_gen > btrfs_root_last_snapshot(&root->root_item)) break; + /* + * If our data extent was not directly shared (without multiple + * reference items), than it might have a single reference item + * with a count > 1 for the same offset, which means there are 2 + * (or more) file extent items that point to the data extent - + * this happens when a file extent item needs to be split and + * then one item gets moved to another leaf due to a b+tree leaf + * split when inserting some item. In this case the file extent + * items may be located in different leaves and therefore some + * of the leaves may be referenced through shared subtrees while + * others are not. Since our extent buffer cache only works for + * a single path (by far the most common case and simpler to + * deal with), we can not use it if we have multiple leaves + * (which implies multiple paths). + */ + if (level == -1 && tmp->nnodes > 1) + cache->use_cache = false; + if (level >= 0) store_backref_shared_cache(cache, root, bytenr, level, false); @@ -1713,6 +1766,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr, break; } shared.share_count = 0; + shared.have_delayed_delete_refs = false; cond_resched(); } diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 52ae6957b414..8e69584d538d 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -29,6 +29,7 @@ struct btrfs_backref_shared_cache { * a given data extent should never exceed the maximum b+tree height. */ struct btrfs_backref_shared_cache_entry entries[BTRFS_MAX_LEVEL]; + bool use_cache; }; typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root, diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 32c415cfbdfe..deebc8ddbd93 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -774,10 +774,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait) btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work); out: - /* REVIEW */ if (wait && caching_ctl) ret = btrfs_caching_ctl_wait_done(cache, caching_ctl); - /* wait_event(caching_ctl->wait, space_cache_v1_done(cache)); */ if (caching_ctl) btrfs_put_caching_control(caching_ctl); diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c index 618275af19c4..83cb0378096f 100644 --- a/fs/btrfs/extent-io-tree.c +++ b/fs/btrfs/extent-io-tree.c @@ -1641,16 +1641,17 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, int err; u64 failed_start; - while (1) { + err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, + cached_state, NULL, GFP_NOFS); + while (err == -EEXIST) { + if (failed_start != start) + clear_extent_bit(tree, start, failed_start - 1, + EXTENT_LOCKED, cached_state); + + wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, cached_state, NULL, GFP_NOFS); - if (err == -EEXIST) { - wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); - start = failed_start; - } else - break; - WARN_ON(start > end); } return err; } diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 4ef4167072b8..ec6e1752af2c 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -348,6 +348,7 @@ static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd) switch (sctx->proto) { case 1: return cmd <= BTRFS_SEND_C_MAX_V1; 
case 2: return cmd <= BTRFS_SEND_C_MAX_V2; + case 3: return cmd <= BTRFS_SEND_C_MAX_V3; default: return false; } } @@ -6469,7 +6470,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) if (ret < 0) goto out; } - if (sctx->cur_inode_needs_verity) { + + if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY) + && sctx->cur_inode_needs_verity) { ret = process_verity(sctx); if (ret < 0) goto out; diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h index 0a4537775e0c..f7585cfa7e52 100644 --- a/fs/btrfs/send.h +++ b/fs/btrfs/send.h @@ -10,7 +10,12 @@ #include <linux/types.h> #define BTRFS_SEND_STREAM_MAGIC "btrfs-stream" +/* Conditional support for the upcoming protocol version. */ +#ifdef CONFIG_BTRFS_DEBUG +#define BTRFS_SEND_STREAM_VERSION 3 +#else #define BTRFS_SEND_STREAM_VERSION 2 +#endif /* * In send stream v1, no command is larger than 64K. In send stream v2, no limit diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c index fe88b67c863f..60399081046a 100644 --- a/fs/cifs/cached_dir.c +++ b/fs/cifs/cached_dir.c @@ -253,8 +253,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, dentry = dget(cifs_sb->root); else { dentry = path_to_dentry(cifs_sb, path); - if (IS_ERR(dentry)) + if (IS_ERR(dentry)) { + rc = -ENOENT; goto oshr_free; + } } cfid->dentry = dentry; cfid->tcon = tcon; @@ -338,6 +340,27 @@ smb2_close_cached_fid(struct kref *ref) free_cached_dir(cfid); } +void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon, + const char *name, struct cifs_sb_info *cifs_sb) +{ + struct cached_fid *cfid = NULL; + int rc; + + rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid); + if (rc) { + cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name); + return; + } + spin_lock(&cfid->cfids->cfid_list_lock); + if (cfid->has_lease) { + cfid->has_lease = false; + kref_put(&cfid->refcount, smb2_close_cached_fid); + } + spin_unlock(&cfid->cfids->cfid_list_lock); + close_cached_dir(cfid); +} + + void close_cached_dir(struct cached_fid *cfid) { kref_put(&cfid->refcount, smb2_close_cached_fid); @@ -378,22 +401,20 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon) { struct cached_fids *cfids = tcon->cfids; struct cached_fid *cfid, *q; - struct list_head entry; + LIST_HEAD(entry); - INIT_LIST_HEAD(&entry); spin_lock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { - list_del(&cfid->entry); - list_add(&cfid->entry, &entry); + list_move(&cfid->entry, &entry); cfids->num_entries--; cfid->is_open = false; + cfid->on_list = false; /* To prevent race with smb2_cached_lease_break() */ kref_get(&cfid->refcount); } spin_unlock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &entry, entry) { - cfid->on_list = false; list_del(&cfid->entry); cancel_work_sync(&cfid->lease_break); if (cfid->has_lease) { @@ -518,15 +539,13 @@ struct cached_fids *init_cached_dirs(void) void free_cached_dirs(struct cached_fids *cfids) { struct cached_fid *cfid, *q; - struct list_head entry; + LIST_HEAD(entry); - INIT_LIST_HEAD(&entry); spin_lock(&cfids->cfid_list_lock); list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { cfid->on_list = false; cfid->is_open = false; - list_del(&cfid->entry); - list_add(&cfid->entry, &entry); + list_move(&cfid->entry, &entry); } spin_unlock(&cfids->cfid_list_lock); diff --git a/fs/cifs/cached_dir.h b/fs/cifs/cached_dir.h index e536304ca2ce..2f4e764c9ca9 100644 --- a/fs/cifs/cached_dir.h +++ b/fs/cifs/cached_dir.h @@ -69,6 +69,10 @@ extern int open_cached_dir_by_dentry(struct cifs_tcon 
*tcon, struct dentry *dentry, struct cached_fid **cfid); extern void close_cached_dir(struct cached_fid *cfid); +extern void drop_cached_dir_by_name(const unsigned int xid, + struct cifs_tcon *tcon, + const char *name, + struct cifs_sb_info *cifs_sb); extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb); extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon); extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index c6ac19223ddc..d0b9fec111aa 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -1302,8 +1302,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, ssize_t rc; struct cifsFileInfo *cfile = dst_file->private_data; - if (cfile->swapfile) - return -EOPNOTSUPP; + if (cfile->swapfile) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff, len, flags); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 5b4a7a32bdc5..388b745a978e 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ /* when changing internal version - update following two lines at same time */ -#define SMB3_PRODUCT_BUILD 39 -#define CIFS_VERSION "2.39" +#define SMB3_PRODUCT_BUILD 40 +#define CIFS_VERSION "2.40" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index a5c73c2af3a2..8b1c37158556 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -543,8 +543,10 @@ int cifs_create(struct user_namespace *mnt_userns, struct inode *inode, cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n", inode, direntry, direntry); - if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) - return -EIO; + if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) { + rc = -EIO; + goto out_free_xid; + } tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb)); rc = PTR_ERR(tlink); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index f6ffee514c34..5b3b308e115c 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1885,11 +1885,13 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) struct cifsFileInfo *cfile; __u32 type; - rc = -EACCES; xid = get_xid(); - if (!(fl->fl_flags & FL_FLOCK)) - return -ENOLCK; + if (!(fl->fl_flags & FL_FLOCK)) { + rc = -ENOLCK; + free_xid(xid); + return rc; + } cfile = (struct cifsFileInfo *)file->private_data; tcon = tlink_tcon(cfile->tlink); @@ -1908,8 +1910,9 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl) * if no lock or unlock then nothing to do since we do not * know what it is */ + rc = -EOPNOTSUPP; free_xid(xid); - return -EOPNOTSUPP; + return rc; } rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock, diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 7cf96e581d24..9bde08d44617 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -368,8 +368,10 @@ cifs_get_file_info_unix(struct file *filp) if (cfile->symlink_target) { fattr.cf_symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); - if (!fattr.cf_symlink_target) - return -ENOMEM; + if (!fattr.cf_symlink_target) { + rc = -ENOMEM; + goto cifs_gfiunix_out; + } } rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->fid.netfid, &find_data); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 0435d1dfa9e1..92e4278ec35d 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -496,6 +496,7 @@ out: cifs_put_tcp_session(chan->server, 0); } + free_xid(xid); return rc; } diff --git 
a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index a6640e6ea58b..68e08c85fbb8 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -655,6 +655,7 @@ int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { + drop_cached_dir_by_name(xid, tcon, name, cifs_sb); return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, CREATE_NOT_FILE, ACL_NO_MODE, NULL, SMB2_OP_RMDIR, NULL, NULL, NULL); @@ -698,6 +699,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon, { struct cifsFileInfo *cfile; + drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb); cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile); return smb2_set_path_attr(xid, tcon, from_name, to_name, diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 17b25153cb68..4f53fa012936 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -530,6 +530,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, p = buf; spin_lock(&ses->iface_lock); + ses->iface_count = 0; /* * Go through iface_list and do kref_put to remove * any unused ifaces. ifaces in use will be removed @@ -651,9 +652,9 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, kref_put(&iface->refcount, release_iface); } else list_add_tail(&info->iface_head, &ses->iface_list); - spin_unlock(&ses->iface_lock); ses->iface_count++; + spin_unlock(&ses->iface_lock); ses->iface_last_update = jiffies; next_iface: nb_iface++; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index a2384509ea84..a5695748a89b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1341,14 +1341,13 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) static void SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data) { - int i; + struct kvec *iov = sess_data->iov; - /* zero the session data before freeing, as it might contain sensitive info (keys, etc) */ - for (i = 0; i < 2; i++) - if (sess_data->iov[i].iov_base) - memzero_explicit(sess_data->iov[i].iov_base, sess_data->iov[i].iov_len); + /* iov[1] is already freed by caller */ + if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base) + memzero_explicit(iov[0].iov_base, iov[0].iov_len); - free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base); + free_rsp_buf(sess_data->buf0_type, iov[0].iov_base); sess_data->buf0_type = CIFS_NO_BUFFER; } @@ -1531,7 +1530,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) &blob_length, ses, server, sess_data->nls_cp); if (rc) - goto out_err; + goto out; if (use_spnego) { /* BB eventually need to add this */ @@ -1578,7 +1577,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data) } out: - memzero_explicit(ntlmssp_blob, blob_length); + kfree_sensitive(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); if (!rc) { sess_data->result = 0; @@ -1662,7 +1661,7 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data) } #endif out: - memzero_explicit(ntlmssp_blob, blob_length); + kfree_sensitive(ntlmssp_blob); SMB2_sess_free_buffer(sess_data); kfree_sensitive(ses->ntlmssp); ses->ntlmssp = NULL; diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c index a0ef63cfcecb..9e4f47808bd5 100644 --- a/fs/efivarfs/vars.c +++ b/fs/efivarfs/vars.c @@ -651,22 +651,6 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, if (err) return err; - /* - * Ensure that the available space hasn't shrunk below the safe level - */ - status = check_var_size(attributes, *size + ucs2_strsize(name, 1024)); - if 
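The SMB2 session-setup hunks above move from explicit memzero_explicit() plus kfree() to kfree_sensitive(), which zeroes the allocation before freeing so key material does not linger in freed memory. A minimal sketch with a hypothetical buffer, not part of the patch:

#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical example: a temporary copy of authentication material. */
static int handle_secret(const u8 *src, size_t len)
{
	u8 *blob = kmemdup(src, len, GFP_KERNEL);

	if (!blob)
		return -ENOMEM;

	/* ... use blob for the exchange ... */

	kfree_sensitive(blob);	/* memzero_explicit() + kfree() in one call */
	return 0;
}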
(status != EFI_SUCCESS) { - if (status != EFI_UNSUPPORTED) { - err = efi_status_to_err(status); - goto out; - } - - if (*size > 65536) { - err = -ENOSPC; - goto out; - } - } - status = efivar_set_variable_locked(name, vendor, attributes, *size, data, false); if (status != EFI_SUCCESS) { diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 998cd26a1b3b..fe05bc51f9f2 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -590,14 +590,17 @@ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb, struct super_block *psb = erofs_pseudo_mnt->mnt_sb; mutex_lock(&erofs_domain_cookies_lock); + spin_lock(&psb->s_inode_list_lock); list_for_each_entry(inode, &psb->s_inodes, i_sb_list) { ctx = inode->i_private; if (!ctx || ctx->domain != domain || strcmp(ctx->name, name)) continue; igrab(inode); + spin_unlock(&psb->s_inode_list_lock); mutex_unlock(&erofs_domain_cookies_lock); return ctx; } + spin_unlock(&psb->s_inode_list_lock); ctx = erofs_fscache_domain_init_cookie(sb, name, need_inode); mutex_unlock(&erofs_domain_cookies_lock); return ctx; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 559380a535af..c7f24fc7efd5 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -813,15 +813,14 @@ retry: ++spiltted; if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) fe->pcl->multibases = true; - - if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && - !(map->m_flags & EROFS_MAP_PARTIAL_REF) && - fe->pcl->length == map->m_llen) - fe->pcl->partial = false; if (fe->pcl->length < offset + end - map->m_la) { fe->pcl->length = offset + end - map->m_la; fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK; } + if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && + !(map->m_flags & EROFS_MAP_PARTIAL_REF) && + fe->pcl->length == map->m_llen) + fe->pcl->partial = false; next_part: /* shorten the remaining extent to update progress */ map->m_llen = offset + cur - map->m_la; @@ -888,15 +887,13 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) { unsigned int pgnr; - struct page *oldpage; pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; DBG_BUGON(pgnr >= be->nr_pages); - oldpage = be->decompressed_pages[pgnr]; - be->decompressed_pages[pgnr] = bvec->page; - - if (!oldpage) + if (!be->decompressed_pages[pgnr]) { + be->decompressed_pages[pgnr] = bvec->page; return; + } } /* (cold path) one pcluster is requested multiple times */ diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index e7f04c4fbb81..d98c95212985 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -126,10 +126,10 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) } /* - * bit 31: I/O error occurred on this page - * bit 0 - 30: remaining parts to complete this page + * bit 30: I/O error occurred on this page + * bit 0 - 29: remaining parts to complete this page */ -#define Z_EROFS_PAGE_EIO (1 << 31) +#define Z_EROFS_PAGE_EIO (1 << 30) static inline void z_erofs_onlinepage_init(struct page *page) { diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 44c27ef39c43..0bb66927e3d0 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -57,8 +57,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize + vi->xattr_isize, 8); - kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), - EROFS_KMAP_ATOMIC); + kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP); if (IS_ERR(kaddr)) { err = PTR_ERR(kaddr); goto out_unlock; @@ -73,7 +72,7 @@ static int 
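The erofs fscache hunk above wraps the walk of psb->s_inodes in sb->s_inode_list_lock and pins the matched inode with igrab() before the lock is dropped. A rough sketch of the general pattern, with a hypothetical match() predicate:

#include <linux/fs.h>

/* Find and pin the first inode on a superblock that satisfies match(). */
static struct inode *find_pinned_inode(struct super_block *sb,
				       bool (*match)(struct inode *))
{
	struct inode *inode, *found = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		if (!match(inode))
			continue;
		found = igrab(inode);	/* take a reference before unlocking */
		break;
	}
	spin_unlock(&sb->s_inode_list_lock);

	return found;	/* caller must iput() a non-NULL result */
}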
z_erofs_fill_inode_lazy(struct inode *inode) vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER; vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63); vi->z_tailextent_headlcn = 0; - goto unmap_done; + goto done; } vi->z_advise = le16_to_cpu(h->h_advise); vi->z_algorithmtype[0] = h->h_algorithmtype & 15; @@ -85,7 +84,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel", headnr + 1, vi->z_algorithmtype[headnr], vi->nid); err = -EOPNOTSUPP; - goto unmap_done; + goto out_put_metabuf; } vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); @@ -95,7 +94,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu", vi->nid); err = -EFSCORRUPTED; - goto unmap_done; + goto out_put_metabuf; } if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION && !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^ @@ -103,12 +102,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu", vi->nid); err = -EFSCORRUPTED; - goto unmap_done; + goto out_put_metabuf; } -unmap_done: - erofs_put_metabuf(&buf); - if (err) - goto out_unlock; if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) { struct erofs_map_blocks map = { @@ -127,7 +122,7 @@ unmap_done: err = -EFSCORRUPTED; } if (err < 0) - goto out_unlock; + goto out_put_metabuf; } if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER && @@ -141,11 +136,14 @@ unmap_done: EROFS_GET_BLOCKS_FINDTAIL); erofs_put_metabuf(&map.buf); if (err < 0) - goto out_unlock; + goto out_put_metabuf; } +done: /* paired with smp_mb() at the beginning of the function */ smp_mb(); set_bit(EROFS_I_Z_INITED_BIT, &vi->flags); +out_put_metabuf: + erofs_put_metabuf(&buf); out_unlock: clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags); return err; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 6a29bcfc9390..dc74a947a440 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1458,12 +1458,14 @@ static __net_init int nfsd_init_net(struct net *net) goto out_drc_error; retval = nfsd_reply_cache_init(nn); if (retval) - goto out_drc_error; + goto out_cache_error; get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key)); seqlock_init(&nn->writeverf_lock); return 0; +out_cache_error: + nfsd4_leases_net_shutdown(nn); out_drc_error: nfsd_idmap_shutdown(net); out_idmap_error: diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index d73434200df9..8c52b6c9d31a 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -392,8 +392,8 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access) skip_pseudoflavor_check: /* Finally, check access permissions. 
*/ error = nfsd_permission(rqstp, exp, dentry, access); - trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error); out: + trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error); if (error == nfserr_stale) nfsd_stats_fh_stale_inc(exp); return error; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 961d1cf54388..05f32989bad6 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -232,6 +232,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns, handle_t *handle = NULL; struct ocfs2_super *osb; struct ocfs2_dinode *dirfe; + struct ocfs2_dinode *fe = NULL; struct buffer_head *new_fe_bh = NULL; struct inode *inode = NULL; struct ocfs2_alloc_context *inode_ac = NULL; @@ -382,6 +383,7 @@ static int ocfs2_mknod(struct user_namespace *mnt_userns, goto leave; } + fe = (struct ocfs2_dinode *) new_fe_bh->b_data; if (S_ISDIR(mode)) { status = ocfs2_fill_new_dir(osb, handle, dir, inode, new_fe_bh, data_ac, meta_ac); @@ -454,8 +456,11 @@ roll_back: leave: if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && fe) + ocfs2_set_links_count(fe, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) @@ -632,18 +637,9 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, return status; } - status = __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, + return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, parent_fe_bh, handle, inode_ac, fe_blkno, suballoc_loc, suballoc_bit); - if (status < 0) { - u64 bg_blkno = ocfs2_which_suballoc_group(fe_blkno, suballoc_bit); - int tmp = ocfs2_free_suballoc_bits(handle, inode_ac->ac_inode, - inode_ac->ac_bh, suballoc_bit, bg_blkno, 1); - if (tmp) - mlog_errno(tmp); - } - - return status; } static int ocfs2_mkdir(struct user_namespace *mnt_userns, @@ -2028,8 +2024,11 @@ bail: ocfs2_clusters_to_bytes(osb->sb, 1)); if (status < 0 && did_quota_inode) dquot_free_inode(inode); - if (handle) + if (handle) { + if (status < 0 && fe) + ocfs2_set_links_count(fe, 0); ocfs2_commit_trans(osb, handle); + } ocfs2_inode_unlock(dir, 1); if (did_block_signals) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 8b4f3073f8f5..8a74cdcc9af0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -902,7 +902,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v) goto out_put_mm; hold_task_mempolicy(priv); - vma = mas_find(&mas, 0); + vma = mas_find(&mas, ULONG_MAX); if (unlikely(!vma)) goto empty_set; diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 34fb3431a8f3..292a5c40bd0c 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -71,7 +71,7 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb); void ghes_unregister_vendor_record_notifier(struct notifier_block *nb); #endif -int ghes_estatus_pool_init(int num_ghes); +int ghes_estatus_pool_init(unsigned int num_ghes); /* From drivers/edac/ghes_edac.c */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index c15de165ec8f..d06ada2341cb 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -162,6 +162,16 @@ #define PATCHABLE_DISCARDS *(__patchable_function_entries) #endif +#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG +/* + * Simply points to ftrace_stub, but with the proper protocol. 
+ * Defined by the linker script in linux/vmlinux.lds.h + */ +#define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub; +#else +#define FTRACE_STUB_HACK +#endif + #ifdef CONFIG_FTRACE_MCOUNT_RECORD /* * The ftrace call sites are logged to a section whose name depends on the @@ -169,10 +179,6 @@ * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header * dependencies for FTRACE_CALLSITE_SECTION's definition. * - * Need to also make ftrace_stub_graph point to ftrace_stub - * so that the same stub location may have different protocols - * and not mess up with C verifiers. - * * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func * as some archs will have a different prototype for that function * but ftrace_ops_list_func() will have a single prototype. @@ -182,11 +188,11 @@ KEEP(*(__mcount_loc)) \ KEEP_PATCHABLE \ __stop_mcount_loc = .; \ - ftrace_stub_graph = ftrace_stub; \ + FTRACE_STUB_HACK \ ftrace_ops_list_func = arch_ftrace_ops_list_func; #else # ifdef CONFIG_FUNCTION_TRACER -# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; \ +# define MCOUNT_REC() FTRACE_STUB_HACK \ ftrace_ops_list_func = arch_ftrace_ops_list_func; # else # define MCOUNT_REC() diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 599855c6a672..2ae4fd62e01c 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -32,6 +32,15 @@ #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) +/** + * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining + * + * Setting this flag on a scheduler fence prevents pipelining of jobs depending + * on this fence. In other words we always insert a full CPU round trip before + * dependen jobs are pushed to the hw queue. + */ +#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS + struct drm_gem_object; struct drm_gpu_scheduler; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f2a9f2274c3b..528bd44b59e2 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -106,6 +106,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup *cgroup_get_from_path(const char *path); struct cgroup *cgroup_get_from_fd(int fd); +struct cgroup *cgroup_v1v2_get_from_fd(int fd); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h index 50be7cbd93a5..b1b5720d89a5 100644 --- a/include/linux/dsa/tag_qca.h +++ b/include/linux/dsa/tag_qca.h @@ -61,9 +61,9 @@ struct sk_buff; /* Special struct emulating a Ethernet header */ struct qca_mgmt_ethhdr { - u32 command; /* command bit 31:0 */ - u32 seq; /* seq 63:32 */ - u32 mdio_data; /* first 4byte mdio */ + __le32 command; /* command bit 31:0 */ + __le32 seq; /* seq 63:32 */ + __le32 mdio_data; /* first 4byte mdio */ __be16 hdr; /* qca hdr */ } __packed; @@ -73,7 +73,7 @@ enum mdio_cmd { }; struct mib_ethhdr { - u32 data[3]; /* first 3 mib counter */ + __le32 data[3]; /* first 3 mib counter */ __be16 hdr; /* qca hdr */ } __packed; diff --git a/include/linux/efi.h b/include/linux/efi.h index da3974bf05d3..80f3c1c7827d 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1085,9 +1085,6 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data); -efi_status_t check_var_size(u32 attributes, unsigned 
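The DRM_SCHED_FENCE_DONT_PIPELINE comment above describes a user bit on a scheduler fence. A hedged sketch of how a driver might set it, assuming the flag is applied with set_bit() to the fence's flags word (hypothetical helper, not taken from this series):

#include <linux/bitops.h>
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/*
 * Hypothetical driver snippet: flag a scheduler fence so that jobs depending
 * on it are only pushed to the hardware queue after a full CPU round trip.
 */
static void my_disable_pipelining(struct dma_fence *sched_fence)
{
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &sched_fence->flags);
}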
long size); -efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size); - #if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER) extern bool efi_capsule_pending(int *reset_type); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a325532aeab5..3c9da1f8979e 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -455,7 +455,7 @@ extern void iommu_set_default_translated(bool cmd_line); extern bool iommu_default_passthrough(void); extern struct iommu_resv_region * iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, - enum iommu_resv_type type); + enum iommu_resv_type type, gfp_t gfp); extern int iommu_get_group_resv_regions(struct iommu_group *group, struct list_head *head); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 32f259fa5801..00c3448ba7f8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1390,6 +1390,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); +long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg); int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); diff --git a/include/linux/net.h b/include/linux/net.h index 711c3593c3b8..18d942bbdf6e 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -41,6 +41,7 @@ struct net; #define SOCK_NOSPACE 2 #define SOCK_PASSCRED 3 #define SOCK_PASSSEC 4 +#define SOCK_SUPPORT_ZC 5 #ifndef ARCH_HAS_SOCKET_TYPES /** diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a36edb0ec199..eddf8ee270e7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3663,8 +3663,9 @@ static inline bool netif_attr_test_online(unsigned long j, static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, unsigned int nr_bits) { - /* n is a prior cpu */ - cpu_max_bits_warn(n + 1, nr_bits); + /* -1 is a legal arg here. */ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); if (srcp) return find_next_bit(srcp, nr_bits, n + 1); @@ -3685,8 +3686,9 @@ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, const unsigned long *src2p, unsigned int nr_bits) { - /* n is a prior cpu */ - cpu_max_bits_warn(n + 1, nr_bits); + /* -1 is a legal arg here. */ + if (n != -1) + cpu_max_bits_warn(n, nr_bits); if (src1p && src2p) return find_next_and_bit(src1p, src2p, nr_bits, n + 1); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 853f64b6c8c2..0031f7b4d9ab 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -756,11 +756,14 @@ struct perf_event { struct fasync_struct *fasync; /* delayed work for NMIs and such */ - int pending_wakeup; - int pending_kill; - int pending_disable; + unsigned int pending_wakeup; + unsigned int pending_kill; + unsigned int pending_disable; + unsigned int pending_sigtrap; unsigned long pending_addr; /* SIGTRAP */ - struct irq_work pending; + struct irq_work pending_irq; + struct callback_head pending_task; + unsigned int pending_work; atomic_t event_limit; @@ -877,6 +880,14 @@ struct perf_event_context { #endif void *task_ctx_data; /* pmu specific data */ struct rcu_head rcu_head; + + /* + * Sum (event->pending_sigtrap + event->pending_work) + * + * The SIGTRAP is targeted at ctx->task, as such it won't do changing + * that until the signal is delivered. 
+ */ + local_t nr_pending; }; /* diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 664dd409feb9..3f01ac8017e0 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -122,6 +122,7 @@ enum phylink_op_type { * (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls") * @poll_fixed_state: if true, starts link_poll, * if MAC link is at %MLO_AN_FIXED mode. + * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM. * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. @@ -134,6 +135,7 @@ struct phylink_config { enum phylink_op_type type; bool legacy_pre_march2020; bool poll_fixed_state; + bool mac_managed_pm; bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); diff --git a/include/linux/utsname.h b/include/linux/utsname.h index 2b1737c9b244..bf7613ba412b 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -10,6 +10,7 @@ #include <uapi/linux/utsname.h> enum uts_proc { + UTS_PROC_ARCH, UTS_PROC_OSTYPE, UTS_PROC_OSRELEASE, UTS_PROC_VERSION, diff --git a/include/media/i2c/ir-kbd-i2c.h b/include/media/i2c/ir-kbd-i2c.h index 9f47d6a48cff..0b58f8b9e7a4 100644 --- a/include/media/i2c/ir-kbd-i2c.h +++ b/include/media/i2c/ir-kbd-i2c.h @@ -35,6 +35,7 @@ enum ir_kbd_get_key_fn { IR_KBD_GET_KEY_PIXELVIEW, IR_KBD_GET_KEY_HAUP, IR_KBD_GET_KEY_KNC1, + IR_KBD_GET_KEY_GENIATECH, IR_KBD_GET_KEY_FUSIONHDTV, IR_KBD_GET_KEY_HAUP_XVR, IR_KBD_GET_KEY_AVERMEDIA_CARDBUS, diff --git a/include/media/media-device.h b/include/media/media-device.h index a10b30507524..86716ee7cc6c 100644 --- a/include/media/media-device.h +++ b/include/media/media-device.h @@ -192,21 +192,6 @@ struct usb_device; #define MEDIA_DEV_NOTIFY_POST_LINK_CH 1 /** - * media_entity_enum_init - Initialise an entity enumeration - * - * @ent_enum: Entity enumeration to be initialised - * @mdev: The related media device - * - * Return: zero on success or a negative error code. 
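The new phylink_config::mac_managed_pm flag above tells phylink that the MAC driver handles PHY power management itself. A MAC driver would be expected to set it before creating the phylink instance; a hedged sketch with hypothetical driver fields:

#include <linux/err.h>
#include <linux/phy.h>
#include <linux/phylink.h>

/* Hypothetical MAC driver private data. */
struct my_mac_priv {
	struct phylink_config phylink_config;
	struct phylink *phylink;
};

static int my_mac_phylink_setup(struct my_mac_priv *priv, struct device *dev,
				struct fwnode_handle *fwnode,
				const struct phylink_mac_ops *ops)
{
	priv->phylink_config.dev = dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	/* This driver suspends/resumes the PHY itself, so tell phylink. */
	priv->phylink_config.mac_managed_pm = true;

	priv->phylink = phylink_create(&priv->phylink_config, fwnode,
				       PHY_INTERFACE_MODE_RGMII, ops);
	return PTR_ERR_OR_ZERO(priv->phylink);
}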
- */ -static inline __must_check int media_entity_enum_init( - struct media_entity_enum *ent_enum, struct media_device *mdev) -{ - return __media_entity_enum_init(ent_enum, - mdev->entity_internal_idx_max + 1); -} - -/** * media_device_init() - Initializes a media device element * * @mdev: pointer to struct &media_device diff --git a/include/media/media-entity.h b/include/media/media-entity.h index f16ffe70f7a6..28c9de8a1f34 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -17,6 +17,7 @@ #include <linux/fwnode.h> #include <linux/list.h> #include <linux/media.h> +#include <linux/minmax.h> #include <linux/types.h> /* Enums used internally at the media controller to represent graphs */ @@ -99,12 +100,34 @@ struct media_graph { /** * struct media_pipeline - Media pipeline related information * - * @streaming_count: Streaming start count - streaming stop count - * @graph: Media graph walk during pipeline start / stop + * @allocated: Media pipeline allocated and freed by the framework + * @mdev: The media device the pipeline is part of + * @pads: List of media_pipeline_pad + * @start_count: Media pipeline start - stop count */ struct media_pipeline { - int streaming_count; - struct media_graph graph; + bool allocated; + struct media_device *mdev; + struct list_head pads; + int start_count; +}; + +/** + * struct media_pipeline_pad - A pad part of a media pipeline + * + * @list: Entry in the media_pad pads list + * @pipe: The media_pipeline that the pad is part of + * @pad: The media pad + * + * This structure associate a pad with a media pipeline. Instances of + * media_pipeline_pad are created by media_pipeline_start() when it builds the + * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop() + * removes the entries from the list and deletes them. + */ +struct media_pipeline_pad { + struct list_head list; + struct media_pipeline *pipe; + struct media_pad *pad; }; /** @@ -186,6 +209,8 @@ enum media_pad_signal_type { * @flags: Pad flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_PAD_FL_*``) + * @pipe: Pipeline this pad belongs to. Use media_entity_pipeline() to + * access this field. */ struct media_pad { struct media_gobj graph_obj; /* must be first field in struct */ @@ -193,6 +218,12 @@ struct media_pad { u16 index; enum media_pad_signal_type sig_type; unsigned long flags; + + /* + * The fields below are private, and should only be accessed via + * appropriate functions. + */ + struct media_pipeline *pipe; }; /** @@ -206,6 +237,14 @@ struct media_pad { * @link_validate: Return whether a link is valid from the entity point of * view. The media_pipeline_start() function * validates all links by calling this operation. Optional. + * @has_pad_interdep: Return whether a two pads inside the entity are + * interdependent. If two pads are interdependent they are + * part of the same pipeline and enabling one of the pads + * means that the other pad will become "locked" and + * doesn't allow configuration changes. pad0 and pad1 are + * guaranteed to not both be sinks or sources. + * Optional: If the operation isn't implemented all pads + * will be considered as interdependent. * * .. 
note:: * @@ -219,6 +258,8 @@ struct media_entity_operations { const struct media_pad *local, const struct media_pad *remote, u32 flags); int (*link_validate)(struct media_link *link); + bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0, + unsigned int pad1); }; /** @@ -269,7 +310,6 @@ enum media_entity_type { * @links: List of data links. * @ops: Entity operations. * @use_count: Use count for the entity. - * @pipe: Pipeline this entity belongs to. * @info: Union with devnode information. Kept just for backward * compatibility. * @info.dev: Contains device major and minor info. @@ -305,8 +345,6 @@ struct media_entity { int use_count; - struct media_pipeline *pipe; - union { struct { u32 major; @@ -316,6 +354,18 @@ struct media_entity { }; /** + * media_entity_for_each_pad - Iterate on all pads in an entity + * @entity: The entity the pads belong to + * @iter: The iterator pad + * + * Iterate on all pads in a media entity. + */ +#define media_entity_for_each_pad(entity, iter) \ + for (iter = (entity)->pads; \ + iter < &(entity)->pads[(entity)->num_pads]; \ + ++iter) + +/** * struct media_interface - A media interface graph object. * * @graph_obj: embedded graph object @@ -426,15 +476,15 @@ static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity) } /** - * __media_entity_enum_init - Initialise an entity enumeration + * media_entity_enum_init - Initialise an entity enumeration * * @ent_enum: Entity enumeration to be initialised - * @idx_max: Maximum number of entities in the enumeration + * @mdev: The related media device * - * Return: Returns zero on success or a negative error code. + * Return: zero on success or a negative error code. */ -__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum, - int idx_max); +__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum, + struct media_device *mdev); /** * media_entity_enum_cleanup - Release resources of an entity enumeration @@ -924,6 +974,18 @@ media_entity_remote_source_pad_unique(const struct media_entity *entity) } /** + * media_pad_is_streaming - Test if a pad is part of a streaming pipeline + * @pad: The pad + * + * Return: True if the pad is part of a pipeline started with the + * media_pipeline_start() function, false otherwise. + */ +static inline bool media_pad_is_streaming(const struct media_pad *pad) +{ + return pad->pipe; +} + +/** * media_entity_is_streaming - Test if an entity is part of a streaming pipeline * @entity: The entity * @@ -932,10 +994,50 @@ media_entity_remote_source_pad_unique(const struct media_entity *entity) */ static inline bool media_entity_is_streaming(const struct media_entity *entity) { - return entity->pipe; + struct media_pad *pad; + + media_entity_for_each_pad(entity, pad) { + if (media_pad_is_streaming(pad)) + return true; + } + + return false; } /** + * media_entity_pipeline - Get the media pipeline an entity is part of + * @entity: The entity + * + * DEPRECATED: use media_pad_pipeline() instead. + * + * This function returns the media pipeline that an entity has been associated + * with when constructing the pipeline with media_pipeline_start(). The pointer + * remains valid until media_pipeline_stop() is called. + * + * In general, entities can be part of multiple pipelines, when carrying + * multiple streams (either on different pads, or on the same pad using + * multiplexed streams). This function is to be used only for entities that + * do not support multiple pipelines. 
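The has_pad_interdep() operation documented above lets an entity report that two of its pads are not part of the same pipeline. A hedged sketch of how a multi-stream entity might implement it; the routing policy below is made up purely for illustration:

#include <media/media-entity.h>

/*
 * Hypothetical policy: only sink pad 0 and source pad 1 share an internal
 * route, so every other sink/source pair is independent. pad0 and pad1 are
 * never both sinks or both sources.
 */
static bool my_entity_has_pad_interdep(struct media_entity *entity,
				       unsigned int pad0, unsigned int pad1)
{
	return (pad0 == 0 && pad1 == 1) || (pad0 == 1 && pad1 == 0);
}

static const struct media_entity_operations my_entity_ops = {
	.has_pad_interdep = my_entity_has_pad_interdep,
};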
+ * + * Return: The media_pipeline the entity is part of, or NULL if the entity is + * not part of any pipeline. + */ +struct media_pipeline *media_entity_pipeline(struct media_entity *entity); + +/** + * media_pad_pipeline - Get the media pipeline a pad is part of + * @pad: The pad + * + * This function returns the media pipeline that a pad has been associated + * with when constructing the pipeline with media_pipeline_start(). The pointer + * remains valid until media_pipeline_stop() is called. + * + * Return: The media_pipeline the pad is part of, or NULL if the pad is + * not part of any pipeline. + */ +struct media_pipeline *media_pad_pipeline(struct media_pad *pad); + +/** * media_entity_get_fwnode_pad - Get pad number from fwnode * * @entity: The entity @@ -1013,53 +1115,66 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph); /** * media_pipeline_start - Mark a pipeline as streaming - * @entity: Starting entity - * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * @pad: Starting pad + * @pipe: Media pipeline to be assigned to all pads in the pipeline. * - * Mark all entities connected to a given entity through enabled links, either + * Mark all pads connected to a given pad through enabled links, either * directly or indirectly, as streaming. The given pipeline object is assigned - * to every entity in the pipeline and stored in the media_entity pipe field. + * to every pad in the pipeline and stored in the media_pad pipe field. * * Calls to this function can be nested, in which case the same number of * media_pipeline_stop() calls will be required to stop streaming. The * pipeline pointer must be identical for all nested calls to * media_pipeline_start(). */ -__must_check int media_pipeline_start(struct media_entity *entity, +__must_check int media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe); /** * __media_pipeline_start - Mark a pipeline as streaming * - * @entity: Starting entity - * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * @pad: Starting pad + * @pipe: Media pipeline to be assigned to all pads in the pipeline. * * ..note:: This is the non-locking version of media_pipeline_start() */ -__must_check int __media_pipeline_start(struct media_entity *entity, +__must_check int __media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe); /** * media_pipeline_stop - Mark a pipeline as not streaming - * @entity: Starting entity + * @pad: Starting pad * - * Mark all entities connected to a given entity through enabled links, either - * directly or indirectly, as not streaming. The media_entity pipe field is + * Mark all pads connected to a given pads through enabled links, either + * directly or indirectly, as not streaming. The media_pad pipe field is * reset to %NULL. * * If multiple calls to media_pipeline_start() have been made, the same * number of calls to this function are required to mark the pipeline as not * streaming. */ -void media_pipeline_stop(struct media_entity *entity); +void media_pipeline_stop(struct media_pad *pad); /** * __media_pipeline_stop - Mark a pipeline as not streaming * - * @entity: Starting entity + * @pad: Starting pad * * .. 
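media_pipeline_start() and media_pipeline_stop() above now take a starting pad instead of an entity. A hedged sketch of what a converted single-pad caller might look like, with hypothetical driver fields:

#include <media/media-entity.h>

/* Hypothetical capture context with a single-pad entity. */
struct my_capture {
	struct media_entity entity;	/* entity with one sink pad */
	struct media_pipeline pipe;
};

static int my_capture_start(struct my_capture *cap)
{
	/* Was: media_pipeline_start(&cap->entity, &cap->pipe); */
	return media_pipeline_start(&cap->entity.pads[0], &cap->pipe);
}

static void my_capture_stop(struct my_capture *cap)
{
	media_pipeline_stop(&cap->entity.pads[0]);
}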
note:: This is the non-locking version of media_pipeline_stop() */ -void __media_pipeline_stop(struct media_entity *entity); +void __media_pipeline_stop(struct media_pad *pad); + +/** + * media_pipeline_alloc_start - Mark a pipeline as streaming + * @pad: Starting pad + * + * media_pipeline_alloc_start() is similar to media_pipeline_start() but instead + * of working on a given pipeline the function will use an existing pipeline if + * the pad is already part of a pipeline, or allocate a new pipeline. + * + * Calls to media_pipeline_alloc_start() must be matched with + * media_pipeline_stop(). + */ +__must_check int media_pipeline_alloc_start(struct media_pad *pad); /** * media_devnode_create() - creates and initializes a device node interface diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 725ff91b26e0..1bdaea248089 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -175,7 +175,8 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, * * @sd: pointer to &struct v4l2_subdev * @client: pointer to struct i2c_client - * @devname: the name of the device; if NULL, the I²C device's name will be used + * @devname: the name of the device; if NULL, the I²C device drivers's name + * will be used * @postfix: sub-device specific string to put right after the I²C device name; * may be NULL */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index b76a0714d425..e59d9a234631 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -121,21 +121,19 @@ struct v4l2_ctrl_ops { * struct v4l2_ctrl_type_ops - The control type operations that the driver * has to provide. * - * @equal: return true if both values are equal. - * @init: initialize the value. + * @equal: return true if all ctrl->elems array elements are equal. + * @init: initialize the value for array elements from from_idx to ctrl->elems. * @log: log the value. - * @validate: validate the value. Return 0 on success and a negative value - * otherwise. + * @validate: validate the value for ctrl->new_elems array elements. + * Return 0 on success and a negative value otherwise. */ struct v4l2_ctrl_type_ops { - bool (*equal)(const struct v4l2_ctrl *ctrl, u32 elems, - union v4l2_ctrl_ptr ptr1, - union v4l2_ctrl_ptr ptr2); - void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, u32 tot_elems, + bool (*equal)(const struct v4l2_ctrl *ctrl, + union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); + void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, union v4l2_ctrl_ptr ptr); void (*log)(const struct v4l2_ctrl *ctrl); - int (*validate)(const struct v4l2_ctrl *ctrl, u32 elems, - union v4l2_ctrl_ptr ptr); + int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); }; /** @@ -1543,13 +1541,12 @@ int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback. * * @ctrl: The v4l2_ctrl pointer. - * @elems: The number of elements to compare. * @ptr1: A v4l2 control value. * @ptr2: A v4l2 control value. * * Return: true if values are equal, otherwise false. */ -bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems, +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); /** @@ -1557,13 +1554,12 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems, * * @ctrl: The v4l2_ctrl pointer. * @from_idx: Starting element index. - * @elems: The number of elements to initialize. 
* @ptr: The v4l2 control value. * * Return: void */ void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, - u32 elems, union v4l2_ctrl_ptr ptr); + union v4l2_ctrl_ptr ptr); /** * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback. @@ -1578,12 +1574,10 @@ void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl); * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback. * * @ctrl: The v4l2_ctrl pointer. - * @elems: The number of elements in the control. * @ptr: The v4l2 control value. * * Return: 0 on success, a negative error code on failure. */ -int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems, - union v4l2_ctrl_ptr ptr); +int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); #endif diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 5cf1edefb822..e0a13505f88d 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -539,4 +539,106 @@ static inline int video_is_registered(struct video_device *vdev) return test_bit(V4L2_FL_REGISTERED, &vdev->flags); } +#if defined(CONFIG_MEDIA_CONTROLLER) + +/** + * video_device_pipeline_start - Mark a pipeline as streaming + * @vdev: Starting video device + * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * + * Mark all entities connected to a given video device through enabled links, + * either directly or indirectly, as streaming. The given pipeline object is + * assigned to every pad in the pipeline and stored in the media_pad pipe + * field. + * + * Calls to this function can be nested, in which case the same number of + * video_device_pipeline_stop() calls will be required to stop streaming. The + * pipeline pointer must be identical for all nested calls to + * video_device_pipeline_start(). + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_pipeline_start(). + */ +__must_check int video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe); + +/** + * __video_device_pipeline_start - Mark a pipeline as streaming + * @vdev: Starting video device + * @pipe: Media pipeline to be assigned to all entities in the pipeline. + * + * ..note:: This is the non-locking version of video_device_pipeline_start() + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around __media_pipeline_start(). + */ +__must_check int __video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe); + +/** + * video_device_pipeline_stop - Mark a pipeline as not streaming + * @vdev: Starting video device + * + * Mark all entities connected to a given video device through enabled links, + * either directly or indirectly, as not streaming. The media_pad pipe field + * is reset to %NULL. + * + * If multiple calls to media_pipeline_start() have been made, the same + * number of calls to this function are required to mark the pipeline as not + * streaming. + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_pipeline_stop(). + */ +void video_device_pipeline_stop(struct video_device *vdev); + +/** + * __video_device_pipeline_stop - Mark a pipeline as not streaming + * @vdev: Starting video device + * + * .. note:: This is the non-locking version of media_pipeline_stop() + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around __media_pipeline_stop(). 
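The v4l2_ctrl_type_ops changes above drop the element count arguments; callbacks are now expected to take the count from the control itself (ctrl->elems, or ctrl->new_elems for validate). A hedged sketch of a custom equal callback under the new prototype:

#include <linux/string.h>
#include <media/v4l2-ctrls.h>

/* Hypothetical compound-control equal callback using the new prototype. */
static bool my_ctrl_equal(const struct v4l2_ctrl *ctrl,
			  union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2)
{
	/* The element count now comes from the control, not an argument. */
	return !memcmp(ptr1.p_const, ptr2.p_const,
		       ctrl->elems * ctrl->elem_size);
}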
+ */ +void __video_device_pipeline_stop(struct video_device *vdev); + +/** + * video_device_pipeline_alloc_start - Mark a pipeline as streaming + * @vdev: Starting video device + * + * video_device_pipeline_alloc_start() is similar to video_device_pipeline_start() + * but instead of working on a given pipeline the function will use an + * existing pipeline if the video device is already part of a pipeline, or + * allocate a new pipeline. + * + * Calls to video_device_pipeline_alloc_start() must be matched with + * video_device_pipeline_stop(). + */ +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev); + +/** + * video_device_pipeline - Get the media pipeline a video device is part of + * @vdev: The video device + * + * This function returns the media pipeline that a video device has been + * associated with when constructing the pipeline with + * video_device_pipeline_start(). The pointer remains valid until + * video_device_pipeline_stop() is called. + * + * Return: The media_pipeline the video device is part of, or NULL if the video + * device is not part of any pipeline. + * + * The video device must contain a single pad. + * + * This is a convenience wrapper around media_entity_pipeline(). + */ +struct media_pipeline *video_device_pipeline(struct video_device *vdev); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + #endif /* _V4L2_DEV_H */ diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h index 15e4ab672223..394d798f3dfa 100644 --- a/include/media/v4l2-fwnode.h +++ b/include/media/v4l2-fwnode.h @@ -45,10 +45,6 @@ struct v4l2_async_subdev; */ struct v4l2_fwnode_endpoint { struct fwnode_endpoint base; - /* - * Fields below this line will be zeroed by - * v4l2_fwnode_endpoint_parse() - */ enum v4l2_mbus_type bus_type; struct { struct v4l2_mbus_config_parallel parallel; diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 9689f38a0af1..2f80c9c818ed 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -358,7 +358,11 @@ struct v4l2_mbus_frame_desc_entry { } bus; }; -#define V4L2_FRAME_DESC_ENTRY_MAX 4 + /* + * If this number is too small, it should be dropped altogether and the + * API switched to a dynamic number of frame descriptor entries. 
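The new video_device_pipeline_start()/video_device_pipeline_stop() helpers above wrap media_pipeline_start() for single-pad video devices. A hedged sketch of how a driver's streaming start/stop paths might use them, with a hypothetical driver structure:

#include <media/media-entity.h>
#include <media/v4l2-dev.h>

/* Hypothetical driver context owning a video device and its pipeline. */
struct my_stream {
	struct video_device vdev;
	struct media_pipeline pipe;
};

static int my_start_streaming(struct my_stream *s)
{
	/* Builds the pipeline starting from the single pad of s->vdev. */
	return video_device_pipeline_start(&s->vdev, &s->pipe);
}

static void my_stop_streaming(struct my_stream *s)
{
	video_device_pipeline_stop(&s->vdev);
}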
+ */ +#define V4L2_FRAME_DESC_ENTRY_MAX 8 /** * enum v4l2_mbus_frame_desc_type - media bus frame description type @@ -1046,6 +1050,8 @@ v4l2_subdev_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, unsigned int pad) { + if (WARN_ON(!state)) + return NULL; if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; return &state->pads[pad].try_fmt; @@ -1064,6 +1070,8 @@ v4l2_subdev_get_pad_crop(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, unsigned int pad) { + if (WARN_ON(!state)) + return NULL; if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; return &state->pads[pad].try_crop; @@ -1082,6 +1090,8 @@ v4l2_subdev_get_pad_compose(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, unsigned int pad) { + if (WARN_ON(!state)) + return NULL; if (WARN_ON(pad >= sd->entity.num_pads)) pad = 0; return &state->pads[pad].try_compose; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 8f780170e2f8..3d08e67b3cfc 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -37,6 +37,7 @@ struct genl_info; * do additional, common, filtering and return an error * @post_doit: called after an operation's doit callback, it may * undo operations done by pre_doit, for example release locks + * @module: pointer to the owning module (set to THIS_MODULE) * @mcgrps: multicast groups used by this family * @n_mcgrps: number of multicast groups * @resv_start_op: first operation for which reserved fields of the header @@ -173,9 +174,9 @@ struct genl_ops { }; /** - * struct genl_info - info that is available during dumpit op call + * struct genl_dumpit_info - info that is available during dumpit op call * @family: generic netlink family - for internal genl code usage - * @ops: generic netlink ops - for internal genl code usage + * @op: generic netlink ops - for internal genl code usage * @attrs: netlink attributes */ struct genl_dumpit_info { @@ -354,6 +355,7 @@ int genlmsg_multicast_allns(const struct genl_family *family, /** * genlmsg_unicast - unicast a netlink message + * @net: network namespace to look up @portid in * @skb: netlink message as socket buffer * @portid: netlink portid of the destination socket */ @@ -373,7 +375,7 @@ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info) } /** - * gennlmsg_data - head of message payload + * genlmsg_data - head of message payload * @gnlh: genetlink message header */ static inline void *genlmsg_data(const struct genlmsghdr *gnlh) diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 473b0b0fa4ab..efc9085c6892 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -43,21 +43,20 @@ struct sock *reuseport_migrate_sock(struct sock *sk, extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); extern int reuseport_detach_prog(struct sock *sk); -static inline bool reuseport_has_conns(struct sock *sk, bool set) +static inline bool reuseport_has_conns(struct sock *sk) { struct sock_reuseport *reuse; bool ret = false; rcu_read_lock(); reuse = rcu_dereference(sk->sk_reuseport_cb); - if (reuse) { - if (set) - reuse->has_conns = 1; - ret = reuse->has_conns; - } + if (reuse && reuse->has_conns) + ret = true; rcu_read_unlock(); return ret; } +void reuseport_has_conns_set(struct sock *sk); + #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h index 6ce3bd22f6c6..5ad7ac2e3a7c 100644 --- a/include/soc/at91/sama7-ddr.h +++ b/include/soc/at91/sama7-ddr.h @@ -26,7 +26,10 @@ #define DDR3PHY_PGSR 
(0x0C) /* DDR3PHY PHY General Status Register */ #define DDR3PHY_PGSR_IDONE (1 << 0) /* Initialization Done */ -#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */ +#define DDR3PHY_ACDLLCR (0x14) /* DDR3PHY AC DLL Control Register */ +#define DDR3PHY_ACDLLCR_DLLSRST (1 << 30) /* DLL Soft Reset */ + +#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */ #define DDR3PHY_ACIOCR_CSPDD_CS0 (1 << 18) /* CS#[0] Power Down Driver */ #define DDR3PHY_ACIOCR_CKPDD_CK0 (1 << 8) /* CK[0] Power Down Driver */ #define DDR3PHY_ACIORC_ACPDD (1 << 3) /* AC Power Down Driver */ diff --git a/include/trace/events/watchdog.h b/include/trace/events/watchdog.h new file mode 100644 index 000000000000..beb9bb3424c8 --- /dev/null +++ b/include/trace/events/watchdog.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM watchdog + +#if !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_WATCHDOG_H + +#include <linux/watchdog.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(watchdog_template, + + TP_PROTO(struct watchdog_device *wdd, int err), + + TP_ARGS(wdd, err), + + TP_STRUCT__entry( + __field(int, id) + __field(int, err) + ), + + TP_fast_assign( + __entry->id = wdd->id; + __entry->err = err; + ), + + TP_printk("watchdog%d err=%d", __entry->id, __entry->err) +); + +DEFINE_EVENT(watchdog_template, watchdog_start, + TP_PROTO(struct watchdog_device *wdd, int err), + TP_ARGS(wdd, err)); + +DEFINE_EVENT(watchdog_template, watchdog_ping, + TP_PROTO(struct watchdog_device *wdd, int err), + TP_ARGS(wdd, err)); + +DEFINE_EVENT(watchdog_template, watchdog_stop, + TP_PROTO(struct watchdog_device *wdd, int err), + TP_ARGS(wdd, err)); + +TRACE_EVENT(watchdog_set_timeout, + + TP_PROTO(struct watchdog_device *wdd, unsigned int timeout, int err), + + TP_ARGS(wdd, timeout, err), + + TP_STRUCT__entry( + __field(int, id) + __field(unsigned int, timeout) + __field(int, err) + ), + + TP_fast_assign( + __entry->id = wdd->id; + __entry->timeout = timeout; + __entry->err = err; + ), + + TP_printk("watchdog%d timeout=%u err=%d", __entry->id, __entry->timeout, __entry->err) +); + +#endif /* !defined(_TRACE_WATCHDOG_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h index eac87310b348..6f93c915cc88 100644 --- a/include/uapi/drm/panfrost_drm.h +++ b/include/uapi/drm/panfrost_drm.h @@ -235,25 +235,29 @@ struct drm_panfrost_madvise { #define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1) #define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1) +/* + * This structure is the native endianness of the dumping machine, tools can + * detect the endianness by looking at the value in 'magic'. 
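The panfrost comment above says the dump header is written in the dumping machine's native endianness and that tools detect it from the magic value. A hedged userspace sketch of that detection; the expected magic is passed in rather than assuming a particular constant name:

#include <stdbool.h>
#include <stdint.h>
#include <byteswap.h>

/*
 * Userspace sketch: given the first word of a dump file and the magic value
 * from panfrost_drm.h, report whether the dump was written on a machine of
 * the opposite endianness (in which case every field needs byte swapping).
 */
static bool dump_is_foreign_endian(uint32_t magic_from_file,
				   uint32_t native_magic)
{
	return magic_from_file != native_magic &&
	       bswap_32(magic_from_file) == native_magic;
}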
+ */ struct panfrost_dump_object_header { - __le32 magic; - __le32 type; - __le32 file_size; - __le32 file_offset; + __u32 magic; + __u32 type; + __u32 file_size; + __u32 file_offset; union { - struct pan_reg_hdr { - __le64 jc; - __le32 gpu_id; - __le32 major; - __le32 minor; - __le64 nbos; + struct { + __u64 jc; + __u32 gpu_id; + __u32 major; + __u32 minor; + __u64 nbos; } reghdr; struct pan_bomap_hdr { - __le32 valid; - __le64 iova; - __le32 data[2]; + __u32 valid; + __u64 iova; + __u32 data[2]; } bomap; /* @@ -261,14 +265,14 @@ struct panfrost_dump_object_header { * with new fields and also keep it 512-byte aligned */ - __le32 sizer[496]; + __u32 sizer[496]; }; }; /* Registers object, an array of these */ struct panfrost_dump_registers { - __le32 reg; - __le32 value; + __u32 reg; + __u32 value; }; #if defined(__cplusplus) diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h index c3baaea0b8ef..d58fa1cdcb08 100644 --- a/include/uapi/linux/cec-funcs.h +++ b/include/uapi/linux/cec-funcs.h @@ -1568,6 +1568,20 @@ static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg * } } +static inline void cec_msg_set_audio_volume_level(struct cec_msg *msg, + __u8 audio_volume_level) +{ + msg->len = 3; + msg->msg[1] = CEC_MSG_SET_AUDIO_VOLUME_LEVEL; + msg->msg[2] = audio_volume_level; +} + +static inline void cec_ops_set_audio_volume_level(const struct cec_msg *msg, + __u8 *audio_volume_level) +{ + *audio_volume_level = msg->msg[2]; +} + /* Audio Rate Control Feature */ static inline void cec_msg_set_audio_rate(struct cec_msg *msg, diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h index 1d48da926216..b8e071abaea5 100644 --- a/include/uapi/linux/cec.h +++ b/include/uapi/linux/cec.h @@ -768,6 +768,7 @@ struct cec_event { #define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE 0x08 #define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX 0x04 #define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX 0x02 +#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_VOLUME_LEVEL 0x01 #define CEC_MSG_GIVE_FEATURES 0xa5 /* HDMI 2.0 */ @@ -1059,6 +1060,7 @@ struct cec_event { #define CEC_OP_AUD_FMT_ID_CEA861 0 #define CEC_OP_AUD_FMT_ID_CEA861_CXT 1 +#define CEC_MSG_SET_AUDIO_VOLUME_LEVEL 0x73 /* Audio Rate Control Feature */ #define CEC_MSG_SET_AUDIO_RATE 0x9a diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h index 583ca0d9a79d..730673ecc63d 100644 --- a/include/uapi/linux/rkisp1-config.h +++ b/include/uapi/linux/rkisp1-config.h @@ -117,7 +117,46 @@ /* * Defect Pixel Cluster Correction */ -#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3 +#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3 + +#define RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE (1U << 2) + +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER (1U << 0) +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER (1U << 1) +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3 (1U << 2) +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3 (1U << 3) + +/* 0-2 for sets 1-3 */ +#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(n) ((n) << 0) +#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET (1U << 3) + +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE (1U << 0) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE (1U << 1) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE (1U << 2) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE (1U << 3) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE (1U << 4) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE (1U << 8) +#define 
RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE (1U << 9) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE (1U << 10) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE (1U << 11) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE (1U << 12) + +#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(v) ((v) << 8) +#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(v) ((v) << 8) +#define RKISP1_CIF_ISP_DPCC_PG_FAC_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_PG_FAC_RB(v) ((v) << 8) +#define RKISP1_CIF_ISP_DPCC_RND_THRESH_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(v) ((v) << 8) +#define RKISP1_CIF_ISP_DPCC_RG_FAC_G(v) ((v) << 0) +#define RKISP1_CIF_ISP_DPCC_RG_FAC_RB(v) ((v) << 8) + +#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(n, v) ((v) << ((n) * 4)) +#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(n, v) ((v) << ((n) * 4 + 2)) + +#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(n, v) ((v) << ((n) * 4)) +#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(n, v) ((v) << ((n) * 4 + 2)) /* * Denoising pre filter @@ -249,16 +288,20 @@ struct rkisp1_cif_isp_bls_config { }; /** - * struct rkisp1_cif_isp_dpcc_methods_config - Methods Configuration used by DPCC + * struct rkisp1_cif_isp_dpcc_methods_config - DPCC methods set configuration * - * Methods Configuration used by Defect Pixel Cluster Correction + * This structure stores the configuration of one set of methods for the DPCC + * algorithm. Multiple methods can be selected in each set (independently for + * the Green and Red/Blue components) through the @method field, the result is + * the logical AND of all enabled methods. The remaining fields set thresholds + * and factors for each method. * - * @method: Method enable bits - * @line_thresh: Line threshold - * @line_mad_fac: Line MAD factor - * @pg_fac: Peak gradient factor - * @rnd_thresh: Rank Neighbor Difference threshold - * @rg_fac: Rank gradient factor + * @method: Method enable bits (RKISP1_CIF_ISP_DPCC_METHODS_SET_*) + * @line_thresh: Line threshold (RKISP1_CIF_ISP_DPCC_LINE_THRESH_*) + * @line_mad_fac: Line Mean Absolute Difference factor (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_*) + * @pg_fac: Peak gradient factor (RKISP1_CIF_ISP_DPCC_PG_FAC_*) + * @rnd_thresh: Rank Neighbor Difference threshold (RKISP1_CIF_ISP_DPCC_RND_THRESH_*) + * @rg_fac: Rank gradient factor (RKISP1_CIF_ISP_DPCC_RG_FAC_*) */ struct rkisp1_cif_isp_dpcc_methods_config { __u32 method; @@ -272,14 +315,16 @@ struct rkisp1_cif_isp_dpcc_methods_config { /** * struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC * - * Configuration used by Defect Pixel Cluster Correction + * Configuration used by Defect Pixel Cluster Correction. Three sets of methods + * can be configured and selected through the @set_use field. The result is the + * logical OR of all enabled sets. 
* - * @mode: dpcc output mode - * @output_mode: whether use hard coded methods - * @set_use: stage1 methods set - * @methods: methods config - * @ro_limits: rank order limits - * @rnd_offs: differential rank offsets for rank neighbor difference + * @mode: DPCC mode (RKISP1_CIF_ISP_DPCC_MODE_*) + * @output_mode: Interpolation output mode (RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_*) + * @set_use: Methods sets selection (RKISP1_CIF_ISP_DPCC_SET_USE_*) + * @methods: Methods sets configuration + * @ro_limits: Rank order limits (RKISP1_CIF_ISP_DPCC_RO_LIMITS_*) + * @rnd_offs: Differential rank offsets for rank neighbor difference (RKISP1_CIF_ISP_DPCC_RND_OFFS_*) */ struct rkisp1_cif_isp_dpcc_config { __u32 mode; diff --git a/init/Kconfig b/init/Kconfig index 694f7c160c9c..abf65098f1b6 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -66,7 +66,7 @@ config RUST_IS_AVAILABLE This shows whether a suitable Rust toolchain is available (found). Please see Documentation/rust/quick-start.rst for instructions on how - to satify the build requirements of Rust support. + to satisfy the build requirements of Rust support. In particular, the Makefile target 'rustavailable' is useful to check why the Rust toolchain is not being detected. diff --git a/io_uring/filetable.h b/io_uring/filetable.h index ff3a712e11bf..351111ff8882 100644 --- a/io_uring/filetable.h +++ b/io_uring/filetable.h @@ -5,22 +5,9 @@ #include <linux/file.h> #include <linux/io_uring_types.h> -/* - * FFS_SCM is only available on 64-bit archs, for 32-bit we just define it as 0 - * and define IO_URING_SCM_ALL. For this case, we use SCM for all files as we - * can't safely always dereference the file when the task has exited and ring - * cleanup is done. If a file is tracked and part of SCM, then unix gc on - * process exit may reap it before __io_sqe_files_unregister() is run. 
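The reworked DPCC documentation above ties each field to its RKISP1_CIF_ISP_DPCC_* macros. A hedged userspace sketch filling one methods set with the new helpers; the numeric values are made up purely for illustration:

#include <linux/rkisp1-config.h>

/* Fill methods set 0 of the DPCC configuration with illustrative values. */
static void fill_dpcc(struct rkisp1_cif_isp_dpcc_config *dpcc)
{
	struct rkisp1_cif_isp_dpcc_methods_config *m = &dpcc->methods[0];

	dpcc->mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE;
	dpcc->output_mode = RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3 |
			    RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3;
	/* Select set 1 only (the macro takes 0-2 for sets 1-3). */
	dpcc->set_use = RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(0);

	m->method = RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE |
		    RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE;
	m->line_thresh = RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(8) |
			 RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(8);
	m->pg_fac = RKISP1_CIF_ISP_DPCC_PG_FAC_G(3) |
		    RKISP1_CIF_ISP_DPCC_PG_FAC_RB(3);
}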
- */ #define FFS_NOWAIT 0x1UL #define FFS_ISREG 0x2UL -#if defined(CONFIG_64BIT) -#define FFS_SCM 0x4UL -#else -#define IO_URING_SCM_ALL -#define FFS_SCM 0x0UL -#endif -#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG|FFS_SCM) +#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG) bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files); void io_free_file_tables(struct io_file_table *table); @@ -38,6 +25,7 @@ unsigned int io_file_get_flags(struct file *file); static inline void io_file_bitmap_clear(struct io_file_table *table, int bit) { + WARN_ON_ONCE(!test_bit(bit, table->bitmap)); __clear_bit(bit, table->bitmap); table->alloc_hint = bit; } diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index c6536d4b2da0..6f1d0e5df23a 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -1164,10 +1164,10 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node); if (!wqe) goto err; + wq->wqes[node] = wqe; if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL)) goto err; cpumask_copy(wqe->cpu_mask, cpumask_of_node(node)); - wq->wqes[node] = wqe; wqe->node = alloc_node; wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers = diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index de08d9902b30..6cc16e39b27f 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1587,8 +1587,6 @@ unsigned int io_file_get_flags(struct file *file) res |= FFS_ISREG; if (__io_file_supports_nowait(file, mode)) res |= FFS_NOWAIT; - if (io_file_need_scm(file)) - res |= FFS_SCM; return res; } @@ -1860,7 +1858,6 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd, /* mask in overlapping REQ_F and FFS bits */ req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT); io_req_set_rsrc_node(req, ctx, 0); - WARN_ON_ONCE(file && !test_bit(fd, ctx->file_table.bitmap)); out: io_ring_submit_unlock(ctx, issue_flags); return file; @@ -2563,18 +2560,14 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx) static void io_req_caches_free(struct io_ring_ctx *ctx) { - struct io_submit_state *state = &ctx->submit_state; int nr = 0; mutex_lock(&ctx->uring_lock); - io_flush_cached_locked_reqs(ctx, state); + io_flush_cached_locked_reqs(ctx, &ctx->submit_state); while (!io_req_cache_empty(ctx)) { - struct io_wq_work_node *node; - struct io_kiocb *req; + struct io_kiocb *req = io_alloc_req(ctx); - node = wq_stack_extract(&state->free_list); - req = container_of(node, struct io_kiocb, comp_list); kmem_cache_free(req_cachep, req); nr++; } @@ -2811,15 +2804,12 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) io_poll_remove_all(ctx, NULL, true); mutex_unlock(&ctx->uring_lock); - /* failed during ring init, it couldn't have issued any requests */ - if (ctx->rings) { + /* + * If we failed setting up the ctx, we might not have any rings + * and therefore did not submit any requests + */ + if (ctx->rings) io_kill_timeouts(ctx, NULL, true); - /* if we failed setting up the ctx, we might not have any rings */ - io_iopoll_try_reap_events(ctx); - /* drop cached put refs after potentially doing completions */ - if (current->io_uring) - io_uring_drop_tctx_refs(current); - } INIT_WORK(&ctx->exit_work, io_ring_exit_work); /* diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 4a7e5d030c78..90d2fc6fd80e 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -95,6 +95,9 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) msg->src_fd = 
array_index_nospec(msg->src_fd, ctx->nr_user_files); file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr; + if (!file_ptr) + goto out_unlock; + src_file = (struct file *) (file_ptr & FFS_MASK); get_file(src_file); diff --git a/io_uring/net.c b/io_uring/net.c index 8c7226b5bf41..15dea91625e2 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -1056,6 +1056,8 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) sock = sock_from_file(req->file); if (unlikely(!sock)) return -ENOTSOCK; + if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags)) + return -EOPNOTSUPP; msg.msg_name = NULL; msg.msg_control = NULL; @@ -1151,6 +1153,8 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) sock = sock_from_file(req->file); if (unlikely(!sock)) return -ENOTSOCK; + if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags)) + return -EOPNOTSUPP; if (req_has_async_data(req)) { kmsg = req->async_data; diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index 012fdb04ec23..55d4ab96fb92 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -757,20 +757,17 @@ int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void __io_sqe_files_unregister(struct io_ring_ctx *ctx) { -#if !defined(IO_URING_SCM_ALL) int i; for (i = 0; i < ctx->nr_user_files; i++) { struct file *file = io_file_from_index(&ctx->file_table, i); - if (!file) - continue; - if (io_fixed_file_slot(&ctx->file_table, i)->file_ptr & FFS_SCM) + /* skip scm accounted files, they'll be freed by ->ring_sock */ + if (!file || io_file_need_scm(file)) continue; io_file_bitmap_clear(&ctx->file_table, i); fput(file); } -#endif #if defined(CONFIG_UNIX) if (ctx->ring_sock) { diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h index 9bce15665444..81445a477622 100644 --- a/io_uring/rsrc.h +++ b/io_uring/rsrc.h @@ -82,11 +82,7 @@ int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file); #if defined(CONFIG_UNIX) static inline bool io_file_need_scm(struct file *filp) { -#if defined(IO_URING_SCM_ALL) - return true; -#else return !!unix_get_socket(filp); -#endif } #else static inline bool io_file_need_scm(struct file *filp) diff --git a/io_uring/rw.c b/io_uring/rw.c index 100de2626e47..bb47cc4da713 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -242,8 +242,6 @@ static void io_req_io_end(struct io_kiocb *req) { struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); - WARN_ON(!in_task()); - if (rw->kiocb.ki_flags & IOCB_WRITE) { kiocb_end_write(req); fsnotify_modify(req->file); diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c index 0d200a993489..9fcf09f2ef00 100644 --- a/kernel/bpf/cgroup_iter.c +++ b/kernel/bpf/cgroup_iter.c @@ -196,7 +196,7 @@ static int bpf_iter_attach_cgroup(struct bpf_prog *prog, return -EINVAL; if (fd) - cgrp = cgroup_get_from_fd(fd); + cgrp = cgroup_v1v2_get_from_fd(fd); else if (id) cgrp = cgroup_get_from_id(id); else /* walk the entire hierarchy by default. */ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 7f486677ab1f..2319946715e0 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1392,6 +1392,9 @@ static void cgroup_destroy_root(struct cgroup_root *root) cgroup_free_root(root); } +/* + * Returned cgroup is without refcount but it's valid as long as cset pins it. 
+ */ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, struct cgroup_root *root) { @@ -1403,6 +1406,7 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, res_cgroup = cset->dfl_cgrp; } else { struct cgrp_cset_link *link; + lockdep_assert_held(&css_set_lock); list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { struct cgroup *c = link->cgrp; @@ -1414,6 +1418,7 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset, } } + BUG_ON(!res_cgroup); return res_cgroup; } @@ -1436,23 +1441,36 @@ current_cgns_cgroup_from_root(struct cgroup_root *root) rcu_read_unlock(); - BUG_ON(!res); return res; } +/* + * Look up cgroup associated with current task's cgroup namespace on the default + * hierarchy. + * + * Unlike current_cgns_cgroup_from_root(), this doesn't need locks: + * - Internal rcu_read_lock is unnecessary because we don't dereference any rcu + * pointers. + * - css_set_lock is not needed because we just read cset->dfl_cgrp. + * - As a bonus returned cgrp is pinned with the current because it cannot + * switch cgroup_ns asynchronously. + */ +static struct cgroup *current_cgns_cgroup_dfl(void) +{ + struct css_set *cset; + + cset = current->nsproxy->cgroup_ns->root_cset; + return __cset_cgroup_from_root(cset, &cgrp_dfl_root); +} + /* look up cgroup associated with given css_set on the specified hierarchy */ static struct cgroup *cset_cgroup_from_root(struct css_set *cset, struct cgroup_root *root) { - struct cgroup *res = NULL; - lockdep_assert_held(&cgroup_mutex); lockdep_assert_held(&css_set_lock); - res = __cset_cgroup_from_root(cset, root); - - BUG_ON(!res); - return res; + return __cset_cgroup_from_root(cset, root); } /* @@ -6191,9 +6209,7 @@ struct cgroup *cgroup_get_from_id(u64 id) if (!cgrp) return ERR_PTR(-ENOENT); - spin_lock_irq(&css_set_lock); - root_cgrp = current_cgns_cgroup_from_root(&cgrp_dfl_root); - spin_unlock_irq(&css_set_lock); + root_cgrp = current_cgns_cgroup_dfl(); if (!cgroup_is_descendant(cgrp, root_cgrp)) { cgroup_put(cgrp); return ERR_PTR(-ENOENT); @@ -6294,16 +6310,42 @@ void cgroup_fork(struct task_struct *child) INIT_LIST_HEAD(&child->cg_list); } -static struct cgroup *cgroup_get_from_file(struct file *f) +/** + * cgroup_v1v2_get_from_file - get a cgroup pointer from a file pointer + * @f: file corresponding to cgroup_dir + * + * Find the cgroup from a file pointer associated with a cgroup directory. + * Returns a pointer to the cgroup on success. ERR_PTR is returned if the + * cgroup cannot be found. + */ +static struct cgroup *cgroup_v1v2_get_from_file(struct file *f) { struct cgroup_subsys_state *css; - struct cgroup *cgrp; css = css_tryget_online_from_dir(f->f_path.dentry, NULL); if (IS_ERR(css)) return ERR_CAST(css); - cgrp = css->cgroup; + return css->cgroup; +} + +/** + * cgroup_get_from_file - same as cgroup_v1v2_get_from_file, but only supports + * cgroup2. 
+ * @f: file corresponding to cgroup2_dir + */ +static struct cgroup *cgroup_get_from_file(struct file *f) +{ + struct cgroup *cgrp = cgroup_v1v2_get_from_file(f); + + if (IS_ERR(cgrp)) + return ERR_CAST(cgrp); + + if (!cgroup_on_dfl(cgrp)) { + cgroup_put(cgrp); + return ERR_PTR(-EBADF); + } + return cgrp; } @@ -6772,10 +6814,8 @@ struct cgroup *cgroup_get_from_path(const char *path) struct cgroup *cgrp = ERR_PTR(-ENOENT); struct cgroup *root_cgrp; - spin_lock_irq(&css_set_lock); - root_cgrp = current_cgns_cgroup_from_root(&cgrp_dfl_root); + root_cgrp = current_cgns_cgroup_dfl(); kn = kernfs_walk_and_get(root_cgrp->kn, path); - spin_unlock_irq(&css_set_lock); if (!kn) goto out; @@ -6800,15 +6840,15 @@ out: EXPORT_SYMBOL_GPL(cgroup_get_from_path); /** - * cgroup_get_from_fd - get a cgroup pointer from a fd - * @fd: fd obtained by open(cgroup2_dir) + * cgroup_v1v2_get_from_fd - get a cgroup pointer from a fd + * @fd: fd obtained by open(cgroup_dir) * * Find the cgroup from a fd which should be obtained * by opening a cgroup directory. Returns a pointer to the * cgroup on success. ERR_PTR is returned if the cgroup * cannot be found. */ -struct cgroup *cgroup_get_from_fd(int fd) +struct cgroup *cgroup_v1v2_get_from_fd(int fd) { struct cgroup *cgrp; struct file *f; @@ -6817,10 +6857,29 @@ struct cgroup *cgroup_get_from_fd(int fd) if (!f) return ERR_PTR(-EBADF); - cgrp = cgroup_get_from_file(f); + cgrp = cgroup_v1v2_get_from_file(f); fput(f); return cgrp; } + +/** + * cgroup_get_from_fd - same as cgroup_v1v2_get_from_fd, but only supports + * cgroup2. + * @fd: fd obtained by open(cgroup2_dir) + */ +struct cgroup *cgroup_get_from_fd(int fd) +{ + struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd); + + if (IS_ERR(cgrp)) + return ERR_CAST(cgrp); + + if (!cgroup_on_dfl(cgrp)) { + cgroup_put(cgrp); + return ERR_PTR(-EBADF); + } + return cgrp; +} EXPORT_SYMBOL_GPL(cgroup_get_from_fd); static u64 power_of_ten(int power) diff --git a/kernel/events/core.c b/kernel/events/core.c index aefc1e08e015..01933db7629c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -54,6 +54,7 @@ #include <linux/highmem.h> #include <linux/pgtable.h> #include <linux/buildid.h> +#include <linux/task_work.h> #include "internal.h" @@ -2276,11 +2277,26 @@ event_sched_out(struct perf_event *event, event->pmu->del(event, 0); event->oncpu = -1; - if (READ_ONCE(event->pending_disable) >= 0) { - WRITE_ONCE(event->pending_disable, -1); + if (event->pending_disable) { + event->pending_disable = 0; perf_cgroup_event_disable(event, ctx); state = PERF_EVENT_STATE_OFF; } + + if (event->pending_sigtrap) { + bool dec = true; + + event->pending_sigtrap = 0; + if (state != PERF_EVENT_STATE_OFF && + !event->pending_work) { + event->pending_work = 1; + dec = false; + task_work_add(current, &event->pending_task, TWA_RESUME); + } + if (dec) + local_dec(&event->ctx->nr_pending); + } + perf_event_set_state(event, state); if (!is_software_event(event)) @@ -2432,7 +2448,7 @@ static void __perf_event_disable(struct perf_event *event, * hold the top-level event's child_mutex, so any descendant that * goes to exit will block in perf_event_exit_event(). * - * When called from perf_pending_event it's OK because event->ctx + * When called from perf_pending_irq it's OK because event->ctx * is the current context on this CPU and preemption is disabled, * hence we can't get into perf_event_task_sched_out for this context. 
*/ @@ -2471,9 +2487,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable); void perf_event_disable_inatomic(struct perf_event *event) { - WRITE_ONCE(event->pending_disable, smp_processor_id()); - /* can fail, see perf_pending_event_disable() */ - irq_work_queue(&event->pending); + event->pending_disable = 1; + irq_work_queue(&event->pending_irq); } #define MAX_INTERRUPTS (~0ULL) @@ -3428,11 +3443,23 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn, raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); if (context_equiv(ctx, next_ctx)) { + perf_pmu_disable(pmu); + + /* PMIs are disabled; ctx->nr_pending is stable. */ + if (local_read(&ctx->nr_pending) || + local_read(&next_ctx->nr_pending)) { + /* + * Must not swap out ctx when there's pending + * events that rely on the ctx->task relation. + */ + raw_spin_unlock(&next_ctx->lock); + rcu_read_unlock(); + goto inside_switch; + } + WRITE_ONCE(ctx->task, next); WRITE_ONCE(next_ctx->task, task); - perf_pmu_disable(pmu); - if (cpuctx->sched_cb_usage && pmu->sched_task) pmu->sched_task(ctx, false); @@ -3473,6 +3500,7 @@ unlock: raw_spin_lock(&ctx->lock); perf_pmu_disable(pmu); +inside_switch: if (cpuctx->sched_cb_usage && pmu->sched_task) pmu->sched_task(ctx, false); task_ctx_sched_out(cpuctx, ctx, EVENT_ALL); @@ -4939,7 +4967,7 @@ static void perf_addr_filters_splice(struct perf_event *event, static void _free_event(struct perf_event *event) { - irq_work_sync(&event->pending); + irq_work_sync(&event->pending_irq); unaccount_event(event); @@ -6439,7 +6467,8 @@ static void perf_sigtrap(struct perf_event *event) return; /* - * perf_pending_event() can race with the task exiting. + * Both perf_pending_task() and perf_pending_irq() can race with the + * task exiting. */ if (current->flags & PF_EXITING) return; @@ -6448,23 +6477,33 @@ static void perf_sigtrap(struct perf_event *event) event->attr.type, event->attr.sig_data); } -static void perf_pending_event_disable(struct perf_event *event) +/* + * Deliver the pending work in-event-context or follow the context. + */ +static void __perf_pending_irq(struct perf_event *event) { - int cpu = READ_ONCE(event->pending_disable); + int cpu = READ_ONCE(event->oncpu); + /* + * If the event isn't running; we done. event_sched_out() will have + * taken care of things. + */ if (cpu < 0) return; + /* + * Yay, we hit home and are in the context of the event. + */ if (cpu == smp_processor_id()) { - WRITE_ONCE(event->pending_disable, -1); - - if (event->attr.sigtrap) { + if (event->pending_sigtrap) { + event->pending_sigtrap = 0; perf_sigtrap(event); - atomic_set_release(&event->event_limit, 1); /* rearm event */ - return; + local_dec(&event->ctx->nr_pending); + } + if (event->pending_disable) { + event->pending_disable = 0; + perf_event_disable_local(event); } - - perf_event_disable_local(event); return; } @@ -6484,35 +6523,62 @@ static void perf_pending_event_disable(struct perf_event *event) * irq_work_queue(); // FAILS * * irq_work_run() - * perf_pending_event() + * perf_pending_irq() * * But the event runs on CPU-B and wants disabling there. 
*/ - irq_work_queue_on(&event->pending, cpu); + irq_work_queue_on(&event->pending_irq, cpu); } -static void perf_pending_event(struct irq_work *entry) +static void perf_pending_irq(struct irq_work *entry) { - struct perf_event *event = container_of(entry, struct perf_event, pending); + struct perf_event *event = container_of(entry, struct perf_event, pending_irq); int rctx; - rctx = perf_swevent_get_recursion_context(); /* * If we 'fail' here, that's OK, it means recursion is already disabled * and we won't recurse 'further'. */ + rctx = perf_swevent_get_recursion_context(); - perf_pending_event_disable(event); - + /* + * The wakeup isn't bound to the context of the event -- it can happen + * irrespective of where the event is. + */ if (event->pending_wakeup) { event->pending_wakeup = 0; perf_event_wakeup(event); } + __perf_pending_irq(event); + if (rctx >= 0) perf_swevent_put_recursion_context(rctx); } +static void perf_pending_task(struct callback_head *head) +{ + struct perf_event *event = container_of(head, struct perf_event, pending_task); + int rctx; + + /* + * If we 'fail' here, that's OK, it means recursion is already disabled + * and we won't recurse 'further'. + */ + preempt_disable_notrace(); + rctx = perf_swevent_get_recursion_context(); + + if (event->pending_work) { + event->pending_work = 0; + perf_sigtrap(event); + local_dec(&event->ctx->nr_pending); + } + + if (rctx >= 0) + perf_swevent_put_recursion_context(rctx); + preempt_enable_notrace(); +} + #ifdef CONFIG_GUEST_PERF_EVENTS struct perf_guest_info_callbacks __rcu *perf_guest_cbs; @@ -9212,8 +9278,8 @@ int perf_event_account_interrupt(struct perf_event *event) */ static int __perf_event_overflow(struct perf_event *event, - int throttle, struct perf_sample_data *data, - struct pt_regs *regs) + int throttle, struct perf_sample_data *data, + struct pt_regs *regs) { int events = atomic_read(&event->event_limit); int ret = 0; @@ -9236,24 +9302,36 @@ static int __perf_event_overflow(struct perf_event *event, if (events && atomic_dec_and_test(&event->event_limit)) { ret = 1; event->pending_kill = POLL_HUP; - event->pending_addr = data->addr; - perf_event_disable_inatomic(event); } + if (event->attr.sigtrap) { + /* + * Should not be able to return to user space without processing + * pending_sigtrap (kernel events can overflow multiple times). 
+ */ + WARN_ON_ONCE(event->pending_sigtrap && event->attr.exclude_kernel); + if (!event->pending_sigtrap) { + event->pending_sigtrap = 1; + local_inc(&event->ctx->nr_pending); + } + event->pending_addr = data->addr; + irq_work_queue(&event->pending_irq); + } + READ_ONCE(event->overflow_handler)(event, data, regs); if (*perf_event_fasync(event) && event->pending_kill) { event->pending_wakeup = 1; - irq_work_queue(&event->pending); + irq_work_queue(&event->pending_irq); } return ret; } int perf_event_overflow(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) + struct perf_sample_data *data, + struct pt_regs *regs) { return __perf_event_overflow(event, 1, data, regs); } @@ -11570,8 +11648,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, init_waitqueue_head(&event->waitq); - event->pending_disable = -1; - init_irq_work(&event->pending, perf_pending_event); + init_irq_work(&event->pending_irq, perf_pending_irq); + init_task_work(&event->pending_task, perf_pending_task); mutex_init(&event->mmap_mutex); raw_spin_lock_init(&event->addr_filters.lock); @@ -11593,9 +11671,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, if (parent_event) event->event_caps = parent_event->event_caps; - if (event->attr.sigtrap) - atomic_set(&event->event_limit, 1); - if (task) { event->attach_state = PERF_ATTACH_TASK; /* diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 726132039c38..273a0fe7910a 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -22,7 +22,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle) atomic_set(&handle->rb->poll, EPOLLIN); handle->event->pending_wakeup = 1; - irq_work_queue(&handle->event->pending); + irq_work_queue(&handle->event->pending_irq); } /* diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index 460c12b7dfea..7971e989e425 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -30,6 +30,13 @@ #define GCOV_TAG_FUNCTION_LENGTH 3 +/* Since GCC 12.1 sizes are in BYTES and not in WORDS (4B). */ +#if (__GNUC__ >= 12) +#define GCOV_UNIT_SIZE 4 +#else +#define GCOV_UNIT_SIZE 1 +#endif + static struct gcov_info *gcov_info_head; /** @@ -383,12 +390,18 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info) pos += store_gcov_u32(buffer, pos, info->version); pos += store_gcov_u32(buffer, pos, info->stamp); +#if (__GNUC__ >= 12) + /* Use zero as checksum of the compilation unit. */ + pos += store_gcov_u32(buffer, pos, 0); +#endif + for (fi_idx = 0; fi_idx < info->n_functions; fi_idx++) { fi_ptr = info->functions[fi_idx]; /* Function record. */ pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION); - pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION_LENGTH); + pos += store_gcov_u32(buffer, pos, + GCOV_TAG_FUNCTION_LENGTH * GCOV_UNIT_SIZE); pos += store_gcov_u32(buffer, pos, fi_ptr->ident); pos += store_gcov_u32(buffer, pos, fi_ptr->lineno_checksum); pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum); @@ -402,7 +415,8 @@ size_t convert_to_gcda(char *buffer, struct gcov_info *info) /* Counter record. 
*/ pos += store_gcov_u32(buffer, pos, GCOV_TAG_FOR_COUNTER(ct_idx)); - pos += store_gcov_u32(buffer, pos, ci_ptr->num * 2); + pos += store_gcov_u32(buffer, pos, + ci_ptr->num * 2 * GCOV_UNIT_SIZE); for (cv_idx = 0; cv_idx < ci_ptr->num; cv_idx++) { pos += store_gcov_u64(buffer, pos, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5800b0623ff3..cb2aa2b54c7a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4823,10 +4823,10 @@ static inline void finish_task(struct task_struct *prev) #ifdef CONFIG_SMP -static void do_balance_callbacks(struct rq *rq, struct callback_head *head) +static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) { void (*func)(struct rq *rq); - struct callback_head *next; + struct balance_callback *next; lockdep_assert_rq_held(rq); @@ -4853,15 +4853,15 @@ static void balance_push(struct rq *rq); * This abuse is tolerated because it places all the unlikely/odd cases behind * a single test, namely: rq->balance_callback == NULL. */ -struct callback_head balance_push_callback = { +struct balance_callback balance_push_callback = { .next = NULL, - .func = (void (*)(struct callback_head *))balance_push, + .func = balance_push, }; -static inline struct callback_head * +static inline struct balance_callback * __splice_balance_callbacks(struct rq *rq, bool split) { - struct callback_head *head = rq->balance_callback; + struct balance_callback *head = rq->balance_callback; if (likely(!head)) return NULL; @@ -4883,7 +4883,7 @@ __splice_balance_callbacks(struct rq *rq, bool split) return head; } -static inline struct callback_head *splice_balance_callbacks(struct rq *rq) +static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) { return __splice_balance_callbacks(rq, true); } @@ -4893,7 +4893,7 @@ static void __balance_callbacks(struct rq *rq) do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); } -static inline void balance_callbacks(struct rq *rq, struct callback_head *head) +static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) { unsigned long flags; @@ -4910,12 +4910,12 @@ static inline void __balance_callbacks(struct rq *rq) { } -static inline struct callback_head *splice_balance_callbacks(struct rq *rq) +static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) { return NULL; } -static inline void balance_callbacks(struct rq *rq, struct callback_head *head) +static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) { } @@ -6188,7 +6188,7 @@ static void sched_core_balance(struct rq *rq) preempt_enable(); } -static DEFINE_PER_CPU(struct callback_head, core_balance_head); +static DEFINE_PER_CPU(struct balance_callback, core_balance_head); static void queue_core_balance(struct rq *rq) { @@ -7419,7 +7419,7 @@ static int __sched_setscheduler(struct task_struct *p, int oldpolicy = -1, policy = attr->sched_policy; int retval, oldprio, newprio, queued, running; const struct sched_class *prev_class; - struct callback_head *head; + struct balance_callback *head; struct rq_flags rf; int reset_on_fork; int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 86dea6a05267..9ae8f41e3372 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -644,8 +644,8 @@ static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) return rq->online && dl_task(prev); } -static DEFINE_PER_CPU(struct callback_head, dl_push_head); -static 
DEFINE_PER_CPU(struct callback_head, dl_pull_head); +static DEFINE_PER_CPU(struct balance_callback, dl_push_head); +static DEFINE_PER_CPU(struct balance_callback, dl_pull_head); static void push_dl_tasks(struct rq *); static void pull_dl_task(struct rq *); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index d869bcf898cc..ed2a47e4ddae 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -410,8 +410,8 @@ static inline int has_pushable_tasks(struct rq *rq) return !plist_head_empty(&rq->rt.pushable_tasks); } -static DEFINE_PER_CPU(struct callback_head, rt_push_head); -static DEFINE_PER_CPU(struct callback_head, rt_pull_head); +static DEFINE_PER_CPU(struct balance_callback, rt_push_head); +static DEFINE_PER_CPU(struct balance_callback, rt_pull_head); static void push_rt_tasks(struct rq *); static void pull_rt_task(struct rq *); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1644242ecd11..a4a20046e586 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -938,6 +938,12 @@ struct uclamp_rq { DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); #endif /* CONFIG_UCLAMP_TASK */ +struct rq; +struct balance_callback { + struct balance_callback *next; + void (*func)(struct rq *rq); +}; + /* * This is the main, per-CPU runqueue data structure. * @@ -1036,7 +1042,7 @@ struct rq { unsigned long cpu_capacity; unsigned long cpu_capacity_orig; - struct callback_head *balance_callback; + struct balance_callback *balance_callback; unsigned char nohz_idle_balance; unsigned char idle_balance; @@ -1182,6 +1188,14 @@ static inline bool is_migration_disabled(struct task_struct *p) #endif } +DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); + +#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) +#define this_rq() this_cpu_ptr(&runqueues) +#define task_rq(p) cpu_rq(task_cpu(p)) +#define cpu_curr(cpu) (cpu_rq(cpu)->curr) +#define raw_rq() raw_cpu_ptr(&runqueues) + struct sched_group; #ifdef CONFIG_SCHED_CORE static inline struct cpumask *sched_group_span(struct sched_group *sg); @@ -1269,7 +1283,7 @@ static inline bool sched_group_cookie_match(struct rq *rq, return true; for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { - if (sched_core_cookie_match(rq, p)) + if (sched_core_cookie_match(cpu_rq(cpu), p)) return true; } return false; @@ -1384,14 +1398,6 @@ static inline void update_idle_core(struct rq *rq) static inline void update_idle_core(struct rq *rq) { } #endif -DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); - -#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) -#define this_rq() this_cpu_ptr(&runqueues) -#define task_rq(p) cpu_rq(task_cpu(p)) -#define cpu_curr(cpu) (cpu_rq(cpu)->curr) -#define raw_rq() raw_cpu_ptr(&runqueues) - #ifdef CONFIG_FAIR_GROUP_SCHED static inline struct task_struct *task_of(struct sched_entity *se) { @@ -1544,7 +1550,7 @@ struct rq_flags { #endif }; -extern struct callback_head balance_push_callback; +extern struct balance_callback balance_push_callback; /* * Lockdep annotation that avoids accidental unlocks; it's like a @@ -1724,7 +1730,7 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p) static inline void queue_balance_callback(struct rq *rq, - struct callback_head *head, + struct balance_callback *head, void (*func)(struct rq *rq)) { lockdep_assert_rq_held(rq); @@ -1737,7 +1743,7 @@ queue_balance_callback(struct rq *rq, if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) return; - head->func = (void (*)(struct callback_head *))func; + head->func = func; head->next = rq->balance_callback; 
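/*
 * [Editor's illustrative sketch, not part of the patch above; all
 * "example_" names are hypothetical.] The kernel/sched hunks replace the
 * generic struct callback_head with the dedicated struct balance_callback
 * declared in sched.h, so the callback receives a struct rq * directly and
 * queue_balance_callback() no longer needs a function-pointer cast. Under
 * those definitions, a user of the pattern would look roughly like this:
 */
static void example_push_tasks(struct rq *rq)
{
	/* the actual balancing work, run later from do_balance_callbacks() */
}

static DEFINE_PER_CPU(struct balance_callback, example_push_head);

static void example_queue_push(struct rq *rq)
{
	/* rq lock must be held, as queue_balance_callback() asserts */
	queue_balance_callback(rq, &per_cpu(example_push_head, rq->cpu),
			       example_push_tasks);
}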
rq->balance_callback = head; } diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7f5eb295fe19..a995ea1ef849 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -346,8 +346,40 @@ static void put_probe_ref(void) mutex_unlock(&blk_probe_mutex); } +static int blk_trace_start(struct blk_trace *bt) +{ + if (bt->trace_state != Blktrace_setup && + bt->trace_state != Blktrace_stopped) + return -EINVAL; + + blktrace_seq++; + smp_mb(); + bt->trace_state = Blktrace_running; + raw_spin_lock_irq(&running_trace_lock); + list_add(&bt->running_list, &running_trace_list); + raw_spin_unlock_irq(&running_trace_lock); + trace_note_time(bt); + + return 0; +} + +static int blk_trace_stop(struct blk_trace *bt) +{ + if (bt->trace_state != Blktrace_running) + return -EINVAL; + + bt->trace_state = Blktrace_stopped; + raw_spin_lock_irq(&running_trace_lock); + list_del_init(&bt->running_list); + raw_spin_unlock_irq(&running_trace_lock); + relay_flush(bt->rchan); + + return 0; +} + static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt) { + blk_trace_stop(bt); synchronize_rcu(); blk_trace_free(q, bt); put_probe_ref(); @@ -362,8 +394,7 @@ static int __blk_trace_remove(struct request_queue *q) if (!bt) return -EINVAL; - if (bt->trace_state != Blktrace_running) - blk_trace_cleanup(q, bt); + blk_trace_cleanup(q, bt); return 0; } @@ -658,7 +689,6 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name, static int __blk_trace_startstop(struct request_queue *q, int start) { - int ret; struct blk_trace *bt; bt = rcu_dereference_protected(q->blk_trace, @@ -666,36 +696,10 @@ static int __blk_trace_startstop(struct request_queue *q, int start) if (bt == NULL) return -EINVAL; - /* - * For starting a trace, we can transition from a setup or stopped - * trace. 
For stopping a trace, the state must be running - */ - ret = -EINVAL; - if (start) { - if (bt->trace_state == Blktrace_setup || - bt->trace_state == Blktrace_stopped) { - blktrace_seq++; - smp_mb(); - bt->trace_state = Blktrace_running; - raw_spin_lock_irq(&running_trace_lock); - list_add(&bt->running_list, &running_trace_list); - raw_spin_unlock_irq(&running_trace_lock); - - trace_note_time(bt); - ret = 0; - } - } else { - if (bt->trace_state == Blktrace_running) { - bt->trace_state = Blktrace_stopped; - raw_spin_lock_irq(&running_trace_lock); - list_del_init(&bt->running_list); - raw_spin_unlock_irq(&running_trace_lock); - relay_flush(bt->rchan); - ret = 0; - } - } - - return ret; + if (start) + return blk_trace_start(bt); + else + return blk_trace_stop(bt); } int blk_trace_startstop(struct request_queue *q, int start) @@ -772,10 +776,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) void blk_trace_shutdown(struct request_queue *q) { if (rcu_dereference_protected(q->blk_trace, - lockdep_is_held(&q->debugfs_mutex))) { - __blk_trace_startstop(q, 0); + lockdep_is_held(&q->debugfs_mutex))) __blk_trace_remove(q); - } } #ifdef CONFIG_BLK_CGROUP @@ -1614,13 +1616,7 @@ static int blk_trace_remove_queue(struct request_queue *q) if (bt == NULL) return -EINVAL; - if (bt->trace_state == Blktrace_running) { - bt->trace_state = Blktrace_stopped; - raw_spin_lock_irq(&running_trace_lock); - list_del_init(&bt->running_list); - raw_spin_unlock_irq(&running_trace_lock); - relay_flush(bt->rchan); - } + blk_trace_stop(bt); put_probe_ref(); synchronize_rcu(); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 49fb9ec8366d..1ed08967fb97 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -687,6 +687,7 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, perf_sample_data_init(sd, 0, 0); sd->raw = &raw; + sd->sample_flags |= PERF_SAMPLE_RAW; err = __bpf_perf_event_output(regs, map, flags, sd); @@ -745,6 +746,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, perf_fetch_caller_regs(regs); perf_sample_data_init(sd, 0, 0); sd->raw = &raw; + sd->sample_flags |= PERF_SAMPLE_RAW; ret = __bpf_perf_event_output(regs, map, flags, sd); out: diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index 064072c16e3d..f50398cb790d 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c @@ -74,6 +74,7 @@ static int proc_do_uts_string(struct ctl_table *table, int write, static DEFINE_CTL_TABLE_POLL(hostname_poll); static DEFINE_CTL_TABLE_POLL(domainname_poll); +// Note: update 'enum uts_proc' to match any changes to this table static struct ctl_table uts_kern_table[] = { { .procname = "arch", diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1cc4a5f4791e..03fc7e5edf07 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2455,7 +2455,16 @@ static void __split_huge_page_tail(struct page *head, int tail, page_tail); page_tail->mapping = head->mapping; page_tail->index = head->index + tail; - page_tail->private = 0; + + /* + * page->private should not be set in tail pages with the exception + * of swap cache pages that store the swp_entry_t in tail pages. + * Fix up and warn once if private is unexpectedly set. + */ + if (!folio_test_swapcache(page_folio(head))) { + VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, head); + page_tail->private = 0; + } /* Page flags must be visible before we make the page non-compound. 
*/ smp_wmb(); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b586cdd75930..546df97c31e4 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1014,15 +1014,23 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma) VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); /* * Clear vm_private_data + * - For shared mappings this is a per-vma semaphore that may be + * allocated in a subsequent call to hugetlb_vm_op_open. + * Before clearing, make sure pointer is not associated with vma + * as this will leak the structure. This is the case when called + * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already + * been called to allocate a new structure. * - For MAP_PRIVATE mappings, this is the reserve map which does * not apply to children. Faults generated by the children are * not guaranteed to succeed, even if read-only. - * - For shared mappings this is a per-vma semaphore that may be - * allocated in a subsequent call to hugetlb_vm_op_open. */ - vma->vm_private_data = (void *)0; - if (!(vma->vm_flags & VM_MAYSHARE)) - return; + if (vma->vm_flags & VM_MAYSHARE) { + struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; + + if (vma_lock && vma_lock->vma != vma) + vma->vm_private_data = NULL; + } else + vma->vm_private_data = NULL; } /* @@ -2924,11 +2932,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, page = alloc_buddy_huge_page_with_mpol(h, vma, addr); if (!page) goto out_uncharge_cgroup; + spin_lock_irq(&hugetlb_lock); if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { SetHPageRestoreReserve(page); h->resv_huge_pages--; } - spin_lock_irq(&hugetlb_lock); list_add(&page->lru, &h->hugepage_activelist); set_page_refcounted(page); /* Fall through */ @@ -4601,6 +4609,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) struct resv_map *resv = vma_resv_map(vma); /* + * HPAGE_RESV_OWNER indicates a private mapping. * This new VMA should share its siblings reservation map if present. * The VMA will only ever have a valid reservation map pointer where * it is being copied for another still existing VMA. As that VMA @@ -4615,11 +4624,21 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma) /* * vma_lock structure for sharable mappings is vma specific. - * Clear old pointer (if copied via vm_area_dup) and create new. + * Clear old pointer (if copied via vm_area_dup) and allocate + * new structure. Before clearing, make sure vma_lock is not + * for this vma. 
*/ if (vma->vm_flags & VM_MAYSHARE) { - vma->vm_private_data = NULL; - hugetlb_vma_lock_alloc(vma); + struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; + + if (vma_lock) { + if (vma_lock->vma != vma) { + vma->vm_private_data = NULL; + hugetlb_vma_lock_alloc(vma); + } else + pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); + } else + hugetlb_vma_lock_alloc(vma); } } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index a937eaec5b68..61aa9aedb728 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -787,17 +787,22 @@ static int vma_replace_policy(struct vm_area_struct *vma, static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { - MA_STATE(mas, &mm->mm_mt, start - 1, start - 1); + MA_STATE(mas, &mm->mm_mt, start, start); struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; - prev = mas_find_rev(&mas, 0); - if (prev && (start < prev->vm_end)) - vma = prev; - else - vma = mas_next(&mas, end - 1); + prev = mas_prev(&mas, 0); + if (unlikely(!prev)) + mas_set(&mas, start); + + vma = mas_find(&mas, end - 1); + if (WARN_ON(!vma)) + return 0; + + if (start > vma->vm_start) + prev = vma; for (; vma; vma = mas_next(&mas, end - 1)) { unsigned long vmstart = max(start, vma->vm_start); diff --git a/mm/mmap.c b/mm/mmap.c index bf2122af94e7..e270057ed04e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -618,7 +618,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, struct vm_area_struct *expand) { struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end); + struct vm_area_struct *next_next = NULL; /* uninit var warning */ + struct vm_area_struct *next = find_vma(mm, vma->vm_end); struct vm_area_struct *orig_vma = vma; struct address_space *mapping = NULL; struct rb_root_cached *root = NULL; @@ -2625,14 +2626,14 @@ cannot_expand: if (error) goto unmap_and_free_vma; - /* Can addr have changed?? - * - * Answer: Yes, several device drivers can do it in their - * f_op->mmap method. -DaveM + /* + * Expansion is handled above, merging is handled below. + * Drivers should not alter the address of the VMA. */ - WARN_ON_ONCE(addr != vma->vm_start); - - addr = vma->vm_start; + if (WARN_ON((addr != vma->vm_start))) { + error = -EINVAL; + goto close_and_free_vma; + } mas_reset(&mas); /* @@ -2654,7 +2655,6 @@ cannot_expand: vm_area_free(vma); vma = merge; /* Update vm_flags to pick up the change. 
*/ - addr = vma->vm_start; vm_flags = vma->vm_flags; goto unmap_writable; } @@ -2681,7 +2681,7 @@ cannot_expand: if (mas_preallocate(&mas, vma, GFP_KERNEL)) { error = -ENOMEM; if (file) - goto unmap_and_free_vma; + goto close_and_free_vma; else goto free_vma; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e20ade858e71..b5a6c815ae28 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5784,14 +5784,18 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, size_t size) { if (addr) { - unsigned long alloc_end = addr + (PAGE_SIZE << order); - unsigned long used = addr + PAGE_ALIGN(size); - - split_page(virt_to_page((void *)addr), order); - while (used < alloc_end) { - free_page(used); - used += PAGE_SIZE; - } + unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); + struct page *page = virt_to_page((void *)addr); + struct page *last = page + nr; + + split_page_owner(page, 1 << order); + split_page_memcg(page, 1 << order); + while (page < --last) + set_page_refcounted(last); + + last = page + (1UL << order); + for (page += nr; page < last; page++) + __free_pages_ok(page, 0, FPI_TO_TAIL); } return (void *)addr; } diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 525758713a55..d03941cace2c 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -2311,6 +2311,9 @@ void zs_destroy_pool(struct zs_pool *pool) int fg; struct size_class *class = pool->size_class[i]; + if (!class) + continue; + if (class->index != i) continue; diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index 829db9eba0cb..aaf64b953915 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff, if (!page) return -ENOMEM; - for (p = page, len = 0; len < nbytes; p++, len++) { + for (p = page, len = 0; len < nbytes; p++) { if (get_user(*p, buff++)) { free_page((unsigned long)page); return -EFAULT; } + len += 1; if (*p == '\0' || *p == '\n') break; } diff --git a/net/core/dev.c b/net/core/dev.c index fa53830d0683..3be256051e99 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5136,11 +5136,13 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, case TC_ACT_SHOT: mini_qdisc_qstats_cpu_drop(miniq); kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS); + *ret = NET_RX_DROP; return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: case TC_ACT_TRAP: consume_skb(skb); + *ret = NET_RX_SUCCESS; return NULL; case TC_ACT_REDIRECT: /* skb_mac_header check was done by cls/act_bpf, so @@ -5153,8 +5155,10 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, *another = true; break; } + *ret = NET_RX_SUCCESS; return NULL; case TC_ACT_CONSUMED: + *ret = NET_RX_SUCCESS; return NULL; default: break; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index ca70525621c7..1efdc47a999b 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -500,11 +500,11 @@ bool sk_msg_is_readable(struct sock *sk) } EXPORT_SYMBOL_GPL(sk_msg_is_readable); -static struct sk_msg *alloc_sk_msg(void) +static struct sk_msg *alloc_sk_msg(gfp_t gfp) { struct sk_msg *msg; - msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL); + msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN); if (unlikely(!msg)) return NULL; sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS); @@ -520,7 +520,7 @@ static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk, if (!sk_rmem_schedule(sk, skb, skb->truesize)) return NULL; - return alloc_sk_msg(); + return alloc_sk_msg(GFP_KERNEL); } static int sk_psock_skb_ingress_enqueue(struct 
sk_buff *skb, @@ -597,7 +597,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len) { - struct sk_msg *msg = alloc_sk_msg(); + struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC); struct sock *sk = psock->sk; int err; diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 5daa1fa54249..fb90e1e00773 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -21,6 +21,22 @@ static DEFINE_IDA(reuseport_ida); static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse, struct sock_reuseport *reuse, bool bind_inany); +void reuseport_has_conns_set(struct sock *sk) +{ + struct sock_reuseport *reuse; + + if (!rcu_access_pointer(sk->sk_reuseport_cb)) + return; + + spin_lock_bh(&reuseport_lock); + reuse = rcu_dereference_protected(sk->sk_reuseport_cb, + lockdep_is_held(&reuseport_lock)); + if (likely(reuse)) + reuse->has_conns = 1; + spin_unlock_bh(&reuseport_lock); +} +EXPORT_SYMBOL(reuseport_has_conns_set); + static int reuseport_sock_index(struct sock *sk, const struct sock_reuseport *reuse, bool closed) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 1a59918d3b30..a9fde48cffd4 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -3145,7 +3145,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, case NETDEV_CHANGELOWERSTATE: { struct netdev_notifier_changelowerstate_info *info = ptr; struct dsa_port *dp; - int err; + int err = 0; if (dsa_slave_dev_check(dev)) { dp = dsa_slave_to_port(dev); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 5bf357734b11..a50429a62f74 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -150,15 +150,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, struct hsr_port *port) { if (!frame->skb_std) { - if (frame->skb_hsr) { + if (frame->skb_hsr) frame->skb_std = create_stripped_skb_hsr(frame->skb_hsr, frame); - } else { - /* Unexpected */ - WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n", - __FILE__, __LINE__, port->dev->name); + else + netdev_warn_once(port->dev, + "Unexpected frame received in hsr_get_untagged_frame()\n"); + + if (!frame->skb_std) return NULL; - } } return skb_clone(frame->skb_std, GFP_ATOMIC); diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 0ee7fd259730..4d1af0cd7d99 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -70,7 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len } inet->inet_daddr = fl4->daddr; inet->inet_dport = usin->sin_port; - reuseport_has_conns(sk, true); + reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); inet->inet_id = get_random_u16(); diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index ff85db52b2e5..ded5bef02f77 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) flow.flowi4_tos = iph->tos & IPTOS_RT_MASK; flow.flowi4_scope = RT_SCOPE_UNIVERSE; flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par)); + flow.flowi4_uid = sock_net_uid(xt_net(par), NULL); return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert; } diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index e886147eed11..fc65d69f23e1 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ 
b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -65,6 +65,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, struct flowi4 fl4 = { .flowi4_scope = RT_SCOPE_UNIVERSE, .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_uid = sock_net_uid(nft_net(pkt), NULL), }; const struct net_device *oif; const struct net_device *found; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f8232811a5be..ef14efa1fb70 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -457,6 +457,7 @@ void tcp_init_sock(struct sock *sk) WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1])); WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1])); + set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); sk_sockets_allocated_inc(sk); } EXPORT_SYMBOL(tcp_init_sock); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 662d717d5123..6a320a614e54 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -448,7 +448,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); /* Fall back to scoring if group has connections */ - if (result && !reuseport_has_conns(sk, false)) + if (result && !reuseport_has_conns(sk)) return result; result = result ? : sk; @@ -1624,6 +1624,7 @@ int udp_init_sock(struct sock *sk) { skb_queue_head_init(&udp_sk(sk)->reader_queue); sk->sk_destruct = udp_destruct_sock; + set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); return 0; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 417834b7169d..9c3f5202a97b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -7214,9 +7214,11 @@ err_reg_dflt: __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL); err_reg_all: kfree(dflt); + net->ipv6.devconf_dflt = NULL; #endif err_alloc_dflt: kfree(all); + net->ipv6.devconf_all = NULL; err_alloc_all: kfree(net->ipv6.inet6_addr_lst); err_alloc_addr: diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index df665d4e8f0f..5ecb56522f9d 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -256,7 +256,7 @@ ipv4_connected: goto out; } - reuseport_has_conns(sk, true); + reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); out: diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 69d86b040a6a..a01d9b842bd0 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -40,6 +40,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, .flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev), .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK, .flowi6_proto = iph->nexthdr, + .flowi6_uid = sock_net_uid(net, NULL), .daddr = iph->saddr, }; int lookup_flags; diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 91faac610e03..36dc14b34388 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -66,6 +66,7 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv, struct flowi6 fl6 = { .flowi6_iif = LOOPBACK_IFINDEX, .flowi6_proto = pkt->tprot, + .flowi6_uid = sock_net_uid(nft_net(pkt), NULL), }; u32 ret = 0; @@ -163,6 +164,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, struct flowi6 fl6 = { .flowi6_iif = LOOPBACK_IFINDEX, .flowi6_proto = pkt->tprot, + .flowi6_uid = sock_net_uid(nft_net(pkt), NULL), }; struct rt6_info *rt; int lookup_flags; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8d09f0ea5b8c..129ec5a9b0eb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -195,7 +195,7 @@ 
static struct sock *udp6_lib_lookup2(struct net *net, result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); /* Fall back to scoring if group has connections */ - if (result && !reuseport_has_conns(sk, false)) + if (result && !reuseport_has_conns(sk)) return result; result = result ? : sk; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index a0653a8dfa82..58d9cbc9ccdc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5865,8 +5865,9 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set, (NFT_SET_CONCAT | NFT_SET_INTERVAL)) { if (flags & NFT_SET_ELEM_INTERVAL_END) return false; - if (!nla[NFTA_SET_ELEM_KEY_END] && - !(flags & NFT_SET_ELEM_CATCHALL)) + + if (nla[NFTA_SET_ELEM_KEY_END] && + flags & NFT_SET_ELEM_CATCHALL) return false; } else { if (nla[NFTA_SET_ELEM_KEY_END]) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index c98af0ada706..4a27dfb1ba0f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1099,12 +1099,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, skip: if (!ingress) { - notify_and_destroy(net, skb, n, classid, - rtnl_dereference(dev->qdisc), new); + old = rtnl_dereference(dev->qdisc); if (new && !new->ops->attach) qdisc_refcount_inc(new); rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc); + notify_and_destroy(net, skb, n, classid, old, new); + if (new && new->ops->attach) new->ops->attach(new); } else { diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 817cd0695b35..3ed0c3342189 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -2224,8 +2224,12 @@ retry: static void cake_reset(struct Qdisc *sch) { + struct cake_sched_data *q = qdisc_priv(sch); u32 c; + if (!q->tins) + return; + for (c = 0; c < CAKE_MAX_TINS; c++) cake_clear_tin(sch, c); } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 99d318b60568..8c4fee063436 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -478,24 +478,26 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, if (opt) { err = fq_codel_change(sch, opt, extack); if (err) - return err; + goto init_failure; } err = tcf_block_get(&q->block, &q->filter_list, sch, extack); if (err) - return err; + goto init_failure; if (!q->flows) { q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_codel_flow), GFP_KERNEL); - if (!q->flows) - return -ENOMEM; - + if (!q->flows) { + err = -ENOMEM; + goto init_failure; + } q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL); - if (!q->backlogs) - return -ENOMEM; - + if (!q->backlogs) { + err = -ENOMEM; + goto alloc_failure; + } for (i = 0; i < q->flows_cnt; i++) { struct fq_codel_flow *flow = q->flows + i; @@ -508,6 +510,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt, else sch->flags &= ~TCQ_F_CAN_BYPASS; return 0; + +alloc_failure: + kvfree(q->flows); + q->flows = NULL; +init_failure: + q->flows_cnt = 0; + return err; } static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 0366a1a029a9..1871a1c0224d 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -455,7 +455,8 @@ static void sfb_reset(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); - qdisc_reset(q->qdisc); + if (likely(q->qdisc)) + qdisc_reset(q->qdisc); q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index e6ee797640b4..c305d8dd23f8 100644 --- 
a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -896,7 +896,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) } memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1], SMC_MAX_PNETID_LEN); - if (smc_wr_alloc_lgr_mem(lgr)) + rc = smc_wr_alloc_lgr_mem(lgr); + if (rc) goto free_wq; smc_llc_lgr_init(lgr, smc); diff --git a/net/tipc/discover.c b/net/tipc/discover.c index da69e1abf68f..e8630707901e 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -148,8 +148,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d, { struct net *net = d->net; struct tipc_net *tn = tipc_net(net); - bool trial = time_before(jiffies, tn->addr_trial_end); u32 self = tipc_own_addr(net); + bool trial = time_before(jiffies, tn->addr_trial_end) && !self; if (mtyp == DSC_TRIAL_FAIL_MSG) { if (!trial) diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 5522865deae9..14fd05fd6107 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -568,7 +568,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower, sub.seq.upper = upper; sub.timeout = TIPC_WAIT_FOREVER; sub.filter = filter; - *(u32 *)&sub.usr_handle = port; + *(u64 *)&sub.usr_handle = (u64)port; con = tipc_conn_alloc(tipc_topsrv(net)); if (IS_ERR(con)) diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 9b79e334dbd9..955ac3e0bf4d 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -273,7 +273,7 @@ static int tls_strp_read_copyin(struct tls_strparser *strp) return desc.error; } -static int tls_strp_read_short(struct tls_strparser *strp) +static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort) { struct skb_shared_info *shinfo; struct page *page; @@ -283,7 +283,7 @@ static int tls_strp_read_short(struct tls_strparser *strp) * to read the data out. Otherwise the connection will stall. * Without pressure threshold of INT_MAX will never be ready. 
*/ - if (likely(!tcp_epollin_ready(strp->sk, INT_MAX))) + if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX))) return 0; shinfo = skb_shinfo(strp->anchor); @@ -315,6 +315,27 @@ static int tls_strp_read_short(struct tls_strparser *strp) return 0; } +static bool tls_strp_check_no_dup(struct tls_strparser *strp) +{ + unsigned int len = strp->stm.offset + strp->stm.full_len; + struct sk_buff *skb; + u32 seq; + + skb = skb_shinfo(strp->anchor)->frag_list; + seq = TCP_SKB_CB(skb)->seq; + + while (skb->len < len) { + seq += skb->len; + len -= skb->len; + skb = skb->next; + + if (TCP_SKB_CB(skb)->seq != seq) + return false; + } + + return true; +} + static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len) { struct tcp_sock *tp = tcp_sk(strp->sk); @@ -373,7 +394,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp) return tls_strp_read_copyin(strp); if (inq < strp->stm.full_len) - return tls_strp_read_short(strp); + return tls_strp_read_copy(strp, true); if (!strp->stm.full_len) { tls_strp_load_anchor_with_queue(strp, inq); @@ -387,9 +408,12 @@ static int tls_strp_read_sock(struct tls_strparser *strp) strp->stm.full_len = sz; if (!strp->stm.full_len || inq < strp->stm.full_len) - return tls_strp_read_short(strp); + return tls_strp_read_copy(strp, true); } + if (!tls_strp_check_no_dup(strp)) + return tls_strp_read_copy(strp, false); + strp->msg_ready = 1; tls_rx_msg_ready(strp); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index fe5fcf571c56..64a6a37dc36d 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2022,7 +2022,8 @@ static inline int convert_context_handle_invalid_context( * in `newc'. Verify that the context is valid * under the new policy. */ -static int convert_context(struct context *oldc, struct context *newc, void *p) +static int convert_context(struct context *oldc, struct context *newc, void *p, + gfp_t gfp_flags) { struct convert_context_args *args; struct ocontext *oc; @@ -2036,7 +2037,7 @@ static int convert_context(struct context *oldc, struct context *newc, void *p) args = p; if (oldc->str) { - s = kstrdup(oldc->str, GFP_KERNEL); + s = kstrdup(oldc->str, gfp_flags); if (!s) return -ENOMEM; diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index a54b8652bfb5..db5cce385bf8 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -325,7 +325,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context, } rc = convert->func(context, &dst_convert->context, - convert->args); + convert->args, GFP_ATOMIC); if (rc) { context_destroy(&dst->context); goto out_unlock; @@ -404,7 +404,7 @@ static int sidtab_convert_tree(union sidtab_entry_inner *edst, while (i < SIDTAB_LEAF_ENTRIES && *pos < count) { rc = convert->func(&esrc->ptr_leaf->entries[i].context, &edst->ptr_leaf->entries[i].context, - convert->args); + convert->args, GFP_KERNEL); if (rc) return rc; (*pos)++; diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h index 4eff0e49dcb2..9fce0d553fe2 100644 --- a/security/selinux/ss/sidtab.h +++ b/security/selinux/ss/sidtab.h @@ -65,7 +65,7 @@ struct sidtab_isid_entry { }; struct sidtab_convert_params { - int (*func)(struct context *oldc, struct context *newc, void *args); + int (*func)(struct context *oldc, struct context *newc, void *args, gfp_t gfp_flags); void *args; struct sidtab *target; }; diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index eed0315a77a6..0d5d4419139a 100644 
--- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -1177,6 +1177,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220 #define KVM_CAP_S390_ZPCI_OP 221 #define KVM_CAP_S390_CPU_TOPOLOGY 222 +#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c index e05ecb31823f..9c131d977a1b 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_init.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -662,8 +662,8 @@ int test_kvm_device(uint32_t gic_dev_type) : KVM_DEV_TYPE_ARM_VGIC_V2; if (!__kvm_test_create_device(v.vm, other)) { - ret = __kvm_test_create_device(v.vm, other); - TEST_ASSERT(ret && (errno == EINVAL || errno == EEXIST), + ret = __kvm_create_device(v.vm, other); + TEST_ASSERT(ret < 0 && (errno == EINVAL || errno == EEXIST), "create GIC device while other version exists"); } diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index 6ee7e1dde404..bb1d17a1171b 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -67,7 +67,7 @@ struct memslot_antagonist_args { static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, uint64_t nr_modifications) { - const uint64_t pages = 1; + uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size; uint64_t gpa; int i; diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 2a6b0bc648c4..69c58362c0ed 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -70,6 +70,7 @@ TEST_PROGS += io_uring_zerocopy_tx.sh TEST_GEN_FILES += bind_bhash TEST_GEN_PROGS += sk_bind_sendto_listen TEST_GEN_PROGS += sk_connect_zero_addr +TEST_PROGS += test_ingress_egress_chaining.sh TEST_FILES := settings diff --git a/tools/testing/selftests/net/test_ingress_egress_chaining.sh b/tools/testing/selftests/net/test_ingress_egress_chaining.sh new file mode 100644 index 000000000000..08adff6bb3b6 --- /dev/null +++ b/tools/testing/selftests/net/test_ingress_egress_chaining.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test runs a simple ingress tc setup between two veth pairs, +# and chains a single egress rule to test ingress chaining to egress. +# +# Kselftest framework requirement - SKIP code is 4. 
+ksft_skip=4
+
+if [ "$(id -u)" -ne 0 ];then
+  echo "SKIP: Need root privileges"
+  exit $ksft_skip
+fi
+
+needed_mods="act_mirred cls_flower sch_ingress"
+for mod in $needed_mods; do
+  modinfo $mod &>/dev/null || { echo "SKIP: Need act_mirred module"; exit $ksft_skip; }
+done
+
+ns="ns$((RANDOM%899+100))"
+veth1="veth1$((RANDOM%899+100))"
+veth2="veth2$((RANDOM%899+100))"
+peer1="peer1$((RANDOM%899+100))"
+peer2="peer2$((RANDOM%899+100))"
+ip_peer1=198.51.100.5
+ip_peer2=198.51.100.6
+
+function fail() {
+  echo "FAIL: $@" >> /dev/stderr
+  exit 1
+}
+
+function cleanup() {
+  killall -q -9 udpgso_bench_rx
+  ip link del $veth1 &> /dev/null
+  ip link del $veth2 &> /dev/null
+  ip netns del $ns &> /dev/null
+}
+trap cleanup EXIT
+
+function config() {
+  echo "Setup veth pairs [$veth1, $peer1], and veth pair [$veth2, $peer2]"
+  ip link add $veth1 type veth peer name $peer1
+  ip link add $veth2 type veth peer name $peer2
+  ip addr add $ip_peer1/24 dev $peer1
+  ip link set $peer1 up
+  ip netns add $ns
+  ip link set dev $peer2 netns $ns
+  ip netns exec $ns ip addr add $ip_peer2/24 dev $peer2
+  ip netns exec $ns ip link set $peer2 up
+  ip link set $veth1 up
+  ip link set $veth2 up
+
+  echo "Add tc filter ingress->egress forwarding $veth1 <-> $veth2"
+  tc qdisc add dev $veth2 ingress
+  tc qdisc add dev $veth1 ingress
+  tc filter add dev $veth2 ingress prio 1 proto all flower \
+     action mirred egress redirect dev $veth1
+  tc filter add dev $veth1 ingress prio 1 proto all flower \
+     action mirred egress redirect dev $veth2
+
+  echo "Add tc filter egress->ingress forwarding $peer1 -> $veth1, bypassing the veth pipe"
+  tc qdisc add dev $peer1 clsact
+  tc filter add dev $peer1 egress prio 20 proto ip flower \
+     action mirred ingress redirect dev $veth1
+}
+
+function test_run() {
+  echo "Run tcp traffic"
+  ./udpgso_bench_rx -t &
+  sleep 1
+  ip netns exec $ns timeout -k 2 10 ./udpgso_bench_tx -t -l 2 -4 -D $ip_peer1 || fail "traffic failed"
+  echo "Test passed"
+}
+
+config
+test_run
+trap - EXIT
+cleanup
diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
index 6d849dc2bee0..d1d8483ac628 100644
--- a/tools/testing/selftests/perf_events/sigtrap_threads.c
+++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
@@ -62,6 +62,8 @@ static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr,
 		.remove_on_exec = 1, /* Required by sigtrap. */
 		.sigtrap = 1, /* Request synchronous SIGTRAP on event. */
 		.sig_data = TEST_SIG_DATA(addr, id),
+		.exclude_kernel = 1, /* To allow */
+		.exclude_hv = 1, /* running as !root */
 	};
 	return attr;
 }
@@ -93,9 +95,13 @@ static void *test_thread(void *arg)
 	__atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
 	iter = ctx.iterate_on; /* read */
 
-	for (i = 0; i < iter - 1; i++) {
-		__atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
-		ctx.iterate_on = iter; /* idempotent write */
+	if (iter >= 0) {
+		for (i = 0; i < iter - 1; i++) {
+			__atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
+			ctx.iterate_on = iter; /* idempotent write */
+		}
+	} else {
+		while (ctx.iterate_on);
 	}
 
 	return NULL;
@@ -208,4 +214,27 @@ TEST_F(sigtrap_threads, signal_stress)
 	EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
 }
 
+TEST_F(sigtrap_threads, signal_stress_with_disable)
+{
+	const int target_count = NUM_THREADS * 3000;
+	int i;
+
+	ctx.iterate_on = -1;
+
+	EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+	pthread_barrier_wait(&self->barrier);
+	while (__atomic_load_n(&ctx.signal_count, __ATOMIC_RELAXED) < target_count) {
+		EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_DISABLE, 0), 0);
+		EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+	}
+	ctx.iterate_on = 0;
+	for (i = 0; i < NUM_THREADS; i++)
+		ASSERT_EQ(pthread_join(self->threads[i], NULL), 0);
+	EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_DISABLE, 0), 0);
+
+	EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
+	EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
+	EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
+}
+
 TEST_HARNESS_MAIN
diff --git a/tools/verification/dot2/dot2c.py b/tools/verification/dot2/dot2c.py
index fa73353f7e56..be8a364a469b 100644
--- a/tools/verification/dot2/dot2c.py
+++ b/tools/verification/dot2/dot2c.py
@@ -111,7 +111,7 @@ class Dot2c(Automata):
 
     def format_aut_init_header(self):
         buff = []
-        buff.append("struct %s %s = {" % (self.struct_automaton_def, self.var_automaton_def))
+        buff.append("static struct %s %s = {" % (self.struct_automaton_def, self.var_automaton_def))
         return buff
 
     def __get_string_vector_per_line_content(self, buff):
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e30f1b4ecfa5..1376a47fedee 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4839,6 +4839,12 @@ struct compat_kvm_clear_dirty_log {
 	};
 };
 
+long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+				     unsigned long arg)
+{
+	return -ENOTTY;
+}
+
 static long kvm_vm_compat_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -4847,6 +4853,11 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 
 	if (kvm->mm != current->mm || kvm->vm_dead)
 		return -EIO;
+
+	r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
+	if (r != -ENOTTY)
+		return r;
+
 	switch (ioctl) {
 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	case KVM_CLEAR_DIRTY_LOG: {